├── app ├── config │ ├── production.json │ ├── test.json │ ├── default.json │ └── custom-environment-variables.json ├── .prettierrc ├── src │ ├── routes │ │ └── v2 │ │ │ ├── health.js │ │ │ ├── fileTypes.js │ │ │ ├── index.js │ │ │ └── template.js │ ├── middleware │ │ ├── openapi.js │ │ ├── authorizedParty.js │ │ └── authorization.js │ ├── docs │ │ └── index.js │ └── components │ │ ├── log.js │ │ ├── upload.js │ │ ├── carboneRender.js │ │ ├── carboneCopyApi.js │ │ ├── utils.js │ │ └── fileCache.js ├── .dockerignore ├── docker │ ├── bindPython.sh │ └── python ├── lcov-fix.js ├── tests │ ├── unit │ │ └── components │ │ │ ├── log.spec.js │ │ │ ├── validation │ │ │ ├── middleware.spec.js │ │ │ ├── modelValidation.spec.js │ │ │ └── validatorUtils.spec.js │ │ │ ├── utils.spec.js │ │ │ └── authorization.spec.js │ └── common │ │ └── helper.js ├── bin │ └── www ├── package.json ├── cacheCleaner.js ├── app.js ├── USAGE.md └── README.md ├── .gitattributes ├── _config.yml ├── examples ├── template.txt ├── .prettierrc ├── package.json ├── 01-authenticated.sh ├── 01-unauthenticated.sh ├── README.md ├── server.js ├── doc-caching.js └── package-lock.json ├── .github ├── ISSUE_TEMPLATE │ ├── custom.md │ ├── feature_request.md │ └── bug_report.md ├── environments │ ├── values.pr.yaml │ ├── values.dev.yaml │ ├── values.prod.yaml │ └── values.test.yaml ├── CODEOWNERS ├── workflows │ ├── charts-release.yaml │ ├── on-pr-opened.yaml │ ├── on-pr-closed.yaml │ ├── codeql-analysis.yaml │ ├── unit-tests.yaml │ └── on-push.yaml ├── pull_request_template.md └── actions │ ├── build-push-container │ └── action.yaml │ └── deploy-to-environment │ └── action.yaml ├── COMPLIANCE.yaml ├── charts └── cdogs │ ├── templates │ ├── serviceaccount.yaml │ ├── service.yaml │ ├── networkpolicy.yaml │ ├── persistentvolumeclaim.yaml │ ├── route.yaml │ ├── hpa.yaml │ ├── secret.yaml │ ├── cronjob.yaml │ ├── _helpers.tpl │ ├── NOTES.txt │ ├── deploymentconfig.yaml │ └── configmap.yaml │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ └── values.yaml ├── .gitignore ├── .dockerignore ├── .editorconfig ├── CONTRIBUTING.md ├── .codeclimate.yml ├── k6 ├── README.md ├── sample_contexts.json └── templating.js ├── bcgovpubcode.yml ├── Dockerfile ├── SECURITY.md ├── CODE-OF-CONDUCT.md └── README.md /app/config/production.json: -------------------------------------------------------------------------------- 1 | {} 2 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto eol=lf 2 | -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-cayman -------------------------------------------------------------------------------- /examples/template.txt: -------------------------------------------------------------------------------- 1 | Hello {d.firstName} {d.lastName}! 
2 | -------------------------------------------------------------------------------- /app/.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "semi": true, 3 | "singleQuote": true 4 | } 5 | -------------------------------------------------------------------------------- /examples/.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "semi": true, 3 | "singleQuote": true 4 | } 5 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/custom.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Custom issue template 3 | about: Describe this issue template's purpose here. 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | -------------------------------------------------------------------------------- /app/config/test.json: -------------------------------------------------------------------------------- 1 | { 2 | "keycloak": { 3 | "clientId": "clientId", 4 | "clientSecret": "clientSecret" 5 | }, 6 | "server": { 7 | "logLevel": "silent" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /.github/environments/values.pr.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | authentication: true 4 | 5 | persistentVolumeClaim: 6 | enabled: false 7 | 8 | cronJob: 9 | enabled: false 10 | 11 | fluentBit: 12 | enabled: false 13 | -------------------------------------------------------------------------------- /app/src/routes/v2/health.js: -------------------------------------------------------------------------------- 1 | const healthRouter = require('express').Router(); 2 | 3 | /** Returns the status of correspondent APIs */ 4 | healthRouter.get('/', (_req, res) => { 5 | res.sendStatus(200); 6 | }); 7 | 8 | module.exports = healthRouter; 9 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # These users will be the default owners for everything in the repo. 2 | # Unless a later match takes precedence, the following users will be 3 | # requested for review when someone opens a pull request. 4 | @TimCsaky @jatindersingh93 @norrisng-bc 5 | -------------------------------------------------------------------------------- /COMPLIANCE.yaml: -------------------------------------------------------------------------------- 1 | name: compliance 2 | description: | 3 | This document is used to track a projects PIA and STRA 4 | compliance. 
5 | spec: 6 | - name: PIA 7 | status: TBD 8 | last-updated: '2020-01-17T23:00:00.000Z' 9 | - name: STRA 10 | status: TBD 11 | last-updated: '2020-01-17T23:00:00.000Z' 12 | -------------------------------------------------------------------------------- /app/config/default.json: -------------------------------------------------------------------------------- 1 | { 2 | "server": { 3 | "bodyLimit": "100mb", 4 | "logLevel": "http", 5 | "port": "3000" 6 | }, 7 | "carbone": { 8 | "cacheDir": "/var/lib/file-cache/data", 9 | "cacheSize": "2GB", 10 | "converterFactoryTimeout": "60000", 11 | "formFieldName": "template", 12 | "startCarbone": "true", 13 | "uploadCount": "1", 14 | "uploadSize": "25MB" 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /charts/cdogs/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.enabled -}} 2 | --- 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: {{ include "cdogs.serviceAccountName" . }} 7 | labels: 8 | {{- include "cdogs.labels" . | nindent 4 }} 9 | {{- with .Values.serviceAccount.annotations }} 10 | annotations: 11 | {{- toYaml . | nindent 4 }} 12 | {{- end }} 13 | {{- end }} 14 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .gradle 3 | .nyc_output 4 | .scannerwork 5 | build 6 | coverage 7 | dist 8 | node_modules 9 | 10 | # local env files 11 | local.* 12 | local-*.* 13 | .env.local 14 | .env.*.local 15 | 16 | # Log files 17 | npm-debug.log* 18 | yarn-debug.log* 19 | yarn-error.log* 20 | 21 | # Editor directories and files 22 | .idea 23 | .vscode 24 | *.iml 25 | *.suo 26 | *.ntvs* 27 | *.njsproj 28 | *.sln 29 | *.sw* 30 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .gradle 3 | .nyc_output 4 | .scannerwork 5 | build 6 | coverage 7 | dist 8 | node_modules 9 | 10 | # local env files 11 | local.* 12 | local-*.* 13 | .env.local 14 | .env.*.local 15 | 16 | # Log files 17 | npm-debug.log* 18 | yarn-debug.log* 19 | yarn-error.log* 20 | 21 | # Editor directories and files 22 | .idea 23 | .vscode 24 | *.iml 25 | *.suo 26 | *.ntvs* 27 | *.njsproj 28 | *.sln 29 | *.sw* 30 | -------------------------------------------------------------------------------- /app/.dockerignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .gradle 3 | .nyc_output 4 | .scannerwork 5 | build 6 | coverage 7 | dist 8 | node_modules 9 | 10 | # local env files 11 | local.* 12 | local-*.* 13 | .env.local 14 | .env.*.local 15 | 16 | # Log files 17 | npm-debug.log* 18 | yarn-debug.log* 19 | yarn-error.log* 20 | 21 | # Editor directories and files 22 | .idea 23 | .vscode 24 | *.iml 25 | *.suo 26 | *.ntvs* 27 | *.njsproj 28 | *.sln 29 | *.sw* 30 | -------------------------------------------------------------------------------- /app/docker/bindPython.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # find where python was installed 3 | encodings_root=$(find /usr/lib/python* -type d -name 'encodings') 4 | python_root=$(dirname $encodings_root) 5 | 6 | mv ${APP_ROOT}/docker/python /usr/lib/libreoffice/program 7 | ln -sf /usr/bin/python3 
/usr/lib/libreoffice/program/python.bin 8 | ln -sf ${python_root} /usr/lib/libreoffice/program/python-core 9 | chmod a+rx /usr/lib/libreoffice/program/python 10 | 11 | exit 0 12 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | charset = utf-8 5 | end_of_line = lf 6 | insert_final_newline = true 7 | trim_trailing_whitespace = true 8 | 9 | [*.html] 10 | indent_style = space 11 | indent_size = 2 12 | 13 | [*.{css,js,json,jsx,scss,ts,tsx,vue}] 14 | indent_style = space 15 | indent_size = 2 16 | 17 | [.{babelrc,eslintrc}] 18 | indent_style = space 19 | indent_size = 2 20 | 21 | [Jenkinsfile*] 22 | indent_style = space 23 | indent_size = 2 24 | -------------------------------------------------------------------------------- /charts/cdogs/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /examples/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "examples", 3 | "private": true, 4 | "type": "module", 5 | "version": "1.0.0", 6 | "description": "", 7 | "main": "server.js", 8 | "scripts": { 9 | "test": "echo \"Error: no test specified\" && exit 1", 10 | "start": "node server.js" 11 | }, 12 | "author": "", 13 | "license": "Apache-2.0", 14 | "dependencies": { 15 | "fetch-blob": "^3.1.3", 16 | "formdata-polyfill": "^4.0.10", 17 | "node-fetch": "^3.1.0" 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /charts/cdogs/templates/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ include "cdogs.fullname" . }} 6 | labels: 7 | {{- include "cdogs.labels" . | nindent 4 }} 8 | spec: 9 | type: {{ .Values.service.type }} 10 | ports: 11 | - name: {{ .Values.service.portName }} 12 | port: {{ .Values.service.port }} 13 | protocol: TCP 14 | targetPort: {{ .Values.service.port }} 15 | selector: 16 | {{- include "cdogs.selectorLabels" . 
| nindent 4 }} 17 | -------------------------------------------------------------------------------- /app/src/routes/v2/fileTypes.js: -------------------------------------------------------------------------------- 1 | 2 | const Problem = require('api-problem'); 3 | 4 | const fileTypesRouter = require('express').Router(); 5 | const { fileTypes } = require('../../components/carboneRender'); 6 | 7 | /** Returns the dictionary of input/output file types */ 8 | fileTypesRouter.get('/fileTypes', (_req, res, next) => { 9 | if (fileTypes instanceof Object) { 10 | res.status(200).json({ dictionary: fileTypes }); 11 | } else { 12 | next(new Problem(500, { detail: 'Unable to get file types dictionary' })); 13 | } 14 | }); 15 | 16 | module.exports = fileTypesRouter; 17 | -------------------------------------------------------------------------------- /app/lcov-fix.js: -------------------------------------------------------------------------------- 1 | // 2 | // see stackoverflow (Since Jest 25, coverage reports are having different source path) 3 | // https://stackoverflow.com/questions/60323177/since-jest-25-coverage-reports-are-having-a-different-source-path 4 | const fs = require('fs'); 5 | 6 | const file = './coverage/lcov.info'; 7 | 8 | fs.readFile(file, 'utf8', (err,data) => { 9 | if (err) { 10 | return console.error(err); 11 | } 12 | const result = data.replace(/src/g, `${process.cwd()}/src`); 13 | 14 | fs.writeFile(file, result, 'utf8', (err) => { 15 | if (err) return console.error(err); 16 | }); 17 | }); 18 | -------------------------------------------------------------------------------- /app/src/middleware/openapi.js: -------------------------------------------------------------------------------- 1 | const { dump } = require('js-yaml'); 2 | 3 | const { getDocHTML, getSpec } = require('../docs'); 4 | 5 | module.exports = { 6 | /** OpenAPI Docs */ 7 | getDocs: (version) => (_req, res) => { 8 | res.send(getDocHTML(version)); 9 | }, 10 | 11 | /** OpenAPI JSON Spec */ 12 | getJsonSpec: (version) => (_req, res) => { 13 | res.status(200).json(getSpec(version)); 14 | }, 15 | 16 | /** OpenAPI YAML Spec */ 17 | getYamlSpec: (version) => (_req, res) => { 18 | res.status(200).type('application/yaml').send(dump(getSpec(version))); 19 | } 20 | }; 21 | -------------------------------------------------------------------------------- /.github/workflows/charts-release.yaml: -------------------------------------------------------------------------------- 1 | name: Release Charts 2 | 3 | on: 4 | push: 5 | paths: 6 | - 'charts/**' 7 | branches: 8 | - master 9 | 10 | jobs: 11 | release: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v4 16 | with: 17 | fetch-depth: 0 18 | 19 | - name: Configure Git 20 | run: | 21 | git config user.name "$GITHUB_ACTOR" 22 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com" 23 | 24 | - name: Run chart-releaser 25 | uses: helm/chart-releaser-action@v1.5.0 26 | env: 27 | CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 28 | -------------------------------------------------------------------------------- /charts/cdogs/templates/networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.networkPolicy.enabled }} 2 | --- 3 | apiVersion: networking.k8s.io/v1 4 | kind: NetworkPolicy 5 | metadata: 6 | name: allow-openshift-ingress-to-{{ include "cdogs.fullname" . }}-app 7 | labels: 8 | {{- include "cdogs.labels" . 
| nindent 4 }} 9 | spec: 10 | ingress: 11 | - from: 12 | - namespaceSelector: 13 | matchLabels: 14 | network.openshift.io/policy-group: ingress 15 | ports: 16 | - port: {{ default "8080" .Values.config.configMap.SERVER_PORT | atoi }} 17 | protocol: TCP 18 | podSelector: 19 | matchLabels: {{- include "cdogs.selectorLabels" . | nindent 6 }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | #### Is your feature request related to a problem? Please describe. 11 | 12 | 13 | #### Describe the solution you'd like 14 | 15 | 16 | #### Describe alternatives you've considered 17 | 18 | 19 | #### Additional context 20 | 21 | -------------------------------------------------------------------------------- /app/config/custom-environment-variables.json: -------------------------------------------------------------------------------- 1 | { 2 | "keycloak": { 3 | "clientId": "KC_CLIENTID", 4 | "clientSecret": "KC_CLIENTSECRET", 5 | "enabled": "KC_ENABLED", 6 | "publicKey": "KC_PUBLICKEY", 7 | "realm": "KC_REALM", 8 | "serverUrl": "KC_SERVERURL" 9 | }, 10 | "server": { 11 | "bodyLimit": "SERVER_BODYLIMIT", 12 | "logFile": "SERVER_LOGFILE", 13 | "logLevel": "SERVER_LOGLEVEL", 14 | "port": "SERVER_PORT" 15 | }, 16 | "carbone": { 17 | "cacheDir": "CACHE_DIR", 18 | "cacheSize": "CACHE_SIZE", 19 | "converterFactoryTimeout": "CONVERTER_FACTORY_TIMEOUT", 20 | "formFieldName": "UPLOAD_FIELD_NAME", 21 | "startCarbone": "START_CARBONE", 22 | "uploadCount": "UPLOAD_FILE_COUNT", 23 | "uploadSize": "UPLOAD_FILE_SIZE" 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /charts/cdogs/templates/persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | {{- $pvcName := printf "%s-%s" (include "cdogs.configname" .) "cache" }} 2 | {{- $pvc := (lookup "v1" "PersistentVolumeClaim" .Release.Namespace $pvcName ) }} 3 | 4 | {{- if and (not $pvc) (and .Values.persistentVolumeClaim.enabled) }} 5 | --- 6 | apiVersion: v1 7 | kind: PersistentVolumeClaim 8 | metadata: 9 | {{- if not .Values.config.releaseScoped }} 10 | annotations: 11 | "helm.sh/resource-policy": keep 12 | {{- else }} 13 | labels: {{ include "cdogs.labels" . | nindent 4 }} 14 | {{- end }} 15 | name: {{ include "cdogs.configname" . }}-cache 16 | spec: 17 | accessModes: 18 | - ReadWriteMany 19 | storageClassName: {{ .Values.persistentVolumeClaim.storageClassName }} 20 | resources: 21 | requests: 22 | storage: {{ .Values.persistentVolumeClaim.storageSize }} 23 | {{- end }} 24 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to contribute 2 | 3 | Government employees, public and members of the private sector are encouraged to contribute to the repository by **forking and submitting a pull request**. 4 | 5 | (If you are new to GitHub, you might start with a [basic tutorial](https://help.github.com/articles/set-up-git) and check out a more detailed guide to [pull requests](https://help.github.com/articles/using-pull-requests/).) 
6 | 7 | Pull requests will be evaluated by the repository guardians on a schedule and if deemed beneficial will be committed to the master. 8 | 9 | All contributors retain the original copyright to their stuff, but by contributing to this project, you grant a world-wide, royalty-free, perpetual, irrevocable, non-exclusive, transferable license to all users **under the terms of the [license](./LICENSE) under which this project is distributed**. 10 | -------------------------------------------------------------------------------- /charts/cdogs/templates/route.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.route.enabled -}} 2 | --- 3 | apiVersion: route.openshift.io/v1 4 | kind: Route 5 | metadata: 6 | name: {{ include "cdogs.fullname" . }} 7 | labels: 8 | {{- include "cdogs.labels" . | nindent 4 }} 9 | {{- with .Values.route.annotations }} 10 | annotations: 11 | {{- toYaml . | nindent 4 }} 12 | {{- end }} 13 | spec: 14 | host: {{ .Values.route.host | quote }} 15 | {{- if .Values.route.path }} 16 | path: {{ .Values.route.path }} 17 | {{- end }} 18 | port: 19 | targetPort: {{ .Values.service.portName }} 20 | tls: 21 | insecureEdgeTerminationPolicy: {{ .Values.route.tls.insecureEdgeTerminationPolicy }} 22 | termination: {{ .Values.route.tls.termination }} 23 | to: 24 | kind: Service 25 | name: {{ include "cdogs.fullname" . }} 26 | weight: 100 27 | wildcardPolicy: {{ .Values.route.wildcardPolicy }} 28 | {{- end }} 29 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | #### Describe the bug 11 | 12 | 13 | #### To Reproduce 14 | 15 | Steps to reproduce the behavior: 16 | 17 | 1. Go to '...' 18 | 2. Click on '....' 19 | 3. Scroll down to '....' 20 | 4. See error 21 | 22 | #### Expected behavior 23 | 24 | 25 | #### Screenshots 26 | 27 | 28 | #### Desktop (please complete the following information): 29 | 30 | - OS: [e.g. iOS] 31 | - Browser [e.g. chrome, safari] 32 | - Version [e.g. 22] 33 | 34 | #### Smartphone (please complete the following information): 35 | 36 | - Device: [e.g. iPhone6] 37 | - OS: [e.g. iOS8.1] 38 | - Browser [e.g. stock browser, safari] 39 | - Version [e.g. 
22] 40 | 41 | #### Additional context 42 | 43 | -------------------------------------------------------------------------------- /.codeclimate.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | exclude_patterns: 3 | - config/ 4 | - db/ 5 | - dist/ 6 | - features/ 7 | - "**/node_modules/" 8 | - script/ 9 | - "**/spec/" 10 | - "**/test/" 11 | - "**/tests/" 12 | - Tests/ 13 | - "**/vendor/" 14 | - "**/*_test.go" 15 | - "**/*.d.ts" 16 | plugins: 17 | csslint: 18 | enabled: true 19 | editorconfig: 20 | enabled: true 21 | checks: 22 | END_OF_LINE: 23 | enabled: false 24 | INDENTATION_SPACES: 25 | enabled: false 26 | INDENTATION_SPACES_AMOUNT: 27 | enabled: false 28 | TRAILINGSPACES: 29 | enabled: false 30 | # eslint: 31 | # enabled: true 32 | # channel: "eslint-7" 33 | # config: 34 | # config: app/.eslintrc.js 35 | fixme: 36 | enabled: true 37 | git-legal: 38 | enabled: true 39 | markdownlint: 40 | enabled: true 41 | checks: 42 | MD002: 43 | enabled: false 44 | MD013: 45 | enabled: false 46 | MD029: 47 | enabled: false 48 | MD046: 49 | enabled: false 50 | nodesecurity: 51 | enabled: true 52 | sass-lint: 53 | enabled: true 54 | -------------------------------------------------------------------------------- /app/tests/unit/components/log.spec.js: -------------------------------------------------------------------------------- 1 | const config = require('config'); 2 | 3 | const getLogger = require('../../../src/components/log'); 4 | const httpLogger = require('../../../src/components/log').httpLogger; 5 | 6 | describe('getLogger', () => { 7 | const assertLogger = (log) => { 8 | expect(log).toBeTruthy(); 9 | expect(typeof log).toBe('object'); 10 | expect(typeof log.pipe).toBe('function'); 11 | expect(log.exitOnError).toBeFalsy(); 12 | expect(log.format).toBeTruthy(); 13 | expect(log.level).toBe(config.get('server.logLevel')); 14 | expect(log.transports).toHaveLength(1); 15 | }; 16 | 17 | it('should return a winston logger', () => { 18 | const result = getLogger(); 19 | assertLogger(result); 20 | }); 21 | 22 | it('should return a child winston logger with metadata overrides', () => { 23 | const result = getLogger('test'); 24 | assertLogger(result); 25 | }); 26 | }); 27 | 28 | describe('httpLogger', () => { 29 | it('should return a winston middleware function', () => { 30 | const result = httpLogger; 31 | 32 | expect(result).toBeTruthy(); 33 | expect(typeof result).toBe('function'); 34 | expect(result.length).toBe(3); 35 | }); 36 | }); 37 | -------------------------------------------------------------------------------- /app/tests/common/helper.js: -------------------------------------------------------------------------------- 1 | const express = require('express'); 2 | const Problem = require('api-problem'); 3 | 4 | /** This class provides helper utilities that are commonly used in tests */ 5 | const helper = { 6 | /** 7 | * Creates a stripped-down simple Express server object 8 | * @param {string} basePath The path to mount the `router` on 9 | * @param {object} router An express router object to mount 10 | * @returns A simple express server object with `router` mounted to `basePath` 11 | */ 12 | expressHelper: (basePath, router) => { 13 | const app = express(); 14 | 15 | app.use(express.json()); 16 | app.use(express.urlencoded({ 17 | extended: false 18 | })); 19 | app.use(basePath, router); 20 | 21 | // Handle 500 22 | // eslint-disable-next-line no-unused-vars 23 | app.use((err, _req, res, _next) => { 24 | if (err instanceof Problem) { 25 | 
err.send(res); 26 | } else { 27 | new Problem(500, { 28 | details: (err.message) ? err.message : err 29 | }).send(res); 30 | } 31 | }); 32 | 33 | // Handle 404 34 | app.use((_req, res) => { 35 | new Problem(404).send(res); 36 | }); 37 | 38 | return app; 39 | }, 40 | }; 41 | 42 | module.exports = helper; 43 | -------------------------------------------------------------------------------- /charts/cdogs/templates/hpa.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.autoscaling.enabled }} 2 | --- 3 | apiVersion: autoscaling/v2 4 | kind: HorizontalPodAutoscaler 5 | metadata: 6 | name: {{ include "cdogs.fullname" . }} 7 | labels: 8 | {{- include "cdogs.labels" . | nindent 4 }} 9 | spec: 10 | scaleTargetRef: 11 | apiVersion: apps.openshift.io/v1 12 | kind: DeploymentConfig 13 | name: {{ include "cdogs.fullname" . }} 14 | minReplicas: {{ .Values.autoscaling.minReplicas }} 15 | maxReplicas: {{ .Values.autoscaling.maxReplicas }} 16 | metrics: 17 | {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} 18 | - type: Resource 19 | resource: 20 | name: cpu 21 | target: 22 | type: Utilization 23 | averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} 24 | {{- end }} 25 | {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} 26 | - type: Resource 27 | resource: 28 | name: memory 29 | target: 30 | type: Utilization 31 | averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} 32 | {{- end }} 33 | {{- with .Values.autoscaling.behavior }} 34 | behavior: 35 | {{- toYaml . | nindent 4 }} 36 | {{- end }} 37 | {{- end }} 38 | -------------------------------------------------------------------------------- /examples/01-authenticated.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | # Retrieve a valid bearer token from keycloak. 4 | token=$(curl --request POST \ 5 | --url 'https://dev.loginproxy.gov.bc.ca/auth/realms/your-realm-name/protocol/openid-connect/token' \ 6 | -H 'content-type: application/x-www-form-urlencoded' \ 7 | --data grant_type=client_credentials \ 8 | --data client_id="$CLIENT_ID" \ 9 | --data client_secret="$CLIENT_SECRET" | jq -r '.access_token') 10 | 11 | # The template to be rendered is base64 encoded so we can POST the info to CDOGS. 12 | base64_encoded_template=$(base64 -i template.txt) 13 | 14 | # This sends data to CDOGS so that our template.txt can be rendered out to file test.pdf. 
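# In the JSON body below, "data" supplies the values substituted into the {d.*}
# placeholders of template.txt, "template" carries the base64-encoded file along
# with its fileType and encodingType, and "options" controls the output:
# "convertTo" selects the output format and "reportName" names the generated
# file (it may itself contain {d.*} placeholders, as in the example here).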
15 | curl --request POST \ 16 | --url 'https://cdogs-dev.api.gov.bc.ca/api/v2/template/render' \ 17 | -H "Authorization: Bearer $token" \ 18 | -H 'content-type: application/json' \ 19 | -o 'test.pdf' \ 20 | --data-binary @- << EOF 21 | { 22 | "data": { 23 | "firstName": "Jane", 24 | "lastName": "Smith" 25 | }, 26 | "template": { 27 | "encodingType": "base64", 28 | "fileType": "txt", 29 | "content": "$base64_encoded_template" 30 | }, 31 | "options": { 32 | "convertTo": "pdf", 33 | "overwrite": true, 34 | "reportName": "{d.firstName}-{d.lastName}.pdf" 35 | } 36 | } 37 | EOF 38 | -------------------------------------------------------------------------------- /.github/environments/values.dev.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | authentication: true 4 | 5 | config: 6 | enabled: true 7 | configMap: 8 | KC_ENABLED: "true" 9 | KC_PUBLICKEY: >- 10 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsmuiI3bzde2avKEZd8P/ivnUFL9JxwfykQ9eC6qULkkALW4Nj+m0rH+yTuWTWPwZsYyICtWXbQui6Yh+EP0+bTeDzeDaD1oSUgqC4c9EySs64sGPQLE3sHllwONo8AegNQSiPw+KHE2Rf3ky61KuQHOhrszHKq1qT71ct5iPK9oeZ4vkr6A0XfNNOzih6Jp+XrmNmU85+ssNJ7Oy9plwxHBC73Ff3Jw8UF2dRPPLOJT3oAtYJYD4uuQebPq3bKv2n8iQftLG80l1frM1iG/jddoKItSYBRYsi4Mc65gGgkfsTQGk+Up0SULpO/nIASHWLSYNZOTiqAqMgkWQ+Ga9vQIDAQAB 11 | KC_REALM: comsvcauth 12 | KC_SERVERURL: "https://dev.loginproxy.gov.bc.ca/auth" 13 | 14 | SERVER_BODYLIMIT: 100mb 15 | SERVER_LOGLEVEL: http 16 | SERVER_PORT: "3000" 17 | 18 | CACHE_DIR: "/var/lib/file-cache/data" 19 | CACHE_SIZE: 2GB 20 | CONVERTER_FACTORY_TIMEOUT: "60000" 21 | START_CARBONE: "true" 22 | UPLOAD_FIELD_NAME: template 23 | UPLOAD_FILE_COUNT: "1" 24 | UPLOAD_FILE_SIZE: 25MB 25 | 26 | persistentVolumeClaim: 27 | # -- Specifies whether a persistent volume claim should be created 28 | enabled: true 29 | # -- Default storage class type 30 | storageClassName: netapp-file-standard 31 | # -- PVC Storage size (use M or G, not Mi or Gi) 32 | storageSize: 5G 33 | 34 | fluentBit: 35 | enabled: true 36 | -------------------------------------------------------------------------------- /.github/environments/values.prod.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | authentication: true 4 | 5 | config: 6 | enabled: true 7 | configMap: 8 | KC_ENABLED: "true" 9 | KC_PUBLICKEY: >- 10 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtq5+xTKB1FRCwufdluEx8mNIZU3eXpo91QUrqlpq226HcyF9WPihdpuZzdlS+kW6EC2prZfJpvDvzT/Og4fx9ZoQVKV0uYvWvEg5Sc3ikQMfO0ngqUC6FkxElj9LzpijpdEQd0JAmmem2329lITwWroX70imLhYu3aY+Q3hSY2lg5OJCBw0I2pulfQiMsGn3vgkwSvmIsDhOgSnwPJhPxD9TY6kjvTff6LB1bFSwlxh1l8dRBqRabDunoHn/uHfhVAHB/SgPkvP5Ybc5bP/idBNP0kiQcWiT02Z7aB1r7Fnd5YH9FEDhhF5OIYYBzHP4hPm6AgqG/IDhAuiqf/F9eQIDAQAB 11 | KC_REALM: comsvcauth 12 | KC_SERVERURL: "https://loginproxy.gov.bc.ca/auth" 13 | 14 | SERVER_BODYLIMIT: 100mb 15 | SERVER_LOGLEVEL: http 16 | SERVER_PORT: "3000" 17 | 18 | CACHE_DIR: "/var/lib/file-cache/data" 19 | CACHE_SIZE: 2GB 20 | CONVERTER_FACTORY_TIMEOUT: "60000" 21 | START_CARBONE: "true" 22 | UPLOAD_FIELD_NAME: template 23 | UPLOAD_FILE_COUNT: "1" 24 | UPLOAD_FILE_SIZE: 25MB 25 | 26 | persistentVolumeClaim: 27 | # -- Specifies whether a persistent volume claim should be created 28 | enabled: true 29 | # -- Default storage class type 30 | storageClassName: netapp-file-standard 31 | # -- PVC Storage size (use M or G, not Mi or Gi) 32 | storageSize: 5G 33 | 34 | fluentBit: 35 | enabled: true 36 | 
-------------------------------------------------------------------------------- /.github/environments/values.test.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | authentication: true 4 | 5 | config: 6 | enabled: true 7 | configMap: 8 | KC_ENABLED: "true" 9 | KC_PUBLICKEY: >- 10 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1dLa3e2Q65cvzoKYdqSu/Qkoi5fbG9FF++u0TVm3461sS7uLo2aIviNXHrfzgu4RKnzoy51VXhAfeT58mGiAweJiRyILr0OtwbDEd/W9D4Y8FdWV37Ltmb4D1M0kCHPHo9wkl5aGZj0VFgExdFXenSDlNuglZpNuAVOwWTL7hX1Cc+5Z5hX8891fcxcdAF/GkDGcyIteHV04aeoCWEMak0gSpGsNUgEhn7FUHI6maqiaBdoyOWn0Jbw/JpqewcD9ZYFLzZTMuBssTXJt4ipTibeqbI/bAdCtxXLJgmpkyMSA/KQeHLmBPdq3ayctoDRpqVMHsnC31Hm7Fz8aDHKaDwIDAQAB 11 | KC_REALM: comsvcauth 12 | KC_SERVERURL: "https://test.loginproxy.gov.bc.ca/auth" 13 | 14 | SERVER_BODYLIMIT: 100mb 15 | SERVER_LOGLEVEL: http 16 | SERVER_PORT: "3000" 17 | 18 | CACHE_DIR: "/var/lib/file-cache/data" 19 | CACHE_SIZE: 2GB 20 | CONVERTER_FACTORY_TIMEOUT: "60000" 21 | START_CARBONE: "true" 22 | UPLOAD_FIELD_NAME: template 23 | UPLOAD_FILE_COUNT: "1" 24 | UPLOAD_FILE_SIZE: 25MB 25 | 26 | persistentVolumeClaim: 27 | # -- Specifies whether a persistent volume claim should be created 28 | enabled: true 29 | # -- Default storage class type 30 | storageClassName: netapp-file-standard 31 | # -- PVC Storage size (use M or G, not Mi or Gi) 32 | storageSize: 5G 33 | 34 | fluentBit: 35 | enabled: true 36 | -------------------------------------------------------------------------------- /examples/01-unauthenticated.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | # Examples below are using a locally running docker image without keycloak. 4 | 5 | # This uploads a template to CDOGS, caches it for later rendering, and returns a resulting template hash. 6 | template_hash=$(curl -v -F template=@template.txt http://localhost:3000/api/v2/template) 7 | 8 | echo "template_hash $template_hash" 9 | 10 | # Response body is a template string ex. 11 | # bffe2a344ec1f8fb4fc1a1496df4ca29277da310f64eaa8748a1888b7a2198c5 12 | 13 | # If the template is already cached an error is returned: 14 | # template_hash { 15 | # "type":"https://httpstatuses.com/405", 16 | # "title":"Method Not Allowed", 17 | # "status":405, 18 | # "detail":"File already cached. Hash 'bffe2a344ec1f8fb4fc1a1496df4ca29277da310f64eaa8748a1888b7a2198c5'." 19 | # } 20 | 21 | # This sends data to CDOGS so that our template.txt can be rendered out to file test.pdf. 22 | curl --request POST \ 23 | --url "http://localhost:3000/api/v2/template/$template_hash/render" \ 24 | -H 'content-type: application/json' \ 25 | -o 'test.pdf' \ 26 | --data-binary @- << EOF 27 | { 28 | "data": { 29 | "firstName": "Joe", 30 | "lastName": "Smith" 31 | }, 32 | "options": { 33 | "convertTo": "pdf", 34 | "overwrite": true, 35 | "reportName": "{d.firstName}_{d.lastName}.pdf" 36 | } 37 | } 38 | EOF 39 | -------------------------------------------------------------------------------- /k6/README.md: -------------------------------------------------------------------------------- 1 | # Load testing with K6 2 | 3 | [K6](https://k6.io/docs/) is a load testing tool. 4 | Using the K6 command line interface, you can run the scripts found in this directory to test the performance of CDOGS API features. 5 | 6 | Note: It is important to not run load tests against production environments. 
Always check with your server administrators before load testing in a shared server environment. 7 | 8 | ## Prerequesites 9 | 10 | The simple test scripts (for example: [templating.js](templating.js) can be updated with actual values specific to your envionment (for example: your CDOGS api url and authorization token) or could also pass these values using parameters of the K6 command used to trigger the test. See more K6 details on how [Environment Variables](https://k6.io/docs/using-k6/environment-variables/) work. 11 | 12 | ### Running the tests 13 | 14 | ```sh 15 | k6 run -e API_PATH=http://cdogs-dev.api.gov.bc.ca/api/v2 -e AUTH_TOKEN=InsertJwtHere templating.js 16 | ``` 17 | 18 | To enable logging, add `--log-output=file=./output.json --log-format=json`. At the moment, the tests currently only log the HTTP response code. 19 | 20 | By default, the tests will make 200 evenly-spaced requests within 1 minute. To increase the number of requests the tests will make, add `-e RATE=x` (`x` is a multiplier that gets applied against the rate limit being tested against). 21 | 22 | To change the rate limit being tested against, add `-e RATE_LIMIT=300`. By default, this value is `200`. 23 | -------------------------------------------------------------------------------- /bcgovpubcode.yml: -------------------------------------------------------------------------------- 1 | --- 2 | data_management_roles: 3 | data_custodian: Fraser Marshall 4 | product_owner: Shabari Khaniyan Kunnumel 5 | product_external_dependencies: 6 | identity_authorization: 7 | - Custom-Keycloak 8 | notification_standard: [] 9 | product_information: 10 | api_specifications: 11 | - https://cdogs.api.gov.bc.ca/api/v2/docs 12 | business_capabilities_custom: Rich document templating support,Merge complex datasets 13 | business_capabilities_standard: 14 | - Other 15 | ministry: 16 | - Water, Land and Resource Stewardship 17 | product_acronym: CDOGS 18 | product_description: >- 19 | Hosted API service to Leverage your structured datasets and your business 20 | templates to automatically populate printable documents, spreadsheets, 21 | presentations, or PDFs 22 | product_name: Common Document Generation Service 23 | product_status: stable 24 | product_urls: 25 | - https://api.gov.bc.ca/devportal/api-directory/3181?preview=false 26 | program_area: NRIDS 27 | product_technology_information: 28 | backend_frameworks: 29 | - name: Express 30 | version: 4.18.2 31 | - name: Other 32 | version: Carbone 33 | backend_languages_version: 34 | - name: JavaScript 35 | version: ecmaVersion 9 / es2018 36 | ci_cd_tools: 37 | - Jenkins 38 | data_storage_platforms: 39 | - Other 40 | data_storage_platforms_custom: PVC Cache 41 | frontend_languages: [] 42 | hosting_platforms: 43 | - Private-Cloud-Openshift 44 | version: 1 45 | -------------------------------------------------------------------------------- /app/src/middleware/authorizedParty.js: -------------------------------------------------------------------------------- 1 | const atob = require('atob'); 2 | const Problem = require('api-problem'); 3 | 4 | /** Authorized Party Middleware 5 | * This middleware will add a property to the request: authorizedParty. 6 | * 7 | * We will use the azp (Authorized Party) from the JWT and store that as 8 | * authorizedParty. 
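 * (Note: the JWT is only base64-decoded here to read the azp claim; signature
 * verification is handled separately by the authenticate middleware in authorization.js.)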
9 | * 10 | * @see module:keycloak 11 | */ 12 | 13 | const authorizedParty = async (req, res, next) => { 14 | try { 15 | const token = req.headers.authorization.split(' ')[1]; 16 | const base64Url = token.split('.')[1]; 17 | const base64 = base64Url.replace('-', '+').replace('_', '/'); 18 | const jwt = atob.atob(base64); 19 | const jsonWebToken = JSON.parse(jwt); 20 | req.authorizedParty = jsonWebToken.azp; 21 | } catch (err) { 22 | req.authorizedParty = undefined; 23 | } 24 | next(); 25 | }; 26 | 27 | /** Authorized Party Validator Middleware 28 | * 29 | * This middleware must be called after our keycloak protect and after authorizedParty middleware. 30 | * 31 | * This middleware will check if the authorized party token is on the request. 32 | * 33 | * @see module:keycloak 34 | */ 35 | 36 | const authorizedPartyValidator = async (req, res, next) => { 37 | try { 38 | if (!req.authorizedParty) throw Error('No AZP'); 39 | } catch (err) { 40 | return new Problem(400, { 41 | detail: 'Could not determine Authorized Party' 42 | }).send(res); 43 | } 44 | next(); 45 | }; 46 | 47 | module.exports = { authorizedParty, authorizedPartyValidator }; 48 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 2 | # Description 3 | 4 | 5 | 6 | 7 | 8 | ## Types of changes 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | ## Checklist 18 | 19 | 20 | 21 | 22 | - [ ] I have read the [CONTRIBUTING](CONTRIBUTING.md) doc 23 | - [ ] I have checked that unit tests pass locally with my changes 24 | - [ ] I have added tests that prove my fix is effective or that my feature works 25 | - [ ] I have updated the OpenAPI 3.0 `v*.api-spec.yaml` documentation (if appropriate) 26 | - [ ] I have added necessary documentation (if appropriate) 27 | 28 | ## Further comments 29 | 30 | 31 | -------------------------------------------------------------------------------- /charts/cdogs/templates/secret.yaml: -------------------------------------------------------------------------------- 1 | {{- $awsSecretName := printf "%s-%s" (include "cdogs.configname" .) "aws" }} 2 | {{- $awsSecret := (lookup "v1" "Secret" .Release.Namespace $awsSecretName ) }} 3 | {{- $kcSecretName := printf "%s-%s" (include "cdogs.configname" .) "keycloak" }} 4 | {{- $kcSecret := (lookup "v1" "Secret" .Release.Namespace $kcSecretName ) }} 5 | 6 | {{- if and (not $awsSecret) (and .Values.awsSecretOverride.password .Values.awsSecretOverride.username) }} 7 | --- 8 | apiVersion: v1 9 | kind: Secret 10 | metadata: 11 | {{- if not .Values.config.releaseScoped }} 12 | annotations: 13 | "helm.sh/resource-policy": keep 14 | {{- else }} 15 | labels: {{ include "cdogs.labels" . | nindent 4 }} 16 | {{- end }} 17 | name: {{ $awsSecretName }} 18 | type: kubernetes.io/basic-auth 19 | data: 20 | password: {{ .Values.awsSecretOverride.password | b64enc | quote }} 21 | username: {{ .Values.awsSecretOverride.username | b64enc | quote }} 22 | {{- end }} 23 | {{- if and (not $kcSecret) (and .Values.keycloakSecretOverride.password .Values.keycloakSecretOverride.username) }} 24 | --- 25 | apiVersion: v1 26 | kind: Secret 27 | metadata: 28 | {{- if not .Values.config.releaseScoped }} 29 | annotations: 30 | "helm.sh/resource-policy": keep 31 | {{- else }} 32 | labels: {{ include "cdogs.labels" . 
| nindent 4 }} 33 | {{- end }} 34 | name: {{ $kcSecretName }} 35 | type: kubernetes.io/basic-auth 36 | data: 37 | password: {{ .Values.keycloakSecretOverride.password | b64enc | quote }} 38 | username: {{ .Values.keycloakSecretOverride.username | b64enc | quote }} 39 | {{- end }} 40 | -------------------------------------------------------------------------------- /charts/cdogs/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: common-document-generation-service 3 | # This is the chart version. This version number should be incremented each time you make changes 4 | # to the chart and its templates, including the app version. 5 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 6 | version: 0.0.9 7 | kubeVersion: ">= 1.13.0" 8 | description: A microservice for merging JSON data into xml-based templates (powered by Carbone.io) 9 | # A chart can be either an 'application' or a 'library' chart. 10 | # Application charts are a collection of templates that can be packaged into versioned archives 11 | # to be deployed. 12 | # Library charts provide useful utilities or functions for the chart developer. They're included as 13 | # a dependency of application charts to inject those utilities and functions into the rendering 14 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 15 | type: application 16 | keywords: 17 | - nodejs 18 | - javascript 19 | - docker 20 | - microservice 21 | - document-generation 22 | - templating 23 | - cdogs 24 | home: https://github.com/bcgov/common-document-generation-service 25 | sources: 26 | - https://github.com/bcgov/common-document-generation-service 27 | dependencies: [] 28 | maintainers: 29 | - name: NR Common Service Showcase Team 30 | email: NR.CommonServiceShowcase@gov.bc.ca 31 | url: https://bcgov.github.io/common-service-showcase/team.html 32 | # This is the version number of the application being deployed. This version number should be 33 | # incremented each time you make changes to the application. Versions are not expected to 34 | # follow Semantic Versioning. They should reflect the version the application is using. 35 | # It is recommended to use it with quotes. 
36 | appVersion: "2.5.0" 37 | deprecated: false 38 | annotations: {} 39 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.io/node:20.19.0-alpine 2 | 3 | ARG APP_ROOT=/opt/app-root/src 4 | ENV NO_UPDATE_NOTIFIER=true \ 5 | PATH="/usr/lib/libreoffice/program:${PATH}" \ 6 | PYTHONUNBUFFERED=1 7 | WORKDIR ${APP_ROOT} 8 | 9 | # Install LibreOffice & Common Fonts 10 | RUN apk --no-cache add bash libreoffice util-linux \ 11 | font-droid-nonlatin font-droid ttf-dejavu ttf-freefont ttf-liberation && \ 12 | rm -rf /var/cache/apk/* 13 | 14 | # Install Microsoft Core Fonts 15 | RUN apk --no-cache add msttcorefonts-installer fontconfig && \ 16 | update-ms-fonts && \ 17 | fc-cache -f && \ 18 | rm -rf /var/cache/apk/* 19 | 20 | # Install Zip 21 | RUN apk --no-cache add zip && \ 22 | rm -rf /var/cache/apk/* 23 | 24 | # Install BCSans Font 25 | RUN wget https://www2.gov.bc.ca/assets/gov/british-columbians-our-governments/services-policies-for-government/policies-procedures-standards/web-content-development-guides/corporate-identity-assets/bcsansfont_print.zip?forcedownload=true -O bcsans.zip && \ 26 | unzip bcsans.zip && \ 27 | rm bcsans.zip && \ 28 | mkdir -p /usr/share/fonts/bcsans && \ 29 | install -m 644 ./BcSansFont_Print/*.ttf /usr/share/fonts/bcsans/ && \ 30 | rm -rf ./BcSansFont_Print && \ 31 | fc-cache -f 32 | 33 | # enable PDF/UA compliance in LibreOffice registry 34 | RUN sed -i \ 35 | 's|false|true|' \ 36 | /usr/lib/libreoffice/share/registry/main.xcd 37 | 38 | # NPM Permission Fix 39 | RUN mkdir -p /.npm 40 | RUN chown -R 1001:0 /.npm 41 | 42 | # Install Application 43 | COPY .git ${APP_ROOT}/.git 44 | COPY app ${APP_ROOT} 45 | RUN chown -R 1001:0 ${APP_ROOT} 46 | USER 1001 47 | RUN npm ci --omit=dev 48 | 49 | EXPOSE ${APP_PORT} 50 | CMD ["node", "./bin/www"] 51 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Example usage of CDOGS 2 | 3 | If you would like to use the same Keycloak Realm as our hosted service 4 | (`comsvcauth`, as used by this token endpoint `https://dev.loginproxy.gov.bc.ca/auth/realms/comsvcauth/protocol/openid-connect/token`), you can request client setup through [the API Services Portal](https://api.gov.bc.ca/devportal/api-directory/3181). 5 | 6 | ## Node 7 | 8 | There is an example of using node.js in file `server.js`. 9 | 10 | To run the example: 11 | 12 | ``` 13 | npm install 14 | ``` 15 | 16 | ``` 17 | CLIENT_ID="your_keycloak_client_id" CLIENT_SECRET="your_keycloak_client_secret" node server.js 18 | ``` 19 | 20 | ## Curl 21 | 22 | Assuming you have an environment including 23 | 24 | ``` 25 | CLIENT_ID="your_keycloak_client_id" 26 | CLIENT_SECRET="your_keycloak_client_secret" 27 | ``` 28 | 29 | where authentication is required, there are some example bash scripts. 
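For example, one way to invoke them (a minimal sketch, assuming the variables above are exported and `curl` and `jq` are available):

```sh
CLIENT_ID="your_keycloak_client_id" CLIENT_SECRET="your_keycloak_client_secret" bash 01-authenticated.sh
```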
30 | 31 | # CDOGS with Docker 32 | 33 | ```sh 34 | > docker pull ghcr.io/bcgov/common-document-generation-service:latest 35 | ``` 36 | 37 | ## CDOGS without auth 38 | 39 | ### Quickstart 40 | 41 | ```sh 42 | > docker run -it --rm -p 3000:3000 bcgov/common-document-generation-service:latest 43 | ``` 44 | 45 | ### Creating a volume to persist the document cache 46 | ```sh 47 | > docker volume create carbone-cache 48 | # View details about your new volume 49 | > docker volume inspect carbone-cache 50 | # Start the CDOGS container with the new volume to persist the document cache. 51 | # /tmp/carbone-files is the default for CACHE_DIR 52 | > docker run -d -p 3000:3000 --name CDOGS -v carbone-cache:/tmp/carbone-files bcgov/common-document-generation-service:latest 53 | ``` 54 | 55 | ## CDOGS with auth 56 | ```sh 57 | > docker run -it --rm -p 3000:3000 -e KC_CLIENTID= -e KC_CLIENTSECRET= -e KC_ENABLED=true -e KC_PUBLICKEY= -e KC_REALM= -e KC_SERVERURL= bcgov/common-document-generation-service:latest 58 | ``` 59 | -------------------------------------------------------------------------------- /app/bin/www: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | /** 4 | * Module dependencies. 5 | */ 6 | const config = require('config'); 7 | const http = require('http'); 8 | 9 | const app = require('../app'); 10 | const log = require('../src/components/log')(module.filename); 11 | 12 | /** 13 | * Get port from environment and store in Express. 14 | */ 15 | const port = normalizePort(config.get('server.port')); 16 | app.set('port', port); 17 | 18 | /** 19 | * Create HTTP server. 20 | */ 21 | const server = http.createServer(app); 22 | 23 | /** 24 | * Listen on provided port, on all network interfaces. 25 | */ 26 | server.listen(port); 27 | server.on('error', onError); 28 | server.on('listening', onListening); 29 | 30 | /** 31 | * Normalize a port into a number, string, or false. 32 | */ 33 | function normalizePort(val) { 34 | const port = parseInt(val, 10); 35 | 36 | if (isNaN(port)) { 37 | // named pipe 38 | return val; 39 | } 40 | 41 | if (port >= 0) { 42 | // port number 43 | return port; 44 | } 45 | 46 | return false; 47 | } 48 | 49 | /** 50 | * Event listener for HTTP server "error" event. 51 | */ 52 | function onError(error) { 53 | if (error.syscall !== 'listen') { 54 | throw error; 55 | } 56 | 57 | var bind = typeof port === 'string' ? 58 | 'Pipe ' + port : 59 | 'Port ' + port; 60 | 61 | // handle specific listen errors with friendly messages 62 | switch (error.code) { 63 | case 'EACCES': 64 | log.error(bind + ' requires elevated privileges'); 65 | process.exit(1); 66 | break; 67 | case 'EADDRINUSE': 68 | log.error(bind + ' is already in use'); 69 | process.exit(1); 70 | break; 71 | default: 72 | throw error; 73 | } 74 | } 75 | 76 | /** 77 | * Event listener for HTTP server "listening" event. 78 | */ 79 | function onListening() { 80 | const addr = server.address(); 81 | const bind = typeof addr === 'string' ? 
82 | 'pipe ' + addr : 83 | 'port ' + addr.port; 84 | log.info('Listening on ' + bind); 85 | } 86 | -------------------------------------------------------------------------------- /app/src/routes/v2/index.js: -------------------------------------------------------------------------------- 1 | const router = require('express').Router(); 2 | const helmet = require('helmet'); 3 | 4 | const fileTypesRouter = require('./fileTypes'); 5 | const healthRouter = require('./health'); 6 | const templateRouter = require('./template'); 7 | 8 | const { authenticate } = require('../../middleware/authorization'); 9 | const { getDocs, getJsonSpec, getYamlSpec } = require('../../middleware/openapi'); 10 | 11 | const version = 'v2'; 12 | const docsHelmet = helmet({ 13 | contentSecurityPolicy: { 14 | directives: { 15 | 'img-src': ['data:', 'https://cdn.redoc.ly'], 16 | 'script-src': ['blob:', 'https://cdn.redoc.ly'] 17 | } 18 | } 19 | }); 20 | 21 | // Base Responder 22 | router.get('/', (_req, res) => { 23 | res.status(200).json({ 24 | endpoints: [ 25 | { name: '/api-spec.json', operations: ['GET'] }, 26 | { name: '/api-spec.yaml', operations: ['GET'] }, 27 | { name: '/docs', operations: ['GET'] }, 28 | { name: '/fileTypes', operations: ['GET'] }, 29 | { name: '/health', operations: ['GET'] }, 30 | { name: '/render/{id}', operations: ['GET', 'DELETE'] }, 31 | { name: '/template', operations: ['POST'] }, 32 | { name: '/template/render', operations: ['POST'] }, 33 | { name: '/template/{id}', operations: ['GET', 'DELETE'] }, 34 | { name: '/template/{id}/render', operations: ['POST'] } 35 | ] 36 | }); 37 | }); 38 | 39 | /** OpenAPI JSON Spec */ 40 | router.get('/api-spec.json', docsHelmet, getJsonSpec(version)); 41 | 42 | /** OpenAPI YAML Spec */ 43 | router.get('/api-spec.yaml', docsHelmet, getYamlSpec(version)); 44 | 45 | /** OpenAPI Docs */ 46 | router.get('/docs', docsHelmet, getDocs(version)); 47 | 48 | /** File Types Router */ 49 | router.get('/fileTypes', authenticate, fileTypesRouter); 50 | 51 | /** Health Router */ 52 | router.use('/health', authenticate, healthRouter); 53 | 54 | /** Template Router */ 55 | router.use('/template', authenticate, templateRouter); 56 | 57 | module.exports = router; 58 | -------------------------------------------------------------------------------- /app/docker/python: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # This file is part of the LibreOffice project. 4 | # 5 | # This Source Code Form is subject to the terms of the Mozilla Public 6 | # License, v. 2.0. If a copy of the MPL was not distributed with this 7 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 8 | # 9 | # This file incorporates work covered by the following license notice: 10 | # 11 | # Licensed to the Apache Software Foundation (ASF) under one or more 12 | # contributor license agreements. See the NOTICE file distributed 13 | # with this work for additional information regarding copyright 14 | # ownership. The ASF licenses this file to you under the Apache 15 | # License, Version 2.0 (the "License"); you may not use this file 16 | # except in compliance with the License. You may obtain a copy of 17 | # the License at http://www.apache.org/licenses/LICENSE-2.0 . 
18 | # 19 | 20 | # resolve installation directory 21 | sd_cwd="`pwd`" 22 | if [ -h "$0" ] ; then 23 | sd_basename=`basename "$0"` 24 | sd_script=`ls -l "$0" | sed "s/.*${sd_basename} -> //g"` 25 | cd "`dirname "$0"`" 26 | cd "`dirname "$sd_script"`" 27 | else 28 | cd "`dirname "$0"`" 29 | fi 30 | sd_prog=`pwd` 31 | cd "$sd_cwd" 32 | 33 | # Set PATH so that crash_report is found: 34 | PATH=$sd_prog${PATH+:$PATH} 35 | export PATH 36 | 37 | # Set UNO_PATH so that "officehelper.bootstrap()" can find soffice executable: 38 | : ${UNO_PATH=$sd_prog} 39 | export UNO_PATH 40 | 41 | # Set URE_BOOTSTRAP so that "uno.getComponentContext()" bootstraps a complete 42 | # OOo UNO environment: 43 | : ${URE_BOOTSTRAP=vnd.sun.star.pathname:$sd_prog/fundamentalrc} 44 | export URE_BOOTSTRAP 45 | 46 | PYTHONPATH=$sd_prog:$sd_prog/python-core/lib:$sd_prog/python-core/lib/lib-dynload:$sd_prog/python-core/lib/lib-tk:$sd_prog/python-core/lib/site-packages${PYTHONPATH+:$PYTHONPATH} 47 | export PYTHONPATH 48 | 49 | # This part doesn't work for some reason and is fine without it 50 | # PYTHONHOME=$sd_prog/python-core 51 | # export PYTHONHOME 52 | 53 | # execute binary 54 | exec "$sd_prog/python.bin" "$@" 55 | -------------------------------------------------------------------------------- /examples/server.js: -------------------------------------------------------------------------------- 1 | import fetch from 'node-fetch'; 2 | import fs from 'fs'; 3 | 4 | const client_id = process.env.CLIENT_ID; 5 | const client_secret = process.env.CLIENT_SECRET; 6 | 7 | function base64_encode(file) { 8 | const contents = fs.readFileSync(file); 9 | return contents.toString('base64'); 10 | } 11 | 12 | // We need the oidc api to generate a token for us 13 | const oidcResponse = await fetch( 14 | 'https://dev.loginproxy.gov.bc.ca/auth/realms/your-realm-name/protocol/openid-connect/token', 15 | { 16 | method: 'POST', 17 | body: `grant_type=client_credentials&client_id=${client_id}&client_secret=${client_secret}`, 18 | headers: { 19 | 'Content-Type': 'application/x-www-form-urlencoded', 20 | }, 21 | } 22 | ); 23 | 24 | const keycloak = await oidcResponse.json(); 25 | 26 | console.log(keycloak); 27 | 28 | // { 29 | // access_token: 'secret_token', 30 | // expires_in: 300, 31 | // refresh_expires_in: 0, 32 | // token_type: 'bearer', 33 | // 'not-before-policy': 0, 34 | // scope: '' 35 | // } 36 | 37 | const templateContent = base64_encode('./template.txt'); 38 | 39 | // #template.txt 40 | // Hello {d.firstName} {d.lastName}! 41 | 42 | const cdogsResponse = await fetch( 43 | 'https://cdogs-dev.api.gov.bc.ca/api/v2/template/render', 44 | { 45 | method: 'POST', 46 | body: JSON.stringify({ 47 | data: { 48 | firstName: 'Jane', 49 | lastName: 'Smith', 50 | }, 51 | template: { 52 | encodingType: 'base64', 53 | fileType: 'txt', 54 | content: templateContent, 55 | }, 56 | options: { 57 | convertTo: 'pdf', 58 | overwrite: true, 59 | reportName: '{d.firstName}-{d.lastName}.pdf', 60 | }, 61 | }), 62 | headers: { 63 | Authorization: `Bearer ${keycloak.access_token}`, 64 | 'Content-Type': 'application/json', 65 | }, 66 | } 67 | ); 68 | 69 | const pdf = await cdogsResponse.arrayBuffer(); 70 | 71 | // saves a file test.pdf - the CDOGS output. 
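// (If the render fails, CDOGS responds with an api-problem JSON body rather than a PDF,
// so checking cdogsResponse.ok before writing the file is a sensible guard.)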
72 | fs.writeFileSync('test.pdf', Buffer.from(pdf), 'binary'); 73 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policies and Procedures 2 | 3 | This document outlines security procedures and general policies for the Common 4 | Document Generation Service project. 5 | 6 | - [Supported Versions](#supported-versions) 7 | - [Reporting a Bug](#reporting-a-bug) 8 | - [Disclosure Policy](#disclosure-policy) 9 | - [Comments on this Policy](#comments-on-this-policy) 10 | 11 | ## Supported Versions 12 | 13 | At this time, only the latest version of Common Document Generation Service is supported. 14 | 15 | | Version | Supported | 16 | | ------- | ------------------ | 17 | | 2.5.x | :white_check_mark: | 18 | | < 2.5.x | :x: | 19 | 20 | ## Reporting a Bug 21 | 22 | The `CSS` team and community take all security bugs in `CDOGS` seriously. 23 | Thank you for improving the security of `CDOGS`. We appreciate your efforts and 24 | responsible disclosure and will make every effort to acknowledge your 25 | contributions. 26 | 27 | Report security bugs by sending an email to . 28 | 29 | The `CSS` team will acknowledge your email within 48 hours, and will send a 30 | more detailed response within 48 hours indicating the next steps in handling 31 | your report. After the initial reply to your report, the security team will 32 | endeavor to keep you informed of the progress towards a fix and full 33 | announcement, and may ask for additional information or guidance. 34 | 35 | Report security bugs in third-party modules to the person or team maintaining 36 | the module. 37 | 38 | ## Disclosure Policy 39 | 40 | When the security team receives a security bug report, they will assign it to a 41 | primary handler. This person will coordinate the fix and release process, 42 | involving the following steps: 43 | 44 | - Confirm the problem and determine the affected versions. 45 | - Audit code to find any potential similar problems. 46 | - Prepare fixes for all releases still under maintenance. These fixes will be 47 | released as fast as possible. 48 | 49 | ## Comments on this Policy 50 | 51 | If you have suggestions on how this process could be improved please submit a 52 | pull request. 
53 | -------------------------------------------------------------------------------- /.github/actions/build-push-container/action.yaml: -------------------------------------------------------------------------------- 1 | name: Build & Push Container 2 | description: Builds a container from a Dockerfile and pushes to registry 3 | 4 | inputs: 5 | context: 6 | description: Effective Working Directory 7 | required: true 8 | default: "./" 9 | image_name: 10 | description: Image Name 11 | required: true 12 | github_username: 13 | description: Github Container Registry Username 14 | required: true 15 | github_token: 16 | description: Github Container Registry Authorization Token 17 | required: true 18 | 19 | runs: 20 | using: composite 21 | steps: 22 | - name: Checkout repository 23 | uses: actions/checkout@v4 24 | 25 | - name: Parse Input Values 26 | shell: bash 27 | run: | 28 | echo "GH_USERNAME=$(tr '[:upper:]' '[:lower:]' <<< '${{ inputs.github_username }}')" >> $GITHUB_ENV 29 | 30 | - name: Login to Github Container Registry 31 | uses: docker/login-action@v3 32 | with: 33 | registry: ghcr.io 34 | username: ${{ env.GH_USERNAME }} 35 | password: ${{ inputs.github_token }} 36 | 37 | - name: Prepare Container Metadata tags 38 | id: meta 39 | uses: docker/metadata-action@v5 40 | with: 41 | images: | 42 | ghcr.io/${{ env.GH_USERNAME }}/${{ inputs.image_name }} 43 | # Always updates the 'latest' tag 44 | flavor: | 45 | latest=true 46 | # Creates tags based off of branch names and semver tags 47 | tags: | 48 | type=ref,event=branch 49 | type=ref,event=pr 50 | type=semver,pattern={{version}} 51 | type=semver,pattern={{major}}.{{minor}} 52 | type=semver,pattern={{major}} 53 | type=sha 54 | 55 | - name: Build and Push to Container Registry 56 | id: builder 57 | uses: docker/build-push-action@v5 58 | with: 59 | context: ${{ inputs.context }} 60 | push: true 61 | tags: ${{ steps.meta.outputs.tags }} 62 | labels: ${{ steps.meta.outputs.labels }} 63 | 64 | - name: Inspect Docker Image 65 | shell: bash 66 | run: | 67 | docker image inspect ghcr.io/${{ env.GH_USERNAME }}/${{ inputs.image_name }}:latest 68 | -------------------------------------------------------------------------------- /app/src/middleware/authorization.js: -------------------------------------------------------------------------------- 1 | const config = require('config'); 2 | const jwt = require('jsonwebtoken'); 3 | const Problem = require('api-problem'); 4 | 5 | const { getConfigBoolean } = require('../components/utils'); 6 | 7 | /** 8 | * @function _spkiWrapper 9 | * Wraps an SPKI key with PEM header and footer 10 | * @param {string} spki The PEM-encoded Simple public-key infrastructure string 11 | * @returns {string} The PEM-encoded SPKI with PEM header and footer 12 | */ 13 | const _spkiWrapper = (spki) => `-----BEGIN PUBLIC KEY-----\n${spki}\n-----END PUBLIC KEY-----`; 14 | 15 | module.exports = { 16 | /** 17 | * Enables JWT verification only if environment has it enabled. 18 | */ 19 | authenticate: (req, res, next) => { 20 | 21 | if (getConfigBoolean('keycloak.enabled')) { 22 | const authorization = req.get('Authorization'); 23 | if (!authorization || !authorization.startsWith('Bearer ')) { 24 | return new Problem(401, { 25 | detail: 'An authorization header of the format "Bearer {token}" is required' 26 | }).send(res); 27 | } 28 | const bearerToken = authorization.substring(7); 29 | 30 | try { 31 | const publicKey = config.get('keycloak.publicKey'); 32 | const pemKey = publicKey.startsWith('-----BEGIN') ? 
publicKey : _spkiWrapper(publicKey); 33 | 34 | jwt.verify(bearerToken, pemKey, { 35 | issuer: `${config.get('keycloak.serverUrl')}/realms/${config.get('keycloak.realm')}`, 36 | audience: config.get('keycloak.clientId') 37 | }); 38 | next(); 39 | 40 | } catch (err) { 41 | if (err instanceof jwt.JsonWebTokenError || err instanceof jwt.TokenExpiredError || err instanceof jwt.NotBeforeError) { 42 | return new Problem(401, { 43 | detail: err.message 44 | }).send(res); 45 | } 46 | else { 47 | // Return HTTP 401 only for JWT errors; the rest should be HTTP 500 48 | if (!config.has('keycloak.publicKey')) { 49 | throw new Error('OIDC environment variable KC_PUBLICKEY or keycloak.publicKey must be defined'); 50 | } else { 51 | throw(err); 52 | } 53 | } 54 | } 55 | 56 | } else { 57 | next(); 58 | } 59 | } 60 | }; 61 | -------------------------------------------------------------------------------- /.github/workflows/on-pr-opened.yaml: -------------------------------------------------------------------------------- 1 | name: Pull Request Opened 2 | 3 | env: 4 | ACRONYM: cdogs 5 | APP_NAME: common-document-generation-service 6 | NAMESPACE_PREFIX: 2250c5 7 | 8 | on: 9 | pull_request: 10 | branches: 11 | - master 12 | types: 13 | - opened 14 | - reopened 15 | - synchronize 16 | 17 | concurrency: 18 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 19 | cancel-in-progress: true 20 | 21 | jobs: 22 | build: 23 | name: Build & Push 24 | if: "! github.event.pull_request.head.repo.fork" 25 | runs-on: ubuntu-latest 26 | timeout-minutes: 10 27 | steps: 28 | - name: Checkout 29 | uses: actions/checkout@v4 30 | - name: Build & Push 31 | uses: ./.github/actions/build-push-container 32 | with: 33 | context: . 34 | image_name: ${{ env.APP_NAME }} 35 | github_username: ${{ github.repository_owner }} 36 | github_token: ${{ secrets.GITHUB_TOKEN }} 37 | 38 | deploy-pr-dev: 39 | name: Deploy Pull Request to Dev 40 | environment: 41 | name: pr 42 | url: https://${{ env.ACRONYM }}-dev-pr-${{ github.event.number }}.apps.silver.devops.gov.bc.ca 43 | runs-on: ubuntu-latest 44 | needs: build 45 | timeout-minutes: 12 46 | steps: 47 | - name: Checkout 48 | uses: actions/checkout@v4 49 | - name: Deploy to Dev 50 | uses: ./.github/actions/deploy-to-environment 51 | with: 52 | app_name: ${{ env.APP_NAME }} 53 | acronym: ${{ env.ACRONYM }} 54 | aws_role_arn: ${{ secrets.AWS_ROLE_ARN }} 55 | environment: pr 56 | job_name: pr-${{ github.event.number }} 57 | namespace_prefix: ${{ env.NAMESPACE_PREFIX }} 58 | namespace_environment: dev 59 | openshift_server: ${{ secrets.OPENSHIFT_SERVER }} 60 | openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} 61 | - name: Release Comment on PR 62 | uses: marocchino/sticky-pull-request-comment@v2.9.0 63 | if: success() 64 | with: 65 | header: release 66 | message: | 67 | Release ${{ github.sha }} deployed at 68 | -------------------------------------------------------------------------------- /app/tests/unit/components/validation/middleware.spec.js: -------------------------------------------------------------------------------- 1 | 2 | const bytes = require('bytes'); 3 | const config = require('config'); 4 | 5 | const { middleware, modelValidation } = require('../../../../src/components/validation'); 6 | 7 | const maxFileSize = bytes.parse(config.get('carbone.uploadSize')); 8 | 9 | describe('validateCarbone', () => { 10 | const carboneSpy = jest.spyOn(modelValidation, 'carbone'); 11 | const handleValidationErrorsSpy = jest.spyOn(middleware, '_handleValidationErrors'); 
12 | 13 | beforeEach(() => { 14 | carboneSpy.mockReset(); 15 | handleValidationErrorsSpy.mockReset(); 16 | }); 17 | 18 | afterAll(() => { 19 | carboneSpy.mockRestore(); 20 | handleValidationErrorsSpy.mockRestore(); 21 | }); 22 | 23 | it('should call modelValidation.carbone and _handleValidationErrors', () => { 24 | const req = { body: {} }; 25 | const fn = () => {}; 26 | carboneSpy.mockReturnValue([]); 27 | 28 | middleware.validateCarbone(req, {}, fn); 29 | 30 | expect(carboneSpy).toHaveBeenCalledTimes(1); 31 | expect(carboneSpy).toHaveBeenCalledWith(req.body); 32 | expect(handleValidationErrorsSpy).toHaveBeenCalledTimes(1); 33 | expect(handleValidationErrorsSpy).toHaveBeenCalledWith({}, fn, []); 34 | }); 35 | }); 36 | 37 | describe('validateTemplate', () => { 38 | const templateSpy = jest.spyOn(modelValidation, 'template'); 39 | const handleValidationErrorsSpy = jest.spyOn(middleware, '_handleValidationErrors'); 40 | 41 | beforeEach(() => { 42 | templateSpy.mockReset(); 43 | handleValidationErrorsSpy.mockReset(); 44 | }); 45 | 46 | afterAll(() => { 47 | templateSpy.mockRestore(); 48 | handleValidationErrorsSpy.mockRestore(); 49 | }); 50 | 51 | it('should call modelValidation.template and _handleValidationErrors', () => { 52 | const req = { body: {} }; 53 | const fn = () => {}; 54 | templateSpy.mockReturnValue([]); 55 | 56 | middleware.validateTemplate(req, {}, fn); 57 | 58 | expect(templateSpy).toHaveBeenCalledTimes(1); 59 | expect(templateSpy).toHaveBeenCalledWith(req.body, maxFileSize); 60 | // TODO: Figure out why this test spy isn't working 61 | // expect(handleValidationErrorsSpy).toHaveBeenCalledTimes(1); 62 | // expect(handleValidationErrorsSpy).toHaveBeenCalledWith({}, fn, []); 63 | }); 64 | }); 65 | -------------------------------------------------------------------------------- /.github/workflows/on-pr-closed.yaml: -------------------------------------------------------------------------------- 1 | name: Pull Request Closed 2 | 3 | env: 4 | ACRONYM: cdogs 5 | APP_NAME: common-document-generation-service 6 | NAMESPACE_PREFIX: 2250c5 7 | 8 | on: 9 | pull_request: 10 | branches: 11 | - master 12 | types: 13 | - closed 14 | 15 | concurrency: 16 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 17 | cancel-in-progress: true 18 | 19 | jobs: 20 | remove-pr-dev: 21 | name: Remove PR build from dev namespace 22 | if: "! 
github.event.pull_request.head.repo.fork" 23 | environment: 24 | name: pr 25 | url: https://${{ env.ACRONYM }}-dev-pr-${{ github.event.number }}.apps.silver.devops.gov.bc.ca 26 | runs-on: ubuntu-latest 27 | timeout-minutes: 12 28 | steps: 29 | - name: Checkout 30 | uses: actions/checkout@v4 31 | - name: Install CLI tools from OpenShift Mirror 32 | uses: redhat-actions/openshift-tools-installer@v1 33 | with: 34 | oc: "4" 35 | - name: Login to OpenShift and select project 36 | shell: bash 37 | run: | 38 | # OC Login 39 | OC_TEMP_TOKEN=$(curl -k -X POST ${{ secrets.OPENSHIFT_SERVER }}/api/v1/namespaces/${{ env.NAMESPACE_PREFIX }}-dev/serviceaccounts/pipeline/token --header "Authorization: Bearer ${{ secrets.OPENSHIFT_TOKEN }}" -d '{"spec": {"expirationSeconds": 600}}' -H 'Content-Type: application/json; charset=utf-8' | jq -r '.status.token' ) 40 | oc login --token=$OC_TEMP_TOKEN --server=${{ secrets.OPENSHIFT_SERVER }} 41 | # move to project context 42 | oc project ${{ env.NAMESPACE_PREFIX }}-dev 43 | - name: Remove PR Deployment 44 | shell: bash 45 | run: | 46 | helm uninstall --namespace ${{ env.NAMESPACE_PREFIX }}-dev pr-${{ github.event.number }} --timeout 10m --wait 47 | oc delete --namespace ${{ env.NAMESPACE_PREFIX }}-dev cm,secret --selector app.kubernetes.io/instance=pr-${{ github.event.number }} 48 | - name: Remove Release Comment on PR 49 | uses: marocchino/sticky-pull-request-comment@v2.9.0 50 | with: 51 | header: release 52 | delete: true 53 | - name: Remove Github Deployment Environment 54 | uses: strumwolf/delete-deployment-environment@v3 55 | with: 56 | environment: pr 57 | onlyRemoveDeployments: true 58 | token: ${{ secrets.GITHUB_TOKEN }} 59 | -------------------------------------------------------------------------------- /charts/cdogs/templates/cronjob.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.cronJob.enabled .Values.persistentVolumeClaim.enabled }} 2 | apiVersion: batch/v1 3 | kind: CronJob 4 | metadata: 5 | name: {{ template "cdogs.fullname" . }} 6 | labels: {{ include "cdogs.labels" . | nindent 4 }} 7 | spec: 8 | concurrencyPolicy: Forbid 9 | failedJobsHistoryLimit: 3 10 | successfulJobsHistoryLimit: 3 11 | startingDeadlineSeconds: 60 12 | jobTemplate: 13 | metadata: 14 | labels: {{ include "cdogs.labels" . | nindent 8 }} 15 | spec: 16 | backoffLimit: 6 17 | activeDeadlineSeconds: 600 18 | parallelism: 1 19 | completions: 1 20 | template: 21 | metadata: 22 | labels: {{ include "cdogs.labels" . | nindent 12 }} 23 | spec: 24 | {{- with .Values.imagePullSecrets }} 25 | imagePullSecrets: {{ toYaml . | nindent 8 }} 26 | {{- end }} 27 | {{- if .Values.serviceAccount.create }} 28 | serviceAccountName: {{ include "cdogs.serviceAccountName" . }} 29 | {{- end }} 30 | {{- with .Values.podSecurityContext }} 31 | securityContext: {{ toYaml . | nindent 8 }} 32 | {{- end }} 33 | containers: 34 | - name: job 35 | {{- with .Values.securityContext }} 36 | securityContext: {{ toYaml . | nindent 12 }} 37 | {{- end }} 38 | image: "{{ .Values.image.repository }}/{{ .Chart.Name }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 39 | imagePullPolicy: {{ .Values.image.pullPolicy }} 40 | command: 41 | - node 42 | - ./cacheCleaner.js 43 | resources: {{ toYaml .Values.resources | nindent 16 }} 44 | env: 45 | - name: NODE_ENV 46 | value: production 47 | envFrom: 48 | - configMapRef: 49 | name: {{ include "cdogs.configname" . 
}}-config 50 | volumeMounts: 51 | - name: file-cache-data 52 | mountPath: /var/lib/file-cache/data 53 | restartPolicy: Never 54 | volumes: 55 | - name: file-cache-data 56 | persistentVolumeClaim: 57 | claimName: {{ include "cdogs.configname" . }}-cache 58 | schedule: {{ .Values.cronJob.schedule }} 59 | suspend: {{ .Values.cronJob.suspend }} 60 | {{- end }} 61 | -------------------------------------------------------------------------------- /app/src/docs/index.js: -------------------------------------------------------------------------------- 1 | const config = require('config'); 2 | const fs = require('fs'); 3 | const path = require('path'); 4 | const { load } = require('js-yaml'); 5 | const { getConfigBoolean } = require('../components/utils'); 6 | 7 | module.exports = { 8 | /** 9 | * @function getDocHTML 10 | * Gets and formats a ReDoc HTML page 11 | * @param {string} version Desired version (`v1` or `v2`) 12 | * @returns {string} A ReDoc HTML page string 13 | */ 14 | getDocHTML: (version) => ` 15 | 16 | 17 | Common Document Generation Service API - Documentation ${version} 18 | 19 | 20 | 21 | 22 | 23 | 24 | 30 | 31 | 32 | 33 | 34 | 35 | `, 36 | 37 | /** 38 | * @function getSpec 39 | * Gets and formats an OpenAPI spec object 40 | * @param {string} version Desired version (`v1` or `v2`) 41 | * @returns {object} An OpenAPI spec object 42 | */ 43 | getSpec: (version) => { 44 | const rawSpec = fs.readFileSync(path.join(__dirname, `../docs/${version}.api-spec.yaml`), 'utf8'); 45 | const spec = load(rawSpec); 46 | spec.servers[0].url = `/api/${version}`; 47 | 48 | if (getConfigBoolean('keycloak.enabled')) { 49 | // Dynamically update OIDC endpoint url 50 | spec.components.securitySchemes.OpenID.openIdConnectUrl = `${config.get('keycloak.serverUrl')}/realms/${config.get('keycloak.realm')}/.well-known/openid-configuration`; 51 | } else { 52 | // Drop all security clauses as keycloak is not enabled 53 | delete spec.security; 54 | delete spec.components.securitySchemes; 55 | Object.keys(spec.paths).forEach((path) => { 56 | Object.keys(path).forEach((method) => { 57 | if (method.security) delete method.security; 58 | }); 59 | }); 60 | } 61 | 62 | return spec; 63 | } 64 | }; 65 | -------------------------------------------------------------------------------- /charts/cdogs/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "cdogs.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "cdogs.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Define the config pattern of the chart based on options. 28 | */}} 29 | {{- define "cdogs.configname" -}} 30 | {{- if .Values.config.releaseScoped }} 31 | {{- include "cdogs.fullname" . 
}} 32 | {{- else }} 33 | {{- include "cdogs.name" . }} 34 | {{- end }} 35 | {{- end }} 36 | 37 | {{/* 38 | Create chart name and version as used by the chart label. 39 | */}} 40 | {{- define "cdogs.chart" -}} 41 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 42 | {{- end }} 43 | 44 | {{/* 45 | Common labels 46 | */}} 47 | {{- define "cdogs.labels" -}} 48 | helm.sh/chart: {{ include "cdogs.chart" . }} 49 | app: {{ include "cdogs.fullname" . }} 50 | {{ include "cdogs.selectorLabels" . }} 51 | {{- if .Chart.AppVersion }} 52 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 53 | {{- end }} 54 | app.kubernetes.io/component: app 55 | app.kubernetes.io/managed-by: {{ .Release.Service }} 56 | app.kubernetes.io/part-of: {{ .Release.Name }} 57 | app.openshift.io/runtime: nodejs 58 | {{- end }} 59 | 60 | {{/* 61 | Selector labels 62 | */}} 63 | {{- define "cdogs.selectorLabels" -}} 64 | app.kubernetes.io/name: {{ include "cdogs.name" . }} 65 | app.kubernetes.io/instance: {{ .Release.Name }} 66 | {{- end }} 67 | 68 | {{/* 69 | Create the name of the service account to use 70 | */}} 71 | {{- define "cdogs.serviceAccountName" -}} 72 | {{- if .Values.serviceAccount.create }} 73 | {{- default (include "cdogs.fullname" .) .Values.serviceAccount.name }} 74 | {{- else }} 75 | {{- default "default" .Values.serviceAccount.name }} 76 | {{- end }} 77 | {{- end }} 78 | -------------------------------------------------------------------------------- /k6/sample_contexts.json: -------------------------------------------------------------------------------- 1 | { 2 | "isaDate": "20191127", 3 | "notes": "This is a long bit of text that is eventually broken up by some newlines and/or newlines/carriageReturns. \n So that means that the json can have slash-n, slash-r in it and the template will treat that as line returns in a plain text representation, or in the case of DOCX and ODT will replace it with the appropriate document-type XML line break tag. 
Here's a slash-n: \n Now here's a slash-r slash-n \r\n And another set of line breaks here\n\n\n See the Carbone documentation here https://carbone.io/documentation.html#formatters and look for convCRLF() \r\n ", 4 | "admins": [ 5 | { 6 | "party": "X", 7 | "name": "John Smith", 8 | "phone": "555-123-4567", 9 | "fax": "555-098-6543", 10 | "email": "fakeemail@email.com" 11 | }, 12 | { 13 | "party": "Y", 14 | "name": "Anna Johnson", 15 | "phone": "555-333-4444", 16 | "fax": "555-111-2222", 17 | "email": "anotheremail@email.com" 18 | } 19 | ], 20 | "otherParties": [ 21 | { 22 | "name": "Lucas O'Neil" 23 | }, 24 | { 25 | "name": "Matthew Hall" 26 | }, 27 | { 28 | "name": "Jeremy" 29 | }, 30 | { 31 | "name": "Jason" 32 | }, 33 | { 34 | "name": "Bill" 35 | }, 36 | { 37 | "name": "Ted" 38 | }, 39 | { 40 | "name": "Excellent Adventure" 41 | } 42 | ], 43 | "offices": [ 44 | { 45 | "office": "Victoria", 46 | "applications": [ 47 | { 48 | "name": "Mines", 49 | "contact": "Jane Smith" 50 | }, 51 | { 52 | "name": "Water", 53 | "contact": "Bob Bobby" 54 | }, 55 | { 56 | "name": "Forests", 57 | "contact": "Alan" 58 | }, 59 | { 60 | "name": "Roads", 61 | "contact": "Harvey" 62 | } 63 | ] 64 | }, 65 | { 66 | "office": "Kamloops", 67 | "applications": [ 68 | { 69 | "name": "Licencing", 70 | "contact": "Jane Smith" 71 | }, 72 | { 73 | "name": "Rejections", 74 | "contact": "Bob Bobby" 75 | } 76 | ] 77 | }, 78 | { 79 | "office": "Vancouver", 80 | "applications": [ 81 | { 82 | "name": "Secret Application" 83 | }, 84 | { 85 | "name": "Mountains" 86 | } 87 | ] 88 | } 89 | ] 90 | } 91 | -------------------------------------------------------------------------------- /app/tests/unit/components/validation/modelValidation.spec.js: -------------------------------------------------------------------------------- 1 | const { customValidators } = require('../../../../src/components/validation'); 2 | 3 | // TODO: Refactor this to point to modelValidation contents 4 | describe.skip('customValidators.docGen', () => { 5 | let body; 6 | 7 | beforeEach(() => { 8 | body = { 9 | contexts: [{ 10 | x: 1, 11 | y: 2 12 | }], 13 | template: { 14 | content: 'ZHNmc2Rmc2RmZHNmc2Rmc2Rmc2Rm', 15 | contentEncodingType: 'base64', 16 | contentFileType: 'docx', 17 | outputFileType: 'pdf', 18 | outputFileName: 'abc_123_{d.firstname}-{d.lastname}', 19 | } 20 | }; 21 | }); 22 | 23 | it('should return an empty error array when valid', async () => { 24 | const result = await customValidators.docGen(body); 25 | 26 | expect(result).toBeTruthy(); 27 | expect(Array.isArray(result)).toBeTruthy(); 28 | expect(result.length).toEqual(0); 29 | }); 30 | 31 | it('should return an error with validation error when invalid', async () => { 32 | body.contexts = 'garbage'; 33 | 34 | const result = await customValidators.docGen(body); 35 | 36 | expect(result).toBeTruthy(); 37 | expect(Array.isArray(result)).toBeTruthy(); 38 | expect(result.length).toEqual(1); 39 | expect(result[0].value).toMatch('garbage'); 40 | expect(result[0].message).toMatch('Invalid value `contexts`.'); 41 | }); 42 | 43 | it('should return an empty error array when valid (using the minimum required request fields)', async () => { 44 | const simpleBody = { 45 | contexts: [{ 46 | x: 1 47 | }], 48 | template: { 49 | content: 'ZHNmc2Rmc2RmZHNmc2Rmc2Rmc2Rm', 50 | contentFileType: 'docx', 51 | } 52 | }; 53 | const result = await customValidators.docGen(simpleBody); 54 | 55 | expect(result).toBeTruthy(); 56 | expect(Array.isArray(result)).toBeTruthy(); 57 | expect(result.length).toEqual(0); 58 | }); 
59 | 60 | it('should return an error array when file type conversion is not supported', async () => { 61 | body = { 62 | contexts: [{ 63 | x: 1, 64 | y: 2 65 | }], 66 | template: { 67 | content: 'ZHNmc2Rmc2RmZHNmc2Rmc2Rmc2Rm', 68 | contentEncodingType: 'base64', 69 | contentFileType: 'DOCX', 70 | outputFileType: 'ppt', 71 | } 72 | }; 73 | const result = await customValidators.docGen(body); 74 | 75 | expect(result).toBeTruthy(); 76 | expect(Array.isArray(result)).toBeTruthy(); 77 | expect(result.length).toEqual(1); 78 | expect(result[0].message).toMatch('Unsupported file type conversion'); 79 | }); 80 | }); 81 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yaml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | --- 13 | name: "CodeQL" 14 | 15 | on: 16 | push: 17 | branches: 18 | - master 19 | pull_request: 20 | # The branches below must be a subset of the branches above 21 | branches: 22 | - master 23 | schedule: 24 | - cron: "38 6 * * 5" 25 | 26 | jobs: 27 | analyze: 28 | name: Analyze 29 | runs-on: ubuntu-latest 30 | permissions: 31 | actions: read 32 | contents: read 33 | security-events: write 34 | 35 | strategy: 36 | fail-fast: false 37 | matrix: 38 | language: 39 | - javascript 40 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] 41 | # Learn more about CodeQL language support at https://git.io/codeql-language-support 42 | 43 | steps: 44 | - name: Checkout repository 45 | uses: actions/checkout@v4 46 | 47 | # Initializes the CodeQL tools for scanning. 48 | - name: Initialize CodeQL 49 | uses: github/codeql-action/init@v3 50 | with: 51 | languages: ${{ matrix.language }} 52 | # If you wish to specify custom queries, you can do so here or in a config file. 53 | # By default, queries listed here will override any specified in a config file. 54 | # Prefix the list here with "+" to use these queries and those in the config file. 55 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 56 | 57 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 58 | # If this step fails, then you should remove it and run the build manually (see below) 59 | - name: Autobuild 60 | uses: github/codeql-action/autobuild@v3 61 | 62 | # ℹ️ Command-line programs to run using the OS shell. 
63 | # 📚 https://git.io/JvXDl 64 | 65 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 66 | # and modify them (or add more) to build your code if your project 67 | # uses a compiled language 68 | 69 | #- run: | 70 | # make bootstrap 71 | # make release 72 | 73 | - name: Perform CodeQL Analysis 74 | uses: github/codeql-action/analyze@v3 75 | -------------------------------------------------------------------------------- /examples/doc-caching.js: -------------------------------------------------------------------------------- 1 | import fetch from 'node-fetch'; 2 | import fs from 'fs'; 3 | import { fileFromSync } from 'fetch-blob/from.js'; 4 | import { FormData } from 'formdata-polyfill/esm.min.js'; 5 | 6 | const template = fileFromSync('./template.txt'); 7 | const fd = new FormData(); 8 | let templateHash; 9 | 10 | fd.append('template', template); 11 | 12 | const cdogsTemplateCacheResponse = await fetch( 13 | 'http://localhost:3000/api/v2/template', 14 | { 15 | method: 'POST', 16 | body: fd, 17 | } 18 | ); 19 | 20 | if (cdogsTemplateCacheResponse.ok) { 21 | templateHash = await cdogsTemplateCacheResponse.text(); 22 | 23 | /* 24 | * If this response is successful, it will return the hash that relates to this uploaded template. 25 | * It must be saved for further api usage. 26 | */ 27 | 28 | console.log(templateHash); 29 | // bffe2a344ec1f8fb4fc1a1496df4ca29277da310f64eaa8748a1888b7a2198c5 30 | } else { 31 | const apiError = await cdogsTemplateCacheResponse.json(); 32 | 33 | /* 34 | * If this response is not successful an (RFC 7807) `api-problem` is returned. 35 | * https://www.npmjs.com/package/api-problem 36 | */ 37 | 38 | console.log(apiError); 39 | // { 40 | // type: 'https://httpstatuses.com/405', 41 | // title: 'Method Not Allowed', 42 | // status: 405, 43 | // detail: "File already cached. Hash 'bffe2a344ec1f8fb4fc1a1496df4ca29277da310f64eaa8748a1888b7a2198c5'." 44 | // } 45 | 46 | process.exit(1); 47 | } 48 | 49 | const cdogsRenderResponse = await fetch( 50 | `http://localhost:3000/api/v2/template/${templateHash}/render`, 51 | { 52 | method: 'POST', 53 | body: JSON.stringify({ 54 | data: { 55 | firstName: 'Common', 56 | lastName: 'Services', 57 | }, 58 | options: { 59 | convertTo: 'pdf', 60 | }, 61 | }), 62 | headers: { 63 | 'Content-Type': 'application/json', 64 | }, 65 | } 66 | ); 67 | 68 | const pdf = await cdogsRenderResponse.arrayBuffer(); 69 | 70 | // saves a file test.pdf - the CDOGS output. 71 | fs.writeFileSync('test.pdf', Buffer.from(pdf), 'binary'); 72 | 73 | // Removing the template from the cache 74 | const cdogsTemplateDeleteResponse = await fetch( 75 | `http://localhost:3000/api/v2/template/${templateHash}`, 76 | { 77 | method: 'DELETE', 78 | } 79 | ); 80 | 81 | if (cdogsTemplateDeleteResponse.ok) { 82 | const OK = await cdogsTemplateDeleteResponse.text(); 83 | 84 | // just prints OK. 85 | console.log(OK); 86 | } else { 87 | const apiError = await cdogsTemplateDeleteResponse.json(); 88 | 89 | /* 90 | * If this response is not successful an (RFC 7807) `api-problem` is returned. 91 | * https://www.npmjs.com/package/api-problem 92 | */ 93 | 94 | console.log(apiError); 95 | } 96 | -------------------------------------------------------------------------------- /charts/cdogs/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | {{- $configMapName := printf "%s-%s" (include "cdogs.configname" .) 
"config" }} 2 | {{- $configMap := (lookup "v1" "ConfigMap" .Release.Namespace $configMapName ) }} 3 | {{- $awsSecretName := printf "%s-%s" (include "cdogs.configname" .) "aws" }} 4 | {{- $awsSecret := (lookup "v1" "Secret" .Release.Namespace $awsSecretName ) }} 5 | {{- $kcSecretName := printf "%s-%s" (include "cdogs.configname" .) "keycloak" }} 6 | {{- $kcSecret := (lookup "v1" "Secret" .Release.Namespace $kcSecretName ) }} 7 | Get the application URL by running these commands: 8 | {{- if .Values.route.enabled }} 9 | http{{ if $.Values.route.tls }}s{{ end }}://{{ .Values.route.host }}{{ .Values.route.path }} 10 | {{- else if contains "NodePort" .Values.service.type }} 11 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "cdogs.fullname" . }}) 12 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 13 | echo http://$NODE_IP:$NODE_PORT 14 | {{- else if contains "LoadBalancer" .Values.service.type }} 15 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 16 | You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "cdogs.fullname" . }}' 17 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "cdogs.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") 18 | echo http://$SERVICE_IP:{{ .Values.service.port }} 19 | {{- else if contains "ClusterIP" .Values.service.type }} 20 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "cdogs.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 21 | export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") 22 | echo "Visit http://127.0.0.1:8080 to use your application" 23 | kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT 24 | {{- end }} 25 | {{- if not $configMap }} 26 | 27 | Make sure that ConfigMap "{{ $configMapName }}" is defined in the namespace; the deployment will fail to run without it! 28 | {{- end }} 29 | {{- if and (not $awsSecret) (.Values.fluentBit.enabled) }} 30 | 31 | Make sure that Secret "{{ $awsSecretName }}" is defined in the namespace; the deployment will fail to run without it! 32 | {{- end }} 33 | {{- if and (not $kcSecret) (.Values.config.configMap.KC_ENABLED) }} 34 | 35 | Make sure that Secret "{{ $kcSecretName }}" is defined in the namespace; the deployment will fail to run without it! 36 | {{- end }} 37 | -------------------------------------------------------------------------------- /k6/templating.js: -------------------------------------------------------------------------------- 1 | import http from 'k6/http'; 2 | import { check, sleep } from 'k6'; 3 | import encoding from 'k6/encoding'; 4 | 5 | // ------------------------------------------------------------------------------------------------- 6 | // Init 7 | // ------------------------------------------------------------------------------------------------- 8 | // https://k6.io/docs/using-k6/environment-variables 9 | 10 | const apiPath = `${__ENV.API_PATH}`; // include "/api/v2" 11 | const authToken = `${__ENV.AUTH_TOKEN}`; // exchange token elsewhere, then pass JWT here 12 | const multiplier = parseInt(`${__ENV.RATE}`) ?? 
4; // change multiplier to run test faster 13 | const RATE_LIMIT_PER_MINUTE = parseInt(`${__ENV.RATE_LIMIT}`) ?? 200; 14 | 15 | // k6 options (https://k6.io/docs/using-k6/k6-options/) 16 | export const options = { 17 | scenarios: { 18 | rateLimitTest: { 19 | executor: 'constant-arrival-rate', 20 | rate: RATE_LIMIT_PER_MINUTE * multiplier, // requests to make per minute 21 | duration: '1m', // duration must be <5m due to JWT expiry 22 | preAllocatedVUs: 10, 23 | timeUnit: '1m', 24 | maxVUs: 100, 25 | }, 26 | }, 27 | }; 28 | 29 | const url = `${apiPath}/template/render`; 30 | 31 | const headers = { 32 | 'Authorization': `Bearer ${authToken}`, 33 | 'Content-Type': 'application/json' 34 | }; 35 | 36 | const body = { 37 | // Data File for template_information_sharing_agreement.docx from DGRSC; read the file with open() before parsing 38 | data: JSON.parse(open('sample_contexts.json')), 39 | options: { 40 | reportName: 'information_sharing_agreement', 41 | convertTo: 'pdf', 42 | overwrite: true 43 | }, 44 | template: { 45 | // template_information_sharing_agreement.docx from DGRSC 46 | content: open('sample_template.txt'), 47 | encodingType: 'base64', 48 | fileType: 'docx' 49 | } 50 | } 51 | 52 | // run k6 53 | export default function () { 54 | 55 | // make the http request 56 | const res = http.post(url, JSON.stringify(body), {headers: headers}); 57 | 58 | // To enable logging: --log-output=file=./output.json --log-format=json 59 | console.log(res.status); 60 | 61 | // tests 62 | // rate limit headers: https://docs.konghq.com/hub/kong-inc/rate-limiting/#headers-sent-to-the-client 63 | check(res, { 64 | 'is status 200 or 429': (r) => r.status === 200 || r.status === 429, 65 | 'is returning the correct templated response': (r) => r.body == `Hello ${body.data.firstName} ${body.data.lastName}!`, 66 | 'is returning the correct RateLimit-Limit header': (r) => r.headers['Ratelimit-Limit'] == RATE_LIMIT_PER_MINUTE, 67 | 'is returning the correct RateLimit-Remaining header': (r) => r.headers['Ratelimit-Remaining'] < RATE_LIMIT_PER_MINUTE, 68 | 'is returning the correct X-RateLimit-Limit-Minute header': (r) => r.headers['X-Ratelimit-Limit-Minute'] == RATE_LIMIT_PER_MINUTE, 69 | 'is returning the correct X-RateLimit-Remaining-Minute header': (r) => r.headers['X-Ratelimit-Remaining-Minute'] < RATE_LIMIT_PER_MINUTE, 70 | }); 71 | 72 | } 73 | -------------------------------------------------------------------------------- /.github/actions/deploy-to-environment/action.yaml: -------------------------------------------------------------------------------- 1 | name: Deploy to Environment 2 | description: Deploys an image to the defined environment 3 | inputs: 4 | app_name: 5 | description: Application general Name 6 | required: true 7 | acronym: 8 | description: Application acronym 9 | required: true 10 | aws_role_arn: 11 | description: AWS Role ARN 12 | required: true 13 | environment: 14 | description: Logical Github Environment 15 | required: true 16 | job_name: 17 | description: Job/Instance name 18 | required: true 19 | namespace_prefix: 20 | description: Openshift Namespace common prefix 21 | required: true 22 | namespace_environment: 23 | description: Openshift Namespace environment suffix 24 | required: true 25 | openshift_server: 26 | description: Openshift API Endpoint 27 | required: true 28 | openshift_token: 29 | description: Openshift Service Account Token 30 | required: true 31 | 32 | runs: 33 | using: composite 34 | steps: 35 | - name: Checkout repository 36 | uses: actions/checkout@v4 37 | 38 | - name: Install CLI tools from OpenShift Mirror
39 | uses: redhat-actions/openshift-tools-installer@v1 40 | with: 41 | oc: "4" 42 | 43 | - name: Login to OpenShift and select project 44 | shell: bash 45 | run: | 46 | # OC Login 47 | OC_TEMP_TOKEN=$(curl -k -X POST ${{ inputs.openshift_server }}/api/v1/namespaces/${{ inputs.namespace_prefix }}-${{ inputs.namespace_environment }}/serviceaccounts/pipeline/token --header "Authorization: Bearer ${{ inputs.openshift_token }}" -d '{"spec": {"expirationSeconds": 600}}' -H 'Content-Type: application/json; charset=utf-8' | jq -r '.status.token' ) 48 | oc login --token=$OC_TEMP_TOKEN --server=${{ inputs.openshift_server }} 49 | # move to project context 50 | oc project ${{ inputs.namespace_prefix }}-${{ inputs.namespace_environment }} 51 | 52 | - name: Helm Deploy 53 | shell: bash 54 | run: >- 55 | helm upgrade --install --atomic ${{ inputs.job_name }} ${{ inputs.app_name }} 56 | --namespace ${{ inputs.namespace_prefix }}-${{ inputs.namespace_environment }} 57 | --repo https://bcgov.github.io/common-document-generation-service 58 | --values ./.github/environments/values.${{ inputs.environment }}.yaml 59 | --set image.repository=ghcr.io/${{ github.repository_owner }} 60 | --set image.tag=sha-$(git rev-parse --short HEAD) 61 | --set route.host=${{ inputs.acronym }}-${{ inputs.namespace_environment }}-${{ inputs.job_name }}.apps.silver.devops.gov.bc.ca 62 | --set fluentBit.config.aws.roleArn=${{ inputs.aws_role_arn }} 63 | --set fluentBit.config.namespace=${{ inputs.namespace_prefix }}-${{ inputs.namespace_environment }} 64 | --timeout 10m 65 | --wait 66 | 67 | - name: Wait on Deployment 68 | shell: bash 69 | run: | 70 | oc rollout --namespace ${{ inputs.namespace_prefix }}-${{ inputs.namespace_environment }} status dc/${{ inputs.app_name }}-${{ inputs.job_name }} --watch=true 71 | -------------------------------------------------------------------------------- /.github/workflows/unit-tests.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Tests 3 | 4 | on: 5 | - push 6 | - pull_request 7 | 8 | jobs: 9 | test-app: 10 | name: Unit Tests 11 | runs-on: ubuntu-latest 12 | defaults: 13 | run: 14 | working-directory: app 15 | outputs: 16 | HAS_CC_SECRETS: ${{ steps.check-secrets.outputs.HAS_CC_SECRETS }} 17 | timeout-minutes: 10 18 | strategy: 19 | fail-fast: true 20 | matrix: 21 | node-version: 22 | - '16.x' 23 | - '18.x' 24 | - '20.x' 25 | steps: 26 | - name: Checkout Repository 27 | uses: actions/checkout@v4 28 | - name: Check CodeClimate Secrets 29 | id: check-secrets 30 | run: | 31 | echo "HAS_CC_SECRETS=${{ secrets.CC_TEST_REPORTER_ID != '' }}" >> $GITHUB_OUTPUT 32 | - name: Use Node.js ${{ matrix.node-version }} 33 | uses: actions/setup-node@v4 34 | with: 35 | node-version: ${{ matrix.node-version }} 36 | - name: Cache node modules 37 | uses: actions/cache@v4 38 | id: cache-app 39 | env: 40 | cache-name: cache-node-modules 41 | with: 42 | path: ${{ github.workspace }}/app/node_modules 43 | key: ${{ runner.os }}-app-${{ env.cache-name }}-${{ hashFiles('**/app/package-lock.json') }} 44 | restore-keys: | 45 | ${{ runner.os }}-app-${{ env.cache-name }}- 46 | ${{ runner.os }}-app- 47 | ${{ runner.os }}- 48 | - name: Install dependencies 49 | if: steps.cache-app.outputs.cache-hit != 'true' 50 | run: npm ci 51 | - name: Test 52 | run: npm run test 53 | env: 54 | CI: true 55 | - name: Save Coverage Results 56 | if: matrix.node-version == '20.x' 57 | uses: actions/upload-artifact@v4 58 | with: 59 | name: coverage-app 60 | path: ${{ github.workspace 
}}/app/coverage 61 | retention-days: 1 62 | - name: Monitor Coverage 63 | if: "matrix.node-version == '20.x' && ! github.event.pull_request.head.repo.fork" 64 | uses: slavcodev/coverage-monitor-action@v1 65 | with: 66 | comment_mode: update 67 | comment_footer: false 68 | coverage_path: app/coverage/clover.xml 69 | github_token: ${{ secrets.GITHUB_TOKEN }} 70 | threshold_alert: 50 71 | threshold_warning: 80 72 | 73 | test-coverage: 74 | name: Publish to Code Climate 75 | needs: test-app 76 | if: needs.test-app.outputs.HAS_CC_SECRETS == 'true' 77 | runs-on: ubuntu-latest 78 | timeout-minutes: 10 79 | steps: 80 | - name: Checkout Repository 81 | uses: actions/checkout@v4 82 | - name: Restore Coverage Results 83 | uses: actions/download-artifact@v4 84 | - name: Publish code coverage 85 | uses: paambaati/codeclimate-action@v5 86 | env: 87 | CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }} 88 | with: 89 | coverageLocations: | 90 | ${{ github.workspace }}/**/lcov.info:lcov 91 | prefix: ${{ github.workspace }} 92 | -------------------------------------------------------------------------------- /CODE-OF-CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 6 | 7 | ## Our Standards 8 | 9 | Examples of behavior that contributes to creating a positive environment include: 10 | 11 | * Using welcoming and inclusive language 12 | * Being respectful of differing viewpoints and experiences 13 | * Gracefully accepting constructive criticism 14 | * Focusing on what is best for the community 15 | * Showing empathy towards other community members 16 | 17 | Examples of unacceptable behavior by participants include: 18 | 19 | * The use of sexualized language or imagery and unwelcome sexual attention or advances 20 | * Trolling, insulting/derogatory comments, and personal or political attacks 21 | * Public or private harassment 22 | * Publishing others' private information, such as a physical or electronic address, without explicit permission 23 | * Other conduct which could reasonably be considered inappropriate in a professional setting 24 | 25 | ## Our Responsibilities 26 | 27 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 28 | 29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 30 | 31 | ## Scope 32 | 33 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.
Representation of a project may be further defined and clarified by project maintainers. 34 | 35 | ## Enforcement 36 | 37 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at matthew.hall@gov.bc.ca. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 38 | 39 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 40 | 41 | ## Attribution 42 | 43 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] 44 | 45 | [homepage]: http://contributor-covenant.org 46 | [version]: http://contributor-covenant.org/version/1/4/ 47 | -------------------------------------------------------------------------------- /app/src/routes/v2/template.js: -------------------------------------------------------------------------------- 1 | const Problem = require('api-problem'); 2 | const templateRouter = require('express').Router(); 3 | 4 | const { findAndRender, getFromCache } = require('../../components/carboneCopyApi'); 5 | const FileCache = require('../../components/fileCache'); 6 | const { upload } = require('../../components/upload'); 7 | const { truthy } = require('../../components/utils'); 8 | const { middleware } = require('../../components/validation'); 9 | const log = require('../../components/log')(module.filename); 10 | 11 | const fileCache = new FileCache(); 12 | 13 | /** 14 | * Upload a template to cache 15 | */ 16 | templateRouter.post('/', upload, async (req, res) => { 17 | log.verbose('Template upload'); 18 | 19 | if (!req.file) { 20 | return new Problem(422, { detail: 'Template file is missing or malformed.' }).send(res); 21 | } 22 | 23 | // TODO: If `carbone.uploadCount` is greater than 1, check `req.files` array 24 | const result = await fileCache.move(req.file.path, req.file.originalname); 25 | if (!result.success) { 26 | return new Problem(result.errorType, { detail: result.errorMsg, hash: result.hash }).send(res); 27 | } else { 28 | res.setHeader('X-Template-Hash', result.hash); 29 | return res.send(result.hash); 30 | } 31 | }); 32 | 33 | /** 34 | * Render a document from a template provided in JSON body 35 | */ 36 | templateRouter.post('/render', middleware.validateTemplate, async (req, res) => { 37 | log.verbose('Template upload and render'); 38 | 39 | let template = {}; 40 | try { 41 | template = { ...req.body.template }; 42 | if (!template || !template.content) throw Error('Template content not provided.'); 43 | if (!template.fileType) throw Error('Template file type not provided.'); 44 | if (!template.encodingType) throw Error('Template encoding type not provided.'); 45 | } catch (e) { 46 | return new Problem(400, { detail: e.message }).send(res); 47 | } 48 | 49 | // let the caller determine if they want to overwrite the template 50 | const options = req.body.options || {}; 51 | // write to disk... 
52 | const content = await fileCache.write(template.content, template.fileType, template.encodingType, { overwrite: truthy('overwrite', options) }); 53 | if (!content.success) { 54 | return new Problem(content.errorType, { detail: content.errorMsg }).send(res); 55 | } 56 | 57 | return await findAndRender(content.hash, req, res); 58 | }); 59 | 60 | /** 61 | * Render a document from a cached template 62 | */ 63 | templateRouter.post('/:uid/render', middleware.validateCarbone, async (req, res) => { 64 | const hash = req.params.uid; 65 | log.verbose('Template render', { hash: hash }); 66 | return await findAndRender(hash, req, res); 67 | }); 68 | 69 | /** 70 | * get a template from cache 71 | */ 72 | templateRouter.get('/:uid', async (req, res) => { 73 | const hash = req.params.uid; 74 | const download = req.query.download !== undefined; 75 | const hashHeaderName = 'X-Template-Hash'; 76 | log.verbose('Get Template', { hash: hash, download: download }); 77 | return getFromCache(hash, hashHeaderName, download, false, res); 78 | }); 79 | 80 | /** 81 | * delete a template from cache 82 | */ 83 | templateRouter.delete('/:uid', async (req, res) => { 84 | const hash = req.params.uid; 85 | const download = req.query.download !== undefined; 86 | const hashHeaderName = 'X-Template-Hash'; 87 | log.verbose('Delete Template', { hash: hash, download: download }); 88 | return getFromCache(hash, hashHeaderName, download, true, res); 89 | }); 90 | 91 | module.exports = templateRouter; 92 | -------------------------------------------------------------------------------- /app/src/components/log.js: -------------------------------------------------------------------------------- 1 | const config = require('config'); 2 | const jwt = require('jsonwebtoken'); 3 | const { parse } = require('path'); 4 | const Transport = require('winston-transport'); 5 | const { createLogger, format, transports } = require('winston'); 6 | const { logger } = require('express-winston'); 7 | 8 | /** 9 | * Class representing a winston transport writing to null 10 | * @extends Transport 11 | */ 12 | class NullTransport extends Transport { 13 | /** 14 | * Constructor 15 | * @param {object} opts Winston Transport options 16 | */ 17 | constructor(opts) { 18 | super(opts); 19 | } 20 | 21 | /** 22 | * The transport logger 23 | * @param {object} _info Object to log 24 | * @param {function} callback Callback function 25 | */ 26 | log(_info, callback) { 27 | callback(); 28 | } 29 | } 30 | 31 | /** 32 | * Main Winston Logger 33 | * @returns {object} Winston Logger 34 | */ 35 | const log = createLogger({ 36 | exitOnError: false, 37 | format: format.combine( 38 | format.errors({ stack: true }), // Force errors to show stacktrace 39 | format.timestamp(), // Add ISO timestamp to each entry 40 | format.json(), // Force output to be in JSON format 41 | ), 42 | level: config.get('server.logLevel') 43 | }); 44 | 45 | if (process.env.NODE_ENV !== 'test') { 46 | log.add(new transports.Console({ handleExceptions: true })); 47 | } else { 48 | log.add(new NullTransport()); 49 | } 50 | 51 | if (config.has('server.logFile')) { 52 | log.add(new transports.File({ 53 | filename: config.get('server.logFile'), 54 | handleExceptions: true 55 | })); 56 | } 57 | 58 | /** 59 | * Returns a Winston Logger or Child Winston Logger 60 | * @param {string} [filename] Optional module filename path to annotate logs with 61 | * @returns {object} A child logger with appropriate metadata if `filename` is defined. Otherwise returns a standard logger. 
62 | */ 63 | const getLogger = (filename) => { 64 | return filename ? log.child({ component: parse(filename).name }) : log; 65 | }; 66 | 67 | /** 68 | * Returns an express-winston middleware function for http logging 69 | * @returns {function} An express-winston middleware function 70 | */ 71 | const httpLogger = logger({ 72 | colorize: false, 73 | // Parses express information to insert into log output 74 | dynamicMeta: (req, res) => { 75 | const token = jwt.decode((req.get('authorization') || '').slice(7)); 76 | return { 77 | azp: token && token.azp || undefined, 78 | contentLength: res.get('content-length'), 79 | httpVersion: req.httpVersion, 80 | ip: req.ip, 81 | method: req.method, 82 | path: req.path, 83 | query: Object.keys(req.query).length ? req.query : undefined, 84 | responseTime: res.responseTime, 85 | statusCode: res.statusCode, 86 | userAgent: req.get('user-agent') 87 | }; 88 | }, 89 | expressFormat: true, // Use express style message strings 90 | level: 'http', 91 | meta: true, // Must be true for dynamicMeta to execute 92 | metaField: null, // Set to null for all attributes to be at top level object 93 | requestWhitelist: [], // Suppress default value output 94 | responseWhitelist: [], // Suppress default value output 95 | // Skip logging kube-probe requests 96 | skip: (req) => req.get('user-agent') && req.get('user-agent').includes('kube-probe'), 97 | winstonInstance: log, 98 | }); 99 | 100 | module.exports = getLogger; 101 | module.exports.httpLogger = httpLogger; 102 | module.exports.NullTransport = NullTransport; 103 | -------------------------------------------------------------------------------- /.github/workflows/on-push.yaml: -------------------------------------------------------------------------------- 1 | name: Push 2 | 3 | env: 4 | ACRONYM: cdogs 5 | APP_NAME: common-document-generation-service 6 | NAMESPACE_PREFIX: 2250c5 7 | 8 | on: 9 | push: 10 | branches: 11 | - master 12 | tags: 13 | - v*.*.* 14 | 15 | concurrency: 16 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 17 | cancel-in-progress: true 18 | 19 | jobs: 20 | build: 21 | name: Build & Push 22 | runs-on: ubuntu-latest 23 | timeout-minutes: 10 24 | steps: 25 | - name: Checkout 26 | uses: actions/checkout@v4 27 | - name: Build & Push 28 | uses: ./.github/actions/build-push-container 29 | with: 30 | context: . 
31 | image_name: ${{ env.APP_NAME }} 32 | github_username: ${{ github.repository_owner }} 33 | github_token: ${{ secrets.GITHUB_TOKEN }} 34 | 35 | deploy-dev: 36 | name: Deploy to Dev 37 | environment: 38 | name: dev 39 | url: https://${{ env.ACRONYM }}-dev-master.apps.silver.devops.gov.bc.ca 40 | runs-on: ubuntu-latest 41 | needs: build 42 | timeout-minutes: 12 43 | steps: 44 | - name: Checkout 45 | uses: actions/checkout@v4 46 | - name: Deploy to Dev 47 | uses: ./.github/actions/deploy-to-environment 48 | with: 49 | app_name: ${{ env.APP_NAME }} 50 | acronym: ${{ env.ACRONYM }} 51 | aws_role_arn: ${{ secrets.AWS_ROLE_ARN }} 52 | environment: dev 53 | job_name: master 54 | namespace_prefix: ${{ env.NAMESPACE_PREFIX }} 55 | namespace_environment: dev 56 | openshift_server: ${{ secrets.OPENSHIFT_SERVER }} 57 | openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} 58 | 59 | deploy-test: 60 | name: Deploy to Test 61 | environment: 62 | name: test 63 | url: https://${{ env.ACRONYM }}-test-master.apps.silver.devops.gov.bc.ca 64 | runs-on: ubuntu-latest 65 | needs: 66 | - build 67 | - deploy-dev 68 | timeout-minutes: 12 69 | steps: 70 | - name: Checkout 71 | uses: actions/checkout@v4 72 | - name: Deploy to Test 73 | uses: ./.github/actions/deploy-to-environment 74 | with: 75 | app_name: ${{ env.APP_NAME }} 76 | acronym: ${{ env.ACRONYM }} 77 | aws_role_arn: ${{ secrets.AWS_ROLE_ARN }} 78 | environment: test 79 | job_name: master 80 | namespace_prefix: ${{ env.NAMESPACE_PREFIX }} 81 | namespace_environment: test 82 | openshift_server: ${{ secrets.OPENSHIFT_SERVER }} 83 | openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} 84 | 85 | deploy-prod: 86 | name: Deploy to Prod 87 | environment: 88 | name: prod 89 | url: https://${{ env.ACRONYM }}-prod-master.apps.silver.devops.gov.bc.ca 90 | runs-on: ubuntu-latest 91 | needs: 92 | - build 93 | - deploy-dev 94 | - deploy-test 95 | timeout-minutes: 12 96 | steps: 97 | - name: Checkout 98 | uses: actions/checkout@v4 99 | - name: Deploy to Prod 100 | uses: ./.github/actions/deploy-to-environment 101 | with: 102 | app_name: ${{ env.APP_NAME }} 103 | acronym: ${{ env.ACRONYM }} 104 | aws_role_arn: ${{ secrets.AWS_ROLE_ARN }} 105 | environment: prod 106 | job_name: master 107 | namespace_prefix: ${{ env.NAMESPACE_PREFIX }} 108 | namespace_environment: prod 109 | openshift_server: ${{ secrets.OPENSHIFT_SERVER }} 110 | openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} 111 | -------------------------------------------------------------------------------- /app/src/components/upload.js: -------------------------------------------------------------------------------- 1 | const bytes = require('bytes'); 2 | const config = require('config'); 3 | const fs = require('fs-extra'); 4 | const multer = require('multer'); 5 | const os = require('os'); 6 | const Problem = require('api-problem'); 7 | 8 | const fileUploadsDir = config.get('carbone.cacheDir'); 9 | const formFieldName = config.get('carbone.formFieldName'); 10 | const maxFileSize = bytes.parse(config.get('carbone.uploadSize')); 11 | const maxFileCount = parseInt(config.get('carbone.uploadCount')); 12 | const osTempDir = fs.realpathSync(os.tmpdir()); 13 | 14 | let storage = undefined; 15 | let uploader = undefined; 16 | 17 | // Cache directory check 18 | try { 19 | fs.ensureDirSync(fileUploadsDir); 20 | } catch (e) { 21 | console.warn(`Unable to use cache directory "${fileUploadsDir}". 
Cache will fall back to default OS temp directory "${osTempDir}"`); 22 | } 23 | 24 | // Setup storage location 25 | if (!storage) { 26 | storage = multer.diskStorage({ 27 | destination: (_req, _file, cb) => { 28 | // Always write transiently uploaded files to os temp scratch space 29 | cb(null, osTempDir); 30 | } 31 | }); 32 | } 33 | 34 | // Setup the multer 35 | if (!uploader) { 36 | if (maxFileCount > 1) { 37 | uploader = multer({ 38 | storage: storage, 39 | limits: { fileSize: maxFileSize, files: maxFileCount } 40 | }).array(formFieldName); 41 | } else { 42 | // In case maxFileCount is negative, hard set to 1 43 | uploader = multer({ 44 | storage: storage, 45 | limits: { fileSize: maxFileSize, files: 1 } 46 | }).single(formFieldName); 47 | } 48 | } 49 | 50 | module.exports = { 51 | upload(req, res, next) { 52 | if (!uploader) { 53 | return next(new Problem(500, 'File Upload middleware has not been configured.')); 54 | } 55 | 56 | uploader(req, res, (err) => { 57 | // Detect multer errors, send back nicer through the middleware stack... 58 | if (err instanceof multer.MulterError) { 59 | switch (err.code) { 60 | case 'LIMIT_FILE_SIZE': 61 | next(new Problem(400, 'Upload file error', { detail: `Upload file size is limited to ${maxFileSize} bytes` })); 62 | break; 63 | case 'LIMIT_FILE_COUNT': 64 | next(new Problem(400, 'Upload file error', { detail: `Upload is limited to ${maxFileCount} files` })); 65 | break; 66 | case 'LIMIT_UNEXPECTED_FILE': 67 | next(new Problem(400, 'Upload file error', { detail: 'Upload encountered an unexpected file' })); 68 | break; 69 | // We don't expect that we will encounter these in our api/app, but here for completeness 70 | case 'LIMIT_PART_COUNT': 71 | next(new Problem(400, 'Upload file error', { detail: 'Upload rejected: upload form has too many parts' })); 72 | break; 73 | case 'LIMIT_FIELD_KEY': 74 | next(new Problem(400, 'Upload file error', { detail: 'Upload rejected: upload field name for the files is too long' })); 75 | break; 76 | case 'LIMIT_FIELD_VALUE': 77 | next(new Problem(400, 'Upload file error', { detail: 'Upload rejected: upload field is too long' })); 78 | break; 79 | case 'LIMIT_FIELD_COUNT': 80 | next(new Problem(400, 'Upload file error', { detail: 'Upload rejected: too many fields' })); 81 | break; 82 | default: 83 | next(new Problem(400, 'Upload file error', { detail: `Upload failed with the following error: ${err.message}` })); 84 | } 85 | } else if (err) { 86 | next(new Problem(400, 'Unknown upload file error', { detail: err.message })); 87 | } else { 88 | next(); 89 | } 90 | }); 91 | } 92 | }; 93 | -------------------------------------------------------------------------------- /app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "common-document-generation-service", 3 | "version": "2.5.0", 4 | "description": "CDOGS - A common document generation API", 5 | "private": true, 6 | "scripts": { 7 | "build": "echo Nothing to build", 8 | "serve": "nodemon ./bin/www", 9 | "start": "node ./bin/www", 10 | "prune": "node ./cacheCleaner.js", 11 | "lint": "eslint . --no-fix --ignore-pattern 'node_modules' --ext .js", 12 | "lint:fix": "eslint . 
--fix --ignore-pattern 'node_modules' --ext .js", 13 | "test": "jest --verbose --forceExit --detectOpenHandles", 14 | "clean": "rm -rf coverage dist", 15 | "pretest": "npm run lint", 16 | "posttest": "node ./lcov-fix.js", 17 | "purge": "rm -rf node_modules", 18 | "rebuild": "npm run clean && npm run build", 19 | "reinstall": "npm run purge && npm install" 20 | }, 21 | "repository": { 22 | "type": "git", 23 | "url": "git+https://github.com/bcgov/common-document-generation-service.git" 24 | }, 25 | "author": "NR Common Service Showcase ", 26 | "license": "Apache-2.0", 27 | "bugs": { 28 | "url": "https://github.com/bcgov/common-document-generation-service/issues" 29 | }, 30 | "homepage": "https://bcgov.github.io/common-document-generation-service", 31 | "dependencies": { 32 | "api-problem": "^9.0.2", 33 | "atob": "^2.1.2", 34 | "bytes": "^3.1.2", 35 | "carbone": "^3.5.6", 36 | "compression": "^1.7.4", 37 | "config": "^3.3.12", 38 | "cors": "^2.8.5", 39 | "express": "^4.19.2", 40 | "express-winston": "^4.2.0", 41 | "fs-extra": "^11.2.0", 42 | "helmet": "^7.1.0", 43 | "js-yaml": "^4.1.0", 44 | "jsonwebtoken": "^9.0.2", 45 | "lockfile": "^1.0.4", 46 | "mime-types": "^2.1.35", 47 | "multer": "^1.4.4-lts.1", 48 | "telejson": "^7.2.0", 49 | "tmp": "^0.2.3", 50 | "uuid": "^10.0.0", 51 | "validator": "^13.12.0", 52 | "winston": "^3.13.1", 53 | "winston-transport": "^4.7.1" 54 | }, 55 | "devDependencies": { 56 | "eslint": "^8.57.0", 57 | "jest": "^29.7.0", 58 | "nodemon": "^3.1.4" 59 | }, 60 | "eslintConfig": { 61 | "root": true, 62 | "env": { 63 | "commonjs": true, 64 | "es6": true, 65 | "jest": true, 66 | "node": true 67 | }, 68 | "extends": [ 69 | "eslint:recommended" 70 | ], 71 | "globals": { 72 | "Atomics": "readonly", 73 | "SharedArrayBuffer": "readonly", 74 | "_": false 75 | }, 76 | "parserOptions": { 77 | "ecmaVersion": 9 78 | }, 79 | "rules": { 80 | "eol-last": [ 81 | "error", 82 | "always" 83 | ], 84 | "indent": [ 85 | "error", 86 | 2, 87 | { 88 | "SwitchCase": 1 89 | } 90 | ], 91 | "linebreak-style": [ 92 | "error", 93 | "unix" 94 | ], 95 | "quotes": [ 96 | "error", 97 | "single" 98 | ], 99 | "semi": [ 100 | "error", 101 | "always" 102 | ] 103 | } 104 | }, 105 | "browserslist": [ 106 | "> 1%", 107 | "last 2 versions", 108 | "not ie <= 8" 109 | ], 110 | "jest": { 111 | "moduleFileExtensions": [ 112 | "js", 113 | "json" 114 | ], 115 | "moduleNameMapper": { 116 | "^@/(.*)$": "/src/$1" 117 | }, 118 | "testMatch": [ 119 | "**/tests/**/*.spec.(js|jsx|ts|tsx)|**/__tests__/*.(js|jsx|ts|tsx)" 120 | ], 121 | "testURL": "http://localhost/", 122 | "collectCoverage": true, 123 | "collectCoverageFrom": [ 124 | "src/**/*.js", 125 | "!src/middleware/*.*", 126 | "!src/docs/*.*" 127 | ] 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /app/tests/unit/components/validation/validatorUtils.spec.js: -------------------------------------------------------------------------------- 1 | 2 | const { validatorUtils } = require('../../../../src/components/validation'); 3 | 4 | describe('isInt', () => { 5 | it('should return true for a int', () => { 6 | expect(validatorUtils.isInt(123)).toBeTruthy(); 7 | }); 8 | 9 | it('should return true for a integer as string', () => { 10 | expect(validatorUtils.isInt('123456')).toBeTruthy(); 11 | }); 12 | 13 | it('should return true for a integer as string object', () => { 14 | expect(validatorUtils.isInt(123456)).toBeTruthy(); 15 | }); 16 | 17 | it('should return false for a non-numeric string', () => { 18 | 
expect(validatorUtils.isInt('abcdefg1234567')).toBeFalsy(); 19 | }); 20 | 21 | it('should return false for a float', () => { 22 | expect(validatorUtils.isInt(123.45)).toBeFalsy(); 23 | }); 24 | 25 | it('should return false for a float string', () => { 26 | expect(validatorUtils.isInt('123.45')).toBeFalsy(); 27 | }); 28 | 29 | it('should return false for an array', () => { 30 | expect(validatorUtils.isInt([{ value: 123 }])).toBeFalsy(); 31 | }); 32 | 33 | it('should return false for a function', () => { 34 | expect(validatorUtils.isInt((x) => String(x))).toBeFalsy(); 35 | }); 36 | }); 37 | 38 | describe('isString', () => { 39 | it('should return true for a string', () => { 40 | expect(validatorUtils.isString('this is a string')).toBeTruthy(); 41 | }); 42 | 43 | it('should return true for a string object', () => { 44 | expect(validatorUtils.isString(String(123456))).toBeTruthy(); 45 | }); 46 | 47 | it('should return false for a number ', () => { 48 | expect(validatorUtils.isString(123456)).toBeFalsy(); 49 | }); 50 | 51 | it('should return false for a non-string object', () => { 52 | expect(validatorUtils.isString({ value: 'string' })).toBeFalsy(); 53 | }); 54 | 55 | it('should return false for an array', () => { 56 | expect(validatorUtils.isString([{ value: 'string' }])).toBeFalsy(); 57 | }); 58 | 59 | it('should return false for a function', () => { 60 | expect(validatorUtils.isString((x) => String(x))).toBeFalsy(); 61 | }); 62 | }); 63 | 64 | describe('isNonEmptyString', () => { 65 | it('should return true for a non-empty string', () => { 66 | expect(validatorUtils.isNonEmptyString('this is a string')).toBeTruthy(); 67 | }); 68 | 69 | it('should return true for a string object', () => { 70 | expect(validatorUtils.isNonEmptyString(String(123456))).toBeTruthy(); 71 | }); 72 | 73 | it('should return false for an empty string', () => { 74 | expect(validatorUtils.isNonEmptyString('')).toBeFalsy(); 75 | }); 76 | 77 | it('should return false for a whitespace string', () => { 78 | expect(validatorUtils.isNonEmptyString(' ')).toBeFalsy(); 79 | }); 80 | 81 | it('should return false for undefined', () => { 82 | expect(validatorUtils.isNonEmptyString(undefined)).toBeFalsy(); 83 | }); 84 | 85 | it('should return false for null', () => { 86 | expect(validatorUtils.isNonEmptyString(null)).toBeFalsy(); 87 | }); 88 | 89 | it('should return false for empty String object', () => { 90 | expect(validatorUtils.isNonEmptyString(String(' '))).toBeFalsy(); 91 | }); 92 | }); 93 | 94 | describe('isObject', () => { 95 | it('should return false for a non-object', () => { 96 | expect(validatorUtils.isObject('foo')).toBeFalsy(); 97 | }); 98 | 99 | it('should return false for null', () => { 100 | expect(validatorUtils.isObject(null)).toBeFalsy(); 101 | }); 102 | 103 | it('should return false for undefined', () => { 104 | expect(validatorUtils.isObject(undefined)).toBeFalsy(); 105 | }); 106 | 107 | it('should return true for objects', () => { 108 | expect(validatorUtils.isObject({})).toBeTruthy(); 109 | }); 110 | }); 111 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Common Document Generation Service 2 | 3 | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) [![img](https://img.shields.io/badge/Lifecycle-Stable-97ca00)](https://github.com/bcgov/repomountie/blob/master/doc/lifecycle-badges.md) 4 | 5 | 
![Tests](https://github.com/bcgov/common-document-generation-service/workflows/Tests/badge.svg) 6 | [![Maintainability](https://api.codeclimate.com/v1/badges/b360d0b4c9ad56149499/maintainability)](https://codeclimate.com/github/bcgov/common-document-generation-service/maintainability) 7 | [![Test Coverage](https://api.codeclimate.com/v1/badges/b360d0b4c9ad56149499/test_coverage)](https://codeclimate.com/github/bcgov/common-document-generation-service/test_coverage) 8 | 9 | [![version](https://img.shields.io/docker/v/bcgovimages/common-document-generation-service.svg?sort=semver)](https://hub.docker.com/r/bcgovimages/common-document-generation-service) 10 | [![pulls](https://img.shields.io/docker/pulls/bcgovimages/common-document-generation-service.svg)](https://hub.docker.com/r/bcgovimages/common-document-generation-service) 11 | [![size](https://img.shields.io/docker/image-size/bcgovimages/common-document-generation-service.svg)](https://hub.docker.com/r/bcgovimages/common-document-generation-service) 12 | 13 | CDOGS - A common hosted service (API) for generating documents from templates, data documents, and assets 14 | 15 | To learn more about the **Common Services** available visit the [Common Services Showcase](https://bcgov.github.io/common-service-showcase/) page. 16 | 17 | ## Directory Structure 18 | 19 | ```txt 20 | .github/ - PR and Issue templates 21 | app/ - Application Root 22 | ├── docker/ - Auxiliary support scripts for LibreOffice Python wrapper 23 | ├── src/ - Node.js backend web application 24 | └── tests/ - Node.js backend web application tests 25 | charts/ - General Helm Charts 26 | └── cdogs/ - CDOGS Helm Chart Repository 27 | └── templates/ - CDOGS Helm Chart Template manifests 28 | examples/ - Collection of scripts demonstrating CDOGS usage 29 | bcgovpubcode.yml - BCGov public code asset tracking 30 | CODE-OF-CONDUCT.md - Code of Conduct 31 | COMPLIANCE.yaml - BCGov PIA/STRA compliance status 32 | CONTRIBUTING.md - Contributing Guidelines 33 | Dockerfile - Dockerfile Image definition 34 | LICENSE - License 35 | SECURITY.md - Security Policy and Reporting 36 | ``` 37 | 38 | ## Documentation 39 | 40 | * [Application Readme](app/README.md) 41 | * [API Specification](app/README.md#openapi-specification) 42 | * [Product Roadmap](https://github.com/bcgov/common-document-generation-service/wiki/Product-Roadmap) 43 | * [Product Wiki](https://github.com/bcgov/common-document-generation-service/wiki) 44 | * [Security Reporting](SECURITY.md) 45 | 46 | ## Getting Help or Reporting an Issue 47 | 48 | To report bugs/issues/feature requests, please file an issue. 49 | 50 | ## How to Contribute 51 | 52 | If you would like to contribute, please see our [contributing](CONTRIBUTING.md) guidelines. 53 | 54 | Please note that this project is released with a [Contributor Code of Conduct](CODE-OF-CONDUCT.md). By participating in this project you agree to abide by its terms. 55 | 56 | ## License 57 | 58 | ```txt 59 | Copyright 2019 Province of British Columbia 60 | 61 | Licensed under the Apache License, Version 2.0 (the "License"); 62 | you may not use this file except in compliance with the License. 63 | You may obtain a copy of the License at 64 | 65 | http://www.apache.org/licenses/LICENSE-2.0 66 | 67 | Unless required by applicable law or agreed to in writing, software 68 | distributed under the License is distributed on an "AS IS" BASIS, 69 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
70 | See the License for the specific language governing permissions and 71 | limitations under the License. 72 | ``` 73 | -------------------------------------------------------------------------------- /app/cacheCleaner.js: -------------------------------------------------------------------------------- 1 | const config = require('config'); 2 | const { readdirSync, realpathSync, rmSync, statSync } = require('fs-extra'); 3 | const { tmpdir } = require('os'); 4 | const { join } = require('path'); 5 | 6 | const log = require('./src/components/log')(module.filename); 7 | 8 | const RATIO = 0.7; // Best practice is to keep the cache no more than 70% full 9 | 10 | const osTempDir = realpathSync(tmpdir()); 11 | const cacheDir = (() => { 12 | if (config.has('carbone.cacheDir')) { 13 | return realpathSync(config.get('carbone.cacheDir')); 14 | } else { 15 | return osTempDir; 16 | } 17 | })(); 18 | 19 | const cacheSize = (() => { 20 | const parseRegex = /^(\d+(?:\.\d+)?) *([kmgtp]?b)$/i; 21 | const unitMap = { 22 | b: Math.pow(10, 0), 23 | kb: Math.pow(10, 3), 24 | mb: Math.pow(10, 6), 25 | gb: Math.pow(10, 9), 26 | tb: Math.pow(10, 12), 27 | pb: Math.pow(10, 15) 28 | }; 29 | 30 | if (config.has('carbone.cacheSize')) { 31 | const result = parseRegex.exec(config.get('carbone.cacheSize')); 32 | if (result && Array.isArray(result)) { 33 | return parseInt(result[1]) * unitMap[result[2].toLowerCase()]; 34 | } 35 | } else { 36 | return null; 37 | } 38 | })(); 39 | const cacheSizeLimit = Math.ceil(cacheSize * RATIO); 40 | 41 | log.info(`Cache directory ${cacheDir} with max size of ${cacheSizeLimit}`); 42 | 43 | // Short circuit exits 44 | if (!cacheSize) { 45 | log.info('Maximum cache size not defined - Exiting'); 46 | process.exit(0); 47 | } else if (cacheDir === osTempDir) { 48 | log.info('Cache points to OS temp directory - Exiting'); 49 | process.exit(0); 50 | } 51 | 52 | // Check cache size and prune oldest files away as needed 53 | try { 54 | const items = getSortedPaths(cacheDir); 55 | const currCacheSize = items 56 | .map(p => p.size) 57 | .reduce((i, size) => i + size, 0); 58 | const isWithinLimit = currCacheSize < cacheSizeLimit; 59 | const status = isWithinLimit ? 
'below' : 'above'; 60 | 61 | log.info(`Current cache size ${currCacheSize} ${status} threshold of ${cacheSizeLimit}`, { 62 | cacheLimit: cacheSizeLimit, 63 | cacheSize: currCacheSize 64 | }); 65 | 66 | // Prune if necessary 67 | const pruneList = []; 68 | if (!isWithinLimit) { 69 | const difference = currCacheSize - cacheSizeLimit; 70 | let i = 0, pruneSum = 0; 71 | 72 | // Determine list to prune 73 | while (pruneSum < difference) { 74 | pruneSum += items[i].size; 75 | pruneList.push(items[i].name); 76 | i++; 77 | } 78 | 79 | for (const obj of pruneList) { 80 | const path = join(cacheDir, obj); 81 | rmSync(path, { recursive: true, force: true }); 82 | log.info('Object pruned', { object: obj }); 83 | } 84 | } 85 | 86 | log.info(`${pruneList.length} objects were pruned from the cache - Exiting`, { pruneCount: pruneList.length }); 87 | process.exit(0); 88 | } catch (err) { 89 | log.error(err); 90 | process.exit(1); 91 | } 92 | 93 | /** 94 | * @function pathSize 95 | * Recursively calculates the size of `path` 96 | * @param {string} path The path to calculate 97 | * @returns {number} The size of the path in bytes 98 | */ 99 | function pathSize(path) { 100 | const dirStat = statSync(path); 101 | 102 | if (dirStat.isDirectory()) { 103 | return readdirSync(path) 104 | .flatMap(file => pathSize(join(path, file))) 105 | .reduce((i, size) => i + size, 0); 106 | } 107 | else if (dirStat.isFile()) return dirStat.size; 108 | else return 0; 109 | } 110 | 111 | /** 112 | * @function getSortedPaths 113 | * Acquires a list of paths ordered from oldest to newest modified 114 | * @param {string} path The path to inspect 115 | * @returns {Array} The list of files and directories in `path`. 116 | * Each object contains `name`, `size` and `time` attributes. 117 | */ 118 | function getSortedPaths(path) { 119 | return readdirSync(path) 120 | .map(file => { 121 | const fullDir = join(path, file); 122 | return { 123 | name: file, 124 | size: pathSize(fullDir), 125 | time: statSync(fullDir).mtime.getTime(), 126 | }; 127 | }) 128 | .sort((a, b) => a.time - b.time); 129 | } 130 | -------------------------------------------------------------------------------- /app/src/components/carboneRender.js: -------------------------------------------------------------------------------- 1 | const carbone = require('carbone'); 2 | const config = require('config'); 3 | const fs = require('fs-extra'); 4 | const path = require('path'); 5 | const { v4: uuidv4 } = require('uuid'); 6 | 7 | const log = require('./log')(module.filename); 8 | const utils = require('./utils'); 9 | 10 | // Initialize carbone formatters and add a marker to indicate defaults... 
11 | // Carbone is a singleton and we cannot set formatters for each render call 12 | const DEFAULT_CARBONE_FORMATTERS = Object.freeze(Object.assign({}, carbone.formatters)); 13 | 14 | const fileTypes = Object.freeze({ 15 | csv: ['csv', 'doc', 'docx', 'html', 'odt', 'pdf', 'rtf', 'txt'], 16 | docx: ['doc', 'docx', 'html', 'odt', 'pdf', 'rtf', 'txt'], 17 | html: ['html', 'odt', 'pdf', 'rtf', 'txt'], 18 | ods: ['csv', 'ods', 'pdf', 'txt', 'xls', 'xlsx'], 19 | odt: ['doc', 'docx', 'html', 'odt', 'pdf', 'rtf', 'txt'], 20 | pptx: ['odt', 'pdf', 'ppt', 'pptx'], 21 | rtf: ['docx', 'pdf'], 22 | txt: ['doc', 'docx', 'html', 'odt', 'pdf', 'rtf', 'txt'], 23 | xlsx: ['csv', 'ods', 'pdf', 'rtf', 'txt', 'xls', 'xlsx'] 24 | }); 25 | 26 | function addFormatters(formatters) { 27 | if (Object.keys(formatters).length) { 28 | carbone.formatters = Object.assign({}, DEFAULT_CARBONE_FORMATTERS); 29 | carbone.addFormatters(formatters); 30 | return true; 31 | } 32 | return false; 33 | } 34 | 35 | function resetFormatters(reset) { 36 | if (reset) { 37 | carbone.formatters = Object.assign({}, DEFAULT_CARBONE_FORMATTERS); 38 | } 39 | } 40 | 41 | async function asyncRender(template, data, options) { 42 | return new Promise(((resolve, reject) => { 43 | carbone.render(template, data, options, (err, result, reportName) => { 44 | if (err) { 45 | reject(err); 46 | } else { 47 | resolve({ report: result, reportName: reportName }); 48 | } 49 | }); 50 | })); 51 | } 52 | 53 | async function render(template, data = {}, options = {}, formatters = {}) { 54 | const result = { 55 | success: false, 56 | errorType: null, 57 | errorMsg: null, 58 | reportName: null, 59 | report: null 60 | }; 61 | 62 | if (!template) { 63 | result.errorType = 400; 64 | result.errorMsg = 'Template not specified.'; 65 | return result; 66 | } 67 | if (!fs.existsSync(template)) { 68 | result.errorType = 404; 69 | result.errorMsg = 'Template not found.'; 70 | return result; 71 | } 72 | 73 | // some defaults if options not set... 74 | if (!options.convertTo || !options.convertTo.trim().length) { 75 | // set convert to template type (no conversion) 76 | options.convertTo = path.extname(template).slice(1); 77 | } 78 | if (!options.reportName || !options.reportName.trim().length) { 79 | // no report name, set to UUID 80 | options.reportName = `${uuidv4()}.${options.convertTo}`; 81 | } 82 | 83 | // ensure the reportName has the same extension as the convertTo... 84 | if (options.convertTo !== path.extname(options.reportName).slice(1)) { 85 | options.reportName = `${path.parse(options.reportName).name}.${options.convertTo}`; 86 | } 87 | 88 | const reset = addFormatters(formatters); 89 | try { 90 | const renderResult = await asyncRender(template, data, options); 91 | result.report = renderResult.report; 92 | result.reportName = renderResult.reportName; 93 | result.success = true; 94 | } catch (e) { 95 | result.errorType = utils.determineCarboneErrorCode(e); 96 | result.errorMsg = `Could not render template. 
${e}`; 97 | log.warn('Could not render template', { function: 'render', error: e }); 98 | } 99 | resetFormatters(reset); 100 | return result; 101 | } 102 | 103 | function carboneSet() { 104 | const options = {}; 105 | if (config.has('carbone.startCarbone')) { 106 | options.startFactory = true; 107 | log.info('Carbone LibreOffice worker initialized', { function: 'carboneSet' }); 108 | } 109 | if (config.has('carbone.converterFactoryTimeout')) { 110 | options.converterFactoryTimeout = config.get('carbone.converterFactoryTimeout'); 111 | log.info(`Carbone converterFactoryTimeout: ${config.get('carbone.converterFactoryTimeout')}`, { function: 'carboneSet' }); 112 | } 113 | 114 | carbone.set(options); 115 | } 116 | 117 | module.exports = { 118 | carboneSet, 119 | fileTypes: fileTypes, 120 | render 121 | }; 122 | 123 | -------------------------------------------------------------------------------- /app/app.js: -------------------------------------------------------------------------------- 1 | const Problem = require('api-problem'); 2 | const compression = require('compression'); 3 | const config = require('config'); 4 | const cors = require('cors'); 5 | const express = require('express'); 6 | const helmet = require('helmet'); 7 | 8 | const { name: appName, version: appVersion } = require('./package.json'); 9 | const carboneCopyApi = require('./src/components/carboneCopyApi'); 10 | const log = require('./src/components/log')(module.filename); 11 | const httpLogger = require('./src/components/log').httpLogger; 12 | const { getConfigBoolean, getGitRevision, prettyStringify } = require('./src/components/utils'); 13 | const v2Router = require('./src/routes/v2'); 14 | 15 | const { authorizedParty } = require('./src/middleware/authorizedParty'); 16 | 17 | const apiRouter = express.Router(); 18 | const state = { 19 | gitRev: getGitRevision(), 20 | ready: false, 21 | shutdown: false 22 | }; 23 | 24 | const app = express(); 25 | app.use(compression()); 26 | app.use(cors({ 27 | /** Tells browsers to cache preflight requests for Access-Control-Max-Age seconds */ 28 | maxAge: 600, 29 | /** Set true to dynamically set Access-Control-Allow-Origin based on Origin */ 30 | origin: true 31 | })); 32 | app.use(express.json({ limit: config.get('server.bodyLimit') })); 33 | app.use(express.urlencoded({ extended: false })); 34 | app.use(helmet()); 35 | 36 | // Print out configuration settings in verbose startup 37 | log.verbose('Config', prettyStringify(config)); 38 | 39 | // Skip if running tests 40 | if (process.env.NODE_ENV !== 'test') { 41 | app.use(authorizedParty); 42 | app.use(httpLogger); 43 | 44 | // API statistics disabled 45 | // initializeApiTracker(app); 46 | 47 | // Initialize Carbone Copy Api 48 | carboneCopyApi.init(); 49 | state.ready = true; 50 | log.info('Service ready to accept traffic'); 51 | } 52 | 53 | // Use Keycloak OIDC Middleware 54 | if (getConfigBoolean('keycloak.enabled')) { 55 | log.info('Running in authenticated mode'); 56 | } else { 57 | log.info('Running in public mode'); 58 | } 59 | 60 | // Block requests until service is ready and mounted 61 | app.use((_req, res, next) => { 62 | if (state.shutdown) { 63 | new Problem(503, { details: 'Server is shutting down' }).send(res); 64 | } else if (!state.ready) { 65 | new Problem(503, { details: 'Server is not ready' }).send(res); 66 | } else { 67 | next(); 68 | } 69 | }); 70 | 71 | // Base API Directory 72 | apiRouter.get('/', (_req, res) => { 73 | res.status(200).json({ 74 | app: { 75 | gitRev: state.gitRev, 76 | name: appName, 77 | 
nodeVersion: process.version, 78 | version: appVersion 79 | }, 80 | endpoints: ['/api/v2'], 81 | versions: [2] 82 | }); 83 | }); 84 | 85 | // v2 Router 86 | apiRouter.use('/v2', v2Router); 87 | 88 | // Root level Router 89 | app.use(/(\/api)?/, apiRouter); 90 | 91 | // Handle 500 92 | // eslint-disable-next-line no-unused-vars 93 | app.use((err, _req, res, _next) => { 94 | if (err.stack) { 95 | log.error(err); 96 | } 97 | 98 | if (err instanceof Problem) { 99 | err.send(res); 100 | } else { 101 | new Problem(500, { 102 | details: (err.message) ? err.message : err 103 | }).send(res); 104 | } 105 | }); 106 | 107 | // Handle 404 108 | app.use((_req, res) => { 109 | new Problem(404).send(res); 110 | }); 111 | 112 | // Prevent unhandled promise errors from crashing application 113 | process.on('unhandledRejection', err => { 114 | if (err && err.stack) { 115 | log.error(err); 116 | } 117 | }); 118 | 119 | // Graceful shutdown support 120 | process.on('SIGTERM', shutdown); 121 | process.on('SIGINT', shutdown); 122 | process.on('SIGUSR1', shutdown); 123 | process.on('SIGUSR2', shutdown); 124 | process.on('exit', () => { 125 | log.info('Exiting...'); 126 | }); 127 | 128 | /** 129 | * @function shutdown 130 | * Shuts down this application after at least 5 seconds. 131 | */ 132 | function shutdown() { 133 | log.info('Received kill signal. Shutting down...'); 134 | // Wait 5 seconds before starting cleanup 135 | if (!state.shutdown) setTimeout(cleanup, 5000); 136 | } 137 | 138 | /** 139 | * @function cleanup 140 | * Cleans up resources in this application. 141 | */ 142 | function cleanup() { 143 | log.info('Service no longer accepting traffic'); 144 | state.shutdown = true; 145 | // Wait 5 seconds max before hard exiting 146 | setTimeout(() => process.exit(), 5000); 147 | } 148 | 149 | module.exports = app; 150 | -------------------------------------------------------------------------------- /examples/package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "examples", 3 | "version": "1.0.0", 4 | "lockfileVersion": 2, 5 | "requires": true, 6 | "packages": { 7 | "": { 8 | "name": "examples", 9 | "version": "1.0.0", 10 | "license": "Apache-2.0", 11 | "dependencies": { 12 | "fetch-blob": "^3.1.3", 13 | "formdata-polyfill": "^4.0.10", 14 | "node-fetch": "^3.1.0" 15 | } 16 | }, 17 | "node_modules/data-uri-to-buffer": { 18 | "version": "4.0.0", 19 | "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.0.tgz", 20 | "integrity": "sha512-Vr3mLBA8qWmcuschSLAOogKgQ/Jwxulv3RNE4FXnYWRGujzrRWQI4m12fQqRkwX06C0KanhLr4hK+GydchZsaA==", 21 | "engines": { 22 | "node": ">= 12" 23 | } 24 | }, 25 | "node_modules/fetch-blob": { 26 | "version": "3.1.3", 27 | "resolved": "https://registry.npmjs.org/fetch-blob/-/fetch-blob-3.1.3.tgz", 28 | "integrity": "sha512-ax1Y5I9w+9+JiM+wdHkhBoxew+zG4AJ2SvAD1v1szpddUIiPERVGBxrMcB2ZqW0Y3PP8bOWYv2zqQq1Jp2kqUQ==", 29 | "funding": [ 30 | { 31 | "type": "github", 32 | "url": "https://github.com/sponsors/jimmywarting" 33 | }, 34 | { 35 | "type": "paypal", 36 | "url": "https://paypal.me/jimmywarting" 37 | } 38 | ], 39 | "dependencies": { 40 | "web-streams-polyfill": "^3.0.3" 41 | }, 42 | "engines": { 43 | "node": "^12.20 || >= 14.13" 44 | } 45 | }, 46 | "node_modules/formdata-polyfill": { 47 | "version": "4.0.10", 48 | "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", 49 | "integrity": 
"sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==", 50 | "dependencies": { 51 | "fetch-blob": "^3.1.2" 52 | }, 53 | "engines": { 54 | "node": ">=12.20.0" 55 | } 56 | }, 57 | "node_modules/node-fetch": { 58 | "version": "3.1.0", 59 | "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.1.0.tgz", 60 | "integrity": "sha512-QU0WbIfMUjd5+MUzQOYhenAazakV7Irh1SGkWCsRzBwvm4fAhzEUaHMJ6QLP7gWT6WO9/oH2zhKMMGMuIrDyKw==", 61 | "dependencies": { 62 | "data-uri-to-buffer": "^4.0.0", 63 | "fetch-blob": "^3.1.2", 64 | "formdata-polyfill": "^4.0.10" 65 | }, 66 | "engines": { 67 | "node": "^12.20.0 || ^14.13.1 || >=16.0.0" 68 | }, 69 | "funding": { 70 | "type": "opencollective", 71 | "url": "https://opencollective.com/node-fetch" 72 | } 73 | }, 74 | "node_modules/web-streams-polyfill": { 75 | "version": "3.2.0", 76 | "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.2.0.tgz", 77 | "integrity": "sha512-EqPmREeOzttaLRm5HS7io98goBgZ7IVz79aDvqjD0kYXLtFZTc0T/U6wHTPKyIjb+MdN7DFIIX6hgdBEpWmfPA==", 78 | "engines": { 79 | "node": ">= 8" 80 | } 81 | } 82 | }, 83 | "dependencies": { 84 | "data-uri-to-buffer": { 85 | "version": "4.0.0", 86 | "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.0.tgz", 87 | "integrity": "sha512-Vr3mLBA8qWmcuschSLAOogKgQ/Jwxulv3RNE4FXnYWRGujzrRWQI4m12fQqRkwX06C0KanhLr4hK+GydchZsaA==" 88 | }, 89 | "fetch-blob": { 90 | "version": "3.1.3", 91 | "resolved": "https://registry.npmjs.org/fetch-blob/-/fetch-blob-3.1.3.tgz", 92 | "integrity": "sha512-ax1Y5I9w+9+JiM+wdHkhBoxew+zG4AJ2SvAD1v1szpddUIiPERVGBxrMcB2ZqW0Y3PP8bOWYv2zqQq1Jp2kqUQ==", 93 | "requires": { 94 | "web-streams-polyfill": "^3.0.3" 95 | } 96 | }, 97 | "formdata-polyfill": { 98 | "version": "4.0.10", 99 | "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", 100 | "integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==", 101 | "requires": { 102 | "fetch-blob": "^3.1.2" 103 | } 104 | }, 105 | "node-fetch": { 106 | "version": "3.1.0", 107 | "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.1.0.tgz", 108 | "integrity": "sha512-QU0WbIfMUjd5+MUzQOYhenAazakV7Irh1SGkWCsRzBwvm4fAhzEUaHMJ6QLP7gWT6WO9/oH2zhKMMGMuIrDyKw==", 109 | "requires": { 110 | "data-uri-to-buffer": "^4.0.0", 111 | "fetch-blob": "^3.1.2", 112 | "formdata-polyfill": "^4.0.10" 113 | } 114 | }, 115 | "web-streams-polyfill": { 116 | "version": "3.2.0", 117 | "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.2.0.tgz", 118 | "integrity": "sha512-EqPmREeOzttaLRm5HS7io98goBgZ7IVz79aDvqjD0kYXLtFZTc0T/U6wHTPKyIjb+MdN7DFIIX6hgdBEpWmfPA==" 119 | } 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /app/src/components/carboneCopyApi.js: -------------------------------------------------------------------------------- 1 | const mime = require('mime-types'); 2 | const path = require('path'); 3 | const Problem = require('api-problem'); 4 | const telejson = require('telejson'); 5 | 6 | const log = require('./log')(module.filename); 7 | const carboneRender = require('./carboneRender'); 8 | const FileCache = require('./fileCache'); 9 | 10 | const fileCache = new FileCache(); 11 | 12 | const carboneCopyApi = { 13 | init() { 14 | carboneRender.carboneSet(); 15 | }, 16 | 17 | findAndRender: async (hash, req, res) => { 18 | const template = fileCache.find(hash); 19 | if 
(!template.success) { 20 | new Problem(template.errorType, { detail: template.errorMsg }).send(res); 21 | } else { 22 | return await carboneCopyApi.renderTemplate(template, req, res); 23 | } 24 | }, 25 | 26 | /** 27 | * @function getFromCache 28 | * Attempts to fetch a specific file based off the sha-256 `hash` provided 29 | * @param {string} hash A sha-256 hash 30 | * @param {string} hashHeaderName The request header name for the hash 31 | * @param {boolean} download Determines whether to provide the file as a payload 32 | * @param {boolean} remove Determines whether to delete the file after the operation 33 | * @param {object} res Express response object 34 | */ 35 | getFromCache: (hash, hashHeaderName, download, remove, res) => { 36 | const file = fileCache.find(hash); 37 | if (!file.success) { 38 | return new Problem(file.errorType, { detail: file.errorMsg }).send(res); 39 | } 40 | 41 | let cached = undefined; 42 | if (download) { 43 | try { 44 | cached = fileCache.read(hash); 45 | } catch (e) { 46 | return new Problem(500, { detail: e.message }).send(res); 47 | } 48 | } 49 | 50 | if (remove) { 51 | const removed = fileCache.remove(hash); 52 | if (!removed.success) { 53 | return new Problem(removed.errorType, { detail: removed.errorMsg }).send(res); 54 | } 55 | } 56 | 57 | res.setHeader(hashHeaderName, file.hash); 58 | if (cached) { 59 | res.setHeader('Content-Disposition', `attachment; filename=${file.name}`); 60 | res.setHeader('Content-Transfer-Encoding', 'binary'); 61 | res.setHeader('Content-Type', mime.contentType(path.extname(file.name))); 62 | res.setHeader('Content-Length', cached.length); 63 | log.info('Template found', { function: 'getFromCache' }); 64 | return res.send(cached); 65 | } else { 66 | return res.sendStatus(200); 67 | } 68 | }, 69 | 70 | renderTemplate: async (template, req, res) => { 71 | let data = req.body.data; 72 | let options = {}; 73 | let formatters = {}; 74 | 75 | try { 76 | options = req.body.options; 77 | } catch (e) { 78 | return new Problem(400, { detail: 'options not provided or formatted incorrectly' }).send(res); 79 | } 80 | 81 | options.convertTo = options.convertTo || template.ext; 82 | if (options.convertTo.startsWith('.')) { 83 | options.convertTo = options.convertTo.slice(1); 84 | } 85 | 86 | options.reportName = options.reportName || `${path.parse(template.name).name}.${options.convertTo}`; 87 | // ensure the reportName has the same extension as the convertTo... 
88 | if (options.convertTo !== path.extname(options.reportName).slice(1)) { 89 | options.reportName = `${path.parse(options.reportName).name}.${options.convertTo}`; 90 | } 91 | 92 | if (typeof data !== 'object' || data === null) { 93 | try { 94 | data = req.body.data; 95 | } catch (e) { 96 | return new Problem(400, { detail: 'data not provided or formatted incorrectly' }).send(res); 97 | } 98 | } 99 | 100 | try { 101 | formatters = telejson.parse(req.body.formatters); 102 | // TODO: Consider adding warning message to log 103 | // eslint-disable-next-line no-empty 104 | } catch (e) { 105 | } 106 | 107 | const output = await carboneRender.render(template.path, data, options, formatters); 108 | if (output.success) { 109 | res.setHeader('Content-Disposition', `attachment; filename=${output.reportName}`); 110 | res.setHeader('Content-Transfer-Encoding', 'binary'); 111 | res.setHeader('Content-Type', mime.contentType(path.extname(output.reportName))); 112 | res.setHeader('Content-Length', output.report.length); 113 | res.setHeader('X-Report-Name', output.reportName); 114 | res.setHeader('X-Template-Hash', template.hash); 115 | 116 | log.info('Template rendered', { function: 'renderTemplate' }); 117 | 118 | // log metrics 119 | log.verbose('Template rendered', { function: 'renderTemplate', metrics: { data: data, options: options, template: template } }); 120 | 121 | return res.send(output.report); 122 | } else { 123 | const errOutput = { detail: output.errorMsg }; 124 | if (output.errorType === 422) { 125 | // Format template syntax errors to be the same as our validation errors 126 | errOutput.detail = 'Error in supplied template'; 127 | errOutput.errors = [{ message: output.errorMsg }]; 128 | } 129 | return new Problem(output.errorType, errOutput).send(res); 130 | } 131 | } 132 | }; 133 | 134 | module.exports = carboneCopyApi; 135 | -------------------------------------------------------------------------------- /app/src/components/utils.js: -------------------------------------------------------------------------------- 1 | const config = require('config'); 2 | const { existsSync, readFileSync } = require('fs'); 3 | const { join } = require('path'); 4 | const { v4: uuidv4 } = require('uuid'); 5 | 6 | const log = require('./log')(module.filename); 7 | 8 | module.exports = { 9 | 10 | /** 11 | * @function determineCarboneErrorCode 12 | * We want to return 422s if the template has a user error in it's construction. 
13 | * Carbone doesn't throw specific errors in this case, so we'll do a best-effort of 14 | * determining if it should be a 422 or not (keep doing a 500 in any other case) 15 | * @param {string} err The thrown exception from Carbone 16 | * @returns {integer} The HTTP status code to respond with (422 or 500) 17 | */ 18 | determineCarboneErrorCode: err => { 19 | try { 20 | if (err && /formatter .*does not exist|missing at least one|cannot access parent object in/gmi.test(err)) 21 | return 422; 22 | } catch (e) { 23 | // Safety here, this method should never cause any unhandled exception since it's an error code determiner 24 | log.warn(`Error while determining carbone error code: ${e}`, { function: 'determineCarboneErrorCode' }); 25 | 26 | } 27 | return 500; 28 | }, 29 | 30 | /** 31 | * @function determineOutputReportName 32 | * For the DocGen component, determine what the outputted (response) filename should be based 33 | * on the template object from the request body. 34 | * @param {object} template The template field from the request 35 | * @returns {string} The output filename for the response 36 | */ 37 | determineOutputReportName: template => { 38 | const extension = template.outputFileType ? template.outputFileType : template.contentFileType; 39 | const name = template.outputFileName ? template.outputFileName : uuidv4(); 40 | return `${name}.${extension}`; 41 | }, 42 | 43 | /** 44 | * @function getConfigBoolean 45 | * Gets the value of a boolean node-config key. 46 | * Keys that don't exist in the config are automatically converted to `false`, 47 | * thus avoiding the need to either call `config.has()` first, or wrap `config.get()` 48 | * inside a try-catch block every time. 49 | * @param {string} key the configuration value to look up. Must be either true, false, or not exist in the config. 50 | * @returns {boolean} `true` if key exists in config and is true, `false` otherwise 51 | */ 52 | getConfigBoolean(key) { 53 | try { 54 | const getConfig = config.get(key); 55 | 56 | // isTruthy() can't handle undefined / null, so we have to do that here 57 | // @see {@link https://github.com/node-config/node-config/wiki/Common-Usage#using-config-values} 58 | if (getConfig === undefined || getConfig === null) return false; 59 | else { 60 | return module.exports.isTruthy(getConfig); 61 | } 62 | } 63 | catch (e) { 64 | return false; 65 | } 66 | }, 67 | 68 | /** 69 | * @function getFileExtension 70 | * From a string representing a filename, get the extension if there is one 71 | * @param {string} filename A filename in a string 72 | * @returns {string} The extension, ie ".docx", or undefined if there is none 73 | */ 74 | getFileExtension: filename => { 75 | const re = /(?:\.([^.]+))?$/; 76 | return re.exec(filename)[1]; 77 | }, 78 | 79 | /** 80 | * @function getGitRevision 81 | * Gets the current git revision hash 82 | * @see {@link https://stackoverflow.com/a/34518749} 83 | * @returns {string} The git revision hash, or empty string 84 | */ 85 | getGitRevision() { 86 | try { 87 | const gitDir = (() => { 88 | let dir = '.git', i = 0; 89 | while (!existsSync(join(__dirname, dir)) && i < 5) { 90 | dir = '../' + dir; 91 | i++; 92 | } 93 | return dir; 94 | })(); 95 | 96 | const head = readFileSync(join(__dirname, `${gitDir}/HEAD`)).toString().trim(); 97 | return (head.indexOf(':') === -1) 98 | ? 
head 99 | : readFileSync(join(__dirname, `${gitDir}/${head.substring(5)}`)).toString().trim(); 100 | } catch (err) { 101 | log.warn(err.message, { function: 'getGitRevision' }); 102 | return ''; 103 | } 104 | }, 105 | 106 | /** 107 | * @function isTruthy 108 | * Returns true if the element name in the object contains a truthy value 109 | * @param {object} value The object to evaluate 110 | * @returns {boolean} True if truthy, false if not, and undefined if undefined 111 | */ 112 | isTruthy(value) { 113 | if (value === undefined) return value; 114 | 115 | const isStr = typeof value === 'string' || value instanceof String; 116 | const trueStrings = ['true', 't', 'yes', 'y', '1']; 117 | return value === true || value === 1 || isStr && trueStrings.includes(value.toLowerCase()); 118 | }, 119 | 120 | /** 121 | * @function prettyStringify 122 | * Returns a pretty JSON representation of an object 123 | * @param {object} obj A JSON Object 124 | * @param {integer} indent Number of spaces to indent 125 | * @returns {string} A pretty printed string representation of `obj` with `indent` indentation 126 | */ 127 | prettyStringify: (obj, indent = 2) => JSON.stringify(obj, null, indent), 128 | 129 | /** 130 | * @function truthy 131 | * Returns true if the element name in the object contains a truthy value 132 | * @param {string} name The attribute name 133 | * @param {object} obj The object to evaluate 134 | * @returns {boolean} True if truthy, false otherwise 135 | */ 136 | truthy: (name, obj = {}) => { 137 | const value = obj[name] || false; 138 | return (value === true || value === 'true' || value === '1' || value === 'yes' || value === 'y' || value === 't' || value === 1); 139 | } 140 | }; 141 | -------------------------------------------------------------------------------- /app/tests/unit/components/utils.spec.js: -------------------------------------------------------------------------------- 1 | const utils = require('../../../src/components/utils'); 2 | 3 | describe('determineCarboneErrorCode', () => { 4 | it('should return a 422 for expected error strings', () => { 5 | expect(utils.determineCarboneErrorCode('Formatter \\"convDe\\" does not exist. Do you mean \\"convDate\\"?"')).toEqual(422); 6 | expect(utils.determineCarboneErrorCode('Error: formatter "ifEkual" DOES NOT exist. 
Do you mean "ifEqual"?')).toEqual(422); 7 | expect(utils.determineCarboneErrorCode('Error: Cannot access parent object in "d.site...name" (too high)')).toEqual(422); 8 | expect(utils.determineCarboneErrorCode('cannot access parent object in whatever')).toEqual(422); 9 | expect(utils.determineCarboneErrorCode('Missing at least one showBegin or hideBegin')).toEqual(422); 10 | expect(utils.determineCarboneErrorCode('missing at least one showEnd or hideEnd')).toEqual(422); 11 | }); 12 | 13 | it('should return a 500 for anything else', () => { 14 | expect(utils.determineCarboneErrorCode('XML not valid')).toEqual(500); 15 | expect(utils.determineCarboneErrorCode('')).toEqual(500); 16 | expect(utils.determineCarboneErrorCode(' ')).toEqual(500); 17 | expect(utils.determineCarboneErrorCode(null)).toEqual(500); 18 | expect(utils.determineCarboneErrorCode(undefined)).toEqual(500); 19 | expect(utils.determineCarboneErrorCode([])).toEqual(500); 20 | expect(utils.determineCarboneErrorCode({})).toEqual(500); 21 | }); 22 | }); 23 | 24 | describe('getGitRevision', () => { 25 | expect(typeof utils.getGitRevision()).toBe('string'); 26 | }); 27 | 28 | describe('prettyStringify', () => { 29 | const obj = { 30 | foo: 'bar' 31 | }; 32 | 33 | it('should return a formatted json string with 2 space indent', () => { 34 | const result = utils.prettyStringify(obj); 35 | 36 | expect(result).toBeTruthy(); 37 | expect(result).toEqual('{\n "foo": "bar"\n}'); 38 | }); 39 | 40 | it('should return a formatted json string with 4 space indent', () => { 41 | const result = utils.prettyStringify(obj, 4); 42 | 43 | expect(result).toBeTruthy(); 44 | expect(result).toEqual('{\n "foo": "bar"\n}'); 45 | }); 46 | }); 47 | 48 | describe('getFileExtension', () => { 49 | it('should return a the file extension when there is one', () => { 50 | expect(utils.getFileExtension('abc_123.docx')).toEqual('docx'); 51 | expect(utils.getFileExtension('my file name here.docx')).toEqual('docx'); 52 | expect(utils.getFileExtension('file.name.with.dots.docx')).toEqual('docx'); 53 | expect(utils.getFileExtension('mx_permit_{d.permitNumber}.docx')).toEqual('docx'); 54 | }); 55 | 56 | it('should return undefined if no extension', () => { 57 | expect(utils.getFileExtension('abc_123')).toEqual(undefined); 58 | expect(utils.getFileExtension('')).toEqual(undefined); 59 | expect(utils.getFileExtension(' ')).toEqual(undefined); 60 | expect(utils.getFileExtension(null)).toEqual(undefined); 61 | expect(utils.getFileExtension(undefined)).toEqual(undefined); 62 | }); 63 | }); 64 | 65 | 66 | describe('determineOutputReportName', () => { 67 | it('should return the specified output file name with the specified extension', () => { 68 | const template = { 69 | contentFileType: 'docx', 70 | outputFileType: 'pdf', 71 | outputFileName: 'abc_123_{d.firstname}-{d.lastname}', 72 | }; 73 | expect(utils.determineOutputReportName(template)).toMatch('abc_123_{d.firstname}-{d.lastname}.pdf'); 74 | }); 75 | 76 | it('should return the specified output file name with the input extension if no output specified', () => { 77 | const template = { 78 | contentFileType: 'xlsx', 79 | outputFileName: 'abc_123_{d.firstname}-{d.lastname}', 80 | }; 81 | expect(utils.determineOutputReportName(template)).toMatch('abc_123_{d.firstname}-{d.lastname}.xlsx'); 82 | }); 83 | 84 | it('should return random uuid as the output file name if none specified, with the specified extension', () => { 85 | const template = { 86 | contentFileType: 'odt', 87 | outputFileType: 'pdf' 88 | }; 89 | 
expect(utils.determineOutputReportName(template)).toMatch(/^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}.pdf/); 90 | 91 | }); 92 | 93 | it('should return random uuid as the output file name if none specified, with input extension if no output specified', () => { 94 | const template = { 95 | contentFileType: 'docx' 96 | }; 97 | expect(utils.determineOutputReportName(template)).toMatch(/^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}.docx/); 98 | 99 | }); 100 | }); 101 | 102 | describe('truthy', () => { 103 | it('should return false with invalid name', () => { 104 | expect(utils.truthy({})).toBeFalsy(); 105 | }); 106 | 107 | it('should return false with non-existent name attribute', () => { 108 | expect(utils.truthy('foo', {})).toBeFalsy(); 109 | }); 110 | 111 | it('should return false with falsy attribute', () => { 112 | expect(utils.truthy('foo', { foo: false })).toBeFalsy(); 113 | }); 114 | 115 | it('should return true with boolean true', () => { 116 | expect(utils.truthy('foo', { foo: true })).toBeTruthy(); 117 | }); 118 | 119 | it('should return true with string true', () => { 120 | expect(utils.truthy('foo', { foo: 'true' })).toBeTruthy(); 121 | }); 122 | 123 | it('should return true with string one', () => { 124 | expect(utils.truthy('foo', { foo: '1' })).toBeTruthy(); 125 | }); 126 | 127 | it('should return true with string yes', () => { 128 | expect(utils.truthy('foo', { foo: 'yes' })).toBeTruthy(); 129 | }); 130 | 131 | it('should return true with string y', () => { 132 | expect(utils.truthy('foo', { foo: 'y' })).toBeTruthy(); 133 | }); 134 | 135 | it('should return true with string t', () => { 136 | expect(utils.truthy('foo', { foo: 't' })).toBeTruthy(); 137 | }); 138 | 139 | it('should return true with integer one', () => { 140 | expect(utils.truthy('foo', { foo: 1 })).toBeTruthy(); 141 | }); 142 | }); 143 | -------------------------------------------------------------------------------- /charts/cdogs/templates/deploymentconfig.yaml: -------------------------------------------------------------------------------- 1 | {{ $kcSecretName := printf "%s-%s" (include "cdogs.fullname" .) "keycloak" }} 2 | --- 3 | apiVersion: apps.openshift.io/v1 4 | kind: DeploymentConfig 5 | metadata: 6 | name: {{ include "cdogs.fullname" . }} 7 | labels: 8 | {{- include "cdogs.labels" . | nindent 4 }} 9 | spec: 10 | replicas: {{ .Values.replicaCount }} 11 | revisionHistoryLimit: 10 12 | selector: 13 | {{- include "cdogs.selectorLabels" . | nindent 4 }} 14 | strategy: 15 | resources: {} 16 | rollingParams: 17 | timeoutSeconds: 600 18 | type: Rolling 19 | template: 20 | metadata: 21 | labels: {{ include "cdogs.selectorLabels" . | nindent 8 }} 22 | spec: 23 | {{- with .Values.imagePullSecrets }} 24 | imagePullSecrets: {{ toYaml . | nindent 8 }} 25 | {{- end }} 26 | {{- if .Values.serviceAccount.create }} 27 | serviceAccountName: {{ include "cdogs.serviceAccountName" . }} 28 | {{- end }} 29 | {{- with .Values.podSecurityContext }} 30 | securityContext: {{ toYaml . | nindent 8 }} 31 | {{- end }} 32 | containers: 33 | - name: app 34 | {{- with .Values.securityContext }} 35 | securityContext: {{ toYaml . 
| nindent 12 }} 36 | {{- end }} 37 | image: "{{ .Values.image.repository }}/{{ .Chart.Name }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 38 | imagePullPolicy: {{ .Values.image.pullPolicy }} 39 | ports: 40 | - containerPort: {{ .Values.service.port }} 41 | protocol: TCP 42 | livenessProbe: 43 | failureThreshold: 3 44 | httpGet: 45 | path: {{ .Values.route.path }} 46 | port: {{ .Values.service.port }} 47 | scheme: HTTP 48 | initialDelaySeconds: 10 49 | timeoutSeconds: 1 50 | readinessProbe: 51 | failureThreshold: 3 52 | httpGet: 53 | path: {{ .Values.route.path }} 54 | port: {{ .Values.service.port }} 55 | scheme: HTTP 56 | initialDelaySeconds: 10 57 | timeoutSeconds: 1 58 | resources: {{ toYaml .Values.resources | nindent 12 }} 59 | env: 60 | - name: NODE_ENV 61 | value: production 62 | {{- if or .Values.features.authentication .Values.config.configMap.KC_ENABLED }} 63 | - name: KC_CLIENTID 64 | valueFrom: 65 | secretKeyRef: 66 | key: username 67 | name: {{ include "cdogs.configname" . }}-keycloak 68 | - name: KC_CLIENTSECRET 69 | valueFrom: 70 | secretKeyRef: 71 | key: password 72 | name: {{ include "cdogs.configname" . }}-keycloak 73 | {{- end }} 74 | {{- if .Values.fluentBit.enabled }} 75 | - name: SERVER_LOGFILE 76 | value: /var/log/app.log 77 | {{- end }} 78 | envFrom: 79 | - configMapRef: 80 | name: {{ include "cdogs.configname" . }}-config 81 | volumeMounts: 82 | - name: file-cache-data 83 | mountPath: /var/lib/file-cache/data 84 | {{- if .Values.fluentBit.enabled }} 85 | - name: log-storage 86 | mountPath: /var/log 87 | {{- end }} 88 | {{- if .Values.fluentBit.enabled }} 89 | - name: fluent-bit 90 | {{- with .Values.securityContext }} 91 | securityContext: {{ toYaml . | nindent 12 }} 92 | {{- end }} 93 | image: "{{ .Values.fluentBit.image.repository }}/{{ .Values.fluentBit.image.name }}:{{ .Values.fluentBit.image.tag }}" 94 | imagePullPolicy: {{ .Values.image.pullPolicy }} 95 | ports: 96 | - containerPort: {{ .Values.fluentBit.service.metrics.port }} 97 | name: {{ .Values.fluentBit.service.metrics.name }} 98 | protocol: TCP 99 | - containerPort: {{ .Values.fluentBit.service.httpPlugin.port }} 100 | name: {{ .Values.fluentBit.service.httpPlugin.name }} 101 | protocol: TCP 102 | livenessProbe: 103 | failureThreshold: 3 104 | httpGet: 105 | path: {{ .Values.fluentBit.route.metrics.path }} 106 | port: {{ .Values.fluentBit.service.metrics.port }} 107 | scheme: HTTP 108 | initialDelaySeconds: 10 109 | timeoutSeconds: 1 110 | readinessProbe: 111 | failureThreshold: 3 112 | httpGet: 113 | path: {{ .Values.fluentBit.route.metrics.path }} 114 | port: {{ .Values.fluentBit.service.metrics.port }} 115 | scheme: HTTP 116 | initialDelaySeconds: 10 117 | timeoutSeconds: 1 118 | resources: {{ toYaml .Values.fluentBit.resources | nindent 12 }} 119 | env: 120 | - name: AWS_ACCESS_KEY_ID 121 | valueFrom: 122 | secretKeyRef: 123 | key: username 124 | name: {{ include "cdogs.configname" . }}-aws 125 | - name: AWS_SECRET_ACCESS_KEY 126 | valueFrom: 127 | secretKeyRef: 128 | key: password 129 | name: {{ include "cdogs.configname" . 
}}-aws 130 | {{- if .Values.fluentBit.enabled }} 131 | - name: SERVER_LOGFILE 132 | value: /var/log/app.log 133 | {{- end }} 134 | volumeMounts: 135 | - name: fluent-bit-config 136 | mountPath: /fluent-bit/etc/ 137 | - name: log-storage 138 | mountPath: /var/log 139 | {{- end }} 140 | restartPolicy: Always 141 | terminationGracePeriodSeconds: 30 142 | volumes: 143 | - name: file-cache-data 144 | {{- if .Values.persistentVolumeClaim.enabled }} 145 | persistentVolumeClaim: 146 | claimName: {{ include "cdogs.configname" . }}-cache 147 | {{- else }} 148 | emptyDir: {} 149 | {{- end }} 150 | {{- if .Values.fluentBit.enabled }} 151 | - name: log-storage 152 | emptyDir: {} 153 | - name: fluent-bit-config 154 | configMap: 155 | name: {{ include "cdogs.configname" . }}-fluent-bit 156 | {{- end }} 157 | test: false 158 | triggers: 159 | - type: ConfigChange 160 | -------------------------------------------------------------------------------- /app/src/components/fileCache.js: -------------------------------------------------------------------------------- 1 | const config = require('config'); 2 | const crypto = require('crypto'); 3 | const fs = require('fs-extra'); 4 | const os = require('os'); 5 | const path = require('path'); 6 | const { v4: uuidv4 } = require('uuid'); 7 | const log = require('./log')(module.filename); 8 | 9 | class FileCache { 10 | constructor() { 11 | this._cachePath = config.has('carbone.cacheDir') ? config.get('carbone.cacheDir') : fs.realpathSync(os.tmpdir()); 12 | // Ensure no trailing path separator 13 | if (this._cachePath.endsWith(path.sep)) { 14 | this._cachePath = this._cachePath.slice(0, -1); 15 | } 16 | 17 | try { 18 | fs.ensureDirSync(this._cachePath); 19 | } catch (e) { 20 | log.error(`Could not access cache directory '${this._cachePath}'.`, { function: 'FileCache constructor', directory: this._cachePath }); 21 | throw new Error(`Could not access cache directory '${this._cachePath}'.`); 22 | } 23 | 24 | // Private helper functions 25 | this._getHash = async (file) => { 26 | const hash = crypto.createHash('sha256'); 27 | const stream = fs.createReadStream(file); 28 | return new Promise((resolve, reject) => { 29 | stream.on('readable', () => { 30 | let chunk; 31 | while (null !== (chunk = stream.read())) { 32 | hash.update(chunk); 33 | } 34 | }); 35 | stream.on('end', () => resolve(hash.digest('hex'))); 36 | stream.on('error', error => reject(error)); 37 | }); 38 | }; 39 | this._getHashPath = hash => `${this._cachePath}${path.sep}${hash}`; 40 | this._getTempFilePath = () => `${this._cachePath}${path.sep}${uuidv4()}`; 41 | } 42 | 43 | find(hash) { 44 | const result = { 45 | success: false, 46 | errorType: null, 47 | errorMsg: null, 48 | hash: null, 49 | name: null, 50 | ext: null, 51 | dir: null, 52 | path: null 53 | }; 54 | if (!hash) { 55 | result.errorType = 400; 56 | result.errorMsg = 'Cannot find file; hash parameter is required.'; 57 | return result; 58 | } 59 | 60 | try { 61 | const hashPath = this._getHashPath(hash); 62 | 63 | if (!fs.existsSync(hashPath)) { 64 | result.errorType = 404; 65 | result.errorMsg = `Hash '${hash}' not found.`; 66 | log.error(`Hash '${hash}' not found.`, { function: 'fileCache.find', result }); 67 | return result; 68 | } 69 | result.hash = hash; 70 | 71 | const files = fs.readdirSync(hashPath); 72 | if (!files || files.length === 0) { 73 | result.errorType = 404; 74 | result.errorMsg = 'Hash found; could not read file from cache.'; 75 | log.error('Hash found. 
could not read file from cache', { function: 'fileCache.find', result }); 76 | return result; 77 | } else { 78 | result.name = files[0]; 79 | result.ext = path.extname(result.name).slice(1); 80 | result.dir = hashPath; 81 | result.path = `${hashPath}${path.sep}${result.name}`; 82 | result.success = true; 83 | return result; 84 | } 85 | } catch (e) { 86 | result.errorType = 500; 87 | log.error(`Unknown error getting file for hash '${hash}'.`, { function: 'find' }); 88 | result.errorMsg = `Unknown error getting file for hash '${hash}'.`; 89 | return result; 90 | } 91 | } 92 | 93 | read(hash) { 94 | const file = this.find(hash); 95 | if (file.success) { 96 | return fs.readFileSync(file.path); 97 | } else { 98 | throw Error(file.errorMsg); 99 | } 100 | } 101 | 102 | async move(source, name, options = { overwrite: false }) { 103 | const result = { success: false, errorType: null, errorMsg: null, hash: null }; 104 | 105 | if (!source) { 106 | result.errorType = 400; 107 | result.errorMsg = 'Cannot move file; source parameter is required.'; 108 | return result; 109 | } 110 | if (!name) { 111 | result.errorType = 400; 112 | result.errorMsg = 'Cannot move file; file name parameter is required.'; 113 | return result; 114 | } 115 | 116 | try { 117 | // get a hash of the file from contents 118 | result.hash = await this._getHash(source); 119 | } catch (e) { 120 | result.errorType = 500; 121 | result.errorMsg = `Error creating hash for file '${source}'.`; 122 | return result; 123 | } 124 | 125 | const hashPath = this._getHashPath(result.hash); 126 | // if template exists 127 | if (fs.existsSync(hashPath)) { 128 | if (options.overwrite) { 129 | // remove template 130 | fs.removeSync(hashPath); 131 | } else { 132 | // Remove temporary file from cache 133 | fs.removeSync(source); 134 | 135 | result.errorType = 405; 136 | result.errorMsg = `File already cached. Hash '${result.hash}'.`; 137 | return result; 138 | } 139 | } 140 | 141 | const dest = `${hashPath}${path.sep}${name}`; 142 | fs.ensureDirSync(hashPath); 143 | try { 144 | fs.moveSync(source, dest, options); 145 | result.success = fs.existsSync(dest); 146 | } catch (e) { 147 | result.errorType = 500; 148 | result.errorMsg = 'Error moving file to cache.'; 149 | } 150 | return result; 151 | } 152 | 153 | remove(hash) { 154 | const result = { success: false, errorType: null, errorMsg: null }; 155 | const file = this.find(hash); 156 | if (file.success) { 157 | fs.removeSync(file.dir); 158 | result.success = !fs.existsSync(file.dir); 159 | } else { 160 | result.errorType = 404; 161 | result.errorMsg = `Could not remove file. 
Hash '${hash}', not found.`; 162 | } 163 | return result; 164 | } 165 | 166 | async write(content, fileType, contentEncodingType = 'base64', options = { overwrite: false }) { 167 | let result = { success: false, errorType: null, errorMsg: null, hash: null }; 168 | 169 | if (!content) { 170 | result.errorType = 400; 171 | result.errorMsg = 'Cannot write file; content parameter is required.'; 172 | return result; 173 | } 174 | if (!fileType) { 175 | result.errorType = 400; 176 | result.errorMsg = 'Cannot write file; fileType parameter is required.'; 177 | return result; 178 | } 179 | const tmpFile = this._getTempFilePath(); 180 | // save template to temp directory 181 | await fs.outputFileSync(tmpFile, content, { encoding: contentEncodingType }); 182 | 183 | // move temp file to file cache 184 | let destFilename = path.format({ 185 | name: uuidv4(), 186 | ext: fileType.replace(/\./g, '') 187 | }); 188 | result = await this.move(tmpFile, destFilename, options); 189 | log.info('Template cached', { function: 'fileCache.write' }); 190 | if (!result.success) { 191 | result.errorMsg = `Error writing content to cache. ${result.errorMsg}`; 192 | } 193 | return result; 194 | } 195 | } 196 | 197 | module.exports = FileCache; 198 | -------------------------------------------------------------------------------- /app/tests/unit/components/authorization.spec.js: -------------------------------------------------------------------------------- 1 | const config = require('config'); 2 | const jwt = require('jsonwebtoken'); 3 | 4 | const { getConfigBoolean } = require('../../../src/components/utils'); 5 | const { authenticate } = require('../../../src/middleware/authorization'); 6 | const Problem = require('api-problem'); 7 | 8 | jest.mock('config'); 9 | jest.mock('jsonwebtoken'); 10 | jest.mock('../../../src/components/utils'); // getConfigBoolean 11 | 12 | const mockReq = { 13 | get: jest.fn() 14 | }; 15 | const mockRes = jest.fn(); 16 | const mockNext = jest.fn(); 17 | 18 | describe('authenticate', () => { 19 | 20 | describe('Keycloak is not enabled', () => { 21 | 22 | beforeEach(() => { 23 | jest.resetAllMocks(); 24 | getConfigBoolean.mockReturnValueOnce(false); 25 | }); 26 | 27 | afterAll(() => { 28 | jest.restoreAllMocks(); 29 | }); 30 | 31 | it('does not validate JWT when keycloak is disabled in the config', () => { 32 | authenticate(mockReq, mockRes, mockNext); 33 | 34 | expect(getConfigBoolean).toHaveBeenCalledTimes(1); 35 | expect(mockReq.get).toHaveBeenCalledTimes(0); 36 | expect(mockNext).toHaveBeenCalledTimes(1); 37 | }); 38 | }); 39 | 40 | describe('Keycloak is enabled', () => { 41 | 42 | const authHeader = 'Bearer xxxxx'; 43 | const publicKey = '-----BEGIN PUBLIC KEY-----\ninsert_spki_here\n-----END PUBLIC KEY-----'; 44 | const keycloakServerUrl = 'https://dev.loginproxy.gov.bc.ca/auth'; 45 | const keycloakRealm = 'comsvcauth'; 46 | const clientId = 'CDOGS'; 47 | 48 | beforeEach(() => { 49 | jest.resetAllMocks(); 50 | getConfigBoolean.mockReturnValueOnce(true); 51 | }); 52 | 53 | afterAll(() => { 54 | jest.restoreAllMocks(); 55 | }); 56 | 57 | it('authenticates when keycloak is enabled and JWT is valid', () => { 58 | mockReq.get.mockReturnValueOnce(authHeader); 59 | config.has.mockReturnValueOnce(true); // keycloak.publicKey 60 | config.get.mockReturnValueOnce(publicKey); 61 | config.get.mockReturnValueOnce(keycloakServerUrl); 62 | config.get.mockReturnValueOnce(keycloakRealm); 63 | config.get.mockReturnValueOnce(clientId); 64 | jwt.verify.mockReturnValueOnce(undefined); // jwt.verify throws 
error on fail; it doesn't return anything on success 65 | 66 | authenticate(mockReq, mockRes, mockNext); 67 | 68 | expect(getConfigBoolean).toHaveBeenCalledTimes(1); 69 | expect(config.get).toHaveBeenCalledTimes(4); 70 | expect(jwt.verify).toHaveBeenCalledTimes(1); 71 | 72 | expect(mockNext).toHaveBeenCalledTimes(1); 73 | }); 74 | 75 | it('throws an error when Keycloak server public key is not defined in config', () => { 76 | mockReq.get.mockReturnValueOnce(authHeader); 77 | config.has.mockReturnValueOnce(false); // keycloak.publicKey 78 | 79 | expect(() => { 80 | authenticate(mockReq, mockRes, mockNext); 81 | }).toThrow(Error); 82 | 83 | expect(config.has).toHaveBeenCalledTimes(1); 84 | expect(config.get).toHaveBeenCalledTimes(1); 85 | expect(jwt.verify).toHaveBeenCalledTimes(0); 86 | expect(mockNext).toHaveBeenCalledTimes(0); 87 | }); 88 | 89 | it('throws an error when Keycloak server URL is not defined in config', () => { 90 | mockReq.get.mockReturnValueOnce(authHeader); 91 | config.has.mockReturnValueOnce(true); // keycloak.publicKey 92 | config.get.mockReturnValueOnce(publicKey); 93 | config.get.mockImplementation(() => { 94 | throw new Error(); 95 | }); 96 | config.get.mockReturnValueOnce(keycloakRealm); 97 | 98 | expect(() => { 99 | authenticate(mockReq, mockRes, mockNext); 100 | }).toThrow(Error); 101 | 102 | expect(config.has).toHaveBeenCalledTimes(1); 103 | expect(config.get).toHaveBeenCalledTimes(3); 104 | expect(jwt.verify).toHaveBeenCalledTimes(0); 105 | expect(mockNext).toHaveBeenCalledTimes(0); 106 | }); 107 | 108 | it('throws an error when Keycloak realm is not defined in config', () => { 109 | mockReq.get.mockReturnValueOnce(authHeader); 110 | config.has.mockReturnValueOnce(true); // keycloak.publicKey 111 | config.get.mockReturnValueOnce(publicKey); 112 | config.get.mockReturnValueOnce(keycloakServerUrl); 113 | config.get.mockImplementation(() => { 114 | throw new Error(); 115 | }); 116 | 117 | expect(() => { 118 | authenticate(mockReq, mockRes, mockNext); 119 | }).toThrow(Error); 120 | 121 | expect(config.has).toHaveBeenCalledTimes(1); 122 | expect(config.get).toHaveBeenCalledTimes(3); 123 | expect(jwt.verify).toHaveBeenCalledTimes(0); 124 | expect(mockNext).toHaveBeenCalledTimes(0); 125 | }); 126 | 127 | it('fails when JWT is expired', () => { 128 | const apiProblemSpy = jest.spyOn(Problem.prototype, 'send').mockImplementation(() => this); 129 | 130 | mockReq.get.mockReturnValueOnce(authHeader); 131 | config.has.mockReturnValueOnce(true); // keycloak.publicKey 132 | config.get.mockReturnValueOnce(publicKey); 133 | jwt.verify.mockImplementation(() => { 134 | throw new jwt.TokenExpiredError({ 135 | name: 'TokenExpiredError', 136 | message: 'jwt expired', 137 | expiredAt: 1408621000 138 | }); 139 | }); 140 | 141 | authenticate(mockReq, mockRes, mockNext); 142 | 143 | expect(getConfigBoolean).toHaveBeenCalledTimes(1); 144 | expect(config.get).toHaveBeenCalledTimes(4); 145 | expect(apiProblemSpy).toHaveBeenCalledTimes(1); 146 | expect(mockNext).toHaveBeenCalledTimes(0); 147 | }); 148 | 149 | it('fails when JWT is not valid yet', () => { 150 | const apiProblemSpy = jest.spyOn(Problem.prototype, 'send').mockImplementation(() => this); 151 | 152 | mockReq.get.mockReturnValueOnce(authHeader); 153 | config.has.mockReturnValueOnce(true); // keycloak.publicKey 154 | config.get.mockReturnValueOnce(publicKey); 155 | jwt.verify.mockImplementation(() => { 156 | throw new jwt.NotBeforeError({ 157 | name: 'NotBeforeError', 158 | message: 'jwt not active', 159 | date: 
'2018-10-04T16:10:44.000Z' 160 | }); 161 | }); 162 | 163 | authenticate(mockReq, mockRes, mockNext); 164 | 165 | expect(getConfigBoolean).toHaveBeenCalledTimes(1); 166 | expect(config.get).toHaveBeenCalledTimes(4); 167 | expect(apiProblemSpy).toHaveBeenCalledTimes(1); 168 | expect(mockNext).toHaveBeenCalledTimes(0); 169 | }); 170 | 171 | it('fails when JWT is malformed', () => { 172 | const apiProblemSpy = jest.spyOn(Problem.prototype, 'send').mockImplementation(() => this); 173 | 174 | mockReq.get.mockReturnValueOnce(authHeader); 175 | config.has.mockReturnValueOnce(true); // keycloak.publicKey 176 | config.get.mockReturnValueOnce(publicKey); 177 | jwt.verify.mockImplementation(() => { 178 | throw new jwt.JsonWebTokenError({ 179 | name: 'JsonWebTokenError', 180 | message: 'jwt malformed', 181 | }); 182 | }); 183 | 184 | authenticate(mockReq, mockRes, mockNext); 185 | 186 | expect(getConfigBoolean).toHaveBeenCalledTimes(1); 187 | expect(config.get).toHaveBeenCalledTimes(4); 188 | expect(apiProblemSpy).toHaveBeenCalledTimes(1); 189 | expect(mockNext).toHaveBeenCalledTimes(0); 190 | }); 191 | 192 | }); 193 | 194 | }); 195 | -------------------------------------------------------------------------------- /charts/cdogs/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.config.enabled }} 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | {{- if not .Values.config.releaseScoped }} 7 | annotations: 8 | "helm.sh/resource-policy": keep 9 | {{- else }} 10 | labels: {{ include "cdogs.labels" . | nindent 4 }} 11 | {{- end }} 12 | name: {{ include "cdogs.configname" . }}-config 13 | data: {{ toYaml .Values.config.configMap | nindent 2 }} 14 | {{- end }} 15 | {{- if .Values.fluentBit.enabled }} 16 | --- 17 | apiVersion: v1 18 | kind: ConfigMap 19 | metadata: 20 | {{- if not .Values.config.releaseScoped }} 21 | annotations: 22 | "helm.sh/resource-policy": keep 23 | {{- else }} 24 | labels: {{ include "cdogs.labels" . | nindent 4 }} 25 | {{- end }} 26 | name: {{ include "cdogs.configname" . 
}}-fluent-bit 27 | data: 28 | fluent-bit.conf: | 29 | [SERVICE] 30 | Flush 5 31 | Daemon Off 32 | # define the log format (see additional config map key/value) 33 | Parsers_File parsers.conf 34 | Log_Level info 35 | HTTP_Server On 36 | HTTP_Listen 0.0.0.0 37 | HTTP_Port 2020 38 | 39 | [INPUT] 40 | # get logs from file written by node app (eg: CDOGS) 41 | Name tail 42 | Path /var/log/* 43 | Tag app 44 | Offset_Key logFileOffset 45 | Path_Key logFilePath 46 | 47 | [FILTER] 48 | # exclude kube probe logs from app logs 49 | name grep 50 | match app 51 | Exclude agent kube* 52 | 53 | [FILTER] 54 | name parser 55 | match app 56 | Key_Name log 57 | Parser json 58 | Reserve_Data On 59 | Preserve_Key On 60 | 61 | [FILTER] 62 | # modify log entry to include more key/value pairs 63 | name record_modifier 64 | match app 65 | # add pod name 66 | Record hostname ${HOSTNAME} 67 | # add productname (eg: 'cdogs') 68 | Record product {{ .Values.fluentBit.config.product }} 69 | # add namespace 70 | Record namespace {{ .Values.fluentBit.config.namespace }} 71 | 72 | [FILTER] 73 | Name rewrite_tag 74 | Match app 75 | Rule $level ([a-zA-Z]*)$ $TAG.$level true 76 | Emitter_Name re_emitted 77 | 78 | # for now just send out http level ('access') logs to AWS 79 | [FILTER] 80 | Name lua 81 | Match app.* 82 | script script.lua 83 | time_as_table True 84 | call ecsMap 85 | 86 | # Note: only currently sending 'access' (level: http) logs to AWS 87 | # TODO: format 'metrics' logs to match a 'fingerprint' in Lambda 88 | [OUTPUT] 89 | Name kinesis_streams 90 | Match app.http 91 | region {{ .Values.fluentBit.config.aws.defaultRegion }} 92 | stream {{ .Values.fluentBit.config.aws.kinesisStream }} 93 | role_arn {{ .Values.fluentBit.config.aws.roleArn }} 94 | time_key @timestamp 95 | 96 | [OUTPUT] 97 | #### send logs to fluentd: 98 | Name http 99 | Match app 100 | Host {{ .Values.fluentBit.config.logHostname }} 101 | Port 80 102 | Format json 103 | # the URI becomes the Tag available in fluentd 104 | URI /app 105 | # we can also send tag as a header 106 | #header_tag app 107 | json_date_key timestamp 108 | 109 | ### security: 110 | #tls On 111 | #tls.debug 4 112 | #tls.verify On 113 | #tls.ca_file /fluent-bit/ssl/ca.crt.pem 114 | #tls.crt_file /fluent-bit/ssl/client.crt.pem 115 | #tls.key_file /fluent-bit/ssl/client.key.pem 116 | 117 | [OUTPUT] 118 | Name stdout 119 | Match * 120 | Format json_lines 121 | 122 | parsers.conf: | 123 | [PARSER] 124 | Name json 125 | Format json 126 | Time_Key timestamp 127 | Decode_Field_as escaped_utf8 log do_next 128 | Decode_Field_as json log 129 | 130 | script.lua: | 131 | -- add extra ECS fields 132 | function ecsMap(tag, timestamp, record) 133 | -- map existing fields to a new variable 134 | new_record = {} 135 | 136 | -- derive full environment (stage) name from namespace 137 | -- see: https://www.lua.org/pil/20.3.html 138 | _, _, part1, environmentAbbreviation = string.find(record["namespace"], "([a-zA-Z0-9_+-]+)-([a-zA-Z0-9_+-]+)") 139 | 140 | environmentsArray = { 141 | ["localhost"] = "development", 142 | ["dev"] = "development", 143 | ["test"] = "test", 144 | ["prod"] = "production" 145 | } 146 | 147 | -- get event.type from log.level 148 | eventTypesArray = { 149 | ["http"] = "access", 150 | ["info"] = "info", 151 | ["verbose"] = "metric" 152 | } 153 | 154 | ---- for all logs 155 | 156 | new_record["ecs"] = { 157 | ["version"] = "1.12" 158 | } 159 | 160 | new_record["log"] = { 161 | ["file"] = { 162 | ["path"] = record["logFilePath"] 163 | }, 164 | ["level"] = record["level"] 165 | } 
166 | 167 | new_record["service"] = { 168 | ["environment"] = environmentsArray[environmentAbbreviation], 169 | ["name"] = record["product"], 170 | ["type"] = "node" 171 | } 172 | 173 | new_record["event"] = { 174 | ["kind"] = "event", 175 | ["category"] = "web", 176 | ["original"] = record["message"], 177 | ["duration"] = record["responseTime"], 178 | ["sequence"] = record["logFileOffset"], 179 | ["created"] = (os.date("!%Y-%m-%dT%H:%M:%S", timestamp["sec"]) .. '.' .. math.floor(timestamp["nsec"] / 1000000) .. 'Z') 180 | } 181 | 182 | new_record["agent"] = { 183 | ["type"] = "fluentbit", 184 | ["version"] = "1.8" 185 | } 186 | 187 | new_record["labels"] = { 188 | ["project"] = record["product"] 189 | } 190 | 191 | new_record["host"] = { 192 | ["hostname"] = record["hostname"], 193 | ["ip"] = record["ip"], 194 | ["name"] = record["namespace"] 195 | } 196 | 197 | new_record["user_agent"] = { 198 | ["original"] = record["userAgent"] 199 | } 200 | 201 | new_record["source.user.id"] = record["azp"] 202 | 203 | ---- access logs 204 | 205 | if record["level"] == "http" then 206 | new_record["event"]["type"] = eventTypesArray[record["level"]] 207 | new_record["event"]["dataset"] = "express." .. eventTypesArray[record["level"]] 208 | new_record["http"] = { 209 | ["request"] = { 210 | ["body"] = { 211 | ["bytes"] = record["contentLength"] 212 | }, 213 | ["method"] = record["method"], 214 | ["referrer"] = record["path"] 215 | }, 216 | ["response"] = { 217 | ["status_code"] = record["statusCode"] 218 | }, 219 | ["version"] = record["httpVersion"] 220 | } 221 | end 222 | 223 | ---- metrics logs 224 | 225 | -- if log contains a 'metrics' field 226 | if record["metrics"] ~= nill then 227 | new_record["metrics"] = record["metrics"] 228 | new_record["event"]["type"] = eventTypesArray[record["level"]] 229 | end 230 | 231 | -- return the transformed new record 232 | return 2, timestamp, new_record 233 | end 234 | {{- end }} 235 | -------------------------------------------------------------------------------- /app/USAGE.md: -------------------------------------------------------------------------------- 1 | # Application Details 2 | 3 | The application is a node server which serves the Common Document Generation Service API. It uses the following dependencies from NPM: 4 | 5 | Authentication & Password Management 6 | 7 | - [keycloak-connect](https://www.npmjs.com/package/keycloak-connect) - Node adapter for Keycloak OIDC 8 | 9 | Networking 10 | 11 | - [api-problem](https://www.npmjs.com/package/api-problem) - RFC 7807 problem details 12 | - [express](https://www.npmjs.com/package/express) - Server middleware 13 | 14 | Configuration 15 | 16 | - [config](https://www.npmjs.com/package/config) - organizes hierarchical configurations for your app deployments; handles environment variables, command line parameters and external sources. 17 | 18 | Logging 19 | 20 | - [morgan](https://www.npmjs.com/package/morgan) - HTTP request logger 21 | - [npmlog](https://www.npmjs.com/package/npmlog) - General log framework 22 | 23 | ## Design and Usage 24 | 25 | The `/template/render` endpoint request body is composed of 3 main parts. 26 | 27 | 1. The set of **data**, an object containing the set of replacement variables to merge into the template. This can be an array of objects. 28 | 2. **options**, an object to override default behaviours. Callers should be setting: convertTo = (output file type), reportName = (output file name), and overwrite=true. 29 | 3. 
The document **template**, which is currently accepted only as base64-encoded content. 30 | 31 | ``` json 32 | { 33 | "data": { 34 | "firstName": "Jane", 35 | "lastName": "Smith", 36 | "title": "CEO" 37 | }, 38 | "options": { 39 | "convertTo": "pdf", 40 | "reportName": "{d.firstName}-{d.lastName}.docx", 41 | "overwrite": "true" 42 | }, 43 | "template": { 44 | "fileType": "docx", 45 | "encodingType": "base64", 46 | "content": "base64 encoded content..." 47 | } 48 | } 49 | ``` 50 | 51 | The functionality of this endpoint is relatively simple: it acts largely as a pass-through to the Carbone library, which performs the generation logic. Templates and rendered reports are written to disk and can be fetched or deleted through the API. Refer to the [carbone-copy-api](https://github.com/bcgov/common-services-team-library/tree/master/npm/carbone-copy-api/docs) documentation. 52 | 53 | The templating engine is XML-agnostic. This means it works on any valid XML-based document, not only documents created by Microsoft Office™, LibreOffice™ or OpenOffice™. 54 | 55 | ### Concepts 56 | 57 | In order to provide template substitution of variables into the supplied document, we have to pass in a **data** object. The data object is a free-form JSON object consisting of key-value pairs. Its purpose is to provide a key-value mapping between an inline variable in the template document and the intended merged document output after the values are replaced. **data** can also be an array of JSON objects. 58 | 59 | Carbone can behave as a glorified string-replacement engine, or more complex conditional or iterative logic can be built into the template variables. See the sections below for documentation. 60 | In the event the Context object has extra variables that are not used in the template document, nothing happens. You can expect to see blanks where no value was substituted. 61 | 62 | ## Templating 63 | 64 | We currently leverage the Carbone JS library for variable replacement into document templates. Carbone finds all markers `{}` in your document (xlsx, odt, docx, ...) and replaces these markers with Context variables representing the data. Depending on the syntax of your markers, you can perform a number of more complex operations if desired; the documentation below describes this further. 65 | 66 | The convention of prefixing variable names from the Context with "d." (d for "data") is used by the templating engine. 67 | 68 | As repetitions (loops over arrays) are a core component of the templating engine, the data object in the request body can be an array, or contain arrays. 69 | 70 | ### [Variable Substitution](https://carbone.io/documentation.html#substitutions) 71 | 72 | The Carbone templating engine allows variables to be displayed inline through the use of double curly braces. Suppose you wanted a variable `foo` to be displayed. You can do so by adding the following into a document template: 73 | 74 | ``` sh 75 | {{d.foo}} 76 | ``` 77 | 78 | Nested objects in the Context are supported. You can look up nested properties using dot notation, just like you would in Javascript. 
Suppose for example you have the following context object and template string: 79 | 80 | Context 81 | 82 | ``` json 83 | { 84 | "something": { 85 | "greeting": "Hello", 86 | "target": "World" 87 | }, 88 | "someone": "user" 89 | } 90 | ``` 91 | 92 | Template document 93 | 94 | ``` sh 95 | "My template is: {{d.something.greeting}} {{d.someone}} content {{d.something.target }}" 96 | ``` 97 | 98 | You can expect the template engine to yield the following: 99 | 100 | ``` sh 101 | "My template is: Hello user content World" 102 | ``` 103 | 104 | ### [Repetitions](https://carbone.io/documentation.html#repetitions) 105 | 106 | Carbone can repeat a section (rows, title, pages...) of the document. 107 | 108 | We don't need to describe where the repetition starts and ends, we just need to design a "repetition example" in the template using the reserved key word i and i+1. Carbone will find automatically the pattern to repeat using the first row (i) as an example. The second row (i+1) is removed before rendering the result. 109 | 110 | For the simplest example, suppose you have the following context object and template document: 111 | 112 | Context 113 | 114 | ``` json 115 | { 116 | "cars" : [ 117 | { "brand": "Lumeneo" }, 118 | { "brand": "Tesla" }, 119 | { "brand": "Toyota" }, 120 | { "brand": "Ford" } 121 | ] 122 | } 123 | ``` 124 | 125 | Template document 126 | 127 | | Cars | 128 | | --------------------- | 129 | | {d.cars[i].brand} | 130 | | {d.cars[i+1].brand} | 131 | 132 | You can expect the template engine to yield the following: 133 | 134 | | Cars | 135 | | --------------------- | 136 | | Lumeneo | 137 | | Tesla | 138 | | Toyota | 139 | | Ford | 140 | 141 | See the Carbone Repetition documentation for the much more complex examples 142 | 143 | ### File Name 144 | 145 | The `options` object in the request body contains an optional `reportName` field. This field will serve as the requested file name for the resultant merged document. 146 | If not supplied, a random UUID (such as 6a2f41a3-c54c-fce8-32d2-0324e1c32e22) will serve as the placeholder. 147 | 148 | You can template the output file name in the same manner as the contents. 
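To actually submit a render request, any HTTP client will do. The following is a minimal sketch only: it assumes an unauthenticated CDOGS instance listening locally on port 3000 and a request body (such as the example that follows) saved to a hypothetical `request.json` file; adjust the host, path prefix, and authentication to match your deployment.

``` sh
# Sketch: POST a render request body to a local, unauthenticated CDOGS instance.
# request.json is a hypothetical file containing a body like the example below.
curl -X POST 'http://localhost:3000/api/v2/template/render' \
  -H 'Content-Type: application/json' \
  -d @request.json \
  --output rendered-output.pdf
```
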
149 | 150 | An example request is shown below: 151 | 152 | ``` json 153 | { 154 | "data": [ 155 | { 156 | "office": { 157 | "id": "Dx1997", 158 | "location": "Hello", 159 | "phone": "World" 160 | }, 161 | "contact": "Bob" 162 | }], 163 | "options" : { 164 | "convertTo": "pdf", 165 | "reportName": "office_contact_{d.office.id}.docx", 166 | }, 167 | "template": { 168 | "content": "", 169 | "encodingType": "base64", 170 | "fileType": "docx" 171 | } 172 | } 173 | ``` 174 | 175 | This will yield a resultant file in the response named 176 | `office_contact_Dx1997.pdf` 177 | 178 | #### Further templating functionality 179 | 180 | The templating engine in Carbone has a lot of power, refer to the Carbone documentation 181 | 182 | - 183 | - 184 | - 185 | - 186 | -------------------------------------------------------------------------------- /charts/cdogs/README.md: -------------------------------------------------------------------------------- 1 | # common-document-generation-service 2 | 3 | ![Version: 0.0.9](https://img.shields.io/badge/Version-0.0.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.5.0](https://img.shields.io/badge/AppVersion-2.5.0-informational?style=flat-square) 4 | 5 | A microservice for merging JSON data into xml-based templates (powered by Carbone.io) 6 | 7 | **Homepage:** 8 | 9 | ## Maintainers 10 | 11 | | Name | Email | Url | 12 | | ---- | ------ | --- | 13 | | NR Common Service Showcase Team | | | 14 | 15 | ## Source Code 16 | 17 | * 18 | 19 | ## Requirements 20 | 21 | Kubernetes: `>= 1.13.0` 22 | 23 | ## Values 24 | 25 | | Key | Type | Default | Description | 26 | |-----|------|---------|-------------| 27 | | autoscaling.behavior | object | `{"scaleDown":{"policies":[{"periodSeconds":120,"type":"Pods","value":1}],"selectPolicy":"Max","stabilizationWindowSeconds":120},"scaleUp":{"policies":[{"periodSeconds":30,"type":"Pods","value":2}],"selectPolicy":"Max","stabilizationWindowSeconds":0}}` | behavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively). | 28 | | autoscaling.enabled | bool | `false` | Specifies whether the Horizontal Pod Autoscaler should be created | 29 | | autoscaling.maxReplicas | int | `16` | | 30 | | autoscaling.minReplicas | int | `2` | | 31 | | autoscaling.targetCPUUtilizationPercentage | int | `80` | | 32 | | awsSecretOverride.password | string | `nil` | AWS Kinesis password - used by fluent-bit | 33 | | awsSecretOverride.username | string | `nil` | AWS Kinesis username - used by fluent-bit | 34 | | config.configMap | object | `{"CACHE_DIR":"/var/lib/file-cache/data","CACHE_SIZE":"2GB","CONVERTER_FACTORY_TIMEOUT":"60000","KC_PUBLICKEY":null,"KC_REALM":null,"KC_SERVERURL":null,"SERVER_BODYLIMIT":"100mb","SERVER_LOGLEVEL":"http","SERVER_PORT":"3000","START_CARBONE":"true","UPLOAD_FIELD_NAME":"template","UPLOAD_FILE_COUNT":"1","UPLOAD_FILE_SIZE":"25MB"}` | These values will be wholesale added to the configmap as is; refer to the cdogs documentation for what each of these values mean and whether you need them defined. Ensure that all values are represented explicitly as strings, as non-string values will not translate over as expected into container environment variables. For configuration keys named `*_ENABLED`, either leave them commented/undefined, or set them to string value "true". 
| 35 | | config.enabled | bool | `false` | | 36 | | config.releaseScoped | bool | `false` | This should be set to true if and only if you require configmaps and secrets to be release scoped. In the event you want all instances in the same namespace to share a similar configuration, this should be set to false | 37 | | cronJob.enabled | bool | `true` | Specifies whether a cache cleaning cronjob should be created | 38 | | cronJob.schedule | string | `"0 0 * * 1,4"` | Every Monday & Thursday - https://crontab.guru/#0_0_*_*_1,4 | 39 | | cronJob.suspend | bool | `false` | In test environments, you might want to create the cronjob for consistency, but suspend it | 40 | | failurePolicy | string | `"Retry"` | | 41 | | features.authentication | bool | `false` | Specifies whether to run in authenticated mode | 42 | | fluentBit.config.aws.defaultRegion | string | `"ca-central-1"` | AWS Kinesis default region | 43 | | fluentBit.config.aws.kinesisStream | string | `"nr-apm-stack-documents"` | AWS Kinesis stream name | 44 | | fluentBit.config.aws.roleArn | string | `nil` | AWS Kinesis role ARN | 45 | | fluentBit.config.logHostname | string | `"fluentd-csst.apps.silver.devops.gov.bc.ca"` | Fluentd logging hostname endpoint | 46 | | fluentBit.config.namespace | string | `nil` | The openshift/k8s namespace identifier | 47 | | fluentBit.config.product | string | `"cdogs"` | The application/product name identifier | 48 | | fluentBit.enabled | bool | `false` | Specifies whether the fluent-bit logging sidecar should be enabled | 49 | | fluentBit.image.name | string | `"fluent-bit"` | Default image name | 50 | | fluentBit.image.repository | string | `"docker.io/fluent"` | Default image repository | 51 | | fluentBit.image.tag | string | `"3.1.9"` | Default image tag | 52 | | fluentBit.resources.limits.cpu | string | `"100m"` | Limit Peak CPU (in millicores ex. 1000m) | 53 | | fluentBit.resources.limits.memory | string | `"64Mi"` | Limit Peak Memory (in gigabytes Gi or megabytes Mi ex. 2Gi) | 54 | | fluentBit.resources.requests.cpu | string | `"10m"` | Requested CPU (in millicores ex. 500m) | 55 | | fluentBit.resources.requests.memory | string | `"16Mi"` | Requested Memory (in gigabytes Gi or megabytes Mi ex. 500Mi) | 56 | | fluentBit.route.metrics.path | string | `"/"` | | 57 | | fluentBit.service.httpPlugin.name | string | `"http-plugin"` | HTTP Plugin service name | 58 | | fluentBit.service.httpPlugin.port | int | `80` | HTTP Plugin service port | 59 | | fluentBit.service.metrics.name | string | `"metrics"` | Metrics service name | 60 | | fluentBit.service.metrics.port | int | `2020` | Metrics service port | 61 | | fullnameOverride | string | `nil` | String to fully override fullname | 62 | | image.pullPolicy | string | `"IfNotPresent"` | Default image pull policy | 63 | | image.tag | string | `nil` | Overrides the image tag whose default is the chart appVersion. 
| 64 | | imagePullSecrets | list | `[]` | Specify docker-registry secret names as an array | 65 | | keycloakSecretOverride.password | string | `nil` | Keycloak password | 66 | | keycloakSecretOverride.username | string | `nil` | Keycloak username | 67 | | nameOverride | string | `nil` | String to partially override fullname | 68 | | networkPolicy.enabled | bool | `true` | Specifies whether a network policy should be created | 69 | | persistentVolumeClaim.enabled | bool | `true` | Specifies whether a persistent volume claim should be created | 70 | | persistentVolumeClaim.storageClassName | string | `"netapp-file-standard"` | Default storage class type | 71 | | persistentVolumeClaim.storageSize | string | `"2G"` | PVC Storage size (use M or G, not Mi or Gi) | 72 | | podAnnotations | object | `{}` | Annotations for cdogs pods | 73 | | podSecurityContext | object | `{}` | Privilege and access control settings | 74 | | replicaCount | int | `2` | | 75 | | resources.limits.cpu | string | `"1000m"` | Limit Peak CPU (in millicores ex. 1000m) | 76 | | resources.limits.memory | string | `"1Gi"` | Limit Peak Memory (in gigabytes Gi or megabytes Mi ex. 2Gi) | 77 | | resources.requests.cpu | string | `"50m"` | Requested CPU (in millicores ex. 500m) | 78 | | resources.requests.memory | string | `"256Mi"` | Requested Memory (in gigabytes Gi or megabytes Mi ex. 500Mi) | 79 | | route.annotations | object | `{"haproxy.router.openshift.io/timeout":"60s"}` | Annotations to add to the route | 80 | | route.enabled | bool | `true` | Specifies whether a route should be created | 81 | | route.host | string | `"chart-example.local"` | | 82 | | route.tls.insecureEdgeTerminationPolicy | string | `"Redirect"` | | 83 | | route.tls.termination | string | `"edge"` | | 84 | | route.wildcardPolicy | string | `"None"` | | 85 | | securityContext | object | `{}` | Privilege and access control settings | 86 | | service.port | int | `3000` | Service port | 87 | | service.portName | string | `"http"` | Service port name | 88 | | service.type | string | `"ClusterIP"` | Service type | 89 | | serviceAccount.annotations | object | `{}` | Annotations to add to the service account | 90 | | serviceAccount.enabled | bool | `false` | Specifies whether a service account should be created | 91 | | serviceAccount.name | string | `nil` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | 92 | 93 | ---------------------------------------------- 94 | Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2) 95 | -------------------------------------------------------------------------------- /charts/cdogs/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for cdogs. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 2 6 | 7 | image: 8 | # -- Default image repository 9 | repository: ghcr.io/bcgov 10 | # -- Default image pull policy 11 | pullPolicy: IfNotPresent 12 | # -- Overrides the image tag whose default is the chart appVersion. 
13 | tag: ~ 14 | 15 | # -- Specify docker-registry secret names as an array 16 | imagePullSecrets: [] 17 | # -- String to partially override fullname 18 | nameOverride: ~ 19 | # -- String to fully override fullname 20 | fullnameOverride: ~ 21 | 22 | # DeploymentConfig pre-hook failure behavior 23 | failurePolicy: Retry 24 | 25 | # -- Annotations for cdogs pods 26 | podAnnotations: {} 27 | 28 | # -- Privilege and access control settings 29 | podSecurityContext: 30 | {} 31 | # fsGroup: 2000 32 | 33 | # -- Privilege and access control settings 34 | securityContext: 35 | {} 36 | # capabilities: 37 | # drop: 38 | # - ALL 39 | # readOnlyRootFilesystem: true 40 | # runAsNonRoot: true 41 | # runAsUser: 1000 42 | 43 | autoscaling: 44 | # -- Specifies whether the Horizontal Pod Autoscaler should be created 45 | enabled: false 46 | 47 | # -- behavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively). 48 | behavior: 49 | scaleDown: 50 | stabilizationWindowSeconds: 120 51 | selectPolicy: Max 52 | policies: 53 | - type: Pods 54 | value: 1 55 | periodSeconds: 120 56 | scaleUp: 57 | stabilizationWindowSeconds: 0 58 | selectPolicy: Max 59 | policies: 60 | - type: Pods 61 | value: 2 62 | periodSeconds: 30 63 | minReplicas: 2 64 | maxReplicas: 16 65 | targetCPUUtilizationPercentage: 80 66 | # targetMemoryUtilizationPercentage: 80 67 | 68 | serviceAccount: 69 | # -- Specifies whether a service account should be created 70 | enabled: false 71 | # -- Annotations to add to the service account 72 | annotations: {} 73 | # -- The name of the service account to use. 74 | # If not set and create is true, a name is generated using the fullname template 75 | name: ~ 76 | 77 | networkPolicy: 78 | # -- Specifies whether a network policy should be created 79 | enabled: true 80 | 81 | service: 82 | # -- Service type 83 | type: ClusterIP 84 | # -- Service port 85 | port: 3000 86 | # -- Service port name 87 | portName: http 88 | 89 | route: 90 | # -- Specifies whether a route should be created 91 | enabled: true 92 | # -- Annotations to add to the route 93 | annotations: 94 | haproxy.router.openshift.io/timeout: 60s 95 | # kubernetes.io/ingress.class: nginx 96 | # kubernetes.io/tls-acme: "true" 97 | host: chart-example.local 98 | # path: / 99 | tls: 100 | insecureEdgeTerminationPolicy: Redirect 101 | termination: edge 102 | wildcardPolicy: None 103 | 104 | resources: 105 | # We usually recommend not to specify default resources and to leave this as a conscious 106 | # choice for the user. This also increases chances charts run on environments with little 107 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 108 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 109 | limits: 110 | # -- Limit Peak CPU (in millicores ex. 1000m) 111 | cpu: 1000m 112 | # -- Limit Peak Memory (in gigabytes Gi or megabytes Mi ex. 2Gi) 113 | memory: 1Gi 114 | requests: 115 | # -- Requested CPU (in millicores ex. 500m) 116 | cpu: 50m 117 | # -- Requested Memory (in gigabytes Gi or megabytes Mi ex. 
500Mi) 118 | memory: 256Mi 119 | 120 | persistentVolumeClaim: 121 | # -- Specifies whether a persistent volume claim should be created 122 | enabled: true 123 | # -- Default storage class type 124 | storageClassName: netapp-file-standard 125 | # -- PVC Storage size (use M or G, not Mi or Gi) 126 | storageSize: 2G 127 | 128 | cronJob: 129 | # -- Specifies whether a cache cleaning cronjob should be created 130 | enabled: true 131 | # -- Every Monday & Thursday - https://crontab.guru/#0_0_*_*_1,4 132 | schedule: 0 0 * * 1,4 133 | # -- In test environments, you might want to create the cronjob for consistency, but suspend it 134 | suspend: false 135 | 136 | config: 137 | # Set to true if you want to let Helm manage and overwrite your configmaps. 138 | enabled: false 139 | 140 | # -- This should be set to true if and only if you require configmaps and secrets to be release 141 | # scoped. In the event you want all instances in the same namespace to share a similar 142 | # configuration, this should be set to false 143 | releaseScoped: false 144 | 145 | # -- These values will be wholesale added to the configmap as is; refer to the cdogs 146 | # documentation for what each of these values mean and whether you need them defined. 147 | # Ensure that all values are represented explicitly as strings, as non-string values will 148 | # not translate over as expected into container environment variables. 149 | # For configuration keys named `*_ENABLED`, either leave them commented/undefined, or set them 150 | # to string value "true". 151 | configMap: 152 | # KC_ENABLED: "true" 153 | KC_PUBLICKEY: ~ 154 | KC_REALM: ~ 155 | KC_SERVERURL: ~ 156 | 157 | SERVER_BODYLIMIT: 100mb 158 | SERVER_LOGLEVEL: http 159 | SERVER_PORT: "3000" 160 | 161 | CACHE_DIR: /var/lib/file-cache/data 162 | CACHE_SIZE: 2GB 163 | CONVERTER_FACTORY_TIMEOUT: "60000" 164 | START_CARBONE: "true" 165 | UPLOAD_FIELD_NAME: template 166 | UPLOAD_FILE_COUNT: "1" 167 | UPLOAD_FILE_SIZE: 25MB 168 | 169 | features: 170 | # -- Specifies whether to run in authenticated mode 171 | authentication: false 172 | 173 | # Modify the following variables if you need to acquire secret values from a custom-named resource 174 | awsSecretOverride: 175 | # -- AWS Kinesis username - used by fluent-bit 176 | username: ~ 177 | # -- AWS Kinesis password - used by fluent-bit 178 | password: ~ 179 | keycloakSecretOverride: 180 | # -- Keycloak username 181 | username: ~ 182 | # -- Keycloak password 183 | password: ~ 184 | 185 | fluentBit: 186 | # -- Specifies whether the fluent-bit logging sidecar should be enabled 187 | enabled: false 188 | 189 | config: 190 | aws: 191 | # -- AWS Kinesis default region 192 | defaultRegion: ca-central-1 193 | # -- AWS Kinesis stream name 194 | kinesisStream: nr-apm-stack-documents 195 | # -- AWS Kinesis role ARN 196 | roleArn: ~ 197 | # -- Fluentd logging hostname endpoint 198 | logHostname: fluentd-csst.apps.silver.devops.gov.bc.ca 199 | # -- The openshift/k8s namespace identifier 200 | namespace: ~ 201 | # -- The application/product name identifier 202 | product: cdogs 203 | 204 | image: 205 | # -- Default image name 206 | name: fluent-bit 207 | # -- Default image repository 208 | repository: docker.io/fluent 209 | # -- Default image tag 210 | tag: "3.1.9" 211 | 212 | resources: 213 | # We usually recommend not to specify default resources and to leave this as a conscious 214 | # choice for the user. This also increases chances charts run on environments with little 215 | # resources, such as Minikube. 
If you do want to specify resources, uncomment the following 216 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 217 | limits: 218 | # -- Limit Peak CPU (in millicores ex. 1000m) 219 | cpu: 100m 220 | # -- Limit Peak Memory (in gigabytes Gi or megabytes Mi ex. 2Gi) 221 | memory: 64Mi 222 | requests: 223 | # -- Requested CPU (in millicores ex. 500m) 224 | cpu: 10m 225 | # -- Requested Memory (in gigabytes Gi or megabytes Mi ex. 500Mi) 226 | memory: 16Mi 227 | 228 | route: 229 | metrics: 230 | path: / 231 | 232 | service: 233 | httpPlugin: 234 | # -- HTTP Plugin service name 235 | name: http-plugin 236 | # -- HTTP Plugin service port 237 | port: 80 238 | metrics: 239 | # -- Metrics service name 240 | name: metrics 241 | # -- Metrics service port 242 | port: 2020 243 | -------------------------------------------------------------------------------- /app/README.md: -------------------------------------------------------------------------------- 1 | # Common Document Generation Service [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) [![img](https://img.shields.io/badge/Lifecycle-Stable-97ca00)](https://github.com/bcgov/repomountie/blob/master/doc/lifecycle-badges.md) 2 | 3 | ![Tests](https://github.com/bcgov/common-document-generation-service/workflows/Tests/badge.svg) 4 | [![Maintainability](https://api.codeclimate.com/v1/badges/b360d0b4c9ad56149499/maintainability)](https://codeclimate.com/github/bcgov/common-document-generation-service/maintainability) 5 | [![Test Coverage](https://api.codeclimate.com/v1/badges/b360d0b4c9ad56149499/test_coverage)](https://codeclimate.com/github/bcgov/common-document-generation-service/test_coverage) 6 | 7 | 8 | CDOGS - A common hosted service (API) for generating documents from templates, data documents, and assets 9 | 10 | To learn more about the **Common Services** available visit the [Common Services Showcase](https://bcgov.github.io/common-service-showcase/) page. 11 | 12 | ## Table of Contents 13 | 14 | - [OpenAPI Specification](#openapi-specification) 15 | - [Environment Variables](#environment-variables) 16 | - [Carbone Variables](#carbone-variables) 17 | - [Keycloak Variables](#keycloak-variables) 18 | - [Server Variables](#server-variables) 19 | - [Quick Start](#quick-start) 20 | - [Docker](#docker) 21 | - [Local Machine](#local-machine) 22 | - [License](#license) 23 | 24 | ## OpenAPI Specification 25 | 26 | This API is defined and described in OpenAPI 3.0 specification. 27 | 28 | When the API is running, you should be able to view the specification through ReDoc at (assuming you are running this microservice locally). 29 | 30 | The hosted CDOGS API can usually be found at . 31 | 32 | For more details on using CDOGS and its underlying Carbone library, take a look at the [Usage guide](/app/USAGE.md). 33 | 34 | ## Environment Variables 35 | 36 | CDOGS behavior is highly customizable through Environment Variables. The following will provide you with the main settings that you should be aware of. However, the complete list of supported variables can be found under [/app/config/custom-environment-variables.json](config/custom-environment-variables.json). Reference the [NPM Config](https://www.npmjs.com/package/config) library for more details on how configuration is cascaded and managed. 37 | 38 | ### Carbone Variables 39 | 40 | The following variables alter the behavior of Carbone and its caching behavior. 
41 | 42 | | Config Var | Env Var | Default | Notes | 43 | | --- | --- | --- | --- | 44 | | `cacheDir` | `CACHE_DIR` | `/var/lib/file-cache/data` | This is the root location to read/write files. Error will be thrown if directory does not exist and cannot be created. Will attempt to fall back to operating system temp file location. | 45 | | `cacheSize` | `CACHE_SIZE` | `2GB` | The maximum size of the `cacheDir` directory. Oldest timestamped files will be cycled out to make room for new files. Uses the [bytes](https://www.npmjs.com/package/bytes) library for parsing values. | 46 | | `converterFactoryTimeout` | `CONVERTER_FACTORY_TIMEOUT` | `60000` | Maximum amount of time (in milliseconds) that Carbone will use to convert files before timing out. | 47 | | `formFieldName` | `UPLOAD_FIELD_NAME` | `template` | Field name for multipart form data upload when uploading templates via /template api. | 48 | | `startCarbone` | `START_CARBONE` | `true` | If true, then the carbone converter will be started on application start. This will ensure that the first call to /render will not incur the overhead of starting the converter. | 49 | | `uploadCount` | `UPLOAD_FILE_COUNT` | `1` | Limit the number of files uploaded per call. Default is 1; not recommended to use any other value. | 50 | | `uploadSize` | `UPLOAD_FILE_SIZE` | `25MB` | Limit size of template files. Uses the [bytes](https://www.npmjs.com/package/bytes) library for parsing values. | 51 | 52 | ### Keycloak Variables 53 | 54 | The following variables alter CDOGS authentication behavior. By default, if `KC_ENABLED` is left unset/undefined, CDOGS will run in unauthenticated mode, ignoring the rest of the Keycloak environment variables. Should you want CDOGS to require authentication, you will need to set `KC_ENABLED` to `true`. 55 | 56 | | Config Var | Env Var | Default | Notes | 57 | | --- | --- | --- | --- | 58 | | `clientId` | `KC_CLIENTID` | | Keycloak client id for CDOGS | 59 | | `clientSecret` | `KC_CLIENTSECRET` | | Keycloak client secret for CDOGS | 60 | | `enabled` | `KC_ENABLED` | | Whether to run CDOGS in unauthenticated or Keycloak protected mode | 61 | | `publicKey` | `KC_PUBLICKEY` | | If specified, verify all incoming JWT signatures off of the provided public key | 62 | | `realm` | `KC_REALM` | | Keycloak realm for CDOGS | 63 | | `serverUrl` | `KC_SERVERURL` | | Keycloak server url for CDOGS authentication | 64 | 65 | ### Server Variables 66 | 67 | The following variables alter the general Express application behavior. For most situations, the defaults should be sufficient. 68 | 69 | | Config Var | Env Var | Default | Notes | 70 | | --- | --- | --- | --- | 71 | | `bodyLimit` | `SERVER_BODYLIMIT` | `100mb` | Maximum request body length that CDOGS will accept | 72 | | `logFile` | `SERVER_LOGFILE` | | If defined, will attempt to write log output to | 73 | | `logLevel` | `SERVER_LOGLEVEL` | `info` | The log level/verbosity to report at | 74 | | `morganFormat` | `SERVER_MORGANFORMAT` | `dev` | The morgan format to log http level requests in. Options: `dev` and `combined` | 75 | | `port` | `SERVER_PORT` | `3000` | The port that CDOGS application will bind to | 76 | 77 | ## Quick Start 78 | 79 | The following sections provide you a quick way to get CDOGS set up and running. 80 | 81 | ### Docker 82 | 83 | This section assumes you have a recent version of Docker available to work with on your environment. Make sure to have an understanding of what environment variables are passed into the application before proceeding. 
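If you would rather not repeat many individual `-e` flags on the command line (as in the examples below), the same variables can be collected into an env file and passed to `docker run` with `--env-file`. This is a sketch only; the file name `cdogs.env` is hypothetical and the values shown are simply the documented defaults from the tables above.

```sh
# cdogs.env (hypothetical) — one KEY=VALUE per line, using variables from the tables above
CACHE_SIZE=2GB
SERVER_LOGLEVEL=info
SERVER_PORT=3000

# Start the container using the env file instead of individual -e flags
docker run -it --rm -p 3000:3000 --env-file ./cdogs.env ghcr.io/bcgov/common-document-generation-service:latest
```
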
84 | 85 | Get CDOGS image (change latest tag to specific version if needed): 86 | 87 | ```sh 88 | docker pull ghcr.io/bcgov/common-document-generation-service:latest 89 | ``` 90 | 91 | Run CDOGS in unauthenticated mode 92 | 93 | ```sh 94 | docker run -it --rm -p 3000:3000 bcgov/common-document-generation-service:latest 95 | ``` 96 | 97 | Run CDOGS in Keycloak protected mode (replace environment values as necessary) 98 | 99 | ```sh 100 | docker run -it --rm -p 3000:3000 -e KC_CLIENTID= -e KC_CLIENTSECRET= -e KC_ENABLED=true -e KC_PUBLICKEY= -e KC_REALM= -e KC_SERVERURL= bcgov/common-document-generation-service:latest 101 | ``` 102 | 103 | For more dedicated deployments of CDOGS in a Docker environment, make sure to consider using persistent volumes for the cache directories. 104 | 105 | ### Local Machine 106 | 107 | This section assumes you have a recent version of Node.js (16.x or higher) and LibreOffice™ (6.3.4.x or higher) installed. Make sure to have an understanding of what environment variables are passed into the application before proceeding. 108 | 109 | #### Configuration 110 | 111 | Configuration management is done using the [config](https://www.npmjs.com/package/config) library. There are two ways to configure: 112 | 113 | 1. Look at [custom-environment-variables.json](/app/config/custom-environment-variables.json) and ensure you have the environment variables locally set. Create a `local.json` file in the config folder. This file should never be added to source control. Consider creating a `local-test.json` file in the config folder if you want to use different configurations while running unit tests. 114 | 2. Look at [custom-environment-variables.json](/app/config/custom-environment-variables.json) and use explicit environment variables in your environment as mentioned [above](#environment-variables) to configure your application behavior. 115 | 116 | For more details, please consult the config library [documentation](https://github.com/lorenwest/node-config/wiki/Configuration-Files). 117 | 118 | #### Common Commands 119 | 120 | Install node dependencies with either `npm ci` or `npm install`. 121 | 122 | Run the server with hot-reloading for development 123 | 124 | ``` sh 125 | npm run serve 126 | ``` 127 | 128 | Run the server without hot-reloading 129 | 130 | ``` sh 131 | npm run start 132 | ``` 133 | 134 | Run your tests 135 | 136 | ``` sh 137 | npm run test 138 | ``` 139 | 140 | Lint the codebase 141 | 142 | ``` sh 143 | npm run lint 144 | ``` 145 | 146 | ## License 147 | 148 | ```txt 149 | Copyright 2019 Province of British Columbia 150 | 151 | Licensed under the Apache License, Version 2.0 (the "License"); 152 | you may not use this file except in compliance with the License. 153 | You may obtain a copy of the License at 154 | 155 | http://www.apache.org/licenses/LICENSE-2.0 156 | 157 | Unless required by applicable law or agreed to in writing, software 158 | distributed under the License is distributed on an "AS IS" BASIS, 159 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 160 | See the License for the specific language governing permissions and 161 | limitations under the License. 162 | ``` 163 | --------------------------------------------------------------------------------