├── src
│   ├── assets
│   │   └── index.postcss
│   ├── main.ts
│   ├── types.ts
│   ├── utils.ts
│   ├── search.ts
│   ├── components
│   │   ├── ResultTitle.vue
│   │   ├── SearchResults.vue
│   │   ├── ResultList.vue
│   │   ├── ResultListItem.vue
│   │   ├── SearchInput.vue
│   │   └── JsonSearch.vue
│   ├── env.d.ts
│   └── App.vue
├── tailwind.config.js
├── vercel.json
├── postcss.config.js
├── .github
│   ├── renovate.json
│   └── workflows
│       └── main.yml
├── .gitignore
├── tsconfig.json
├── CHANGES.md
├── vite.site.config.ts
├── index.html
├── .eslintrc.js
├── playwright.config.ts
├── LICENSE.md
├── vite.config.ts
├── package.json
├── tests
│   └── e2e
│       └── JsonSearch.spec.ts
├── README.md
└── public
    ├── copy.json
    └── index.json

/src/assets/index.postcss:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;

--------------------------------------------------------------------------------
/tailwind.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 |   content: ['./index.html', './src/**/*.{vue,ts}'],
3 |   plugins: [require('@tailwindcss/typography')],
4 | }

--------------------------------------------------------------------------------
/vercel.json:
--------------------------------------------------------------------------------
1 | {
2 |   "rewrites": [
3 |     {
4 |       "source": "/(.*)",
5 |       "destination": "/index.html"
6 |     }
7 |   ]
8 | }

--------------------------------------------------------------------------------
/src/main.ts:
--------------------------------------------------------------------------------
1 | import { createApp } from 'vue'
2 | import App from './App.vue'
3 | import '@/assets/index.postcss'
4 | 
5 | const app = createApp(App)
6 | app.mount('#app')

--------------------------------------------------------------------------------
/postcss.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 |   plugins: [
3 |     require('postcss-import'),
4 |     require('tailwindcss/nesting')(require('postcss-nesting')),
5 |     require('tailwindcss'),
6 |     require('autoprefixer'),
7 |   ],
8 | }

--------------------------------------------------------------------------------
/src/types.ts:
--------------------------------------------------------------------------------
1 | import Fuse from 'fuse.js'
2 | 
3 | export type FuseResult = Fuse.FuseResult<SearchResultItem>
4 | export type SearchResultItem = {
5 |   title: string
6 |   permalink: string
7 |   summary: string
8 |   tags: string[]
9 | }

--------------------------------------------------------------------------------
/src/utils.ts:
--------------------------------------------------------------------------------
1 | export function renderTag(tagRoot: string, tag: string, index: number, tags: string[]) {
2 |   let result = ''
3 |   if (tag.length > 0) {
4 |     // Tag links are formatted as tagRoot + tag + '/', e.g. /tags/vue/
5 |     result += `<a class="tag" href="${tagRoot}${tag}/">${tag}</a>`
6 | 
7 |     if (index < tags.length - 1) {
8 |       result += ', '
9 |     }
10 |   }
11 |   return result
12 | }

--------------------------------------------------------------------------------
/src/search.ts:
--------------------------------------------------------------------------------
1 | import JsonSearch from '@/components/JsonSearch.vue'
2 | import ResultList from '@/components/ResultList.vue'
3 | import ResultListItem from '@/components/ResultListItem.vue'
4 | import ResultTitle from '@/components/ResultTitle.vue'
5 | import SearchInput from '@/components/SearchInput.vue'
6 | import SearchResults from '@/components/SearchResults.vue'
7 | 
8 | export {
JsonSearch, ResultList, ResultListItem, ResultTitle, SearchInput, SearchResults } 9 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | "config:base", 4 | "group:all", 5 | "schedule:weekly" 6 | ], 7 | "packageRules": [ 8 | { 9 | "updateTypes": [ 10 | "patch" 11 | ], 12 | "enabled": false 13 | }, 14 | { 15 | "matchManagers": [ 16 | "npm" 17 | ], 18 | "stabilityDays": 2, 19 | "prCreation": "not-pending" 20 | } 21 | ], 22 | "timezone": "Europe/Helsinki", 23 | "dependencyDashboard": true 24 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | node_modules 3 | dist 4 | dist-ssr 5 | 6 | /tests/e2e/videos/ 7 | /tests/e2e/screenshots/ 8 | /instrumented 9 | /coverage 10 | .nyc_output 11 | 12 | # local env files 13 | .env 14 | .env.local 15 | .env.*.local 16 | 17 | # Log files 18 | npm-debug.log* 19 | yarn-debug.log* 20 | yarn-error.log* 21 | pnpm-debug.log* 22 | 23 | # Editor directories and files 24 | .idea 25 | .vscode 26 | *.suo 27 | *.ntvs* 28 | *.njsproj 29 | *.sln 30 | *.sw? 31 | 32 | *.local 33 | LOCAL_NOTES.md 34 | -------------------------------------------------------------------------------- /src/components/ResultTitle.vue: -------------------------------------------------------------------------------- 1 | 8 | 20 | -------------------------------------------------------------------------------- /src/components/SearchResults.vue: -------------------------------------------------------------------------------- 1 | 9 | 21 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "esnext", 4 | "module": "esnext", 5 | "moduleResolution": "node", 6 | "strict": true, 7 | "jsx": "preserve", 8 | "sourceMap": false, 9 | "lib": ["esnext", "dom"], 10 | "types": ["vite/client"], 11 | "resolveJsonModule": true, 12 | "allowSyntheticDefaultImports": true, 13 | "skipLibCheck": true, 14 | "paths": { 15 | "@/*": ["./src/*"] 16 | } 17 | }, 18 | "include": ["src/**/*.ts", "src/**/*.d.ts", "src/**/*.vue", "tests/**/*.ts"], 19 | "exclude": ["node_modules", "dist", "public"] 20 | } 21 | -------------------------------------------------------------------------------- /CHANGES.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## 0.2.0 (2022-01-27) 4 | ### New Features 5 | 6 | - Made all markup fully customizable through slots. 7 | - Added following named exports: `JsonSearch`, `ResultList`, `ResultListItem`, `ResultTitle`, `SearchInput`, `SearchResults` 8 | 9 | ### Changed 10 | 11 | - Removed default import. 12 | - Changed default CSS to use classes instead of ids to allow multiple components on a page. 13 | 14 | ## 0.1.0 (2022-01-26) 15 | 16 | - Added configuration options. (#2) 17 | - Added documentation. (#1) 18 | - Added proper tests. 19 | ## 0.0.0 (2022-01-25) 20 | 21 | - Initial version. 
22 | -------------------------------------------------------------------------------- /vite.site.config.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path' 2 | import { defineConfig } from 'vite' 3 | import vue from '@vitejs/plugin-vue' 4 | import pkg from './package.json' 5 | 6 | process.env.VITE_APP_VERSION = pkg.version 7 | if (process.env.NODE_ENV === 'production') { 8 | process.env.VITE_APP_BUILD_EPOCH = new Date().getTime().toString() 9 | } 10 | 11 | export default defineConfig({ 12 | plugins: [ 13 | vue({ 14 | script: { 15 | refSugar: true, 16 | }, 17 | }), 18 | ], 19 | resolve: { 20 | alias: { 21 | '@': path.resolve(__dirname, './src'), 22 | }, 23 | }, 24 | }) 25 | -------------------------------------------------------------------------------- /index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Vue JSON Search 7 | 8 | 9 |
10 | 11 | 15 | 16 | 17 |
--------------------------------------------------------------------------------
/src/components/ResultList.vue:
--------------------------------------------------------------------------------
1 | 8 | 21 |
--------------------------------------------------------------------------------
/src/env.d.ts:
--------------------------------------------------------------------------------
1 | /// <reference types="vite/client" />
2 | 
3 | declare module '*.vue' {
4 |   import { DefineComponent } from 'vue'
5 |   // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/ban-types
6 |   const component: DefineComponent<{}, {}, any>
7 |   export default component
8 | }
9 | 
10 | interface ImportMetaEnv extends Readonly<Record<string, string>> {
11 |   // Only string type here to avoid hard to debug cast problems in your components!
12 |   readonly VITE_APP_VERSION: string
13 |   readonly VITE_APP_BUILD_EPOCH?: string
14 | }
15 | interface ImportMeta {
16 |   readonly env: ImportMetaEnv
17 | }

--------------------------------------------------------------------------------
/.eslintrc.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 |   root: true,
3 |   env: {
4 |     node: true,
5 |   },
6 |   extends: [
7 |     'plugin:vue/vue3-recommended',
8 |     '@vue/typescript/recommended',
9 |     'plugin:security/recommended',
10 |     'prettier',
11 |   ],
12 |   parserOptions: {
13 |     ecmaVersion: 2020,
14 |   },
15 |   rules: {
16 |     'no-console': process.env.NODE_ENV === 'production' ? 'warn' : 'off',
17 |     'no-debugger': process.env.NODE_ENV === 'production' ? 'warn' : 'off',
18 |     'comma-dangle': ['error', 'only-multiline'],
19 |   },
20 |   globals: {
21 |     defineProps: 'readonly',
22 |     defineEmits: 'readonly',
23 |     defineExpose: 'readonly',
24 |     withDefaults: 'readonly',
25 |   },
26 | }

--------------------------------------------------------------------------------
/src/components/ResultListItem.vue:
--------------------------------------------------------------------------------
1 | 13 | 31 |
--------------------------------------------------------------------------------
/src/components/SearchInput.vue:
--------------------------------------------------------------------------------
1 | 12 | 33 |
--------------------------------------------------------------------------------
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 | 
3 | on:
4 |   push:
5 |     branches: [main]
6 |   pull_request:
7 |     branches: [main]
8 | 
9 | jobs:
10 |   ci:
11 |     runs-on: ubuntu-latest
12 | 
13 |     env:
14 |       PLAYWRIGHT_BROWSERS_PATH: 0
15 | 
16 |     steps:
17 |       - uses: actions/checkout@v3
18 |       - uses: actions/cache@v3
19 |         with:
20 |           path: /home/runner/.local/share/pnpm/store
21 |           key: ${{ runner.os }}-${{ hashFiles('**/pnpm-lock.yaml') }}
22 |           restore-keys: |
23 |             ${{ runner.os }}-
24 | 
25 |       - uses: pnpm/action-setup@v2
26 |         with:
27 |           version: 7
28 |           run_install: true
29 | 
30 |       - name: Install Playwright browsers
31 |         run: npx playwright install --with-deps
32 | 
33 |       # Test
34 |       - name: Run tests
35 |         run: pnpm test:ci
36 | 
37 |       - name: Upload artifacts on fail
38 |         uses: actions/upload-artifact@v3
39 |         if: failure()
40 |         with:
41 |           name: videos
42 |           path: tests/e2e/videos/

--------------------------------------------------------------------------------
/playwright.config.ts:
--------------------------------------------------------------------------------
1 | import { type PlaywrightTestConfig, devices } from '@playwright/test'
2 | 
3 | const config:
PlaywrightTestConfig = { 4 | testDir: './tests/e2e', 5 | webServer: { 6 | command: 'pnpm dev', 7 | url: 'http://localhost:3000/', 8 | timeout: 120 * 1000, 9 | reuseExistingServer: !process.env.CI, 10 | }, 11 | use: { 12 | // headless: false, 13 | baseURL: 'http://localhost:3000', 14 | trace: 'on-first-retry', 15 | }, 16 | // projects: [ 17 | // { 18 | // name: 'iPhone 6', 19 | // use: { 20 | // browserName: 'webkit', 21 | // ...devices['iPhone 6'], 22 | // }, 23 | // }, 24 | // { 25 | // name: 'Macbook 11', 26 | // use: { 27 | // browserName: 'firefox', 28 | // ...devices['Macbook 11'], 29 | // }, 30 | // }, 31 | // { 32 | // name: 'Desktop', 33 | // use: { 34 | // browserName: 'chromium', 35 | // ...devices['Macbook Pro'], 36 | // }, 37 | // }, 38 | // ], 39 | } 40 | 41 | export default config 42 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Ville Säävuori 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /vite.config.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path' 2 | import { defineConfig } from 'vite' 3 | import vue from '@vitejs/plugin-vue' 4 | import pkg from './package.json' 5 | import dts from 'vite-plugin-dts' 6 | 7 | process.env.VITE_APP_VERSION = pkg.version 8 | if (process.env.NODE_ENV === 'production') { 9 | process.env.VITE_APP_BUILD_EPOCH = new Date().getTime().toString() 10 | } 11 | 12 | export default defineConfig({ 13 | plugins: [ 14 | vue({ 15 | script: { 16 | refSugar: true, 17 | }, 18 | }), 19 | dts({ 20 | staticImport: true, 21 | copyDtsFiles: false, 22 | // skipDiagnostics: false, 23 | // logDiagnostics: true, 24 | }), 25 | ], 26 | resolve: { 27 | alias: { 28 | '@': path.resolve(__dirname, './src'), 29 | }, 30 | }, 31 | build: { 32 | emptyOutDir: true, 33 | sourcemap: true, 34 | lib: { 35 | entry: 'src/search.ts', 36 | name: 'vue-json-search', 37 | fileName: (format) => `index.${format}.js`, 38 | }, 39 | rollupOptions: { 40 | external: ['vue'], 41 | output: { 42 | globals: { 43 | vue: 'Vue', 44 | }, 45 | }, 46 | }, 47 | }, 48 | }) 49 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "vue-json-search", 3 | "version": "0.2.1", 4 | "main": "dist/index.es.js", 5 | "module": "dist/index.es.js", 6 | "exports": { 7 | ".": { 8 | "import": "./dist/index.es.js", 9 | "require": "./dist/index.umd.js" 10 | } 11 | }, 12 | "typings": "./dist/src/search.d.ts", 13 | "repository": "https://github.com/Uninen/vue-json-search", 14 | "author": "Ville Säävuori ", 15 | "license": "MIT", 16 | "scripts": { 17 | "dev": "vite --config vite.site.config.ts", 18 | "sitebuild": "vite --config vite.site.config.ts build", 19 | "build": "vite build", 20 | "preview": "vite --config vite.site.config.ts build && vite --config vite.site.config.ts preview", 21 | "start": "yarn dev & wait-on tcp:3000 -v", 22 | "test": "playwright test", 23 | "test:ci": "playwright test" 24 | }, 25 | "dependencies": { 26 | "fuse.js": "^6.6.2" 27 | }, 28 | "devDependencies": { 29 | "@playwright/experimental-ct-vue": "1.23.1", 30 | "@playwright/test": "1.23.1", 31 | "@tailwindcss/typography": "0.5.2", 32 | "@typescript-eslint/eslint-plugin": "5.30.5", 33 | "@typescript-eslint/parser": "5.30.5", 34 | "@vitejs/plugin-vue": "2.3.3", 35 | "@vue/eslint-config-typescript": "11.0.0", 36 | "@vue/test-utils": "2.0.2", 37 | "autoprefixer": "10.4.7", 38 | "eslint": "8.19.0", 39 | "eslint-config-prettier": "8.5.0", 40 | "eslint-plugin-import": "2.26.0", 41 | "eslint-plugin-node": "11.1.0", 42 | "eslint-plugin-promise": "6.0.0", 43 | "eslint-plugin-security": "1.5.0", 44 | "eslint-plugin-vue": "9.1.1", 45 | "postcss": "8.4.14", 46 | "postcss-import": "14.1.0", 47 | "postcss-nesting": "10.1.10", 48 | "tailwindcss": "3.1.4", 49 | "typescript": "4.7.4", 50 | "vite": "2.9.13", 51 | "vite-plugin-dts": "1.2.0", 52 | "vue": "3.2.37", 53 | "wait-on": "6.0.1" 54 | }, 55 | "files": [ 56 | "dist/index.es.js", 57 | "dist/index.es.js.map", 58 | "dist/index.umd.js", 59 | "dist/index.umd.js.map", 60 | "dist/src/env.d.ts", 61 | "dist/src/types.d.ts", 62 | "dist/src/utils.d.ts", 63 | "dist/src/search.d.ts", 64 | "dist/src/components/JsonSearch.vue.d.ts", 65 | "dist/src/components/ResultList.vue.d.ts", 66 | "dist/src/components/ResultListItem.vue.d.ts", 67 | 
"dist/src/components/ResultTitle.vue.d.ts", 68 | "dist/src/components/SearchInput.vue.d.ts", 69 | "dist/src/components/SearchResults.vue.d.ts", 70 | "README.md", 71 | "LICENSE.md" 72 | ], 73 | "keywords": [ 74 | "vue", 75 | "json", 76 | "search", 77 | "hugo", 78 | "fuse" 79 | ], 80 | "bugs": { 81 | "url": "https://github.com/Uninen/vue-json-search/issues" 82 | }, 83 | "homepage": "https://github.com/Uninen/vue-json-search" 84 | } -------------------------------------------------------------------------------- /src/components/JsonSearch.vue: -------------------------------------------------------------------------------- 1 | 81 | 86 | 99 | -------------------------------------------------------------------------------- /tests/e2e/JsonSearch.spec.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from '@playwright/test' 2 | 3 | test('Search', async ({ page }) => { 4 | await page.goto('/') 5 | 6 | await expect(page.locator('data-test-id=default')).toBeVisible() 7 | await expect(page.locator('data-test-id=searchinput')).toHaveCount(2) 8 | await page.locator('.jsonsearchinput').type('dja') 9 | await expect(page.locator('.result')).toHaveCount(5) 10 | await expect(page.locator('.tag')).toHaveCount(0) 11 | await expect(page.locator('data-test-id=searchresulttitle')).toHaveText('11 results') 12 | }) 13 | 14 | test('Results display', async ({ page }) => { 15 | await page.goto('/') 16 | 17 | // Test 0 results 18 | await expect(page.locator('data-test-id=default')).toBeVisible() 19 | await page.fill('.jsonsearchinput', 'hgjfdgklhdfjglkhdsgjlfdshgjdsflhgjdfslghdfsghdfsjklh') 20 | await expect(page.locator('.result')).toHaveCount(0) 21 | await expect(page.locator('.tag')).toHaveCount(0) 22 | await expect(page.locator('data-test-id=searchresulttitle')).toHaveText('0 results') 23 | 24 | // Test 1 result 25 | await page.fill('.jsonsearchinput', 'encrypted') 26 | await expect(page.locator('.result')).toHaveCount(1) 27 | await expect(page.locator('data-test-id=searchresulttitle')).toHaveText('1 result') 28 | 29 | // Test clearing results hides result text 30 | await page.fill('.jsonsearchinput', '') 31 | await expect(page.locator('.result')).toHaveCount(0) 32 | await expect(page.locator('data-test-id=searchresulttitle')).toHaveCount(0) 33 | }) 34 | 35 | test('Props', async ({ page }) => { 36 | await page.goto('/') 37 | 38 | // Test 0 results 39 | await expect(page.locator('data-test-id=default')).toBeVisible() 40 | await page.fill('.jsonsearchinput', 'hgjfdgklhdfjglkhdsgjlfdshgjdsflhgjdfslghdfsghdfsjklh') 41 | await expect(page.locator('.result')).toHaveCount(0) 42 | await expect(page.locator('.tag')).toHaveCount(0) 43 | await expect(page.locator('data-test-id=searchresulttitle')).toHaveText('0 results') 44 | 45 | // Test 1 result 46 | await page.fill('.jsonsearchinput', 'encrypted') 47 | await expect(page.locator('.result')).toHaveCount(1) 48 | await expect(page.locator('data-test-id=searchresulttitle')).toHaveText('1 result') 49 | 50 | // Test clearing results hides result text 51 | await page.fill('.jsonsearchinput', '') 52 | await expect(page.locator('.result')).toHaveCount(0) 53 | await expect(page.locator('data-test-id=searchresulttitle')).toHaveCount(0) 54 | }) 55 | 56 | test('Errors', async ({ page }) => { 57 | const errorLogs: unknown[] = [] 58 | page.on('console', (message) => { 59 | if (message.type() === 'error') { 60 | errorLogs.push(message) 61 | } 62 | }) 63 | 64 | await page.route('/index.json', (route) => 65 | route.fulfill({ 66 
| status: 404,
67 |       body: '',
68 |     })
69 |   )
70 | 
71 |   await page.goto('/')
72 | 
73 |   await expect(page.locator('.jsonsearchinput')).toHaveCount(0)
74 |   expect(errorLogs).toHaveLength(4)
75 | })
76 | 
77 | // FIXME: Convert this to a Playwright component test after it gains slots support
78 | // it('Passes props correctly', () => {
79 | //   mount(JsonSearch, {
80 | //     props: {
81 | //       url: '/copy.json',
82 | //       maxResults: 5,
83 | //       showTags: true,
84 | //       tagRoot: '/foo/',
85 | //     },
86 | //   })
87 | 
88 | //   cy.get('input').should('exist')
89 | //   cy.get('input').type('dja')
90 | //   cy.get('.result').should('have.length', 5)
91 | //   cy.get('.tag').should('have.length', 25)
92 | //   cy.get('h3').should('have.text', '11 results')
93 | 
94 | //   cy.get('.tag').first().should('have.attr', 'href').and('contain', '/foo/')
95 | // })
96 | 

--------------------------------------------------------------------------------
/src/App.vue:
--------------------------------------------------------------------------------
1 | 14 | 15 | 95 | 96 | 109 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Headless Vue JSON Search

Headless Vue (3.x) search component based on [Fuse.js](https://github.com/krisk/Fuse). Designed for static site generators like [Hugo](https://github.com/gohugoio/hugo) but works with any site that's capable of producing a JSON corpus.

- **Easy to set up** with any software
- **100% control of the markup and styles** using headless Vue components and slots
- Lightweight with **minimal dependencies** (Fuse.js and Vue 3), **~8 Kb zipped**

A [live demo](https://til.unessa.net/) is available.

## Simple Usage With Static Site

The following instructions assume you have a `package.json` in your project.

1. Install `vue@next` and `vue-json-search`
2. Create a simple `search.js` script for your site:

   ```js
   import { createApp, h } from 'vue'
   import { JsonSearch } from 'vue-json-search'

   createApp({
     render: () => h(JsonSearch, { showTags: true }), // The props argument is optional
   }).mount('#searchapp')
   ```

   The above shows a minimal, functional way to use this component. It's just JavaScript; use it however works best for you. (This approach has the advantage of not needing Vue templates, resulting in a smaller bundle size.)

3. Add the search component to your HTML template:

   ```html
   <div id="searchapp">
     <h2>Search</h2>
   </div>
   ```

4. Make `/index.json` available; the expected JSON format is sketched below and the configuration options are listed further down.
5. Add your styles and you're done. (See the default markup below.)
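For reference, the corpus is a flat JSON array of objects matching the `SearchResultItem` type in `src/types.ts`. Here's a minimal sketch of the expected shape, written in TypeScript; the field values are made up, and real corpora (like `public/index.json` in this repo) may also carry extra fields such as `contents` for full-text matching:

```ts
// Mirrors SearchResultItem in src/types.ts
type SearchResultItem = {
  title: string
  permalink: string
  summary: string
  tags: string[]
}

// /index.json would contain just this array, serialized as JSON.
const corpus: SearchResultItem[] = [
  {
    title: 'Adding Simple Search',
    permalink: '/hugo/adding-simple-search/',
    summary: 'Setting up vue-json-search with Hugo Pipelines.',
    tags: ['hugo', 'search'],
  },
]
```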
### Setting Up With Hugo Pipelines

Setting this up with Hugo takes less than five minutes. [Read this blog post](https://til.unessa.net/hugo/adding-simple-search/) for step-by-step instructions.

## Using As Vue Component

You can use this like any other Vue component.

1. Import the component in your project:

   ```js
   import { JsonSearch } from 'vue-json-search'
   ```

2. Then use it in your template like any other Vue component:

   ```html
   <JsonSearch :show-tags="true" />
   ```

## Customizing Markup

You can customize 100% of the markup using Vue slots.

First import the components you need:

```js
import { JsonSearch, ResultList, ResultListItem, ResultTitle, SearchInput, SearchResults } from 'vue-json-search'
```

Then compose them however you like. Here's a simple example:

```html
<JsonSearch v-slot="{ results }" :show-tags="true">
  <SearchInput />
  <SearchResults>
    <ResultTitle />
    <ResultList>
      <ResultListItem v-for="result in results" :key="result.refIndex">
        <div class="title">
          Title: {{ result.title }}
        </div>
        <div class="tags">
          Tags: {{ result.tags }}
        </div>
      </ResultListItem>
    </ResultList>
  </SearchResults>
</JsonSearch>
```
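If you keep the customized markup in a single-file component, mounting works the same way as in the simple example above. A minimal sketch (the `MySearch.vue` file name is hypothetical):

```ts
import { createApp } from 'vue'
// MySearch.vue contains the customized JsonSearch markup shown above.
import MySearch from './MySearch.vue'

createApp(MySearch).mount('#searchapp')
```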
The documentation for the components is sparse, but the source is easy to read and understand if you're familiar with Vue.

## Configuration

The component takes its configuration as props. All options are optional.

| Option | Default | Description |
| --- | --- | --- |
| url | `'/index.json'` | The URL for the search corpus JSON. (See the expected format above.) |
| fuseOptions | [Default options](https://github.com/Uninen/vue-json-search/blob/main/src/components/JsonSearch.vue#L13-L20) | Options for Fuse.js. See the [Fuse docs](https://fusejs.io/api/options.html) for all options. |
| maxResults | `10` | Maximum number of results to show in the result list. |
| showTags | `false` | List tags with every search result item. |
| tagRoot | `'/tags/'` | Root URL to link tags. Links are formatted as `tagRoot + tag + '/'`. |
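For example, to make matching fuzzier and search only titles and tags, you could pass custom options via the `fuseOptions` prop. A sketch (the keys are an assumption based on the corpus fields; see `JsonSearch.vue` for the actual defaults):

```ts
import Fuse from 'fuse.js'

// Mirrors SearchResultItem in src/types.ts
type SearchResultItem = { title: string; permalink: string; summary: string; tags: string[] }

// Passed to the component as :fuse-options="fuseOptions"
const fuseOptions: Fuse.IFuseOptions<SearchResultItem> = {
  keys: ['title', 'tags'],
  threshold: 0.4, // 0.0 requires a near-perfect match, 1.0 matches anything
}
```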
## Default Markup

Here's the default markup you might want to style yourself:

```html
<div class="jsonsearch">
  <form role="search">
    <label for="searchinput">Search</label>
    <input
      id="searchinput"
      class="jsonsearchinput"
      type="search"
      autocomplete="off"
      data-test-id="searchinput"
    />
  </form>
  <div class="searchresults">
    <h3 data-test-id="searchresulttitle">N results</h3>
    <ol>
      <li class="result">
        <span class="title">
          <a href="/result/permalink/">Result title</a>
        </span>
        <span class="tags">
          <a
            class="tag"
            href="/tags/tag/"
          >tag</a>,
          ...
        </span>
      </li>
    </ol>
  </div>
</div>
```
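The tag links above are generated by `renderTag` in `src/utils.ts` (a helper that is not part of the public exports). A quick sketch of its behavior, assuming the markup string reconstructed above:

```ts
import { renderTag } from '@/utils'

const tags = ['vue', 'search']

// Tags are comma-separated while more tags follow, e.g.:
renderTag('/tags/', 'vue', 0, tags)
// -> '<a class="tag" href="/tags/vue/">vue</a>, '
renderTag('/tags/', 'search', 1, tags)
// -> '<a class="tag" href="/tags/search/">search</a>'
```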
## Styling With Tailwind

The default markup is super easy to style. The components don't ship with any CSS, but if you want an easy template to copy from, here's a Tailwind example:

```css
/* Tailwind PostCSS Example - live at https://til.unessa.net */
.jsonsearch {
  @apply mt-6 w-full;
}

.jsonsearch label {
  @apply sr-only;
}

.jsonsearch .jsonsearchinput {
  @apply flex bg-gray-800 px-2 py-1 text-gray-100 placeholder:text-gray-500 mx-auto w-5/6 md:w-1/2 md:mx-0;
}

.jsonsearch .searchresults h3 {
  @apply my-4 text-lg font-semibold;
}

.jsonsearch .searchresults ol {
  @apply space-y-4;
}

.jsonsearch .searchresults .title a {
  @apply inline-block text-lg leading-none text-indigo-400 align-top hover:underline;
}

.jsonsearch .result .tags {
  @apply block text-gray-400 text-xs;
}

.jsonsearch .result .tags a {
  @apply leading-tight text-gray-400 text-opacity-80 hover:text-gray-200;
}
```

You **don't need Tailwind** or any other tool; plain CSS or whatever works best for you is fine.

## Future Ideas

- Ship a Web Component version for users who don't want to set up JS build tooling
- Separate the search machinery from Fuse to allow other backends
- Done! ~~Allow full control of markup by making the component headless~~

## Sites Using This

- [Kaizen - Today I Learned by Ville Säävuori](https://til.unessa.net/)

(PRs welcome -- add your own!)

## Elsewhere

- Read my continuously updated learnings on Vite / Vue / TypeScript and other Web development topics on my [Today I Learned site](https://til.unessa.net/)
- [Follow @uninen](https://twitter.com/uninen) on Twitter

## Contributing

Contributions are welcome! Please follow the [code of conduct](https://www.contributor-covenant.org/version/2/0/code_of_conduct/) when interacting with others.

--------------------------------------------------------------------------------
/public/copy.json:
--------------------------------------------------------------------------------
1 | [{"contents":"I was having problems installing dependencies to a not too old frontend project using yarn. The installation failed with a long traceback which included the following:\ngyp info spawn args [ 'BUILDTYPE=Release', '-C', 'build' ] CC(target) Release/obj.target/nothing/../node-addon-api/nothing.o LIBTOOL-STATIC Release/nothing.a warning: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/libtool: archive library: Release/nothing.a the table of contents is empty (no object file members in the library define global symbols) TOUCH Release/obj.target/libvips-cpp.stamp CXX(target) Release/obj.target/sharp/src/common.o ../src/common.cc:23:10: fatal error: 'vips/vips8' file not found #include \u0026lt;vips/vips8\u0026gt; ^~~~~~~~~~~~ 1 error generated. make: *** [Release/obj.target/sharp/src/common.o] Error 1 gyp ERR! build error gyp ERR! stack Error: `make` failed with exit code: 2 gyp ERR! stack at ChildProcess.onExit (/Users/uninen/.nvm/versions/node/v14.18.2/lib/node_modules/npm/node_modules/node-gyp/lib/build.js:194:23) The problem was related to sharp-package and node-gyp-package, both which were some deeper dependencies in the project.
I found a couple of seemingly related GitHub issues from both of these projects but the solution that fixed the problem was easy: I installed the missing vips-package with brew:\nbrew install vips These kind of issues are the main reason I try to keep all frontend projects as small as possible and make extra effort to not include anything that requires build tools if you absolutely don\u0026rsquo;t need to.\n","permalink":"https://til.unessa.net/node/vips/","tags":["node","yarn","error"],"title":"Resolving 'fatal error: 'vips/vips8' file not found' on macOS"},{"contents":"The problem with projects with NPM dependencies is the ridiculously fast pace the packages keep updating. JavaScript / Node world has an unique way of creating and depending on tiny community packages and the number of dependencies even in a small project can be enormous. Using automated services like Dependabot only takes you so far, you just need to keep weeding the projects manually from time to time.\nAbout the colors / fakerjs incident Node community got reminded once more about this problematic situation when the maintainer of colors and faker libraries decided to publish malicious versions of the packages as a political statement. NPM has already had experiences of this since the leftpad incident so they swiftly reinstated the original package. GitHub assumed the user account was hacked as the users behaviours were abormal so they locked the account and it immediately got mixed reactions among the community as people thought they were somehow censoring the author. This lead people to call out for boycotting GitHub and looking for more decentralized solutions for code hosting.\nPeople familiar with the author soon pointed out that he had previously voiced multiple times concerns of big corporations using his (open sourced) work for free. This lead people to blame the situation on the lack of sustainable monetization in important open source projects.\nMeanwhile, others pointed out that the author had also been struggling with mental issues for a long time without getting proper help. He had also recently been posting some far out conspiracy theories and stuff like that on his social media profiles. I believe the root cause of this incident stems here but is much deeper and complex.\nThings like QAnon are fuelled by a combination of broken things; a healthcare system that doesn\u0026rsquo;t work, a society that is so afraid of difficult discussions that rather censors inconvenient facts than allows discussion around them, and a serious case of woke mentality / tall puppy syndrome that totally suffocates any meaningful discourse around touchy issues. These kind of issues cannot be solved with technical solutions or any single magic bullet \u0026ndash; we need to fix the deep issues within our society instead.\nPublished My Docker Base Images I\u0026rsquo;ve been developing and using my own Docker base images for a while now. I decided to publish them as open source and last week I added a new image for testing in CI as well. The current images cover Django projects using Postgres and Postgis services, plus the new image that adds preinstalled Node as well (to make CI builds go faster).\nThe images are built upon great work by the awesome RevSys team, who also added a missing LICENCE to the repo only minutes after I asked Jeff about it. 
Great job!\n","permalink":"https://til.unessa.net/weeknotes/2022-02/","tags":["weeknotes","chores","javascript","politics","society","docker"],"title":"Weeknotes 2022/2 - Chores, and the colors / faker incident"},{"contents":"What better time to restart weeknotes than the first week of the year!\nI decided to take a proper long vacation from everything work-related in mid-December. It\u0026rsquo;s been years since my last proper vacation and I\u0026rsquo;m already noticing the positive effects. Staying away from the computer hasn\u0026rsquo;t been that easy, though.\nMy original \u0026ldquo;things I wanna do while on holiday\u0026rdquo;-list was full of code projects and all kinds of small computer-related chores. Luckily I realized that pretty quickly and just threw all plans away. It\u0026rsquo;s not a vacation if you\u0026rsquo;ve scheduled your days full of work.\nI\u0026rsquo;ve still been browsing GitHub, looking for interesting projects and reading code. You can learn a lot by reading other people\u0026rsquo;s code. I\u0026rsquo;ve also kept up to date with Vitest, a new and fast-evolving Vite-native testing framework by Anthony Fu. I\u0026rsquo;ve never had the stamina to add Jest-tests to my own projects (as I\u0026rsquo;ve always found e2e tests to be more important for frontend) so learning this world has been quite interesting. Combining Cypress for e2e and component testing with Vitest seems vey nice way to get full test coverage for frontend projects.\nNew M1 Mac mini inspired me to test iOS development after a pretty long hiatus. Turns out XCode is still painfully slow and hands down the worst development experience I\u0026rsquo;ve ever had to endure. Developing small apps for iOS devices is fun but the dumpster fire of Apple Developer Docs + XCode just makes everything really bad. Here\u0026rsquo;s to hoping Apple comes up with some innovations \u0026ndash; starting for example with code formatting support for XCode \u0026ndash; for developers in 2022!\n","permalink":"https://til.unessa.net/weeknotes/2022-01/","tags":["weeknotes","swift","vitest","testing"],"title":"Weeknotes 2022/1 - Vacationing, Vitest, and SwiftUI"},{"contents":"Setting uo PostgreSQL + PostGIX extension for GitLab CI was easy once I figured out how to configure the needed services right way. Here\u0026rsquo;s the gist of my setup for testing a Django app:\n Use a main image that includes needed Postgres + PostGIS deps:\nimage: \u0026#39;registry.gitlab.com/uninen/docker-images/python-postgis:3.9\u0026#39; I use my own Python Docker images which are based on optimized Revsys Python images and come with most of the commonly needed system packages preinstalled for speedy build process and testing.\n Instead of the typical postgres-service configure an image with PostGIS. I used postgis/postgis:13-master like so:\nservices: - name: postgis/postgis:13-master alias: postgres command: [\u0026#39;-c\u0026#39;, \u0026#39;fsync=off\u0026#39;, \u0026#39;-c\u0026#39;, \u0026#39;synchronous_commit=off\u0026#39;, \u0026#39;-c\u0026#39;, \u0026#39;full_page_writes=off\u0026#39;] The main thing here is the more verbose way to spell out the wanted service. 
You can use the same format for any Docker image.\n","permalink":"https://til.unessa.net/testing/postgis-gitlab/","tags":["testing","postgres","postgis","gitlab"],"title":"Testing Postgres + PostGIS on GitLab CI"},{"contents":"This blog is a static site built with Hugo using a 100% hand-built HTML template and custom Tailwind CSS config.\nThe code lies in a Git repository hosted on GitLab and is automatically deployed on Vercel on every push.\nThe name Kaizen (改善 in Kanji) is a personal motto of mine, it means continuous improvement. I try live by this motto and these TILs are one concrete example of that.\nFooter phraze \u0026ldquo;I\u0026rsquo;m just a bizarre little person who walks back and forth\u0026rdquo; is a sentence from the last public video of Terry A. Davis, which reminds me of the realities of the human society.\n","permalink":"https://til.unessa.net/colophon/","tags":null,"title":"Colophon"},{"contents":"Getting your development environment running and configured has become much easier over time but it\u0026rsquo;s still a hassle to get everything set up and configured from scratch. These notes are a continuously evolving task list for my personal setup.\nApplications Install Docker desktop, configure it as needed and remember to authenticate it. Install VS Code and log in to synchronize settings. Then some authentication: For GitLab, pull from any GL repo and then enter username + personal access token Install iTerm2. Terminal Install Homebrew and following packages node nvm pyenv virtual-pyenv gdal hugo Install Powerline fonts (clone, run install script) Install Oh my zsh To get ssh-agent starting up automatically you need to run sudo touch /var/db/useLS and then reboot. Generating a secure SSH key: ssh-keygen -t ed25519 -a 100 Git git config --global user.email \u0026#34;you@example.com\u0026#34; git config --global user.name \u0026#34;Your Name\u0026#34; git config --global init.defaultBranch main Node Node is almost as bad as Python with different versions so using nvm is a must. After installing it with brew, install few of the necessary versions and then set the default:\nnvm install 12 nvm install 14 nvm install 16 nvm alias default 16 Now the default version does not use the latest node but the latest 16.x instead. Whenever a project needs different version, just say nvm use x to activate the needed version.\nPython To minimize XKCD 1987 the best way to handle Python versions is to only use pyenv. So brew install pyenv (and brew install pyenv-virtualenv). Then, importantly, add both of these lines to make sure pyenv is loaded for new terminal sessions automatically:\necho \u0026#39;eval \u0026#34;$(pyenv init --path)\u0026#34;\u0026#39; \u0026gt;\u0026gt; ~/.zprofile echo \u0026#39;eval \u0026#34;$(pyenv init -)\u0026#34;\u0026#39; \u0026gt;\u0026gt; ~/.zshrc echo \u0026#39;eval \u0026#34;$(virtualenv-init init -)\u0026#34;\u0026#39; \u0026gt;\u0026gt; ~/.zshrc Now just install the latest usable (ie. 
not the point zero version of a new release) Python version and set it as global default.\nPoetry is easiest to install with the installation script.\nPostgis / GDAL / GEOS Previously I haven\u0026rsquo;t had much problem with these as they install easily with Homebrew, but on macOS Monterey and Apple Silicon (M1) I had to add these to Django settings to get them working:\nGDAL_LIBRARY_PATH = \u0026#39;/opt/homebrew/opt/gdal/lib/libgdal.dylib\u0026#39; GEOS_LIBRARY_PATH = \u0026#39;/opt/homebrew/opt/geos/lib/libgeos_c.dylib\u0026#39; ","permalink":"https://til.unessa.net/macos/setting-up-macos/","tags":["macos","terminal","node","python","postgresql"],"title":"Setting Up macOS for Web Development"},{"contents":"I needed to implement user data export feature to a Django project. The static and user media is handled by Backblaze B2 cloud storage (similar to Amazon S3) which also supports server side encryption that allows your data to be encrypted at rest.\nUsually when working with filed in Django you want to use the native storage api and storage backends. There are many for B2 as well and the project is configured to use one, but handling this one specific file that includes PII data was special enough case that I decided to write custom handlers for it manually.\nBefore goin on, a reminder that this is just one example that happened to work for this specific project and data. It most likely won\u0026rsquo;t work well for example large data. YMMW.\nHigh Level Overview Here\u0026rsquo;s the use case in a nutshell:\n User triggers a data export A Celery task then Collects the data Bundles it into an in-memory zip file Uploads the zip to an encrypted B2 bucket Saves the metadata of the export file to database Informs the user that the data is now available for upload User clicks a download button A custom download view Fetches the download from B2 Writes it into a http response as a downloadable file Periodic Celery task removes the export metadata from the db after it has expired. (The file itself is automatically deleted from the B2 bucket afyer the expiry.) Collecting And Uploading The custom user model has two methods for collecting and uploading the data.\nA method that does all the work:\ndef _build_and_upload_data_export(self): from .serializers import UserDataexportSerializer self._delete_data_export() serializer = UserDataExportSerializer(self) expires = timezone.now() + timedelta(days=7) export_item: DataExportItem = DataExportItem.objects.create( user=self, expires_at=expires, ) # Create the zip file in memory in_memory = BytesIO() zf = ZipFile(in_memory, mode=\u0026#34;w\u0026#34;) zf.writestr(export_item.file_name, orjson.dumps(serializer.data)) zf.close() in_memory.seek(0) # Upload the file to b2 bucket = b2_api.get_bucket_by_name(settings.B2_ENCRYPTED_BUCKET_NAME) uploaded = bucket.upload_bytes( data_bytes=in_memory.read(), file_name=export_item.b2_file_name ) export_item.size = uploaded.size export_item.is_ready = True export_item.save() Few things to note here:\n I\u0026rsquo;m using a DataExportItem Django model to collect the metadata. To make sure we only have one in any given time we delete possible previous ones before starting a new export. All data collection is handled by Django Rest Framework serializer class. Orjson works here great because it\u0026rsquo;s fast and it serializers to bytes. The B2 bucket has server-side encryption and lifecycle rules set to match the projects needs. 
Depending on the amount of user data and the server environment, this method will be slow to execute. You\u0026rsquo;ll want to run this in a background process detached from the Django request-response cycle. And again, if your data is big, you probably wouldn\u0026rsquo;t want to process it in memory. The public for the export just triggers the background Celery task:\ndef export_data(self): build_data_export.delay(self.uid) The Celery task itself is also very simple:\n@shared_task def build_data_export(uid: str): from .models import User user = User.objects.get(uid=uid) user._build_and_upload_data_export() # handle any user notifications here Handling The Download The custom user model has a method for getting the export from B2. It returns either a B2 object or None:\ndef _get_data_export(self): \u0026#34;Returns b2 DownloadedFile which can be saved w/ save()\u0026#34; try: export: DataExportItem = self.dataexport # type: ignore bucket = b2_api.get_bucket_by_name(settings.B2_ENCRYPTED_BUCKET_NAME) return bucket.download_file_by_name(export.b2_file_name) except DataExportItem.DoesNotExist: return None Finally there\u0026rsquo;s a Django view that passes the file to the user:\n@login_required def download_data_export(request): try: export: DataExportItem = request.user.dataexport # type: ignore export_file = request.user._get_data_export() if export_file is not None: in_memory_file = BytesIO() export_file.save(in_memory_file) in_memory_file.seek(0) response = HttpResponse(content=in_memory_file.read()) response[\u0026#34;Content-Type\u0026#34;] = \u0026#34;application/zip\u0026#34; response[\u0026#34;Content-Length\u0026#34;] = export.size response[\u0026#34;Content-Disposition\u0026#34;] = f\u0026#34;attachment; filename={export.download_file_name}\u0026#34; return response except DataExportItem.DoesNotExist: pass return HttpResponseNotFound() Conclusion Implementing these simple-sounding \u0026ldquo;let\u0026rsquo;s export the application data to the user\u0026rdquo; features takes a lot of work. Luckily we have great tools to do it safely in a way that doesn\u0026rsquo;t necessarily expose the data to anyone who shouldn\u0026rsquo;t see it. The method described here doesn\u0026rsquo;t work for all cases but if it does, it is pretty simple and straightforward. Storing user data in a way that is encrypted at rest and inaccessible without proper authentication leaves me sleeping better at night.\nOne important thing I intentionally left out here is testing. These kind of things can be tricky to test properly but as long as you keep the individual moving parts simple and small enough, it\u0026rsquo;s not impossible either.\n","permalink":"https://til.unessa.net/django/encrypted-archives/","tags":["django","python","encryption","backblaze","b2","gdpr","security","celery"],"title":"Encrypted Data Archives With Django And Backblaze B2"},{"contents":"Using a custom user model in a Django project is almost always a good idea. The documentation for adding one when starting a project is good and clear but switching to a custom model mid-project is not that easy. 
This blog post explains one way, and ticket #25313 has several helpful comments.\nThe following steps based on comment #18 worked for me:\nStage 1, in development Create new app (or use existing one that has no migrations yet) for the new user model Create a user model that is identical to the auth.User: class User(AbstractUser): class Meta: db_table = \u0026#34;auth_user\u0026#34; Add the new user model app to INSTALLED_APPS and add the new model as AUTH_USER_MODEL Replace all occurences of from django.contrib.auth.models import User with from newusermodelapp.models import User (Note: the documentation recommends using django.contrib.auth.get_user_model() instead but in practise the user model almost never changes to in my experience using normal module import is simpler and works better. YMMW) Delete all old migrations (NOTE: do NOT run following in a root that includes virtualenv folder): find . -path \u0026#34;*/migrations/*.py\u0026#34; -not -name \u0026#34;__init__.py\u0026#34; -delete find . -path \u0026#34;*/migrations/*.pyc\u0026#34; -delete Create new migrations from scratch: manage.py makemigrations The tests should pass now and the project should run normally. The last step is to manually reset the migrations table in the database by running TRUNCATE TABLE django_migrations; in the db shell and then finally run all migrations with a fake flag: manage.py migrate --fake Push the tested code for running the first part in production. Stage 2, in production Pull new code, update requirements Reset the migration table TRUNCATE TABLE django_migrations; and then fake all the migrations manage.py migrate --fake Stage 3, in development Now remove the db_table = \u0026quot;auth_user\u0026quot; from the custom user model, create migrations Apply the migrations manage.py migrate Lastly, the content types are now mixed up and they need fixing one way or another. An easy way is to execute the following SQL (example in PostgreSQL): UPDATE django_content_type SET app_label = \u0026#39;nonexistent\u0026#39; WHERE app_label = \u0026#39;newusermodelapp\u0026#39; and model = \u0026#39;user\u0026#39;; UPDATE django_content_type SET app_label = \u0026#39;newusermodelapp\u0026#39; WHERE app_label = \u0026#39;users\u0026#39; and model = \u0026#39;user\u0026#39;; Make sure the tests pass, then commit and push the code Stage 4, in production Pull the new code, then execute steps 2 and 3 from the previous stage in production.\nNow you have a custom user model in a fresh state an you can modify the model as you need using a normal development process.\nFinal notes Splitting the process in to smaller steps may or may not work for your project. If the production database is small enough, an easy way to simplify this process would be to copy the production database in to development environment, run all the steps in dev, and finally import the modified database back to production.\nFinally, in a somewhat related note to self; always start new projects with a custom user model to avoid this kind of unnecessary mess!\n","permalink":"https://til.unessa.net/django/custom-user-model/","tags":["django","python","migrations"],"title":"Migrating to a Custom Django User Model Mid-Project"},{"contents":"I\u0026rsquo;ve migrated a couple of old Django projects from MySQL to PostgreSQL lately and decided to document the process here to help make it go faster in the future. If your old database is not exotic in any way the migration process is pretty fast and simple. 
The hard part is figuring out how and how much you should tweak in the old database to get rid of warnings/errors if there are some.\nPrerequisites You can go about this many ways. This guide uses pgloader and local databases (for example with Docker). Before you start:\n Install pgloader (apt-get install pgloader on Linux or brew install pgloader on macOS) Start local MySQL and import the old database Start local PostgreSQL and create the new database + needed roles etc Optional: install and configure your favourite graphic MySQL management tool to do fast small edits if needed Note: you can obviously use remote databases for this as well, but the actual commands and networking options may vary. Working over the internet can also be quite a bit slower depending on your database size and internet connection.\nThe Migration This command (just change the connection details) starts the migration and displays possible warnings plus a summary table after the migration is finished:\npgloader mysql://[root]:[docker]@[127.0.0.1]/[djuninen] postgres://[postgres]@[127.0.0.1]/[djuninen] Cleanig Up The Data In the real world the data in your old database might have some issues and the summary report might have lots of errors / missing rows. Depending on the project and the data in question you might want to make this process more strict and have for example tests to measure the number of objects etc.\nI\u0026rsquo;m mostly working with non-sensitive data where its not a major issue if a blog post is missing a tag or if some individual rows get deleted in the migration so my tactic was just to find the source for any errors / warnings and iterate the process as many times as needed.\nMost typical cases for me were individual rows that had for example NOT_NULL date_modified columns but were undefined in the data. I just went through these by hand and added a date to these. I also bumped into few missing or invalid relations. Depending on the data I fixed or removed these, whatever made more sense. Having a tool in hand to make quick tweaks to the data helped a lot. I was also working with very small (\u0026lt;200Mb) databases so there weren\u0026rsquo;t that much work. For bigger databases or more important data I would just spend whatever time is needed to make sure the data is correct first before even trying to migrate it. 
The older and bigger your database is, the more work you\u0026rsquo;ll probably need to massage the data beforehand.\nTesting For critical production data (anything else than just marketing blog posts) I would write two kinds of tests; 1) simple tests that count rows, sums, whatever important and easy metrics you come up with that can be easily be verified before and after the migration, and 2) end-to-end tests that hit at the very least the most important pages on the site and make sure they are identical before and after the migration.\nFor personal projects with noncritical data I didn\u0026rsquo;t really see the point as the normal e2e tests already cover that the db change itself didn\u0026rsquo;t screw anything up and as long as there weren\u0026rsquo;t any visible issues with the pgloader script itself, everything should be fine.\nLastly, you of course should keep backups of both before and after databases at hand for a while in case something comes up after the migration.\n","permalink":"https://til.unessa.net/mysql/migrating-mysql-to-postgresql/","tags":["mysql","postgresql","howto","django"],"title":"How to migrate MySQL database to PostgreSQL"},{"contents":" See TILs tagged with Vercel Useful Links Environment Variables Docs vercel.json Project Configuration Configuration Redirects for JavaScript Routers For routers like vue-router with history mode enabled, something like this in vercel.json works:\n\u0026#34;rewrites\u0026#34;: [ { \u0026#34;source\u0026#34;: \u0026#34;/(.*)\u0026#34;, \u0026#34;destination\u0026#34;: \u0026#34;/index.html\u0026#34; } ] Redirects for Python Functions This (legacy) configuration is the only one that seems to work with Python at the moment:\n\u0026#34;routes\u0026#34;: [ { \u0026#34;src\u0026#34;: \u0026#34;/(.*)\u0026#34;, \u0026#34;dest\u0026#34;: \u0026#34;/\u0026#34; } ] Misc Gotcha when adding a new project: you can\u0026rsquo;t select a branch when creating a project; it automatically uses the main branch. So if your code is not available on the main branch, yu can\u0026rsquo;t create a project for it. (Easy workaround: create whatever works, then edit all the settings afterwards.) ","permalink":"https://til.unessa.net/cheatsheets/vercel/","tags":["vercel","cheatsheet"],"title":"Vercel Cheat Sheet"},{"contents":" Pytest Official Docs Running Fail fast / stop after first failure: pytest -x Run from module or directory: pytest dir/tests/footest.py, pytest dir/ Run specific markers: pytest -m marker (mark with @pytest.mark.mymarker) Drop to pdb on failure: pytest --pdb (set breakpoint w/ breakpoint()) Recreate database using pytest-django: --create-db Configuration pytest.ini conftest.py Use pytest-dotenv to read environment variables from .env files Use pytest-xdist and pytest -n NUMCPUS to parallelize test running Python debugging in VS Code ","permalink":"https://til.unessa.net/cheatsheets/pytest/","tags":["python","pytest","cheatsheet"],"title":"Pytest Cheat Sheet"},{"contents":"It\u0026rsquo;s very easy to add a custom domain to a Vercel project. It\u0026rsquo;s also easy to redirect the default Vercel project URL to another domain or set up separate domains for staging and production. But I had to do some digging to be able to migrate my project from domain foo to point to a new project on domain bar.\nThe Problem Back in January I created a simple dashboard for following my exercises and put it on health.unessa.net. The project has kept growing and growing, and eventually I decided to convert it to a full-blown blog. 
The blog needed a proper name and I wanted to change the domain to match it as well. I put the new project in a new repository and published it at tuonela.unessa.net. Now I had a problem; how do I redirect the (almost zero) traffic from the old project to the new without breaking the old URLs?\nThe Solution Prerequisites: two different Vercel projects w/ custom domains configured.\n Remove the old custom domain from the original Vercel project. Add the old custom domain to the new Vercel project. Configure a 301 redirect from the old domain to the new domain in the new Vercel project. Profit!1 This is a proper 301 redirect setup which respects your URLs as well. (So all your old URLs from the old domain are redirected to the same path on the new domain.)\n","permalink":"https://til.unessa.net/vercel/moving-domains/","tags":["vercel","dns","domain","migration","refactoring"],"title":"Migrating Vercel Project To Another On A Different Domain"},{"contents":"I\u0026rsquo;ve been playing with MongoDB Realm lately and my last exercise was to integrate the Realm Auth with Nuxt Auth. They both implement a standard Oauth2 workflow but turns out making them work together wasn\u0026rsquo;t as straightforward as one might think.\nMongoDB Realm And [Google] OAuth 2.0 From the documentation:\n The Google authentication provider allows users to log in with their existing Google account through Google Sign-In. When a user logs in, Google provides MongoDB Realm with an OAuth 2.0 access token for the user. Realm uses the token to identify the user and access approved data from Google APIs on their behalf.\n In order to unite the Nuxt Auth module and Realm Auth module, I needed to first add Google login to the Nuxt project, then intercept the oauth login flow, and finally inject the Realm login.\nBoth realm-web and @nuxtjs/auth-next packages are relatively young and the documentation is unfortunately on par with the rest of the JavaScript ecosystem. Both are also written in TypeScript and missing some exports so the task was mostly studying the code.\nAdding Google Login to a Nuxt App Nuxt Auth ships with a built-in Google provider so this part was very easy, but I was surprised of the lack of proper documentation or examples. Assuming you have already configured the Google Oauth 2.0 Client API, here\u0026rsquo;s a working configuration for nuxt.config.js auth section:\nstrategies: { google: { clientId: process.env.GOOGLE_CLIENT_ID, redirectUri: process.env.LOGIN_REDIRECT_URI, scope: [\u0026#39;profile\u0026#39;, \u0026#39;email\u0026#39;], responseType: \u0026#39;token id_token\u0026#39;, codeChallengeMethod: \u0026#39;\u0026#39;, }, }, To log in, just call this.$auth.loginWith('google'), that\u0026rsquo;s all there\u0026rsquo;s to it. See the official documentation for more details.\nImplementing A Custom Nuxt Auth Oauth2 Scheme After spending one whole day doing trial and error with all kinds of different variations and methods of using realm-web together with the built-in Google provider, writing a custom scheme for Nuxt Auth seemed to be the least bad option.\nThe greatest challenge here was the fact that some of the needed helper functions weren\u0026rsquo;t exported by the package so I needed to copy them in the project. I created a ticket for this and also added the whole scheme code as an example in the comments. Save the scheme (full code in linked issue) in ~/schemes/mongoAuth.ts and copy the needed utils from the repo into ~/schemes/utils.ts. 
","permalink":"https://til.unessa.net/vercel/moving-domains/","tags":["vercel","dns","domain","migration","refactoring"],"title":"Migrating Vercel Project To Another On A Different Domain"},{"contents":"I\u0026rsquo;ve been playing with MongoDB Realm lately and my last exercise was to integrate the Realm Auth with Nuxt Auth. They both implement a standard OAuth2 workflow but it turns out making them work together wasn\u0026rsquo;t as straightforward as one might think.\nMongoDB Realm And [Google] OAuth 2.0 From the documentation:\n The Google authentication provider allows users to log in with their existing Google account through Google Sign-In. When a user logs in, Google provides MongoDB Realm with an OAuth 2.0 access token for the user. Realm uses the token to identify the user and access approved data from Google APIs on their behalf.\n In order to unite the Nuxt Auth module and the Realm Auth module, I needed to first add Google login to the Nuxt project, then intercept the OAuth login flow, and finally inject the Realm login.\nBoth the realm-web and @nuxtjs/auth-next packages are relatively young and the documentation is unfortunately on par with the rest of the JavaScript ecosystem. Both are also written in TypeScript and missing some exports so the task was mostly studying the code.\nAdding Google Login to a Nuxt App Nuxt Auth ships with a built-in Google provider so this part was very easy, but I was surprised by the lack of proper documentation or examples. Assuming you have already configured the Google OAuth 2.0 Client API, here\u0026rsquo;s a working configuration for the nuxt.config.js auth section:\nstrategies: { google: { clientId: process.env.GOOGLE_CLIENT_ID, redirectUri: process.env.LOGIN_REDIRECT_URI, scope: [\u0026#39;profile\u0026#39;, \u0026#39;email\u0026#39;], responseType: \u0026#39;token id_token\u0026#39;, codeChallengeMethod: \u0026#39;\u0026#39;, }, }, To log in, just call this.$auth.loginWith('google'), that\u0026rsquo;s all there is to it. See the official documentation for more details.\nImplementing A Custom Nuxt Auth Oauth2 Scheme After spending one whole day doing trial and error with all kinds of different variations and methods of using realm-web together with the built-in Google provider, writing a custom scheme for Nuxt Auth seemed to be the least bad option.\nThe greatest challenge here was the fact that some of the needed helper functions weren\u0026rsquo;t exported by the package so I needed to copy them into the project. I created a ticket for this and also added the whole scheme code as an example in the comments. Save the scheme (full code in the linked issue) in ~/schemes/mongoAuth.ts and copy the needed utils from the repo into ~/schemes/utils.ts. (Didn\u0026rsquo;t say it was pretty!)\nAnother catch was the fact that for an unknown reason imports from the @nuxtjs/auth-next package need to be written as import foo from '~auth/runtime' and for TypeScript to understand the magic, you need to have a shim.d.ts that resolves the exports.\nSo, in ~/shim.d.ts add the following:\ndeclare module \u0026#39;~auth/runtime\u0026#39; { export { Oauth2Scheme } from \u0026#39;@nuxtjs/auth-next\u0026#39; } Finally, modify the Nuxt Auth config to include the custom strategy (which now needs explicit endpoint URLs as we want to use Google but aren\u0026rsquo;t using the built-in provider):\nstrategies: { modifiedOauth: { scheme: \u0026#39;~/schemes/mongoAuth\u0026#39;, clientId: process.env.GOOGLE_CLIENT_ID, redirectUri: process.env.LOGIN_REDIRECT_URI, scope: [\u0026#39;profile\u0026#39;, \u0026#39;email\u0026#39;], responseType: \u0026#39;token id_token\u0026#39;, codeChallengeMethod: \u0026#39;\u0026#39;, endpoints: { authorization: \u0026#39;https://accounts.google.com/o/oauth2/auth\u0026#39;, userInfo: \u0026#39;https://www.googleapis.com/oauth2/v3/userinfo\u0026#39;, }, }, }, Now you have fully working Google authentication in your Nuxt app which is also integrated with Realm!\nFinal Words Although this setup seems to work fine for now, I probably wouldn\u0026rsquo;t push anything like this into production on a mission critical site. I wouldn\u0026rsquo;t consider either realm-web or @nuxtjs/auth-next production ready by any stretch of the imagination. On the other hand, if you want an easy way to integrate your Nuxt app with MongoDB Realm, setting this whole workflow up takes less than an hour.\nI hope both of these packages get the love and attention they deserve to mature a bit as having these kinds of cool tools is really great!\n","permalink":"https://til.unessa.net/mongodb/mongo_realm_auth/","tags":["mongodb","realm","nuxt","auth","oauth"],"title":"Integrating MongoDB Realm Auth With Nuxt Auth"},{"contents":"I\u0026rsquo;ve been working with modern JavaScript applications actively since 2016. One of the challenges of the JS ecosystem is the outrageous number of available packages. This is a continuously gardened list of useful and preferred JavaScript/TypeScript packages that I\u0026rsquo;ve personally worked with.\nDates Preferred: day.js (Github)\nDay.js is a great modern alternative to moment.js. It\u0026rsquo;s small, tree-shakeable, and ships with full TypeScript support. It also has a ton of plugins that will most likely cover all of your needs out of the box.\nAlternatives: date-fns, luxon\nAvoid / deprecate: moment.js (EOL)\nHTTP Preferred: axios (Github)\nIf you need to do HTTP queries from your app, just install axios and be done with it. Sure, you\u0026rsquo;ll save ~6k from the bundle if you just use fetch or some tiny wrapper for it but when your project grows, your needs will grow and you\u0026rsquo;ll end up installing axios anyway. It\u0026rsquo;s feature-complete, well documented, fully typed, and production ready \u0026ndash; just use it.\nAlternatives: gazillion.\nUtilities Preferred: Rambda (Github)\nMost JS developers know lodash or its predecessor underscore. Nowadays there are much better and more modern alternatives. Rambda focuses on functional programming and speed. It has a smaller API but it\u0026rsquo;s fully typed and super fast. It\u0026rsquo;s also well documented.\nFirst consider not including any utility library at all. Modern ES has tons of functionality, so you should try to use the native functions if possible.
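\nFor example, a few lodash staples that modern ES covers natively (an illustrative sketch of my own, not from any of these libraries):\nconst flat = [[1, 2], [3]].flat() const uniq = [...new Set([1, 1, 2])] const user = { id: 1, token: \u0026#39;x\u0026#39; } const safeUser = Object.fromEntries(Object.entries(user).filter(([key]) =\u0026gt; key !== \u0026#39;token\u0026#39;))\n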
If you need more, try Rambda.\nAlternatives: Ramda\nAvoid / deprecate: underscore, lodash\n","permalink":"https://til.unessa.net/javascript/recommended-js-packages/","tags":["javascript","typescript","live"],"title":"Recommended JS/TS Packages"},{"contents":"MongoDB Realm is a great Firebase alternative that is backed by a \u0026ldquo;proper\u0026rdquo; database. I was missing the automatic created_at and last_modified_at helpers from Django in a MongoDB project so I implemented them using triggers. Turns out it wasn\u0026rsquo;t as straightforward as I thought but still pretty simple in the end.\nMongoDB Realm has very powerful triggers which are JavaScript functions that operate on a linked Atlas (a hosted MongoDB) cluster. The possibilities with these are almost limitless \u0026ndash; you can even install your own npm packages! The only tricky part was to figure out a way to not fall into a forever loop when updating a document with a trigger that runs on every update.\nAFAIK there isn\u0026rsquo;t a \u0026ldquo;this update was triggered by a trigger\u0026rdquo;-flag so you need to figure out the bail-out condition manually. The following trigger function runs on insert, update and replace. It updates the \u0026lsquo;lastModifiedAt\u0026rsquo; field in a document to the current timestamp using the $currentDate operator, but only if the lastModifiedAt field is not updated in the changeEvent.\nexports = function(changeEvent) { const docId = changeEvent.documentKey._id; const description = changeEvent.updateDescription; const insertEvent = changeEvent.operationType === \u0026#34;insert\u0026#34;; if (!docId || (!insertEvent \u0026amp;\u0026amp; !description)) { return; } if (insertEvent || !Object.keys(description.updatedFields).includes(\u0026#34;lastModifiedAt\u0026#34;)) { context.services .get(\u0026#34;my-cluster-name\u0026#34;) .db(\u0026#34;my-db-name\u0026#34;) .collection(\u0026#34;my-collection-name\u0026#34;) .updateOne( { _id: docId }, { $currentDate: {lastModifiedAt: true} } ); } }; Adding an automated created_at field is much simpler; just run the trigger with insert events and you\u0026rsquo;re set. (Or, you can retrieve the same data without any extra work by using ObjectId.getTimestamp. I always like to have a dedicated field for this, hence a trigger.)\n","permalink":"https://til.unessa.net/mongodb/automatic_last_modified/","tags":["mongodb","realm","firebase","databases"],"title":"Automatic Last Modified Field With MongoDB"},{"contents":"Publicly visible links to site admin/staff functionality are a bad practice that is surprisingly common even today. Besides being harmful security-wise, it\u0026rsquo;s also bad for usability as it adds unnecessary navigation possibilities for non-admin users.\nThing is, it\u0026rsquo;s also very easy to fix. Here\u0026rsquo;s a one-liner that I\u0026rsquo;ve used in many projects:\nconst adminLinksVisible = localStorage.getItem(\u0026#39;admin-links\u0026#39;) === \u0026#39;true\u0026#39; Then just set this localStorage item on the admin index page and/or manually add the storage item from developer tools for those who need it (and document the practice so everyone can do it). In most cases most of the admin users aren\u0026rsquo;t so-called expert users so you should make sure to have links to the admin functionality visible in multiple places throughout the organisation infra (such as Slack or intranet).\nLastly, DO NOT RELY on hidden admin links as a security measure.
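\nSpelled out, the whole pattern is roughly this (an illustrative sketch; the storage key comes from the one-liner above, the route is made up): on the admin index page run localStorage.setItem(\u0026#39;admin-links\u0026#39;, \u0026#39;true\u0026#39;), and in the templates gate the links with the flag:\n\u0026lt;router-link v-if=\u0026#34;adminLinksVisible\u0026#34; to=\u0026#34;/manage\u0026#34;\u0026gt;Admin\u0026lt;/router-link\u0026gt;\n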
Hiding links like this is a useful layer of security by obscurity, but it should be just that; an additional layer on top of normal security measures.\n","permalink":"https://til.unessa.net/javascript/simple-staff-detection/","tags":["tip","javascript","localstorage","usability"],"title":"TIP: Hiding and Showing Admin Links"},{"contents":"Tauri is an interesting lightweight alternative to Electron which recently graduated to beta. These are my first impressions and experiences trying it out.\nInstalling and Hello World Tauri docs are pretty good for a project that has just barely reached beta. Tauri can be added as a dependency to any Node project but before you can do that you need to install some Rust tooling first. I wrote my first Hello World with Rust just a few months back so I got to skip most of that part. But the installer broke with a generic error message due to a too old Rust version. A quick rustup update fixed the issue and the tooling installation was all done in a few minutes.\nNext step was to integrate Tauri into my app. I created a small test repo using my Vite template and followed the instructions. In a couple of minutes I had the new dependencies installed, Tauri initialized and the native development window open. These steps were easy and fast and the Tauri CLI commands seemed pretty clear. Using Tauri in a Vite project means basically just adding one src-tauri source directory and a native yarn tauri dev development window to your workflow.\nI\u0026rsquo;ve worked with Electron projects before but haven\u0026rsquo;t published a native app myself. As Tauri is very young compared to Electron it\u0026rsquo;s understandable that it\u0026rsquo;s not as polished or deep yet. Some first impressions:\n the initial native app doesn\u0026rsquo;t have any native menus (at least on macOS). It would be useful to have at least a basic bare-bones main menu with a quit command to make the initial bootstrapping faster and easier. (In general it\u0026rsquo;s much faster to modify and extend than to learn how to do something from scratch.) Tauri ships with default icons for the app but not for the Dock (\u0026ldquo;System Tray\u0026rdquo; in Tauri language). Again, the default app shows up in the Dock so it would be helpful to have these icons configured by default as well. Tauri has a concept of patterns which is well documented but I still found it confusing. It would probably help if there was more documentation on how the default initialized app is configured in terms of these patterns and other features. I also would have appreciated some kind of easy default \u0026ldquo;use this if you are not sure what you want yet\u0026rdquo; path that would have gotten me started faster. (I\u0026rsquo;m actually not sure if one even needs to worry about these patterns when just starting out with a first demo app.) All in all, my first impressions of Tauri Beta 2 were positive. I like the philosophy and I really hope the project matures to be a real competitor to Electron. I\u0026rsquo;m considering starting a new native app project using Tauri to ship it on all three desktop platforms, it would probably be an interesting exercise.\n","permalink":"https://til.unessa.net/node/installing-tauri/","tags":["node","tauri","electron","rust"],"title":"Testing Tauri Beta"},{"contents":"There are situations where your Vue app template may be displayed before it\u0026rsquo;s fully compiled and therefore expose the uncompiled Vue moustache template tags.
In most situations this is not a problem but if you happen to get bitten by this issue, you can use the v-cloak directive and CSS to hide the element until the template is rendered:\n\u0026lt;div v-cloak\u0026gt; {{ message }} \u0026lt;/div\u0026gt; and CSS to hide it:\n[v-cloak] { display: none; } This is documented in the Vue 3 docs and I\u0026rsquo;ve seen the implementations in the wild many times but for some reason I never seem to remember this when I need to. Maybe this note helps.\n","permalink":"https://til.unessa.net/css/hiding-vue-moustache-tags/","tags":["css","vue","moustache","templates"],"title":"Hiding Uncompiled Vue Moustache Tags"},{"contents":"I don\u0026rsquo;t quite understand how some problems that feel very commonplace and like they would obviously have been solved by browsers a long time ago are still very much in flux and unsolved in 2021. One of those things is basic image handling in WYSIWYG editors. By basic image handling I mean aligning, resizing, and uploading. Most users who need these kinds of things are probably using proper CMSes or SaaS services so building things like these is not as common as one might think. Anyways, I needed to build a simple note-taking app with basic image support and after spending almost a full day comparing and testing various solutions, I ended up choosing Ckeditor 5 because it seemed to have the biggest developer community, and mature and very user-friendly image plugins.\nCkeditor5 Ckeditor, like TinyMCE, has been around forever. The good: it has all the features you can possibly imagine already available, and the important ones are also mature enough to use safely. The bad: despite being the \u0026ldquo;new\u0026rdquo; version of Ckeditor, it\u0026rsquo;s very much oldskool in many ways. For example there is no TypeScript support and it\u0026rsquo;s tightly coupled with old Webpack.\nCkeditor5 ships with a Vue (2 and 3) component but it has little to no actual value over using the vanilla JS class instead. The more unfortunate thing about Ckeditor5 is that using any plugins that are not included in the pre-configured builds requires you to manually configure and compile your own version of the editor. (If your project happens to use an old version of Webpack, you can use the source version instead after adding the necessary configuration to your Webpack config.)\nSo, after banging my head long enough into a wall, I went ahead and created a custom build for the project.\nCreating A Custom Build This is luckily documented pretty well. You need to fork the ckeditor/ckeditor5 repo, create a new branch from stable, pick an existing build that most closely matches what you need, and then modify src/ckeditor.js and webpack.config.js under that build\u0026rsquo;s directory.\nMost likely you also want to modify the corresponding package.json and publish the custom-built editor to a package registry so you can use it in your project.\nThe Costs Creating and maintaining a custom build of a third-party package is obviously a quite painful and suboptimal way of adding a dependency to your project, but it is open source and free. Before adding this kind of dependency to a commercial project you need to think hard about the cost of keeping it updated and safe. If you don\u0026rsquo;t have an easy way to automate the process and/or follow the upstream releases, I would advise against using this method. (Tip: try searching NPM for ckeditor5-build for a suitable third-party maintained package you could use instead.
There are packages like @blowstack/ckeditor5-full-free-build that are fairly well maintained and might suit your needs. YMMV.)\nNote For Future The upcoming version 2 of TipTap seems like a very promising modern alternative to Ckeditor. It\u0026rsquo;s headless (you are fully in control of both markup and CSS), written in TypeScript, and will support Vue 3 out of the box. Based on the current demos it should handle minimal image operations (adding, aligning) easily and writing the rest could be split for example into a separate open source project.\nI\u0026rsquo;m still amazed that something like this is not yet a solved problem!\n","permalink":"https://til.unessa.net/javascript/custom-ckeditor-builds/","tags":["yarn","ckeditor","javascript"],"title":"Creating Custom Ckeditor5 Builds"},{"contents":"I was bumping into SSL issues when creating new virtualenvs with pyenv. Found tons of similar issues but the main problem for me wasn\u0026rsquo;t a faulty pyenv or SSL installation but an old build of a specific pyenv Python version. As soon as I installed a new Python version, everything worked fine.\nThis isn\u0026rsquo;t exactly a huge surprise or fix but I wanted to document the issue here to remind myself when I eventually run into similar issues in the future.\n","permalink":"https://til.unessa.net/python/python-ssl-macos/","tags":["python","ssl","virtualenv"],"title":"Python SSL Issues On macOS"},{"contents":"Typing in Python is not yet very mature. This small example displays several issues:\nif request.user.is_authenticated: return JsonResponse( { \u0026#34;user\u0026#34;: { \u0026#34;uid\u0026#34;: request.user.uid, \u0026#34;username\u0026#34;: request.user.username, \u0026#34;email\u0026#34;: request.user.email, } } ) The original type of request.user is Union[AbstractBaseUser, AnonymousUser] (which itself is already wrong; the user is actually a Django model that inherits from AbstractBaseUser, not AbstractBaseUser itself). MyPy doesn\u0026rsquo;t understand that is_authenticated narrows the options, so you need to do it manually using casting.\nHere\u0026rsquo;s the final, working code (cast comes from the standard typing module):\nif request.user.is_authenticated: user = cast(User, request.user) return JsonResponse( { \u0026#34;user\u0026#34;: { \u0026#34;uid\u0026#34;: user.uid, \u0026#34;username\u0026#34;: user.username, \u0026#34;email\u0026#34;: user.email, } } ) ","permalink":"https://til.unessa.net/python/mypy-casting/","tags":["python","mypy","typing","django"],"title":"Casting Types With MyPy"},{"contents":"Some Python dependencies require build tools which can be problematic when running inside Docker or a CI environment. Often these packages are also needed only in development or production so having them as optional dependencies can be really useful. I just learned that Poetry can do this.\nFirst, install your dependency with the --optional flag:\npoetry add -D pywatchman --optional Not sure if this step is needed, YMMV, but I needed to add a new section in the pyproject.toml file and run poetry update after:\n[tool.poetry.extras] pywatchman = [\u0026quot;pywatchman\u0026quot;] and now, when you do a normal install, the optional packages are not installed.
If you want to install those, run:\npoetry install -E pywatchman This is very handy as I can now easily keep things like pywatchman and uwsgi in my Poetry deps without worrying about their build dependencies in CI and/or local environments.\nAs a very long-time user of pip-tools, I really like Poetry as a modern alternative.\n","permalink":"https://til.unessa.net/python/poetry-extra-deps/","tags":["python","poetry","docker"],"title":"Optional Dependencies With Poetry"},{"contents":"Had some trouble getting Playwright to run properly on GitLab CI so I decided to document my learnings here.\nThe Playwright CI documentation was helpful for tracking and tackling the problems:\n Setting the DEBUG=pw:browser* environment variable will output debug logs during the browser install which is really helpful. Most issues seem to relate to memory handling and consumption. Adding the --disable-dev-shm-usage flag for Chromium should fix issues in environments like Docker with no access to /dev/shm or a limited-size /dev/shm. Launch the browser like this: const browser = await playwright.chromium.launch({ args: [\u0026#39;--disable-dev-shm-usage\u0026#39;] }); Caching is also a common issue in CI environments. Setting the PLAYWRIGHT_BROWSERS_PATH=0 environment variable makes Playwright store the browser binaries inside node_modules which resolves most issues. Microsoft also offers pre-built Docker images which should be an easy way to run inside GitLab CI. Use only one browser By default Playwright will install Chromium, Firefox and Webkit. If you don\u0026rsquo;t need all of these, there are specific versions of the package as well. As far as I can tell the only difference from the full package is the number of included browser engines.\n playwright-chromium playwright-webkit playwright-firefox Force install browsers To force the browser install, run:\nnpx playwright-cli install\nRunning in Gitlab CI After trying out multiple different things, I ended up switching the runner to use the mcr.microsoft.com/playwright:bionic Docker image which is not optimal as I run several other scripts in the same pipeline and wanted to optimize the install into one simple and lightweight step. This extended the pipeline runtime by about two minutes, but it works now.\nI did leave the install script in, which still produces an error (that it doesn\u0026rsquo;t produce locally):\n$ npx playwright-cli install (node:62) UnhandledPromiseRejectionWarning: Error: EACCES: permission denied, mkdir '/builds/uninen/personal-data/.npm/_npx/20/lib/node_modules/playwright-cli/node_modules/playwright/.local-browsers' I might revisit this sometime in the future when things mature a bit (and my nerves get a bit longer). It works, that\u0026rsquo;s enough for me for now.\nRunning in Vercel Creating the browser instance with the --disable-dev-shm-usage flag and adding the PLAYWRIGHT_BROWSERS_PATH=0 environment variable is all you need for Vercel.\n","permalink":"https://til.unessa.net/gitlab/playwright-gitlab-ci/","tags":["gitlab","playwright","testing","ci","vercel"],"title":"Running Playwright in GitLab CI"},{"contents":"My days have been starting to look worryingly similar for the past few weeks, so I wanted to try to make this Christmas week stand out at least a little. After working at a pretty hectic pace for a long time I\u0026rsquo;m finding it pretty difficult to put down the developer tools and wind down.\nI decided to take at least a couple of weeks off from normal work routines and instead do something different with no schedules and release targets.
Still need to do a couple of small tasks for Slipmat but after that I\u0026rsquo;m going to drop all that work for a while.\nTo this end I spent the past week mostly studying and learning new things like SwiftUI and WebRTC.\nWebRTC WebRTC is a Web technology that I\u0026rsquo;ve been interested in and following for several years. It\u0026rsquo;s no surprise that the browser developers have put significantly more resources into WebRTC this year as all kinds of conference technologies are seeing usage increases of hundreds of percent due to the pandemic.\nI started working on a demo project that would allow me to build an audio-only chat for the Slipmat live page to complement the text chat. This is purely an experimental project but it\u0026rsquo;s very interesting as it combines lots of new technologies and tools that I haven\u0026rsquo;t personally used in production yet. I want to get the demo running during this short break from other work.\nPersonal Data Archiving I\u0026rsquo;ve been collecting and archiving all my data for over 20 years already. But most of that data has been stuck in a SQL database or on various network disks, not in a really usable form. Encouraged by Simon\u0026rsquo;s Dogsheep project I started to take small steps towards automating this data collection with GitLab CI into a portable form.\nAs most of the data is natively handled as JSON, I chose JSON as the base format for the archives as well. It\u0026rsquo;s really easy to work with, both machine and human readable, and easy to import to databases or use with static site tools like Hugo or Gridsome.\nThis week I added a Garmin Connect data collector (garmin-connect-to-json on NPM) to the toolbelt. Writing these collectors is an endless process but having started it feels good.\nI put together a new private repository on GitLab with a scheduled CI pipeline that uses all these collectors (currently fetching Tweets, Wakatime, and Garmin data) and archives the data into JSON files. Next step will be to write some kind of frontend that can be used to browse and search it. I\u0026rsquo;m not planning to use Datasette for the final site but might still use it as a quick and easy temporary solution.\nThe primary end goal of this project is to get all my data into a state where, if a Web site (that I\u0026rsquo;ve poured data into and that I find important in some sense) shuts down or becomes evil like FB, I can just stop using it and not lose any of that data. (That said, not sure if I want to touch my FB data at all.) A secondary utility of this kind of personal data collection is the easy access and reuse of all the data. Having a uniform and easy-to-use API to a big collection of data is a pretty nice thing to have.\nMisc Interestingly my WebRTC demo got stuck in a quest for finding a usable JavaScript rotary knob that would work like all audio software knobs do (meaning vertical and very precise control instead of mimicking a real knob). Browsers should have this element built in! I still haven\u0026rsquo;t written any of the year notes yet, but have been thinking about them quite a bit. I have a few ideas on what I want to improve in 2021 and for example the Garmin data will be helpful in building something that\u0026rsquo;ll hopefully help me stick to my goals. ","permalink":"https://til.unessa.net/weeknotes/2020-52/","tags":["weeknotes","swift","webrtc","garmin"],"title":"Weeknotes 2020/52 - WebRTC, SwiftUI and personal data archiving"},{"contents":"I wrote my first project using Playwright, an interesting headless browser API similar to Puppeteer but for all major browsers (Chromium, Firefox and Webkit). My first hurdle was reading a simple JSON response as opposed to a normal HTML page. The docs suggest attaching a listener to all requests and filtering what you want there, but I found the following to be the easiest way:\npage .goto(url) .then(async (response) =\u0026gt; { const body = await response.body() content = JSON.parse(body.toString()) }) This breaks the async/await convention (and assigns to an outer content variable), but it works.\n","permalink":"https://til.unessa.net/scraping/playwright-json/","tags":["playwright","json","node","chromium","firefox","webkit"],"title":"Fetching JSON With Playwright"},{"contents":"This week was broken into a number of small projects, mainly around Slipmat development. I also worked on various pieces of tooling that help with this blog and various project dashboards.\nI had a birthday on Thursday, I played my radio show on Friday, and I managed to take one whole day off, so this week was much less productive in general than last week.\nFetching Tweets to a Hugo blog with GitLab CI I published not one but two NPM packages this week. Both of these were needed for this TIL blog of mine, but they are also the first steps toward collecting my own data in JSON format for something like Simon Willison\u0026rsquo;s Dogsheep.\ntweets-to-json is a simple package that does what it says on the tin. I tried to make it flexible but there\u0026rsquo;s plenty of room for improvements still. To be able to update the tweets to this blog via the GitLab CI, I also wrote push-to-repo which is a small Node wrapper for the GitLab API that updates a single file from CI back to the repo. Together I can use these to update and rebuild this blog with a scheduled CI script.\nAutomatically updating and displaying Tweets with a Hugo blog Now that I have a CI script that periodically fetches my Tweets as a JSON file, I wanted to link them to these TIL-posts based on hashtags and username mentions. The latter is still a work in progress but I managed to update the blog templates to include the latest related tweets in these posts.
The relevant template code:\nfirst, select tweets whose tags intersect the post tags:\n{{ $related_tweets := where $.Site.Data.tweets \u0026#34;.tags\u0026#34; \u0026#34;intersect\u0026#34; .Params.tags }} then, if there were related tweets, show an aside:\n{{if gt (len $related_tweets) 0}} \u0026lt;aside class=\u0026#34;md:w-5/12\u0026#34;\u0026gt; \u0026lt;h2 class=\u0026#34;my-4 text-lg font-semibold\u0026#34;\u0026gt; Latest related tweets from \u0026lt;a href=\u0026#34;https://twitter.com/uninen\u0026#34;\u0026gt;@Uninen\u0026lt;/a\u0026gt; \u0026lt;/h2\u0026gt; {{ range first 5 $related_tweets -}} \u0026lt;div class=\u0026#34;mb-4\u0026#34;\u0026gt; {{$pubdate := time (int .timestamp)}} {{ .text | markdownify }} \u0026lt;a href=\u0026#34;https://twitter.com/uninen/status/{{ .id }}\u0026#34;\u0026gt;\u0026lt;time class=\u0026#34;block font-mono text-sm leading-tight text-gray-400\u0026#34; datetime=\u0026#34;{{ dateFormat \u0026#34;2006-01-02T15:04:05-07:00\u0026#34; ($pubdate) }}\u0026#34;\u0026gt; {{ $pubdate.Format \u0026#34;Jan 2, 2006 15:04\u0026#34; }} \u0026lt;/time\u0026gt;\u0026lt;/a\u0026gt; \u0026lt;/div\u0026gt; {{ end }} \u0026lt;/aside\u0026gt; {{end}} I seem to use @-mentions more than hashtags in my tweets so I need to figure out a way to link Twitter usernames to tags and then filter with them as well.\nMisc Dove into the new Apple SwiftUI tutorial, and while I didn\u0026rsquo;t manage to complete it, I did manage to do some minor updates to the experimental Slipmat iOS Mobile app. Continued last week\u0026rsquo;s UI design experiments and wrote two new page layouts for the Slipmat Live Page for desktop and mobile. The current page is only months old but it\u0026rsquo;s complex enough to grow into a beast when trying to accommodate both mobile and desktop views with the same markup so I decided that it makes more sense to split the design and write two good templates instead of one almost-good one. The new layouts are just empty placeholders with pure TailwindCSS that demo the idea and they seem to work pretty well on all browsers. More tuning needed, tho. After lots of pondering about the next small Slipmat project, I re-started the Slipmat Polls app in order to maybe get an MVP in production next week. We\u0026rsquo;ll see. Hunted down an old architecture drawing of the current Slipmat project and doodled a new one for the frontends where every big component sits in its own repo. Not sure if I want to keep everything in separate repos or combine them in a single monorepo but having smaller separate projects for different parts of the site seems absolutely the best way to go now that we have a common UI library on NPM. Have lots of ideas for end-of-the-year posts both from a personal and Slipmatio POV. Hopefully I have the stamina to write at least one of those. I\u0026rsquo;ve been working non-stop for months now. I think I\u0026rsquo;m going to try to take a week or two off at some point, but not sure when exactly yet. ","permalink":"https://til.unessa.net/weeknotes/2020-51/","tags":["weeknotes","slipmatio","npm","gitlab","twitter","swiftui","architecturing"],"title":"Weeknotes 2020/51 - NPM packages, GitLab CI, SwiftUI, architecturing"},{"contents":"MySQL usually gets installed on macOS with Brew, which installs a bunch of dylib libraries in /usr/local/opt/mysql/lib/.
For some reason my Python installation is looking for the wrong version of these files and I\u0026rsquo;ve yet to find a proper solution for matching up the dependencies.\nThe error message looks like this:\n 'Did you install mysqlclient or MySQL-python?' % e django.core.exceptions.ImproperlyConfigured: Error loading MySQLdb module: dlopen(/Users/uninen/.envs/slipmatio/lib/python2.7/site-packages/MySQLdb/_mysql.so, 2): Library not loaded: /usr/local/opt/mysql/lib/libmysqlclient.20.dylib Referenced from: /Users/uninen/.envs/slipmatio/lib/python2.7/site-packages/MySQLdb/_mysql.so Reason: image not found. Did you install mysqlclient or MySQL-python? This old project uses mysqlclient which is installed properly but it seems that a recent brew update broke things. The library it wants (for MySQL 8) is libmysqlclient.20.dylib yet there is only a libmysqlclient.21.dylib in the lib folder. So, I just manually symlinked that in place and it seems to work fine. Obviously not a proper solution, but so far the only one that gets my local installation working.\n\u0026gt; cd /usr/local/opt/mysql/lib/ \u0026gt; ln -s libmysqlclient.21.dylib libmysqlclient.20.dylib \u0026gt; ls -la drwxr-xr-x 17 uninen staff 544 Dec 17 14:49 ./ drwxr-xr-x 19 uninen staff 608 Dec 17 14:38 ../ lrwxr-xr-x 1 uninen staff 23 Dec 17 14:49 libmysqlclient.20.dylib@ -\u0026gt; libmysqlclient.21.dylib -rw-r--r-- 1 uninen staff 7322032 Dec 17 14:38 libmysqlclient.21.dylib -r--r--r-- 1 uninen staff 8137760 Sep 23 16:05 libmysqlclient.a lrwxr-xr-x 1 uninen staff 23 Sep 23 16:05 libmysqlclient.dylib@ -\u0026gt; libmysqlclient.21.dylib There are a couple of 8+ year old related discussions on StackOverflow, not sure if this is a py27 issue or something else.\n","permalink":"https://til.unessa.net/mysql/error-loading-mysqldb-module/","tags":["mysql","python","issues","macos","brew"],"title":"Fixing \"Error loading MySQLdb module\" on macOS 10.15 Catalina"},{"contents":"I\u0026rsquo;ve been publishing my first NPM packages lately and have needed to install NPM packages locally for testing. There are two ways to do this and they both have their separate use cases.\nUsing yarn add First, you can install local packages with Yarn simply by giving the package path as an argument for add:\nyarn add /path/to/my/package This installs the package like any other package by copying it to your node_modules. This has the benefit of working exactly like any normal package but the downside of being an actual copy (instead of a symlink) of your local project. If you want to test and develop your local project, you might want to try linking instead.\nIf you use this method for developing, you need to yarn add the path again whenever you want to test new changes.\nUsing yarn link Yarn (like npm) has a link command that allows you to symlink any local NPM package in your project. This is very handy for fast development and testing but it also has the downside of the linked package not being physically in your node_modules which can break some functionality (that relies on looking up parent paths).\nLinking a local module happens in two steps:\n Run yarn link in the project you want to link Run yarn link \u0026quot;package-name\u0026quot; wherever you want to link it to ","permalink":"https://til.unessa.net/node/installing-local-projects/","tags":["yarn","npm","node"],"title":"Installing local NPM packages with Yarn"},{"contents":"The last few weeks I\u0026rsquo;ve been working on some small micro APIs for Slipmat. This week I built a new homepage for Slipmat Labs and added changelogs for the Spotify Playlist Parser API, Tidal Playlist Parser API and the new Discogs Search API.\nI\u0026rsquo;ve really liked working with FastAPI and Vercel for making these kinds of micro APIs. My typical Python stack runs on Docker and usually has lots of heavy dependencies like databases and background runners. These FastAPI projects are great because they\u0026rsquo;re literally just one file with some extra for docs and theming. What\u0026rsquo;s best, Vercel is amazingly fast at deploying them, so the whole CI/CD pipeline feels really rewarding as any changes you push are in production in less than a minute.
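\nTo illustrate what \u0026ldquo;literally just one file\u0026rdquo; means, a complete FastAPI micro API can boil down to something like this (a minimal sketch, not one of the actual Slipmat services):\nfrom fastapi import FastAPI app = FastAPI() @app.get(\u0026#34;/\u0026#34;) def index(): return {\u0026#34;status\u0026#34;: \u0026#34;ok\u0026#34;}\n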
Privacy-friendly analytics I wanted to have some idea of the usage of Slipmat Labs projects but I didn\u0026rsquo;t want to add Google Analytics to them so I\u0026rsquo;m experimenting with Plausible.io instead. Based on initial impressions, Plausible works really great for small sites like Slipmat. It unfortunately doesn\u0026rsquo;t have an API so I can\u0026rsquo;t use it for the whole Slipmat, but as the project is open source, I explained my use case in their discussions - maybe someday.\nWeb Design Exercises \nGot inspiration from the new GitHub homepage to flex new Tailwind 2.0 muscles and created a new repo for Slipmat UI development. I spent hours on a couple of simple ideas but didn\u0026rsquo;t get very far. I really like the modern \u0026ldquo;let\u0026rsquo;s use all the available space\u0026rdquo; kind of designs, though.\nI\u0026rsquo;ve done quite a lot of work to open up all my Slipmat development but UI design is something that probably doesn\u0026rsquo;t work very well in the open. I\u0026rsquo;ll happily post some mockups and beta designs at some point but not yet.\nMisc Published Slipmat Beta 35 with a couple of nice long-awaited features plus several smaller fixes and tweaks. Added Discord integration to multiple Slipmat repositories and added a firehose channel to our community chat to make the development work even more visible to ordinary users. Sure, the channel doesn\u0026rsquo;t offer any useful information for most, but what it does show really well is the amount of work that goes into the repositories every single day. Reinstalled the Wakatime plugin in VSCode to get more detailed statistics on my coding habits. Based on the first week\u0026rsquo;s stats, I spend way too much time coding! ","permalink":"https://til.unessa.net/weeknotes/2020-50/","tags":["weeknotes","apis","vercel","fastapi","analytics","webdesign","slipmatio"],"title":"Weeknotes 2020/50 - Micro APIs, ethical analytics"},{"contents":"GitLab has two methods for third-party integrations: webhooks and official integrations.
Discord has an official integration page but it turns out it has a bug where the save button just hangs and doesn\u0026rsquo;t save anything, which makes adding the integrations quite annoying.\nLearned through trial and error today that if you hit the \u0026ldquo;Test Settings\u0026rdquo; button before saving, the save still hangs but now the integration is saved after some time.\n(Was going to report this bug, but gave up and wrote this TIL instead.)\n","permalink":"https://til.unessa.net/gitlab/discord-integrations/","tags":["gitlab","discord","integrations"],"title":"Integrating GitLab to Discord"},{"contents":"I\u0026rsquo;ve been working a lot with GitLab Merge Requests lately and stumbled by accident onto a workflow that makes creating and working with MRs easy.\nWhen you have a ticket that relates to the MR, name your branch starting with the ticket number: 42-figure-out-life\nNow push the empty branch. The GitLab project page suggests you open an MR (and if the repo has configured it, the MR link shows up in the Git log as well). This creates an MR that has the ticket automatically referenced and the MR is marked as a draft.\nNext just clean up the title to something a bit more human-readable and add any details to the body (while keeping the link reference there) and you\u0026rsquo;re done. When the MR is merged, the ticket is closed automatically (no need for any special commit messages) and the MR is linked to all related Milestones as well.\n","permalink":"https://til.unessa.net/gitlab/mr-naming/","tags":["gitlab","git"],"title":"Optimal Workflow For GitLab Merge Requests"},{"contents":"There seems to be almost no documentation or examples on how to test file downloads with Cypress.\nThere is a closed ticket #8089 with several \u0026ldquo;any updates on this\u0026rdquo;-comments and no recent replies from Cypress.\nIt looks like I won\u0026rsquo;t be testing file downloads anytime soon.\n","permalink":"https://til.unessa.net/testing/cypress-file-download/","tags":["cypress","issues","testing"],"title":"Downloading Files With Cypress.io"},{"contents":"I wanted a fast and easy way to publish TILs. Giving Hugo a go. It wasn\u0026rsquo;t the easiest thing to get started with (the documentation is a bit hit and miss) but I got a basic bare-bones site running in a couple of hours. Needs quite a bit of tweaking to get the UI fixed, but hey, I wanted to start from zero instead of trying to learn and strip down someone else\u0026rsquo;s markup.\n","permalink":"https://til.unessa.net/meta/hello-hugo/","tags":["meta","hugo"],"title":"Hello, Hugo!"},{"contents":"The VSCode Git branch dropdown becomes unwieldy fast when not kept up to date often. Here\u0026rsquo;s how to update the local list of remote branches:\ngit fetch --prune git pull --prune Make this automatic every time you run pull or fetch:\ngit config remote.origin.prune true ","permalink":"https://til.unessa.net/git/local-branches/","tags":["vscode","git"],"title":"Updating the list of local Git branches from remote"},{"contents":"Cypress has issues with drag\u0026amp;drop and SortableJS. The example repo has a drag\u0026amp;drop example but it doesn\u0026rsquo;t work with SortableJS. Tried all possible combinations, to no avail.\nThere is also the cypress-drag-drop plugin and an old Gist for using Cypress with SortableJS but neither of these works with the current (5.x) version of Cypress.\nI also asked about this at the Cypress Gitter chat but didn\u0026rsquo;t get any usable responses. Haven\u0026rsquo;t so far figured out any code that would work with Cypress and SortableJS. Maybe I should investigate some other drag\u0026amp;drop library instead. (Worked around the e2e testing by programmatically changing the Vuex store, which just skips the browser.)
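\nThe store workaround looks roughly like this (an illustrative sketch that assumes the app exposes its store for tests, e.g. with window.__store__ = store, and has a mutation for reordering; both of those names are made up):\ncy.window().its(\u0026#39;__store__\u0026#39;).then((store) =\u0026gt; { store.commit(\u0026#39;setItemOrder\u0026#39;, [2, 0, 1]) })\n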
","permalink":"https://til.unessa.net/testing/cypress-drag-drop/","tags":["cypress","sortablejs","issues","testing"],"title":"Cypress.io Drag\u0026Drop Issues"},{"contents":"Adding custom snippets in VSCode is easy. Open Preferences -\u0026gt; User Snippets, add the following to the global snippets file:\n\u0026#34;console.log\u0026#34;: { \u0026#34;scope\u0026#34;: \u0026#34;html,vue,javascript,typescript\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;cl\u0026#34;, \u0026#34;body\u0026#34;: \u0026#34;console.log($0)\u0026#34;, \u0026#34;description\u0026#34;: \u0026#34;Insert console log statement\u0026#34; } Now I get a console log statement when typing cl + TAB.\n","permalink":"https://til.unessa.net/vscode/vscode-snippets/","tags":["vscode","tools","productivity"],"title":"VSCode Custom Snippets"}] -------------------------------------------------------------------------------- /public/index.json: -------------------------------------------------------------------------------- 1 | [{"contents":"I was having problems installing dependencies to a not-too-old frontend project using yarn. The installation failed with a long traceback which included the following:\ngyp info spawn args [ 'BUILDTYPE=Release', '-C', 'build' ] CC(target) Release/obj.target/nothing/../node-addon-api/nothing.o LIBTOOL-STATIC Release/nothing.a warning: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/libtool: archive library: Release/nothing.a the table of contents is empty (no object file members in the library define global symbols) TOUCH Release/obj.target/libvips-cpp.stamp CXX(target) Release/obj.target/sharp/src/common.o ../src/common.cc:23:10: fatal error: 'vips/vips8' file not found #include \u0026lt;vips/vips8\u0026gt; ^~~~~~~~~~~~ 1 error generated. make: *** [Release/obj.target/sharp/src/common.o] Error 1 gyp ERR! build error gyp ERR! stack Error: `make` failed with exit code: 2 gyp ERR! stack at ChildProcess.onExit (/Users/uninen/.nvm/versions/node/v14.18.2/lib/node_modules/npm/node_modules/node-gyp/lib/build.js:194:23) The problem was related to the sharp and node-gyp packages, both of which were deeper dependencies in the project. I found a couple of seemingly related GitHub issues from both of these projects but the solution that fixed the problem was easy: I installed the missing vips package with brew:\nbrew install vips These kinds of issues are the main reason I try to keep all frontend projects as small as possible and make an extra effort to not include anything that requires build tools unless I absolutely need to.\n","permalink":"https://til.unessa.net/node/vips/","tags":["node","yarn","error"],"title":"Resolving 'fatal error: 'vips/vips8' file not found' on macOS"},{"contents":"The problem with projects with NPM dependencies is the ridiculously fast pace at which the packages keep updating. The JavaScript / Node world has a unique way of creating and depending on tiny community packages and the number of dependencies even in a small project can be enormous.
Using automated services like Dependabot only takes you so far, you just need to keep weeding the projects manually from time to time.\nAbout the colors / fakerjs incident The Node community got reminded once more about this problematic situation when the maintainer of the colors and faker libraries decided to publish malicious versions of the packages as a political statement. NPM has already had experience of this since the leftpad incident so they swiftly reinstated the original package. GitHub assumed the user account was hacked as the user\u0026rsquo;s behaviour was abnormal so they locked the account, and it immediately got mixed reactions among the community as people thought they were somehow censoring the author. This led people to call for boycotting GitHub and looking for more decentralized solutions for code hosting.\nPeople familiar with the author soon pointed out that he had previously voiced concerns multiple times about big corporations using his (open sourced) work for free. This led people to blame the situation on the lack of sustainable monetization in important open source projects.\nMeanwhile, others pointed out that the author had also been struggling with mental issues for a long time without getting proper help. He had also recently been posting some far-out conspiracy theories and stuff like that on his social media profiles. I believe the root cause of this incident stems from here but is much deeper and more complex.\nThings like QAnon are fuelled by a combination of broken things; a healthcare system that doesn\u0026rsquo;t work, a society that is so afraid of difficult discussions that it rather censors inconvenient facts than allows discussion around them, and a serious case of woke mentality / tall poppy syndrome that totally suffocates any meaningful discourse around touchy issues. These kinds of issues cannot be solved with technical solutions or any single magic bullet \u0026ndash; we need to fix the deep issues within our society instead.\nPublished My Docker Base Images I\u0026rsquo;ve been developing and using my own Docker base images for a while now. I decided to publish them as open source and last week I added a new image for testing in CI as well. The current images cover Django projects using Postgres and Postgis services, plus the new image that adds preinstalled Node as well (to make CI builds go faster).\nThe images are built upon great work by the awesome RevSys team, who also added a missing LICENSE to the repo only minutes after I asked Jeff about it. Great job!\n","permalink":"https://til.unessa.net/weeknotes/2022-02/","tags":["weeknotes","chores","javascript","politics","society","docker"],"title":"Weeknotes 2022/2 - Chores, and the colors / faker incident"},{"contents":"What better time to restart weeknotes than the first week of the year!\nI decided to take a proper long vacation from everything work-related in mid-December. It\u0026rsquo;s been years since my last proper vacation and I\u0026rsquo;m already noticing the positive effects. Staying away from the computer hasn\u0026rsquo;t been that easy, though.\nMy original \u0026ldquo;things I wanna do while on holiday\u0026rdquo;-list was full of code projects and all kinds of small computer-related chores. Luckily I realized that pretty quickly and just threw all plans away. It\u0026rsquo;s not a vacation if you\u0026rsquo;ve scheduled your days full of work.\nI\u0026rsquo;ve still been browsing GitHub, looking for interesting projects and reading code. You can learn a lot by reading other people\u0026rsquo;s code. I\u0026rsquo;ve also kept up to date with Vitest, a new and fast-evolving Vite-native testing framework by Anthony Fu. I\u0026rsquo;ve never had the stamina to add Jest tests to my own projects (as I\u0026rsquo;ve always found e2e tests to be more important for frontend) so learning this world has been quite interesting. Combining Cypress for e2e and component testing with Vitest seems like a very nice way to get full test coverage for frontend projects.
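\nFor the uninitiated, a Vitest unit test reads almost exactly like a Jest one \u0026ndash; a minimal illustrative sketch:\nimport { expect, test } from \u0026#39;vitest\u0026#39; test(\u0026#39;basic math\u0026#39;, () =\u0026gt; { expect(1 + 1).toBe(2) })\n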
A new M1 Mac mini inspired me to test iOS development after a pretty long hiatus. Turns out Xcode is still painfully slow and hands down the worst development experience I\u0026rsquo;ve ever had to endure. Developing small apps for iOS devices is fun but the dumpster fire of Apple Developer Docs + Xcode just makes everything really bad. Here\u0026rsquo;s to hoping Apple comes up with some innovations \u0026ndash; starting for example with code formatting support for Xcode \u0026ndash; for developers in 2022!\n","permalink":"https://til.unessa.net/weeknotes/2022-01/","tags":["weeknotes","swift","vitest","testing"],"title":"Weeknotes 2022/1 - Vacationing, Vitest, and SwiftUI"},{"contents":"Setting up PostgreSQL + the PostGIS extension for GitLab CI was easy once I figured out how to configure the needed services the right way. Here\u0026rsquo;s the gist of my setup for testing a Django app:\n Use a main image that includes the needed Postgres + PostGIS deps:\nimage: \u0026#39;registry.gitlab.com/uninen/docker-images/python-postgis:3.9\u0026#39; I use my own Python Docker images which are based on the optimized Revsys Python images and come with most of the commonly needed system packages preinstalled for a speedy build process and testing.\n Instead of the typical postgres service, configure an image with PostGIS. I used postgis/postgis:13-master like so:\nservices: - name: postgis/postgis:13-master alias: postgres command: [\u0026#39;-c\u0026#39;, \u0026#39;fsync=off\u0026#39;, \u0026#39;-c\u0026#39;, \u0026#39;synchronous_commit=off\u0026#39;, \u0026#39;-c\u0026#39;, \u0026#39;full_page_writes=off\u0026#39;] The main thing here is the more verbose way to spell out the wanted service. You can use the same format for any Docker image.\n","permalink":"https://til.unessa.net/testing/postgis-gitlab/","tags":["testing","postgres","postgis","gitlab"],"title":"Testing Postgres + PostGIS on GitLab CI"},{"contents":"This blog is a static site built with Hugo using a 100% hand-built HTML template and a custom Tailwind CSS config.\nThe code lies in a Git repository hosted on GitLab and is automatically deployed on Vercel on every push.\nThe name Kaizen (改善 in Kanji) is a personal motto of mine, it means continuous improvement. I try to live by this motto and these TILs are one concrete example of that.\nThe footer phrase \u0026ldquo;I\u0026rsquo;m just a bizarre little person who walks back and forth\u0026rdquo; is a sentence from the last public video of Terry A. Davis, which reminds me of the realities of human society.\n","permalink":"https://til.unessa.net/colophon/","tags":null,"title":"Colophon"},{"contents":"Getting your development environment running and configured has become much easier over time but it\u0026rsquo;s still a hassle to get everything set up and configured from scratch. These notes are a continuously evolving task list for my personal setup.\nApplications Install Docker Desktop, configure it as needed and remember to authenticate it. Install VS Code and log in to synchronize settings.
Then some authentication: For GitLab, pull from any GL repo and then enter username + personal access token Install iTerm2. Terminal Install Homebrew and the following packages node nvm pyenv pyenv-virtualenv gdal hugo Install Powerline fonts (clone, run install script) Install Oh My Zsh To get ssh-agent starting up automatically you need to run sudo touch /var/db/useLS and then reboot. Generating a secure SSH key: ssh-keygen -t ed25519 -a 100 Git git config --global user.email \u0026#34;you@example.com\u0026#34; git config --global user.name \u0026#34;Your Name\u0026#34; git config --global init.defaultBranch main Node Node is almost as bad as Python with different versions so using nvm is a must. After installing it with brew, install a few of the necessary versions and then set the default:\nnvm install 12 nvm install 14 nvm install 16 nvm alias default 16 Now the default version does not use the latest node but the latest 16.x instead. Whenever a project needs a different version, just say nvm use x to activate the needed version.\nPython To minimize XKCD 1987 the best way to handle Python versions is to only use pyenv. So brew install pyenv (and brew install pyenv-virtualenv). Then, importantly, add all of these lines to make sure pyenv is loaded for new terminal sessions automatically:\necho \u0026#39;eval \u0026#34;$(pyenv init --path)\u0026#34;\u0026#39; \u0026gt;\u0026gt; ~/.zprofile echo \u0026#39;eval \u0026#34;$(pyenv init -)\u0026#34;\u0026#39; \u0026gt;\u0026gt; ~/.zshrc echo \u0026#39;eval \u0026#34;$(pyenv virtualenv-init -)\u0026#34;\u0026#39; \u0026gt;\u0026gt; ~/.zshrc Now just install the latest usable (ie. not the point zero version of a new release) Python version and set it as the global default.\nPoetry is easiest to install with the installation script.\nPostgis / GDAL / GEOS Previously I haven\u0026rsquo;t had many problems with these as they install easily with Homebrew, but on macOS Monterey and Apple Silicon (M1) I had to add these to the Django settings to get them working:\nGDAL_LIBRARY_PATH = \u0026#39;/opt/homebrew/opt/gdal/lib/libgdal.dylib\u0026#39; GEOS_LIBRARY_PATH = \u0026#39;/opt/homebrew/opt/geos/lib/libgeos_c.dylib\u0026#39; ","permalink":"https://til.unessa.net/macos/setting-up-macos/","tags":["macos","terminal","node","python","postgresql"],"title":"Setting Up macOS for Web Development"},{"contents":"I needed to implement a user data export feature in a Django project. The static and user media is handled by Backblaze B2 cloud storage (similar to Amazon S3) which also supports server-side encryption that allows your data to be encrypted at rest.\nUsually when working with files in Django you want to use the native storage API and storage backends. There are many for B2 as well and the project is configured to use one, but handling this one specific file that includes PII data was a special enough case that I decided to write custom handlers for it manually.\nBefore going on, a reminder that this is just one example that happened to work for this specific project and data. It most likely won\u0026rsquo;t work well with, for example, large data. YMMV.\nHigh Level Overview Here\u0026rsquo;s the use case in a nutshell:\n User triggers a data export A Celery task then Collects the data Bundles it into an in-memory zip file Uploads the zip to an encrypted B2 bucket Saves the metadata of the export file to the database Informs the user that the data is now available for download User clicks a download button A custom download view Fetches the download from B2 Writes it into an HTTP response as a downloadable file A periodic Celery task removes the export metadata from the db after it has expired. (The file itself is automatically deleted from the B2 bucket after the expiry.)
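\nA minimal sketch of that periodic cleanup task (reusing the DataExportItem model from the snippets below, with shared_task and timezone imported as usual, and assuming a Celery beat schedule that runs it, say, daily) could be as simple as:\n@shared_task def delete_expired_exports(): from .models import DataExportItem DataExportItem.objects.filter(expires_at__lt=timezone.now()).delete()\n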
Collecting And Uploading The custom user model has two methods for collecting and uploading the data.\nA method that does all the work:\ndef _build_and_upload_data_export(self): from .serializers import UserDataExportSerializer self._delete_data_export() serializer = UserDataExportSerializer(self) expires = timezone.now() + timedelta(days=7) export_item: DataExportItem = DataExportItem.objects.create( user=self, expires_at=expires, ) # Create the zip file in memory in_memory = BytesIO() zf = ZipFile(in_memory, mode=\u0026#34;w\u0026#34;) zf.writestr(export_item.file_name, orjson.dumps(serializer.data)) zf.close() in_memory.seek(0) # Upload the file to b2 bucket = b2_api.get_bucket_by_name(settings.B2_ENCRYPTED_BUCKET_NAME) uploaded = bucket.upload_bytes( data_bytes=in_memory.read(), file_name=export_item.b2_file_name ) export_item.size = uploaded.size export_item.is_ready = True export_item.save() A few things to note here:\n I\u0026rsquo;m using a DataExportItem Django model to collect the metadata. To make sure we only have one at any given time we delete possible previous ones before starting a new export. All data collection is handled by a Django Rest Framework serializer class. Orjson works great here because it\u0026rsquo;s fast and it serializes to bytes. The B2 bucket has server-side encryption and lifecycle rules set to match the project\u0026rsquo;s needs. Depending on the amount of user data and the server environment, this method will be slow to execute. You\u0026rsquo;ll want to run this in a background process detached from the Django request-response cycle. And again, if your data is big, you probably wouldn\u0026rsquo;t want to process it in memory. The public method for the export just triggers the background Celery task:\ndef export_data(self): build_data_export.delay(self.uid) The Celery task itself is also very simple:\n@shared_task def build_data_export(uid: str): from .models import User user = User.objects.get(uid=uid) user._build_and_upload_data_export() # handle any user notifications here Handling The Download The custom user model has a method for getting the export from B2.
It returns either a B2 object or None:\ndef _get_data_export(self): \u0026#34;Returns b2 DownloadedFile which can be saved w/ save()\u0026#34; try: export: DataExportItem = self.dataexport # type: ignore bucket = b2_api.get_bucket_by_name(settings.B2_ENCRYPTED_BUCKET_NAME) return bucket.download_file_by_name(export.b2_file_name) except DataExportItem.DoesNotExist: return None Finally there\u0026rsquo;s a Django view that passes the file to the user:\n@login_required def download_data_export(request): try: export: DataExportItem = request.user.dataexport # type: ignore export_file = request.user._get_data_export() if export_file is not None: in_memory_file = BytesIO() export_file.save(in_memory_file) in_memory_file.seek(0) response = HttpResponse(content=in_memory_file.read()) response[\u0026#34;Content-Type\u0026#34;] = \u0026#34;application/zip\u0026#34; response[\u0026#34;Content-Length\u0026#34;] = export.size response[\u0026#34;Content-Disposition\u0026#34;] = f\u0026#34;attachment; filename={export.download_file_name}\u0026#34; return response except DataExportItem.DoesNotExist: pass return HttpResponseNotFound() Conclusion Implementing these simple-sounding \u0026ldquo;let\u0026rsquo;s export the application data to the user\u0026rdquo; features takes a lot of work. Luckily we have great tools to do it safely in a way that doesn\u0026rsquo;t necessarily expose the data to anyone who shouldn\u0026rsquo;t see it. The method described here doesn\u0026rsquo;t work for all cases but if it does, it is pretty simple and straightforward. Storing user data in a way that is encrypted at rest and inaccessible without proper authentication leaves me sleeping better at night.\nOne important thing I intentionally left out here is testing. These kinds of things can be tricky to test properly but as long as you keep the individual moving parts simple and small enough, it\u0026rsquo;s not impossible either.\n","permalink":"https://til.unessa.net/django/encrypted-archives/","tags":["django","python","encryption","backblaze","b2","gdpr","security","celery"],"title":"Encrypted Data Archives With Django And Backblaze B2"},{"contents":"Using a custom user model in a Django project is almost always a good idea. The documentation for adding one when starting a project is good and clear but switching to a custom model mid-project is not that easy. This blog post explains one way, and ticket #25313 has several helpful comments.\nThe following steps based on comment #18 worked for me:\nStage 1, in development Create a new app (or use an existing one that has no migrations yet) for the new user model Create a user model that is identical to the auth.User: class User(AbstractUser): class Meta: db_table = \u0026#34;auth_user\u0026#34; Add the new user model app to INSTALLED_APPS and add the new model as AUTH_USER_MODEL Replace all occurrences of from django.contrib.auth.models import User with from newusermodelapp.models import User (Note: the documentation recommends using django.contrib.auth.get_user_model() instead, but in practice the user model almost never changes, so in my experience using a normal module import is simpler and works better. YMMV) Delete all old migrations (NOTE: do NOT run the following in a root that includes a virtualenv folder): find . -path \u0026#34;*/migrations/*.py\u0026#34; -not -name \u0026#34;__init__.py\u0026#34; -delete find .
find . -path \u0026#34;*/migrations/*.pyc\u0026#34; -delete Create new migrations from scratch: manage.py makemigrations The tests should pass now and the project should run normally. The last step is to manually reset the migrations table in the database by running TRUNCATE TABLE django_migrations; in the db shell, and then finally run all migrations with the fake flag: manage.py migrate --fake Push the tested code so the first part can be run in production. Stage 2, in production Pull the new code, update requirements Reset the migration table with TRUNCATE TABLE django_migrations; and then fake all the migrations: manage.py migrate --fake Stage 3, in development Now remove the db_table = \u0026quot;auth_user\u0026quot; from the custom user model, create migrations Apply the migrations: manage.py migrate Lastly, the content types are now mixed up and they need fixing one way or another. An easy way is to execute the following SQL (example in PostgreSQL; see the data migration sketch after these steps): UPDATE django_content_type SET app_label = \u0026#39;nonexistent\u0026#39; WHERE app_label = \u0026#39;newusermodelapp\u0026#39; and model = \u0026#39;user\u0026#39;; UPDATE django_content_type SET app_label = \u0026#39;newusermodelapp\u0026#39; WHERE app_label = \u0026#39;users\u0026#39; and model = \u0026#39;user\u0026#39;; Make sure the tests pass, then commit and push the code Stage 4, in production Pull the new code, then execute steps 2 and 3 from the previous stage in production.\nNow you have a custom user model in a fresh state and you can modify the model as you need using a normal development process.\n
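If you would rather keep that content type fixup in version control than run raw SQL by hand, the same swap can live in a data migration. A minimal sketch, assuming the same app labels as in the SQL above (adjust the labels and the dependency to match your project):

from django.db import migrations


def fix_content_types(apps, schema_editor):
    ContentType = apps.get_model('contenttypes', 'ContentType')
    # Move the stale content type out of the way first, then point
    # the old user content type at the new app label.
    ContentType.objects.filter(app_label='newusermodelapp', model='user').update(app_label='nonexistent')
    ContentType.objects.filter(app_label='users', model='user').update(app_label='newusermodelapp')


class Migration(migrations.Migration):
    dependencies = [('newusermodelapp', '0001_initial')]
    operations = [migrations.RunPython(fix_content_types, migrations.RunPython.noop)]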
Final notes Splitting the process into smaller steps may or may not work for your project. If the production database is small enough, an easy way to simplify this process would be to copy the production database into the development environment, run all the steps in dev, and finally import the modified database back to production.\nFinally, on a somewhat related note to self: always start new projects with a custom user model to avoid this kind of unnecessary mess!\n","permalink":"https://til.unessa.net/django/custom-user-model/","tags":["django","python","migrations"],"title":"Migrating to a Custom Django User Model Mid-Project"},{"contents":"I\u0026rsquo;ve migrated a couple of old Django projects from MySQL to PostgreSQL lately and decided to document the process here to help make it go faster in the future. If your old database is not exotic in any way, the migration process is pretty fast and simple. The hard part is figuring out how, and how much, you should tweak the old database to get rid of warnings/errors if there are any.\nPrerequisites You can go about this many ways. This guide uses pgloader and local databases (for example with Docker). Before you start:\n Install pgloader (apt-get install pgloader on Linux or brew install pgloader on macOS) Start local MySQL and import the old database Start local PostgreSQL and create the new database + needed roles etc Optional: install and configure your favourite graphical MySQL management tool to do fast small edits if needed Note: you can obviously use remote databases for this as well, but the actual commands and networking options may vary. Working over the internet can also be quite a bit slower depending on your database size and internet connection.\nThe Migration This command (just change the connection details) starts the migration and displays possible warnings plus a summary table after the migration is finished:\npgloader mysql://[root]:[docker]@[127.0.0.1]/[djuninen] postgres://[postgres]@[127.0.0.1]/[djuninen] Cleaning Up The Data In the real world the data in your old database might have some issues, and the summary report might have lots of errors / missing rows. Depending on the project and the data in question you might want to make this process more strict and have, for example, tests to measure the number of objects etc.\nI\u0026rsquo;m mostly working with non-sensitive data where it\u0026rsquo;s not a major issue if a blog post is missing a tag or if some individual rows get deleted in the migration, so my tactic was just to find the source of any errors / warnings and iterate the process as many times as needed.\nThe most typical cases for me were individual rows that had, for example, NOT NULL date_modified columns but were undefined in the data. I just went through these by hand and added a date to them. I also bumped into a few missing or invalid relations. Depending on the data I fixed or removed these, whichever made more sense. Having a tool at hand to make quick tweaks to the data helped a lot. I was also working with very small (\u0026lt;200Mb) databases so there wasn\u0026rsquo;t that much work. For bigger databases or more important data I would just spend whatever time is needed to make sure the data is correct before even trying to migrate it. The older and bigger your database is, the more work you\u0026rsquo;ll probably need to do to massage the data beforehand.\nTesting For critical production data (anything other than just marketing blog posts) I would write two kinds of tests: 1) simple tests that count rows, sums, and whatever important and easy metrics you come up with that can easily be verified before and after the migration, and 2) end-to-end tests that hit at the very least the most important pages on the site and make sure they are identical before and after the migration.\n
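A minimal sketch of the first kind of test, assuming a Django project with pytest-django; the model labels and counts here are hypothetical numbers recorded from the old MySQL database before the migration:

import pytest
from django.apps import apps

# Counts recorded from the old MySQL database before the migration.
EXPECTED_COUNTS = {'blog.Post': 1542, 'blog.Tag': 87}


@pytest.mark.django_db
def test_row_counts_survive_migration():
    for label, expected in EXPECTED_COUNTS.items():
        assert apps.get_model(label).objects.count() == expected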
For personal projects with noncritical data I didn\u0026rsquo;t really see the point, as the normal e2e tests already cover that the db change itself didn\u0026rsquo;t screw anything up, and as long as there weren\u0026rsquo;t any visible issues with the pgloader script itself, everything should be fine.\nLastly, you of course should keep backups of both the before and after databases at hand for a while in case something comes up after the migration.\n","permalink":"https://til.unessa.net/mysql/migrating-mysql-to-postgresql/","tags":["mysql","postgresql","howto","django"],"title":"How to migrate MySQL database to PostgreSQL"},{"contents":" See TILs tagged with Vercel Useful Links Environment Variables Docs vercel.json Project Configuration Configuration Redirects for JavaScript Routers For routers like vue-router with history mode enabled, something like this in vercel.json works:\n\u0026#34;rewrites\u0026#34;: [ { \u0026#34;source\u0026#34;: \u0026#34;/(.*)\u0026#34;, \u0026#34;destination\u0026#34;: \u0026#34;/index.html\u0026#34; } ] Redirects for Python Functions This (legacy) configuration is the only one that seems to work with Python at the moment:\n\u0026#34;routes\u0026#34;: [ { \u0026#34;src\u0026#34;: \u0026#34;/(.*)\u0026#34;, \u0026#34;dest\u0026#34;: \u0026#34;/\u0026#34; } ] Misc Gotcha when adding a new project: you can\u0026rsquo;t select a branch when creating a project; it automatically uses the main branch. So if your code is not available on the main branch, you can\u0026rsquo;t create a project for it. (Easy workaround: create whatever works, then edit all the settings afterwards.) ","permalink":"https://til.unessa.net/cheatsheets/vercel/","tags":["vercel","cheatsheet"],"title":"Vercel Cheat Sheet"},{"contents":" Pytest Official Docs Running Fail fast / stop after first failure: pytest -x Run from a module or directory: pytest dir/tests/footest.py, pytest dir/ Run specific markers: pytest -m marker (mark with @pytest.mark.mymarker; see the example after this list) Drop to pdb on failure: pytest --pdb (set a breakpoint w/ breakpoint()) Recreate the database using pytest-django: --create-db Configuration pytest.ini conftest.py Use pytest-dotenv to read environment variables from .env files Use pytest-xdist and pytest -n NUMCPUS to parallelize test running Python debugging in VS Code
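For example, a marked test that can be run on its own with pytest -m slow (the marker name is arbitrary; register it under markers = in pytest.ini to avoid warnings):

import pytest


@pytest.mark.slow
def test_nightly_report():
    # Selected with `pytest -m slow`, excluded with `pytest -m "not slow"`.
    assert sum(range(1000)) == 499500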
","permalink":"https://til.unessa.net/cheatsheets/pytest/","tags":["python","pytest","cheatsheet"],"title":"Pytest Cheat Sheet"},{"contents":"It\u0026rsquo;s very easy to add a custom domain to a Vercel project. It\u0026rsquo;s also easy to redirect the default Vercel project URL to another domain or set up separate domains for staging and production. But I had to do some digging to be able to migrate my project from domain foo to point to a new project on domain bar.\nThe Problem Back in January I created a simple dashboard for following my exercises and put it on health.unessa.net. The project has kept growing and growing, and eventually I decided to convert it to a full-blown blog. The blog needed a proper name and I wanted to change the domain to match it as well. I put the new project in a new repository and published it at tuonela.unessa.net. Now I had a problem: how do I redirect the (almost zero) traffic from the old project to the new one without breaking the old URLs?\nThe Solution Prerequisites: two different Vercel projects w/ custom domains configured.\n Remove the old custom domain from the original Vercel project. Add the old custom domain to the new Vercel project. Configure a 301 redirect from the old domain to the new domain in the new Vercel project. Profit!1 This is a proper 301 redirect setup which respects your URLs as well. (So all your old URLs from the old domain are redirected to the same path on the new domain.)\n","permalink":"https://til.unessa.net/vercel/moving-domains/","tags":["vercel","dns","domain","migration","refactoring"],"title":"Migrating Vercel Project To Another On A Different Domain"},{"contents":"I\u0026rsquo;ve been playing with MongoDB Realm lately and my latest exercise was to integrate Realm Auth with Nuxt Auth. They both implement a standard Oauth2 workflow, but it turns out making them work together wasn\u0026rsquo;t as straightforward as one might think.\nMongoDB Realm And [Google] OAuth 2.0 From the documentation:\n The Google authentication provider allows users to log in with their existing Google account through Google Sign-In. When a user logs in, Google provides MongoDB Realm with an OAuth 2.0 access token for the user. Realm uses the token to identify the user and access approved data from Google APIs on their behalf.\n In order to unite the Nuxt Auth module and the Realm Auth module, I needed to first add Google login to the Nuxt project, then intercept the oauth login flow, and finally inject the Realm login.\nBoth the realm-web and @nuxtjs/auth-next packages are relatively young and the documentation is unfortunately on par with the rest of the JavaScript ecosystem. Both are also written in TypeScript and missing some exports, so the task was mostly studying the code.\nAdding Google Login to a Nuxt App Nuxt Auth ships with a built-in Google provider so this part was very easy, but I was surprised by the lack of proper documentation or examples. Assuming you have already configured the Google Oauth 2.0 Client API, here\u0026rsquo;s a working configuration for the nuxt.config.js auth section:\nstrategies: { google: { clientId: process.env.GOOGLE_CLIENT_ID, redirectUri: process.env.LOGIN_REDIRECT_URI, scope: [\u0026#39;profile\u0026#39;, \u0026#39;email\u0026#39;], responseType: \u0026#39;token id_token\u0026#39;, codeChallengeMethod: \u0026#39;\u0026#39;, }, }, To log in, just call this.$auth.loginWith(\u0026#39;google\u0026#39;), that\u0026rsquo;s all there is to it. See the official documentation for more details.\nImplementing A Custom Nuxt Auth Oauth2 Scheme After spending one whole day doing trial and error with all kinds of different variations and methods of using realm-web together with the built-in Google provider, writing a custom scheme for Nuxt Auth seemed to be the least bad option.\nThe greatest challenge here was the fact that some of the needed helper functions weren\u0026rsquo;t exported by the package, so I needed to copy them into the project. I created a ticket for this and also added the whole scheme code as an example in the comments. Save the scheme (full code in linked issue) in ~/schemes/mongoAuth.ts and copy the needed utils from the repo into ~/schemes/utils.ts. (Didn\u0026rsquo;t say it was pretty!)\nAnother catch was the fact that for some unknown reason imports from the @nuxtjs/auth-next package need to be written as import foo from \u0026#39;~auth/runtime\u0026#39; and for TypeScript to understand the magic, you need to have a shim.d.ts that resolves the exports.\nSo, in ~/shim.d.ts add the following:\ndeclare module \u0026#39;~auth/runtime\u0026#39; { export { Oauth2Scheme } from \u0026#39;@nuxtjs/auth-next\u0026#39; } Finally, modify the Nuxt Auth config to include the custom strategy (which now needs explicit endpoint URLs, as we want to use Google but aren\u0026rsquo;t using the built-in provider):\nstrategies: { modifiedOauth: { scheme: \u0026#39;~/schemes/mongoAuth\u0026#39;, clientId: process.env.GOOGLE_CLIENT_ID, redirectUri: process.env.LOGIN_REDIRECT_URI, scope: [\u0026#39;profile\u0026#39;, \u0026#39;email\u0026#39;], responseType: \u0026#39;token id_token\u0026#39;, codeChallengeMethod: \u0026#39;\u0026#39;, endpoints: { authorization: \u0026#39;https://accounts.google.com/o/oauth2/auth\u0026#39;, userInfo: \u0026#39;https://www.googleapis.com/oauth2/v3/userinfo\u0026#39;, }, }, }, Now you have a fully working Google authentication in your Nuxt app which is also integrated with Realm!\nFinal Words Although this setup seems to work fine for now, I probably wouldn\u0026rsquo;t push anything like this into production on a mission-critical site. I wouldn\u0026rsquo;t consider either realm-web or @nuxtjs/auth-next production ready by any stretch of the imagination.
On the other hand, if you want an easy way to integrate your Nuxt app with MongoDB Realm, setting this whole workflow up takes less than an hour.\nI hope both of these packages get the love and attention they deserve to mature a bit, as having these kinds of cool tools is really great!\n","permalink":"https://til.unessa.net/mongodb/mongo_realm_auth/","tags":["mongodb","realm","nuxt","auth","oauth"],"title":"Integrating MongoDB Realm Auth With Nuxt Auth"},{"contents":"I\u0026rsquo;ve been working with modern JavaScript applications actively since 2016. One of the challenges of the JS ecosystem is the outrageous number of available packages. This is a continuously gardened list of useful and preferred JavaScript/TypeScript packages that I\u0026rsquo;ve personally worked with.\nDates Preferred: day.js (Github)\nDay.js is a great modern alternative to moment.js. It\u0026rsquo;s small, tree-shakeable, and ships with full TypeScript support. It also has a ton of plugins that will most likely cover all of your needs out of the box.\nAlternatives: date-fns, luxon\nAvoid / deprecate: moment.js (EOL)\nHTTP Preferred: axios (Github)\nIf you need to do HTTP queries from your app, just install axios and be done with it. Sure, you\u0026rsquo;ll save ~6k from the bundle if you just use fetch or some tiny wrapper for it, but when your project grows, your needs will grow and you\u0026rsquo;ll end up installing axios anyway. It\u0026rsquo;s feature-complete, well documented, fully typed, and production ready \u0026ndash; just use it.\nAlternatives: gazillion.\nUtilities Preferred: Rambda (Github)\nMost JS developers know lodash or its predecessor underscore. Nowadays there are much better and more modern alternatives. Rambda focuses on functional programming and speed. It has a smaller API but it\u0026rsquo;s fully typed and super fast. It\u0026rsquo;s also well documented.\nFirst consider not including any utility library at all. Modern ES has tons of functionality, and you should try to use the native functions if possible. If you need more, try Rambda.\nAlternatives: Ramda\nAvoid / deprecate: underscore, lodash\n","permalink":"https://til.unessa.net/javascript/recommended-js-packages/","tags":["javascript","typescript","live"],"title":"Recommended JS/TS Packages"},{"contents":"MongoDB Realm is a great Firebase alternative that is backed by a \u0026ldquo;proper\u0026rdquo; database. I was missing the automatic created_at and last_modified_at helpers from Django in a MongoDB project, so I implemented them using triggers. It turns out it wasn\u0026rsquo;t as straightforward as I thought, but it was still pretty simple in the end.\nMongoDB Realm has very powerful triggers, which are JavaScript functions that operate on a linked Atlas (a hosted MongoDB) cluster. The possibilities with these are almost limitless \u0026ndash; you can even install your own npm packages! The only tricky part was to figure out a way to not fall into an infinite loop when updating a document with a trigger that runs on every update.\nAFAIK there isn\u0026rsquo;t a \u0026ldquo;this update was triggered by a trigger\u0026rdquo;-flag, so you need to figure out the bail-out condition manually. The following trigger function runs on insert, update and replace.
It updates the \u0026lsquo;lastModifiedAt\u0026rsquo; field in a document to the current timestamp using the $currentDate operator, but only if the lastModifiedAt field was not updated in the changeEvent.\nexports = function(changeEvent) { const docId = changeEvent.documentKey._id; const description = changeEvent.updateDescription; const insertEvent = changeEvent.operationType === \u0026#34;insert\u0026#34;; if (!docId || (!insertEvent \u0026amp;\u0026amp; !description)) { return; } if (insertEvent || !Object.keys(description.updatedFields).includes(\u0026#34;lastModifiedAt\u0026#34;)) { context.services .get(\u0026#34;my-cluster-name\u0026#34;) .db(\u0026#34;my-db-name\u0026#34;) .collection(\u0026#34;my-collection-name\u0026#34;) .updateOne( { _id: docId }, { $currentDate: {lastModifiedAt: true} } ); } }; Adding an automated created_at field is much simpler; just run the trigger on insert events and you\u0026rsquo;re set. (Or, you can retrieve the same data without any extra work by using ObjectId.getTimestamp. I always like to have a dedicated field for this, hence a trigger.)\n","permalink":"https://til.unessa.net/mongodb/automatic_last_modified/","tags":["mongodb","realm","firebase","databases"],"title":"Automatic Last Modified Field With MongoDB"},{"contents":"Publicly visible links to site admin/staff functionality are a bad practice that is surprisingly common even today. Besides being harmful security-wise, it\u0026rsquo;s also bad for usability as it adds unnecessary navigation possibilities for non-admin users.\nThing is, it\u0026rsquo;s also very easy to fix. Here\u0026rsquo;s a one-liner that I\u0026rsquo;ve used in many projects:\nconst adminLinksVisible = localStorage.getItem(\u0026#39;admin-links\u0026#39;) === \u0026#39;true\u0026#39; Then just set this localstorage item on the admin index page and/or manually add the storage item from developer tools for those who need it (and document the practice so everyone can do it). In most cases most of the admin users aren\u0026rsquo;t so-called expert users, so you should make sure to have links to the admin functionality visible in multiple places throughout the organisation\u0026rsquo;s infra (such as Slack or the intranet).\nLastly, DO NOT RELY on hidden admin links as a security measure. It\u0026rsquo;s a useful layer of security by obscurity, but it should be just that: an additional layer on top of normal security measures.\n","permalink":"https://til.unessa.net/javascript/simple-staff-detection/","tags":["tip","javascript","localstorage","usability"],"title":"TIP: Hiding and Showing Admin Links"},{"contents":"Tauri is an interesting lightweight alternative to Electron which recently graduated to beta. These are my first impressions and experiences trying it out.\nInstalling and Hello World Tauri docs are pretty good for a project that has just barely reached beta. Tauri can be added as a dependency to any Node project, but before you can do that you need to install some Rust tooling first. I wrote my first Hello World with Rust just a few months back, so I got to skip most of that part. But the installer broke with a generic error message due to a too-old Rust version. A quick rustup update fixed the issue and the tooling installation was all done in a few minutes.\nThe next step was to integrate Tauri into my app. I created a small test repo using my Vite template and followed the instructions. In a couple of minutes I had the new dependencies installed, Tauri initialized and the native development window open.
These steps were easy and fast, and the Tauri CLI commands seemed pretty clear. Using Tauri in a Vite project basically means just adding one src-tauri source directory and a native yarn tauri dev development window to your workflow.\nI\u0026rsquo;ve worked with Electron projects before but haven\u0026rsquo;t published a native app myself. As Tauri is very young compared to Electron, it\u0026rsquo;s understandable that it\u0026rsquo;s not as polished or deep yet. Some first impressions:\n the initial native app doesn\u0026rsquo;t have any native menus (at least on macOS). It would be useful to have at least a basic bare-bones main menu with a quit command to make the initial bootstrapping faster and easier. (In general it\u0026rsquo;s much faster to modify and extend than to learn how to do something from scratch.) Tauri ships with default icons for the app but not for the Dock (\u0026ldquo;System Tray\u0026rdquo; in Tauri language). Again, the default app shows up in the Dock, so it would be helpful to have these icons configured by default as well. Tauri has a concept of patterns which is well documented, but I still found it confusing. It would probably help if there were more documentation on how the default initialized app is configured in terms of these patterns and other features. I also would have appreciated some kind of easy default \u0026ldquo;use this if you are not sure what you want yet\u0026rdquo; path that would have gotten me started faster. (I\u0026rsquo;m actually not sure if one even needs to worry about these patterns when just starting out with a first demo app.) All in all, my first impressions of Tauri Beta 2 were positive. I like the philosophy and I really hope the project matures to be a real competitor to Electron. I\u0026rsquo;m considering starting a new native app project using Tauri to ship it on all three desktop platforms; it would probably be an interesting exercise.\n","permalink":"https://til.unessa.net/node/installing-tauri/","tags":["node","tauri","electron","rust"],"title":"Testing Tauri Beta"},{"contents":"There are situations where your Vue app template may be displayed before it\u0026rsquo;s fully compiled and therefore expose the uncompiled Vue moustache template tags. In most situations this is not a problem, but if you happen to get bitten by this issue, you can use the v-cloak directive and CSS to hide the element until the template is rendered:\n\u0026lt;div v-cloak\u0026gt; {{ message }} \u0026lt;/div\u0026gt; and CSS to hide it:\n[v-cloak] { display: none; } This is documented in the Vue 3 docs and I\u0026rsquo;ve seen the implementations in the wild many times, but for some reason I never seem to remember this when I need to. Maybe this note helps.\n","permalink":"https://til.unessa.net/css/hiding-vue-moustache-tags/","tags":["css","vue","moustache","templates"],"title":"Hiding Uncompiled Vue Moustache Tags"},{"contents":"I don\u0026rsquo;t quite understand how some problems that feel very commonplace, and like they would obviously have been solved by browsers a long time ago, are still very much in flux and unsolved in 2021. One of those things is basic image handling in WYSIWYG editors. By basic image handling I mean aligning, resizing, and uploading. Most users who need these kinds of things are probably using proper CMSes or SaaS services, so building things like these is not as common as one might think.
Anyways, I needed to build a simple note-taking app with basic image support, and after spending almost a full day comparing and testing various solutions, I ended up choosing Ckeditor 5 because it seemed to have the biggest developer community, and mature and very user-friendly image plugins.\nCkeditor5 Ckeditor, like TinyMCE, has been around forever. The good: it has all the features you can possibly imagine already available, and the important ones are also mature enough to use safely. The bad: despite being the \u0026ldquo;new\u0026rdquo; version of Ckeditor, it\u0026rsquo;s very much oldskool in many ways. For example there is no TypeScript support and it\u0026rsquo;s tightly coupled with an old version of Webpack.\nCkeditor5 ships with a Vue (2 and 3) component, but it has little to no actual value over using the vanilla JS class instead. The more unfortunate thing about Ckeditor5 is that using any plugins that are not included in the pre-configured builds requires you to manually configure and compile your own version of the editor. (If your project happens to use an old version of Webpack, you can use the source version instead after adding the necessary configuration to your Webpack config.)\nSo, after banging my head against a wall long enough, I went ahead and created a custom build for the project.\nCreating A Custom Build This is luckily documented pretty well. You need to fork the ckeditor/ckeditor5 repo, create a new branch from stable, pick an existing build that most closely matches what you need, and then modify src/ckeditor.js and webpack.config.js under that build\u0026rsquo;s directory.\nMost likely you also want to modify the corresponding package.json and publish the custom-built editor to a package registry so you can use it in your project.\nThe Costs Creating and maintaining a custom build of a third-party package is obviously a quite painful and suboptimal way of adding a dependency to your project, but it is open source and free. Before adding this kind of dependency to a commercial project, you need to think hard about the cost of keeping it updated and safe. If you don\u0026rsquo;t have an easy way to automate the process and/or follow the upstream releases, I would advise against using this method. (Tip: try searching NPM for ckeditor5-build for a suitable third-party maintained package you could use instead. There are packages like @blowstack/ckeditor5-full-free-build that are fairly well maintained and might suit your needs. YMMV.)\nNote For Future The upcoming version 2 of TipTap seems like a very promising modern alternative to Ckeditor. It\u0026rsquo;s headless (you are fully in control of both markup and CSS), written in TypeScript, and will support Vue 3 out of the box. Based on the current demos it should handle minimal image operations (adding, aligning) easily, and writing the rest could be split off, for example into a separate open source project.\nI\u0026rsquo;m still amazed that something like this is not yet a solved problem!\n","permalink":"https://til.unessa.net/javascript/custom-ckeditor-builds/","tags":["yarn","ckeditor","javascript"],"title":"Creating Custom Ckeditor5 Builds"},{"contents":"I was bumping into SSL issues when creating new virtualenvs with pyenv. Found tons of similar issues, but the main problem for me wasn\u0026rsquo;t a faulty pyenv or SSL installation but an outdated pyenv build of the specific Python version.
As soon as I installed a new Python version, everything worked fine.\nThis isn\u0026rsquo;t exactly a huge surprise or fix, but I wanted to document the issue here to remind myself when I eventually run into similar issues in the future.\n","permalink":"https://til.unessa.net/python/python-ssl-macos/","tags":["python","ssl","virtualenv"],"title":"Python SSL Issues On macOS"},{"contents":"Typing in Python is not yet very mature. This small example displays several issues:\nif request.user.is_authenticated: return JsonResponse( { \u0026#34;user\u0026#34;: { \u0026#34;uid\u0026#34;: request.user.uid, \u0026#34;username\u0026#34;: request.user.username, \u0026#34;email\u0026#34;: request.user.email, } } ) The original type of request.user is Union[AbstractBaseUser, AnonymousUser] (which itself is already wrong; the user is actually a Django model that inherits from AbstractBaseUser, not AbstractBaseUser itself). MyPy doesn\u0026rsquo;t understand that is_authenticated narrows the options; you need to do it manually using casting.\nHere\u0026rsquo;s the final, working code:\nif request.user.is_authenticated: user = cast(User, request.user) return JsonResponse( { \u0026#34;user\u0026#34;: { \u0026#34;uid\u0026#34;: user.uid, \u0026#34;username\u0026#34;: user.username, \u0026#34;email\u0026#34;: user.email, } } )
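Another narrowing tool MyPy does understand is isinstance(). A minimal sketch of an alternative, with a made-up helper name:

from django.http import HttpRequest

from .models import User


def current_user(request: HttpRequest) -> User:
    # MyPy narrows request.user to User after the isinstance() check,
    # and the assert also guards against AnonymousUser at runtime.
    assert isinstance(request.user, User)
    return request.user

This keeps the narrowing logic in one place instead of sprinkling cast() calls around the views.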
","permalink":"https://til.unessa.net/python/mypy-casting/","tags":["python","mypy","typing","django"],"title":"Casting Types With MyPy"},{"contents":"Some Python dependencies require build tools, which can be problematic when running inside a Docker or CI environment. Often these packages are also needed only in development or production, so having them as optional dependencies can be really useful. I just learned that Poetry can do this.\nFirst, install your dependency with the --optional flag:\npoetry add -D pywatchman --optional Not sure if this step is needed, YMMV, but I needed to add a new section in the pyproject.toml file and run poetry update after:\n[tool.poetry.extras] pywatchman = [\u0026quot;pywatchman\u0026quot;] And now, when you do a normal install, the optional packages are not installed. If you want to install those, run:\npoetry install -E pywatchman This is very handy, as I can now easily keep things like pywatchman and uwsgi in my Poetry deps without worrying about their build dependencies in CI and/or local environments.\nAs a very long-time user of pip-tools, I really like Poetry as a modern alternative.\n","permalink":"https://til.unessa.net/python/poetry-extra-deps/","tags":["python","poetry","docker"],"title":"Optional Dependencies With Poetry"},{"contents":"Had some trouble getting Playwright to run properly on GitLab CI, so I decided to document my learnings here.\nThe Playwright CI documentation was helpful for tracking and tackling the problems:\n Setting the DEBUG=pw:browser* environment variable will output debug logs during the browser install, which is really helpful. Most issues seem to relate to memory handling and consumption. Adding the --disable-dev-shm-usage flag for Chromium should fix issues in environments like Docker with no access to /dev/shm or a limited-size /dev/shm. Launch the browser like this: const browser = await playwright.chromium.launch({ args: [\u0026#39;--disable-dev-shm-usage\u0026#39;] }); Caching is also a common issue in CI environments. Setting the PLAYWRIGHT_BROWSERS_PATH=0 environment variable makes Playwright store the browser binaries inside node_modules, which resolves most issues. Microsoft also offers pre-built Docker images, which should be an easy way to run inside GitLab CI. Use only one browser By default Playwright will install Chromium, Firefox and Webkit. If you don\u0026rsquo;t need all of these, there are specific versions of the package as well. As far as I can tell, the only difference from the full package is the number of included browser engines.\n playwright-chromium playwright-webkit playwright-firefox Force install browsers To force a browser install, run:\nnpx playwright-cli install\nRunning in Gitlab CI After trying out multiple different things, I ended up switching the runner to use the mcr.microsoft.com/playwright:bionic Docker image, which is not optimal as I run several other scripts in the same pipeline and wanted to optimize the install into one simple and lightweight step. This extended the pipeline runtime by about two minutes, but it works now.\nI did leave the install script in, which still produces an error (that it doesn\u0026rsquo;t produce locally):\n$ npx playwright-cli install (node:62) UnhandledPromiseRejectionWarning: Error: EACCES: permission denied, mkdir '/builds/uninen/personal-data/.npm/_npx/20/lib/node_modules/playwright-cli/node_modules/playwright/.local-browsers' I might revisit this sometime in the future when things mature a bit (and my nerves get a bit longer). It works, and that\u0026rsquo;s enough for me for now.\nRunning in Vercel Creating the browser instance with the --disable-dev-shm-usage flag and adding the PLAYWRIGHT_BROWSERS_PATH=0 environment variable is all you need for Vercel.\n","permalink":"https://til.unessa.net/gitlab/playwright-gitlab-ci/","tags":["gitlab","playwright","testing","ci","vercel"],"title":"Running Playwright in GitLab CI"},{"contents":"My days have been starting to look worryingly similar over the past few weeks, so I wanted to try to make this Christmas week stand out at least a little. After working at a pretty hectic pace for a long time, I\u0026rsquo;m finding it pretty difficult to put down the developer tools and wind down.\nI decided to take at least a couple of weeks off from normal work routines and instead do something different with no schedules and release targets. I still need to do a couple of small tasks for Slipmat, but after that I\u0026rsquo;m going to drop all that work for a while.\nTo this end I spent the past week mostly studying and learning new things like SwiftUI and WebRTC.\nWebRTC WebRTC is a Web technology that I\u0026rsquo;ve been interested in and following for several years. It\u0026rsquo;s no surprise that the browser developers have put significantly more resources into WebRTC this year, as all kinds of conference technologies are seeing increased usage of hundreds of percent due to the pandemic.\nI started working on a demo project that would allow me to build an audio-only chat for the Slipmat live page to complement the text chat. This is purely an experimental project, but it\u0026rsquo;s very interesting as it combines lots of new technologies and tools that I haven\u0026rsquo;t personally used in production yet. I want to get the demo running during this short break from other work.\nPersonal Data Archiving I\u0026rsquo;ve been collecting and archiving all my data for over 20 years already. But most of that data has been stuck in a SQL database or on various network disks, not in really usable form.
Encouraged by Simon\u0026rsquo;s Dogsheep project, I started to take small steps towards automating this data collection with GitLab CI into a portable form.\nAs most of the data is natively handled as JSON, I chose JSON as the base format for the archives as well. It\u0026rsquo;s really easy to work with, both machine- and human-readable, and easy to import to databases or use with static site tools like Hugo or Gridsome.\nThis week I added a Garmin Connect data collector (garmin-connect-to-json on NPM) to the toolbelt. Writing these collectors is an endless process, but it feels good to have started.\nI put together a new private repository on GitLab with a scheduled CI pipeline that uses all these collectors (currently fetching Tweets, Wakatime, and Garmin data) and archives the data into JSON files. The next step will be to write some kind of frontend that can be used to browse and search it. I\u0026rsquo;m not planning to use Datasette for the final site but might still use it as a quick and easy temporary solution.\nThe primary end goal of this project is to get all my data into a state where, if a Web site (that I\u0026rsquo;ve poured data into and that I find important in some sense) shuts down or becomes evil like FB, I can just stop using it and not lose any of that data. (That said, not sure if I want to touch my FB data at all.) A secondary utility of this kind of personal data collection is the easy access and reuse of all the data. Having a uniform and easy-to-use API to a big collection of data is a pretty nice thing to have.\nMisc Interestingly my WebRTC demo got stuck in a quest for finding a usable JavaScript rotary knob that would work like all audio software knobs do (meaning vertical and very precise control instead of mimicking a real knob). Browsers should have this element built in! I still haven\u0026rsquo;t written any of the year notes yet, but have been thinking about them quite a bit. I have a few ideas on what I want to improve in 2021, and for example the Garmin data will be helpful in building something that\u0026rsquo;ll hopefully help me stick to my goals. ","permalink":"https://til.unessa.net/weeknotes/2020-52/","tags":["weeknotes","swift","webrtc","garmin"],"title":"Weeknotes 2020/52 - WebRTC, SwiftUI and personal data archiving"},{"contents":"I wrote my first project using Playwright, an interesting headless browser API similar to Puppeteer but for all major browsers (Chromium, Firefox and Webkit). My first hurdle was reading a simple JSON response as opposed to a normal HTML page. The docs suggest attaching a listener to all requests and filtering what you want there, but I found the following to be the easiest way:\npage .goto(url) .then(async (response) =\u0026gt; { const body = await response.body() content = await JSON.parse(body.toString()) }) This breaks the async/await convention, but it works.\n","permalink":"https://til.unessa.net/scraping/playwright-json/","tags":["playwright","json","node","chromium","firefox","webkit"],"title":"Fetching JSON With Playwright"},{"contents":"This week was broken into a number of small projects, mainly around Slipmat development. I also worked on various pieces of tooling that help with this blog and various project dashboards.\nI had a birthday on Thursday, I played my radio show on Friday, and I managed to take one whole day off, so this week was much less productive in general than last week.\nFetching Tweets to a Hugo blog with GitLab CI I published not one but two NPM packages this week.
Both of these were needed for this TIL blog of mine, but they are also the first steps towards collecting my own data in JSON format for something like Simon Willison\u0026rsquo;s Dogsheep.\ntweets-to-json is a simple package that does what it says on the tin. I tried to make it flexible, but there\u0026rsquo;s plenty of room for improvements still. To be able to push the tweets to this blog via GitLab CI, I also wrote push-to-repo, which is a small Node wrapper for the GitLab API that updates a single file from CI back to the repo. Together I can use these to update and rebuild this blog with a scheduled CI script.\nAutomatically updating and displaying Tweets with a Hugo blog Now that I have a CI script that periodically fetches my Tweets as a JSON file, I wanted to link them to these TIL-posts based on hashtags and username mentions. The latter is still a work in progress, but I managed to update the blog templates to include the latest related tweets in these posts. The relevant template code:\nfirst, select tweets whose tags intersect the post tags:\n{{ $related_tweets := where $.Site.Data.tweets \u0026#34;.tags\u0026#34; \u0026#34;intersect\u0026#34; .Params.tags }} then, if there were related tweets, show an aside:\n{{if gt (len $related_tweets) 0}} \u0026lt;aside class=\u0026#34;md:w-5/12\u0026#34;\u0026gt; \u0026lt;h2 class=\u0026#34;my-4 text-lg font-semibold\u0026#34;\u0026gt; Latest related tweets from \u0026lt;a href=\u0026#34;https://twitter.com/uninen\u0026#34;\u0026gt;@Uninen\u0026lt;/a\u0026gt; \u0026lt;/h2\u0026gt; {{ range first 5 $related_tweets -}} \u0026lt;div class=\u0026#34;mb-4\u0026#34;\u0026gt; {{$pubdate := time (int .timestamp)}} {{ .text | markdownify }} \u0026lt;a href=\u0026#34;https://twitter.com/uninen/status/{{ .id }}\u0026#34;\u0026gt;\u0026lt;time class=\u0026#34;block font-mono text-sm leading-tight text-gray-400\u0026#34; datetime=\u0026#34;{{ dateFormat \u0026#34;2006-01-02T15:04:05-07:00\u0026#34; ($pubdate) }}\u0026#34;\u0026gt; {{ $pubdate.Format \u0026#34;Jan 2, 2006 15:04\u0026#34; }} \u0026lt;/time\u0026gt;\u0026lt;/a\u0026gt; \u0026lt;/div\u0026gt; {{ end }} \u0026lt;/aside\u0026gt; {{end}} I seem to use @-mentions more than hashtags in my tweets, so I need to figure out a way to link Twitter usernames to tags and then filter with them as well.\nMisc Dove into the new Apple SwiftUI tutorial, and while I didn\u0026rsquo;t manage to complete it, I did manage to do some minor updates to the experimental Slipmat iOS Mobile app. Continued last week\u0026rsquo;s UI design experiments and wrote two new page layouts for Slipmat Live Page for desktop and mobile. The current page is only months old, but it\u0026rsquo;s complex enough to grow into a beast when trying to accommodate both mobile and desktop views with the same markup, so I decided that it makes more sense to split the design and write two good templates instead of one almost-good one. The new layouts are just empty placeholders with pure TailwindCSS that demo the idea, and they seem to work pretty well on all browsers. More tuning needed, tho. After lots of pondering about the next small Slipmat project, I re-started the Slipmat Polls app in order to maybe get an MVP in production next week. We\u0026rsquo;ll see. Hunted down an old architecture drawing of the current Slipmat project and doodled a new one from the frontends where every big component sits in its own repo.
Not sure if I want to keep everything in separate repos or combine them into a single monorepo, but having smaller separate projects for different parts of the site seems absolutely the best way to go now that we have a common UI library on NPM. I have lots of ideas for end-of-the-year posts, from both a personal and a Slipmatio POV. Hopefully I have the stamina to write at least one of those. I\u0026rsquo;ve been working non-stop for months now. I think I\u0026rsquo;m going to try to take a week or two off at some point, but not sure when exactly yet. ","permalink":"https://til.unessa.net/weeknotes/2020-51/","tags":["weeknotes","slipmatio","npm","gitlab","twitter","swiftui","architecturing"],"title":"Weeknotes 2020/51 - NPM packages, GitLab CI, SwiftUI, architecturing"},{"contents":"MySQL usually gets installed on macOS by Brew, which installs a bunch of dylib libraries in /usr/local/opt/mysql/lib/. For some reason my Python installation is looking for the wrong version of these files, and I\u0026rsquo;ve yet to find a proper solution for matching up the dependencies.\nThe error message looks like this:\n 'Did you install mysqlclient or MySQL-python?' % e django.core.exceptions.ImproperlyConfigured: Error loading MySQLdb module: dlopen(/Users/uninen/.envs/slipmatio/lib/python2.7/site-packages/MySQLdb/_mysql.so, 2): Library not loaded: /usr/local/opt/mysql/lib/libmysqlclient.20.dylib Referenced from: /Users/uninen/.envs/slipmatio/lib/python2.7/site-packages/MySQLdb/_mysql.so Reason: image not found. Did you install mysqlclient or MySQL-python? This old project uses mysqlclient, which is installed properly, but it seems that a recent brew update broke things. The library it wants is libmysqlclient.20.dylib, yet MySQL 8 ships only a libmysqlclient.21.dylib in the lib folder. So, I just manually symlinked that in place and it seems to work fine. Obviously not a proper solution, but so far the only one that gets my local installation working.\n\u0026gt; cd /usr/local/opt/mysql/lib/ \u0026gt; ln -s libmysqlclient.21.dylib libmysqlclient.20.dylib \u0026gt; ls -la drwxr-xr-x 17 uninen staff 544 Dec 17 14:49 ./ drwxr-xr-x 19 uninen staff 608 Dec 17 14:38 ../ lrwxr-xr-x 1 uninen staff 23 Dec 17 14:49 libmysqlclient.20.dylib@ -\u0026gt; libmysqlclient.21.dylib -rw-r--r-- 1 uninen staff 7322032 Dec 17 14:38 libmysqlclient.21.dylib -r--r--r-- 1 uninen staff 8137760 Sep 23 16:05 libmysqlclient.a lrwxr-xr-x 1 uninen staff 23 Sep 23 16:05 libmysqlclient.dylib@ -\u0026gt; libmysqlclient.21.dylib There are a couple of 8+ year old related discussions on StackOverflow; not sure if this is a py27 issue or something else.\n","permalink":"https://til.unessa.net/mysql/error-loading-mysqldb-module/","tags":["mysql","python","issues","macos","brew"],"title":"Fixing \"Error loading MySQLdb module\" on macOS 10.15 Catalina"},{"contents":"I\u0026rsquo;ve been publishing my first NPM packages lately and have needed to install NPM packages locally for testing. There are two ways to do this, and they both have their separate use cases.\nUsing yarn add First, you can install local packages with Yarn simply by giving the package path as an argument to add:\nyarn add /path/to/my/package This installs the package like any other package by copying it to your node_modules. This has the benefit of working exactly like any normal package, but the downside of being an actual copy (instead of a symlink) of your local project.
If you want to test and develop your local project, you might want to try linking instead.\nIf you use this method for developing, you need to yarn add the path again whenever you want to test new changes.\nUsing yarn link Yarn (like npm) has a link command that allows you to symlink any local NPM package in your project. This is very handy for fast development and testing, but it also has the downside of the linked package not physically being in your node_modules, which can break some functionality (that relies on looking up parent paths).\nLinking a local module happens in two steps:\n Run yarn link in the project you want to link Run yarn link \u0026quot;package-name\u0026quot; wherever you want to link it to ","permalink":"https://til.unessa.net/node/installing-local-projects/","tags":["yarn","npm","node"],"title":"Installing local NPM packages with Yarn"},{"contents":"For the last few weeks I\u0026rsquo;ve been working on some small micro APIs for Slipmat. This week I built a new homepage for Slipmat Labs and added changelogs for Spotify Playlist Parser API, Tidal Playlist Parser API and the new Discogs Search API.\nI\u0026rsquo;ve really liked working with FastAPI and Vercel for making these kinds of micro APIs. My typical Python stack runs on Docker and usually has lots of heavy dependencies like databases and background runners. These FastAPI projects are great because they\u0026rsquo;re literally just one file with some extra for docs and theming. Best of all, Vercel is amazingly fast at deploying them, so the whole CI/CD pipeline feels really rewarding as any changes you push are in production in less than a minute.\nPrivacy-friendly analytics I wanted to have some idea of the usage of Slipmat Labs projects, but I didn\u0026rsquo;t want to add Google Analytics to them, so I\u0026rsquo;m experimenting with Plausible.io instead. Based on initial impressions, Plausible works really great for small sites like Slipmat. It unfortunately doesn\u0026rsquo;t have an API, so I can\u0026rsquo;t use it for the whole of Slipmat, but as the project is open source, I explained my use case in their discussions - maybe someday.\nWeb Design Exercises \nGot inspiration from the new GitHub homepage to flex some new Tailwind 2.0 muscles and created a new repo for Slipmat UI development. I spent hours on a couple of simple ideas but didn\u0026rsquo;t get very far. I really like the modern \u0026ldquo;let\u0026rsquo;s use all the available space\u0026rdquo; kind of designs, though.\nI\u0026rsquo;ve done quite a lot of work to open up all my Slipmat development, but UI design is something that probably doesn\u0026rsquo;t work very well in the open. I\u0026rsquo;ll happily post some mockups and beta designs at some point, but not yet.\nMisc Published Slipmat Beta 35 with a couple of nice long-awaited features plus several smaller fixes and tweaks. Added Discord integration to multiple Slipmat repositories and added a firehose channel to our community chat to make the development work even more visible to ordinary users. Sure, the channel doesn\u0026rsquo;t offer any useful information for most, but what it does show really well is the amount of work that goes into the repositories every single day. Reinstalled the Wakatime plugin in VSCode to get more detailed statistics on my coding habits. Based on the first week stats, I spend way too much time coding!
","permalink":"https://til.unessa.net/weeknotes/2020-50/","tags":["weeknotes","apis","vercel","fastapi","analytics","webdesign","slipmatio"],"title":"Weeknotes 2020/50 - Micro APIs, ethical analytics"},{"contents":"GitLab has two methods for third-party integrations: webhooks and official integrations. Discord has an official integration page but it turns out it has a bug where the save button just hangs and doesn\u0026rsquo;t save anything, which makes adding the integrations quite annoying.\nLearned through trial and error today that if you hit \u0026ldquo;Test Settings\u0026rdquo;-button before save, the save still hangs but now the integration is saved after some time.\n(Was going to report this bug, but gave up and wrote this TIL instead.)\n","permalink":"https://til.unessa.net/gitlab/discord-integrations/","tags":["gitlab","discord","integrations"],"title":"Integrating GitLab to Discord"},{"contents":"I\u0026rsquo;ve been working a lot with GitLab Merge Requests lately and stumbled by accident to a workflow that makes creating and working with MRs easy.\nWhen you have a ticket that relates to the MR, name your repo starting with the ticket number: 42-figure-out-life\nNow push the branch empty branh. GitLab project page suggests you to open a MR (and if the repo has configured it, the MR link also shows in the Git log as well). This creates a MR that has the ticket automatically referenced and the MR is marked as draft.\nNext just clean up the title to something a bit more human readable and add any details to the body (while keeping the link reference there) and you\u0026rsquo;re done. When the MR is merged, the ticket is closed automatically (no need for any special commit messages) and the MR is also linked to all related Milestones as well.\n","permalink":"https://til.unessa.net/gitlab/mr-naming/","tags":["gitlab","git"],"title":"Optimal Workflow For GitLab Merge Requests"},{"contents":"There seems to be almost no documentation and examples on how to test file downloads with Cypress.\nThere is a closed ticket #8089 with several \u0026ldquo;any updates on this\u0026rdquo;-comments and no recent replies from Cypress.\nIt looks like I won\u0026rsquo;t be testing file downloads anytime soon.\n","permalink":"https://til.unessa.net/testing/cypress-file-download/","tags":["cypress","issues","testing"],"title":"Downloading Files With Cypress.io"},{"contents":"I wanted a fast and easy way to publish TILs. Giving Hugo a go. It wasn\u0026rsquo;t the easiest thing to get started (documentation is a bit of a hit and miss) but got a basic bare bones site running in a couple of hours. Needs quite a bit of tweaking to get the UI fixed, but hey, I wanted to start from zero instead of trying to learn and strip down someone elses markup.\n","permalink":"https://til.unessa.net/meta/hello-hugo/","tags":["meta","hugo"],"title":"Hello, Hugo!"},{"contents":"Vscode Git branch dropdown becomes unwieldly fast when not kept up to date often. Here\u0026rsquo;s how to update the local list of remote branches:\ngit fetch --prune git pull --prune Make this automatic every time you run pull or fetch:\ngit config remote.origin.prune true ","permalink":"https://til.unessa.net/git/local-branches/","tags":["vscode","git"],"title":"Updating the list of local Git branches from remote"},{"contents":"Cypress has issues with drag\u0026amp;drop and SortableJS. The example repo has a dra\u0026amp;drop example but it doesn\u0026rsquo;t work with SortableJS. 
Tried all possible combinations, to no avail.\nThere is also a cypress-drag-drop plugin and an old Gist for using Cypress with SortableJS, but neither of these works with the current (5.x) version of Cypress.\nI also asked about this in the Cypress Gitter chat but didn\u0026rsquo;t get any usable responses. So far I haven\u0026rsquo;t figured out any code that would work with Cypress and SortableJS. Maybe I should investigate some other drag\u0026amp;drop library instead. (Worked around the e2e testing by programmatically changing the Vuex store, which just skips the browser.)\n","permalink":"https://til.unessa.net/testing/cypress-drag-drop/","tags":["cypress","sortablejs","issues","testing"],"title":"Cypress.io Drag\u0026Drop Issues"},{"contents":"Adding custom snippets in VSCode is easy. Open Preferences -\u0026gt; User Snippets, then add the following to the global snippets file:\n\u0026#34;console.log\u0026#34;: { \u0026#34;scope\u0026#34;: \u0026#34;html,vue,javascript,typescript\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;cl\u0026#34;, \u0026#34;body\u0026#34;: \u0026#34;console.log($0)\u0026#34;, \u0026#34;description\u0026#34;: \u0026#34;Insert console log statement\u0026#34; } Now I get a console log statement when typing cl + TAB.\n","permalink":"https://til.unessa.net/vscode/vscode-snippets/","tags":["vscode","tools","productivity"],"title":"VSCode Custom Snippets"}] --------------------------------------------------------------------------------