├── .gitignore
├── src
│   ├── dataset_gen
│   │   ├── package.json
│   │   ├── gen.js
│   │   └── package-lock.json
│   ├── metrics.py
│   ├── compare.py
│   └── generate_diff.py
└── README.md

/.gitignore:
--------------------------------------------------------------------------------
node_modules/
**/node_modules/
*.pyc

--------------------------------------------------------------------------------
/src/dataset_gen/package.json:
--------------------------------------------------------------------------------
{
  "name": "dataset_gen_2",
  "type": "module",
  "version": "1.0.0",
  "main": "gen.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "keywords": [],
  "author": "",
  "license": "ISC",
  "description": "",
  "dependencies": {
    "svelte": "^5.23.0",
    "typescript": "^5.8.2"
  },
  "devDependencies": {
    "@types/node": "^22.13.10"
  }
}
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Fine-tuning your own code completion model

Blog post: [Build Your Own GitHub Copilot](https://prvn.sh/build-your-own-github-copilot/)

This repo contains:
1. Scripts for generating a fill-in-the-middle (FIM) dataset from a codebase (currently only works for Svelte projects)
2. A Jupyter notebook for running SFT on the generated FIM dataset

# Generating the dataset

```
cd src/dataset_gen && npm install
node gen.js <path to a Svelte project>
```

This appends FIM samples to `dataset.train.jsonl` and `dataset.test.jsonl` (roughly a 98/2 train/test split) in the current directory.

Then follow the notebook to run SFT on the training data.

# Computing metrics

If you follow the notebook, you should have a `generated.test.jsonl` (or `generated-post-finetune.test.jsonl`) file containing the prefix, suffix, expected completion, and actual completion for each test example.

You can then run `python src/metrics.py <path to generated .jsonl>` to get exact-match accuracy and a corpus BLEU score.
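
Each line of that file is a single JSON object. A rough sketch of the expected shape (field names follow `src/metrics.py` and `src/compare.py`; the values below are invented and heavily truncated):

```
# One record of generated.test.jsonl (illustrative values only)
record = {
    "prefix": "<script>\n  let count = 0;\n",    # code before the hole
    "suffix": "\n</script>",                     # code after the hole
    "expected": "  let doubled = count * 2;",    # ground-truth middle
    "generated": "  let doubled = count * 2;",   # model completion
}
```

`src/metrics.py` strips the `<|file_sep|>` and `<|fim_pad|>` special tokens from the `generated` field before scoring.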
--------------------------------------------------------------------------------
/src/metrics.py:
--------------------------------------------------------------------------------
import sys
import json
from nltk.tokenize import TreebankWordTokenizer
from nltk.translate.bleu_score import corpus_bleu

tokenizer = TreebankWordTokenizer()

def compute_eval_metrics(generated_file):
    total = 0
    correct = 0
    bleu_generated_tokens = []
    bleu_expected_tokens = []
    with open(generated_file, 'r') as f:
        for line in f:
            data = json.loads(line)
            generated = data["generated"]
            # Strip FIM special tokens the model may emit alongside the completion
            generated = generated.replace("<|file_sep|>", "").replace("<|fim_pad|>", "")
            expected = data["expected"]

            if generated == expected:
                correct += 1
            total += 1

            # Accumulate tokenized hypotheses/references for corpus BLEU
            bleu_generated_tokens.append(tokenizer.tokenize(generated))
            bleu_expected_tokens.append([tokenizer.tokenize(expected)])

    print(f'Total: {total}')
    print(f'[Exact Match] Correct: {correct}')
    print(f'[Exact Match] Accuracy: {correct/total:.2f}')
    print(f'BLEU Score: {corpus_bleu(bleu_expected_tokens, bleu_generated_tokens):.4f}')

if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: python metrics.py <path to generated .jsonl>")
        sys.exit(1)

    compute_eval_metrics(sys.argv[1])
--------------------------------------------------------------------------------
/src/compare.py:
--------------------------------------------------------------------------------
import sys
import json
import random
import hashlib

from generate_diff import generate_diff_html

def hash(s):
    return hashlib.md5(s.encode('utf-8')).hexdigest()

def get_line_from_file(file_path, line_idx):
    with open(file_path) as f:
        for i, line in enumerate(f):
            if i == line_idx:
                return line.strip()

def take_last_n_lines(text, n=3):
    lines = text.split('\n')
    return '\n'.join(lines[-n:])

def take_first_n_lines(text, n=3):
    lines = text.split('\n')
    return '\n'.join(lines[:n])

def compare(baseline, post_finetune, idx=None):
    with open(baseline) as f:
        baseline_lines = f.readlines()
        baseline_keys = [hash(json.loads(line)['prefix']) for line in baseline_lines]

    with open(post_finetune) as f:
        pf_lines = f.readlines()
        pf_keys = [hash(json.loads(line)['prefix']) for line in pf_lines]

    num_lines = len(baseline_lines)
    baseline_idx = idx if idx is not None else random.randint(0, num_lines - 1)

    # Match the baseline example to the post-finetune run by hashing the prefix
    chosen_key = baseline_keys[baseline_idx]
    pf_idx = pf_keys.index(chosen_key)

    baseline_data = json.loads(baseline_lines[baseline_idx])
    post_finetune_data = json.loads(pf_lines[pf_idx])

    # Only show a few lines of context around the completion
    prefix = take_last_n_lines(baseline_data['prefix'])
    suffix = take_first_n_lines(baseline_data['suffix'])

    expected = baseline_data['expected']
    baseline_gen = baseline_data['generated'].replace('<|file_sep|>', '').replace('<|fim_pad|>', '')
    post_finetune_gen = post_finetune_data['generated'].replace('<|file_sep|>', '').replace('<|fim_pad|>', '')

    print(f'Example #{baseline_idx}')
    print(generate_diff_html(prefix, suffix, expected, baseline_gen, post_finetune_gen))

if __name__ == '__main__':
    baseline = sys.argv[1]
    post_finetune = sys.argv[2]
    idx = None
    if len(sys.argv) == 4:
        idx = int(sys.argv[3])
    compare(baseline, post_finetune, idx)
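
A usage sketch for `compare.py` (not part of the original docs): it picks one test example, lines up the baseline and post-finetune runs by hashing the prefix, and prints an HTML diff to stdout. The file names below are assumptions borrowed from the README.

```python
# Roughly equivalent to:
#   python src/compare.py generated.test.jsonl generated-post-finetune.test.jsonl 12 > diff.html
# (run with src/ on the path so that `compare` and `generate_diff` are importable)
from compare import compare

compare(
    "generated.test.jsonl",                # baseline model outputs (assumed name)
    "generated-post-finetune.test.jsonl",  # post-finetune outputs (assumed name)
    idx=12,                                # optional: fix the example instead of sampling one at random
)
```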
--------------------------------------------------------------------------------
/src/generate_diff.py:
--------------------------------------------------------------------------------
import argparse
import html

def generate_diff_html(prefix, suffix, expected, baseline, post_finetune):
    """
    Generate GitHub-style diff HTML for comparing model outputs.

    Args:
        prefix (str): The code prefix before the completion
        suffix (str): The code suffix after the completion
        expected (str): The expected completion
        baseline (str): The baseline model completion
        post_finetune (str): The post-finetuning model completion

    Returns:
        str: The generated HTML content
    """
    # Escape HTML in all inputs
    prefix = html.escape(prefix)
    suffix = html.escape(suffix)
    expected = html.escape(expected)
    baseline = html.escape(baseline)
    post_finetune = html.escape(post_finetune)

    # Split the code into lines
    expected_lines = expected.strip().split('\n')
    baseline_lines = baseline.strip().split('\n')
    post_finetune_lines = post_finetune.strip().split('\n')

    # Generate the HTML content (header, title bar, and table opening; minimal inline-styled markup)
    html_content = f"""
    <div style="font-family: monospace; border: 1px solid #d0d7de; border-radius: 6px; max-width: 900px;">
        <div style="padding: 8px 16px; background-color: #f6f8fa; border-bottom: 1px solid #d0d7de;">
            <strong>Code Comparison</strong>
            <span style="color: #57606a;">Fill-in-middle comparison</span>
        </div>

        <div style="padding: 8px 16px; border-bottom: 1px solid #d0d7de;">
            <div><strong>Function Implementation Comparison</strong></div>
            <div style="color: #57606a;">3 versions: expected, baseline, post-finetune</div>
        </div>

        <div>
            <table style="width: 100%; border-collapse: collapse;">
                <tbody>
    """

    # Add prefix (shared context above the completion)
    line_number = 1
    for line in prefix.split('\n'):
        html_content += f"""
                <tr>
                    <td style="width: 1%; padding: 0 8px; color: #57606a; text-align: right;">{line_number}</td>
                    <td style="white-space: pre;">{line}</td>
                </tr>
        """
        line_number += 1

    # Add expected implementation
    for i, line in enumerate(expected_lines):
        html_content += f"""
                <tr style="background-color: #e6ffec;">
                    <td style="width: 1%; padding: 0 8px; color: #57606a; text-align: right;">{line_number}</td>
                    <td style="white-space: pre;">{line}</td>
                </tr>
        """
        line_number += 1

    # Add baseline implementation
    line_number = len(prefix.split('\n')) + 1  # Reset line number for baseline
    for i, line in enumerate(baseline_lines):
        html_content += f"""
                <tr style="background-color: #fff8c5;">
                    <td style="width: 1%; padding: 0 8px; color: #57606a; text-align: right;">{line_number + i}</td>
                    <td style="white-space: pre;">{line}</td>
                </tr>
        """

    # Add post-finetune implementation
    line_number = len(prefix.split('\n')) + 1  # Reset line number for post-finetune
    for i, line in enumerate(post_finetune_lines):
        html_content += f"""
                <tr style="background-color: #ddf4ff;">
                    <td style="width: 1%; padding: 0 8px; color: #57606a; text-align: right;">{line_number + i}</td>
                    <td style="white-space: pre;">{line}</td>
                </tr>
        """

    # Add suffix (shared context below the completion)
    line_number = len(prefix.split('\n')) + max(len(expected_lines), len(baseline_lines), len(post_finetune_lines))
    for line in suffix.split('\n'):
        html_content += f"""
                <tr>
                    <td style="width: 1%; padding: 0 8px; color: #57606a; text-align: right;">{line_number + 1}</td>
                    <td style="white-space: pre;">{line}</td>
                </tr>
        """
        line_number += 1

    # Add footer with a colour legend for the three versions
    html_content += """
                </tbody>
            </table>
        </div>

        <div style="padding: 8px 16px; background-color: #f6f8fa; border-top: 1px solid #d0d7de;">
            <span style="background-color: #e6ffec; padding: 2px 6px;">Expected</span>
            <span style="background-color: #fff8c5; padding: 2px 6px;">Baseline</span>
            <span style="background-color: #ddf4ff; padding: 2px 6px;">Post-Finetune</span>
        </div>
    </div>
    """

    return html_content

def main():
    parser = argparse.ArgumentParser(description='Generate GitHub-style diff HTML for comparing model outputs.')
    parser.add_argument('--prefix', required=True, help='Code prefix before the completion')
    parser.add_argument('--suffix', required=True, help='Code suffix after the completion')
    parser.add_argument('--expected', required=True, help='Expected completion')
    parser.add_argument('--baseline', required=True, help='Baseline model completion')
    parser.add_argument('--post-finetune', required=True, help='Post-finetuning model completion')
    parser.add_argument('--output', required=True, help='Output HTML file')

    args = parser.parse_args()

    html_content = generate_diff_html(
        args.prefix,
        args.suffix,
        args.expected,
        args.baseline,
        args.post_finetune
    )

    with open(args.output, 'w', encoding='utf-8') as f:
        f.write(html_content)

    print(f"HTML file generated: {args.output}")

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/src/dataset_gen/gen.js:
--------------------------------------------------------------------------------
import * as fs from 'node:fs/promises';
import * as path from 'path';
import { parse } from 'svelte/compiler';

// Get the file path from command line arguments
const rootDir = process.argv[2];

if (!rootDir) {
  console.error('Please provide a path to a Svelte project.');
  process.exit(1);
}

// Walk the Svelte AST and collect nodes whose source span is a reasonable size
// to serve as the FIM "middle" of a training sample.
async function extractCriticalBlocksFromSvelte(fileContents) {
  const ast = parse(fileContents)

  const criticalNodes = []
  let total = 0

  const maxNodeSpan = Math.max(fileContents.length / 10, 30)
  const minNodeSpan = 10

  function traverseHtml(node) {
    if (!node || node.start === null || node.end === null) {
      return
    }

    if (node.type === 'Text') {
      return
    }

    const nodeSpan = node.end - node.start
    if (nodeSpan >= minNodeSpan && nodeSpan <= maxNodeSpan) {
      criticalNodes.push(node)
      total += 1
    }

    if (node.children) {
      node.children.forEach(traverseHtml)
    }

    if (node.type === 'IfBlock') {
      traverseHtml(node.else)
      traverseHtml(node.elseif)
    }

    if (node.type === 'AwaitBlock') {
      traverseHtml(node.pending)
      traverseHtml(node.then)
      traverseHtml(node.catch)
    }

    if (node.type === 'EachBlock') {
      traverseHtml(node.else)
    }
  }

  function selectNode(node) {
    if (!node) {
      return
    }

    const span = node.end - node.start
    if (span >= minNodeSpan && span <= maxNodeSpan) {
      criticalNodes.push(node)
    }
  }

  function mergeNodes(nodes) {
    if (nodes.length === 0) {
      return null
    }

    const first = nodes[0]
    const last = nodes[nodes.length - 1]
    return {
      type: first.type,
      start: first.start,
      end: last.end,
    }
  }

  function traverseScript(node) {
    if (!node) {
      return
    }

    const body = node.content.body
    if (!body) {
      return
    }

    let currType = null
    let currNodes = []
    let currNodeSpanLen = 0

    for (const el of body) {
      const span = el.end - el.start
      switch (el.type) {
        case 'ImportDeclaration':
        case 'ExportNamedDeclaration':
          if (currType === el.type && currNodeSpanLen + span <= maxNodeSpan) {
            // We can merge with the previous node
            currNodeSpanLen += span
            currNodes.push(el)
          } else {
            selectNode(mergeNodes(currNodes))

            currType = el.type
            currNodes = [el]
            currNodeSpanLen = span
          }
          break
        case 'FunctionDeclaration':
          if (currNodeSpanLen > 0) {
            selectNode(mergeNodes(currNodes))
            currType = null
            currNodes = []
            currNodeSpanLen = 0
          }
          selectNode(el)

          // Try adding the entire function body
          if (el.type === 'FunctionDeclaration' && el.body) {
            selectNode(el.body)
          }

          // Try adding arg list
          const firstParam = el.params && el.params[0]
          const lastParam = el.params && el.params[el.params.length - 1]
          if (firstParam) {
            selectNode({
              type: 'Identifier',
              start: firstParam.start,
              end: lastParam.end
            })
          }

          // TODO: Ideally we should recurse into the function body here
          // But the API is really annoying to work with, and I can't figure out the types,
          // so we'll just skip it for now

          // Try adding individual items from the body
          if (el.body && el.body.type === 'BlockStatement') {
            for (const st of el.body.body) {
              if (st.type === 'IfStatement') {
                selectNode(st)
                if (st.test) {
                  selectNode(st.test)
                }
                if (st.consequent) {
                  selectNode(st.consequent)
                }
                if (st.alternate) {
                  selectNode(st.alternate)
                }
              } else {
                selectNode(st)
              }
            }
          }

          break
        default:
          if (currNodeSpanLen > 0) {
            selectNode(mergeNodes(currNodes))
            currType = null
            currNodes = []
            currNodeSpanLen = 0
          }
          selectNode(el)
          break
      }
    }

    // Flush any trailing run of merged import/export nodes
    selectNode(mergeNodes(currNodes))
  }

  traverseHtml(ast.html)
  traverseScript(ast.instance)

  return criticalNodes
}

async function listSvelteFiles(dir) {
  const files = await fs.readdir(dir, { recursive: true });
  return files.filter(file => file.endsWith('.svelte'));
}

async function listTypescriptFiles(dir) {
  const files = await fs.readdir(dir, { recursive: true });
  return files.filter(file => file.endsWith('.ts'));
}

function determineSplit() {
  const rnd = Math.random()
  return rnd < 0.98 ? 'train' : 'test'
}

async function run() {
  // Resolve the full path
  const fullPath = path.resolve(rootDir);
  const files = await listSvelteFiles(fullPath);
  let total = 0
  let success = 0
  let failed = 0
  let totalSamples = 0
  const trainSet = await fs.open('dataset.train.jsonl', 'a')
  const testSet = await fs.open('dataset.test.jsonl', 'a')
  const maxChunkLen = 8000
  for (const file of files) {
    const filePath = path.join(fullPath, file)
    try {
      const fileContents = await fs.readFile(filePath, { encoding: 'utf-8' })
      const datasetSplit = determineSplit()
      const nodes = await extractCriticalBlocksFromSvelte(fileContents)
      for (const node of nodes) {
        const middleLen = node.end - node.start
        const remaining = maxChunkLen - middleLen

        // Keep prefix + middle + suffix under maxChunkLen characters,
        // splitting the remaining budget evenly around the middle
        const prefix = fileContents.slice(Math.max(0, node.start - remaining / 2), node.start)
        const suffix = fileContents.slice(node.end, Math.min(fileContents.length, node.end + remaining / 2))

        const jsonl = JSON.stringify({
          filePath,
          prefix,
          middle: fileContents.slice(node.start, node.end),
          suffix,
        })
        if (datasetSplit === 'train') {
          await trainSet.write(jsonl + '\n')
        } else {
          await testSet.write(jsonl + '\n')
        }
      }
      totalSamples += nodes.length
      success += 1
    } catch (err) {
      // console.error('Error extracting critical blocks from file: ', filePath, err)
      failed += 1
    } finally {
      total += 1
    }
  }

  await trainSet.close()
  await testSet.close()

  console.log('Total files: ', total, 'Total samples:', totalSamples)
  console.log('Success: ', success)
  console.log('Failed: ', failed)
}

run()
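
The SFT notebook consumes these records; as a rough illustration (not code from this repo), one record can be turned into a fill-in-the-middle training string as below. The FIM special tokens are assumed to be Qwen2.5-Coder-style, consistent with the `<|file_sep|>`/`<|fim_pad|>` tokens that `src/metrics.py` strips from model output; the exact tokens depend on the base model used in the notebook.

```python
import json

# Assumed Qwen2.5-Coder-style FIM tokens; swap in whatever the notebook's base model expects.
FIM_PREFIX = "<|fim_prefix|>"
FIM_SUFFIX = "<|fim_suffix|>"
FIM_MIDDLE = "<|fim_middle|>"

def to_fim_text(record):
    """Build a prefix-suffix-middle training string from one dataset record."""
    return (
        FIM_PREFIX + record["prefix"]
        + FIM_SUFFIX + record["suffix"]
        + FIM_MIDDLE + record["middle"]
    )

with open("dataset.train.jsonl") as f:
    first = json.loads(next(f))  # fields: filePath, prefix, middle, suffix
    print(to_fim_text(first)[:200])
```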
--------------------------------------------------------------------------------
/src/dataset_gen/package-lock.json:
--------------------------------------------------------------------------------
1 | { 2 | "name": "dataset_gen_2", 3 | "version": "1.0.0", 4 | "lockfileVersion": 3, 5 | "requires": true, 6 | "packages": { 7 | "": { 8 | "name": "dataset_gen_2", 9 | "version": "1.0.0", 10 | "license": "ISC", 11 | "dependencies": { 12 | "svelte": "^5.23.0", 13 | "typescript": "^5.8.2" 14 | }, 15 | "devDependencies": { 16 | "@types/node": "^22.13.10" 17 | } 18 | }, 19 | "node_modules/@ampproject/remapping": { 20 | "version": "2.3.0", 21 | "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", 22 | "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", 23 | "license": "Apache-2.0", 24 | "dependencies": { 25 | "@jridgewell/gen-mapping": "^0.3.5", 26 | "@jridgewell/trace-mapping": "^0.3.24" 27 | }, 28 | "engines": { 29 | "node": ">=6.0.0" 30 | } 31 | }, 32 | "node_modules/@jridgewell/gen-mapping": { 33 | "version": "0.3.8", 34 | "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", 35 | "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", 36 | "license": "MIT", 37 | "dependencies": { 38 | "@jridgewell/set-array": "^1.2.1", 39 | "@jridgewell/sourcemap-codec": "^1.4.10", 40 | "@jridgewell/trace-mapping": "^0.3.24" 41 | }, 42 | "engines": { 43 | "node": ">=6.0.0" 44 | } 45 | }, 46 | "node_modules/@jridgewell/resolve-uri": { 47 | "version": "3.1.2", 48 | "resolved":
"https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", 49 | "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", 50 | "license": "MIT", 51 | "engines": { 52 | "node": ">=6.0.0" 53 | } 54 | }, 55 | "node_modules/@jridgewell/set-array": { 56 | "version": "1.2.1", 57 | "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", 58 | "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", 59 | "license": "MIT", 60 | "engines": { 61 | "node": ">=6.0.0" 62 | } 63 | }, 64 | "node_modules/@jridgewell/sourcemap-codec": { 65 | "version": "1.5.0", 66 | "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", 67 | "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", 68 | "license": "MIT" 69 | }, 70 | "node_modules/@jridgewell/trace-mapping": { 71 | "version": "0.3.25", 72 | "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", 73 | "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", 74 | "license": "MIT", 75 | "dependencies": { 76 | "@jridgewell/resolve-uri": "^3.1.0", 77 | "@jridgewell/sourcemap-codec": "^1.4.14" 78 | } 79 | }, 80 | "node_modules/@sveltejs/acorn-typescript": { 81 | "version": "1.0.5", 82 | "resolved": "https://registry.npmjs.org/@sveltejs/acorn-typescript/-/acorn-typescript-1.0.5.tgz", 83 | "integrity": "sha512-IwQk4yfwLdibDlrXVE04jTZYlLnwsTT2PIOQQGNLWfjavGifnk1JD1LcZjZaBTRcxZu2FfPfNLOE04DSu9lqtQ==", 84 | "license": "MIT", 85 | "peerDependencies": { 86 | "acorn": "^8.9.0" 87 | } 88 | }, 89 | "node_modules/@types/estree": { 90 | "version": "1.0.6", 91 | "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", 92 | "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==", 93 | "license": "MIT" 94 | }, 95 | "node_modules/@types/node": { 96 | "version": "22.13.10", 97 | "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.10.tgz", 98 | "integrity": "sha512-I6LPUvlRH+O6VRUqYOcMudhaIdUVWfsjnZavnsraHvpBwaEyMN29ry+0UVJhImYL16xsscu0aske3yA+uPOWfw==", 99 | "dev": true, 100 | "license": "MIT", 101 | "dependencies": { 102 | "undici-types": "~6.20.0" 103 | } 104 | }, 105 | "node_modules/acorn": { 106 | "version": "8.14.1", 107 | "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz", 108 | "integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==", 109 | "license": "MIT", 110 | "bin": { 111 | "acorn": "bin/acorn" 112 | }, 113 | "engines": { 114 | "node": ">=0.4.0" 115 | } 116 | }, 117 | "node_modules/aria-query": { 118 | "version": "5.3.2", 119 | "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz", 120 | "integrity": "sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==", 121 | "license": "Apache-2.0", 122 | "engines": { 123 | "node": ">= 0.4" 124 | } 125 | }, 126 | "node_modules/axobject-query": { 127 | "version": "4.1.0", 128 | "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-4.1.0.tgz", 129 | "integrity": "sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==", 130 | "license": "Apache-2.0", 131 | "engines": { 132 | "node": ">= 0.4" 133 | } 134 | }, 135 | 
"node_modules/clsx": { 136 | "version": "2.1.1", 137 | "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", 138 | "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", 139 | "license": "MIT", 140 | "engines": { 141 | "node": ">=6" 142 | } 143 | }, 144 | "node_modules/esm-env": { 145 | "version": "1.2.2", 146 | "resolved": "https://registry.npmjs.org/esm-env/-/esm-env-1.2.2.tgz", 147 | "integrity": "sha512-Epxrv+Nr/CaL4ZcFGPJIYLWFom+YeV1DqMLHJoEd9SYRxNbaFruBwfEX/kkHUJf55j2+TUbmDcmuilbP1TmXHA==", 148 | "license": "MIT" 149 | }, 150 | "node_modules/esrap": { 151 | "version": "1.4.5", 152 | "resolved": "https://registry.npmjs.org/esrap/-/esrap-1.4.5.tgz", 153 | "integrity": "sha512-CjNMjkBWWZeHn+VX+gS8YvFwJ5+NDhg8aWZBSFJPR8qQduDNjbJodA2WcwCm7uQa5Rjqj+nZvVmceg1RbHFB9g==", 154 | "license": "MIT", 155 | "dependencies": { 156 | "@jridgewell/sourcemap-codec": "^1.4.15" 157 | } 158 | }, 159 | "node_modules/is-reference": { 160 | "version": "3.0.3", 161 | "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.3.tgz", 162 | "integrity": "sha512-ixkJoqQvAP88E6wLydLGGqCJsrFUnqoH6HnaczB8XmDH1oaWU+xxdptvikTgaEhtZ53Ky6YXiBuUI2WXLMCwjw==", 163 | "license": "MIT", 164 | "dependencies": { 165 | "@types/estree": "^1.0.6" 166 | } 167 | }, 168 | "node_modules/locate-character": { 169 | "version": "3.0.0", 170 | "resolved": "https://registry.npmjs.org/locate-character/-/locate-character-3.0.0.tgz", 171 | "integrity": "sha512-SW13ws7BjaeJ6p7Q6CO2nchbYEc3X3J6WrmTTDto7yMPqVSZTUyY5Tjbid+Ab8gLnATtygYtiDIJGQRRn2ZOiA==", 172 | "license": "MIT" 173 | }, 174 | "node_modules/magic-string": { 175 | "version": "0.30.17", 176 | "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.17.tgz", 177 | "integrity": "sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==", 178 | "license": "MIT", 179 | "dependencies": { 180 | "@jridgewell/sourcemap-codec": "^1.5.0" 181 | } 182 | }, 183 | "node_modules/svelte": { 184 | "version": "5.23.0", 185 | "resolved": "https://registry.npmjs.org/svelte/-/svelte-5.23.0.tgz", 186 | "integrity": "sha512-v0lL3NuKontiCxholEiAXCB+BYbndlKbwlDMK0DS86WgGELMJSpyqCSbJeMEMBDwOglnS7Ar2Rq0wwa/z2L8Vg==", 187 | "license": "MIT", 188 | "dependencies": { 189 | "@ampproject/remapping": "^2.3.0", 190 | "@jridgewell/sourcemap-codec": "^1.5.0", 191 | "@sveltejs/acorn-typescript": "^1.0.5", 192 | "@types/estree": "^1.0.5", 193 | "acorn": "^8.12.1", 194 | "aria-query": "^5.3.1", 195 | "axobject-query": "^4.1.0", 196 | "clsx": "^2.1.1", 197 | "esm-env": "^1.2.1", 198 | "esrap": "^1.4.3", 199 | "is-reference": "^3.0.3", 200 | "locate-character": "^3.0.0", 201 | "magic-string": "^0.30.11", 202 | "zimmerframe": "^1.1.2" 203 | }, 204 | "engines": { 205 | "node": ">=18" 206 | } 207 | }, 208 | "node_modules/typescript": { 209 | "version": "5.8.2", 210 | "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.2.tgz", 211 | "integrity": "sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==", 212 | "license": "Apache-2.0", 213 | "bin": { 214 | "tsc": "bin/tsc", 215 | "tsserver": "bin/tsserver" 216 | }, 217 | "engines": { 218 | "node": ">=14.17" 219 | } 220 | }, 221 | "node_modules/undici-types": { 222 | "version": "6.20.0", 223 | "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.20.0.tgz", 224 | "integrity": "sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==", 225 | 
"dev": true, 226 | "license": "MIT" 227 | }, 228 | "node_modules/zimmerframe": { 229 | "version": "1.1.2", 230 | "resolved": "https://registry.npmjs.org/zimmerframe/-/zimmerframe-1.1.2.tgz", 231 | "integrity": "sha512-rAbqEGa8ovJy4pyBxZM70hg4pE6gDgaQ0Sl9M3enG3I0d6H4XSAM3GeNGLKnsBpuijUow064sf7ww1nutC5/3w==", 232 | "license": "MIT" 233 | } 234 | } 235 | } 236 | --------------------------------------------------------------------------------