tr]:last:border-b-0", className)}
32 | {...props} />
33 | ))
34 | TableFooter.displayName = "TableFooter"
35 |
36 | const TableRow = React.forwardRef(({ className, ...props }, ref) => (
37 |   <tr
38 |     ref={ref}
39 |     className={cn(
40 |       "border-b transition-colors hover:bg-muted/50 data-[state=selected]:bg-muted",
41 |       className
42 |     )}
43 |     {...props} />
44 | ))
45 | TableRow.displayName = "TableRow"
46 |
47 | const TableHead = React.forwardRef(({ className, ...props }, ref) => (
48 |   <th
49 |     ref={ref}
50 |     className={cn(
51 |       "h-10 px-2 text-left align-middle font-medium text-muted-foreground [&:has([role=checkbox])]:pr-0 [&>[role=checkbox]]:translate-y-[2px]",
52 |       className
53 |     )}
54 |     {...props} />
55 | ))
56 | TableHead.displayName = "TableHead"
57 |
58 | const TableCell = React.forwardRef(({ className, ...props }, ref) => (
59 |   <td
60 |     ref={ref}
61 |     className={cn(
62 |       "p-2 align-middle [&:has([role=checkbox])]:pr-0 [&>[role=checkbox]]:translate-y-[2px]",
63 |       className
64 |     )}
65 |     {...props} />
66 | ))
67 | TableCell.displayName = "TableCell"
68 |
69 | const TableCaption = React.forwardRef(({ className, ...props }, ref) => (
70 |   <caption
71 |     ref={ref}
72 |     className={cn("mt-4 text-sm text-muted-foreground", className)}
73 |     {...props} />
74 | ))
75 | TableCaption.displayName = "TableCaption"
76 |
77 | export {
78 | Table,
79 | TableHeader,
80 | TableBody,
81 | TableFooter,
82 | TableHead,
83 | TableRow,
84 | TableCell,
85 | TableCaption,
86 | }
87 |
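For reference, a minimal usage sketch for these table primitives (the component name, columns, and cell values below are hypothetical, not taken from the app):

import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from "@/components/ui/table";

function ModelTable() {
  return (
    <Table>
      <TableHeader>
        <TableRow>
          <TableHead>Model</TableHead>
          <TableHead>Threads</TableHead>
        </TableRow>
      </TableHeader>
      <TableBody>
        <TableRow>
          <TableCell>model.gguf</TableCell>
          <TableCell>4</TableCell>
        </TableRow>
      </TableBody>
    </Table>
  );
}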
--------------------------------------------------------------------------------
/src/components/ui/tabs.jsx:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 | import * as TabsPrimitive from "@radix-ui/react-tabs"
3 |
4 | import { cn } from "@/lib/utils"
5 |
6 | const Tabs = TabsPrimitive.Root
7 |
8 | const TabsList = React.forwardRef(({ className, ...props }, ref) => (
9 |   <TabsPrimitive.List
10 |     ref={ref}
11 |     className={cn(
12 |       "inline-flex h-9 items-center justify-center rounded-lg bg-muted p-1 text-muted-foreground",
13 |       className
14 |     )}
15 |     {...props} />
16 | ))
17 | TabsList.displayName = TabsPrimitive.List.displayName
18 |
19 | const TabsTrigger = React.forwardRef(({ className, ...props }, ref) => (
20 |   <TabsPrimitive.Trigger
21 |     ref={ref}
22 |     className={cn(
23 |       "inline-flex items-center justify-center whitespace-nowrap rounded-md px-3 py-1 text-sm font-medium ring-offset-background transition-all focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50 data-[state=active]:bg-background data-[state=active]:text-foreground data-[state=active]:shadow",
24 |       className
25 |     )}
26 |     {...props} />
27 | ))
28 | TabsTrigger.displayName = TabsPrimitive.Trigger.displayName
29 |
30 | const TabsContent = React.forwardRef(({ className, ...props }, ref) => (
31 |   <TabsPrimitive.Content
32 |     ref={ref}
33 |     className={cn(
34 |       "mt-2 ring-offset-background focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2",
35 |       className
36 |     )}
37 |     {...props} />
38 | ))
39 | TabsContent.displayName = TabsPrimitive.Content.displayName
40 |
41 | export { Tabs, TabsList, TabsTrigger, TabsContent }
42 |
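A minimal usage sketch for the tabs primitives (the tab values, labels, and wrapper component here are hypothetical):

import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";

function SettingsTabs() {
  return (
    <Tabs defaultValue="options">
      <TabsList>
        <TabsTrigger value="options">Options</TabsTrigger>
        <TabsTrigger value="log">Log</TabsTrigger>
      </TabsList>
      <TabsContent value="options">Command options go here.</TabsContent>
      <TabsContent value="log">Log output goes here.</TabsContent>
    </Tabs>
  );
}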
--------------------------------------------------------------------------------
/src/components/ui/textarea.jsx:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 |
3 | import { cn } from "@/lib/utils"
4 |
5 | const Textarea = React.forwardRef(({ className, ...props }, ref) => {
6 | return (
7 |     <textarea
8 |       className={cn(
9 |         "flex min-h-[60px] w-full rounded-md border border-input bg-transparent px-3 py-2 text-sm shadow-sm placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring disabled:cursor-not-allowed disabled:opacity-50",
10 |         className
11 |       )}
12 |       ref={ref}
13 |       {...props} />
14 |   );
15 | })
16 | Textarea.displayName = "Textarea"
17 |
18 | export { Textarea }
19 |
--------------------------------------------------------------------------------
/src/components/ui/toast.jsx:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 | import { Cross2Icon } from "@radix-ui/react-icons"
3 | import * as ToastPrimitives from "@radix-ui/react-toast"
4 | import { cva } from "class-variance-authority";
5 |
6 | import { cn } from "@/lib/utils"
7 |
8 | const ToastProvider = ToastPrimitives.Provider
9 |
10 | const ToastViewport = React.forwardRef(({ className, ...props }, ref) => (
11 |   <ToastPrimitives.Viewport
12 |     ref={ref}
13 |     className={cn(
14 |       "fixed top-0 z-[100] flex max-h-screen w-full flex-col-reverse p-4 sm:bottom-0 sm:right-0 sm:top-auto sm:flex-col md:max-w-[420px]",
15 |       className
16 |     )}
17 |     {...props} />
18 | ))
19 | ToastViewport.displayName = ToastPrimitives.Viewport.displayName
20 |
21 | const toastVariants = cva(
22 | "group pointer-events-auto relative flex w-full items-center justify-between space-x-2 overflow-hidden rounded-md border p-4 pr-6 shadow-lg transition-all data-[swipe=cancel]:translate-x-0 data-[swipe=end]:translate-x-[var(--radix-toast-swipe-end-x)] data-[swipe=move]:translate-x-[var(--radix-toast-swipe-move-x)] data-[swipe=move]:transition-none data-[state=open]:animate-in data-[state=closed]:animate-out data-[swipe=end]:animate-out data-[state=closed]:fade-out-80 data-[state=closed]:slide-out-to-right-full data-[state=open]:slide-in-from-top-full data-[state=open]:sm:slide-in-from-bottom-full",
23 | {
24 | variants: {
25 | variant: {
26 | default: "border bg-background text-foreground",
27 | destructive:
28 | "destructive group border-destructive bg-destructive text-destructive-foreground",
29 | },
30 | },
31 | defaultVariants: {
32 | variant: "default",
33 | },
34 | }
35 | )
36 |
37 | const Toast = React.forwardRef(({ className, variant, ...props }, ref) => {
38 | return (
39 |     <ToastPrimitives.Root
40 |       ref={ref}
41 |       className={cn(toastVariants({ variant }), className)}
42 |       {...props} />
43 |   );
44 | })
45 | Toast.displayName = ToastPrimitives.Root.displayName
46 |
47 | const ToastAction = React.forwardRef(({ className, ...props }, ref) => (
48 |   <ToastPrimitives.Action
49 |     ref={ref}
50 |     className={cn(
51 |       "inline-flex h-8 shrink-0 items-center justify-center rounded-md border bg-transparent px-3 text-sm font-medium transition-colors hover:bg-secondary focus:outline-none focus:ring-1 focus:ring-ring disabled:pointer-events-none disabled:opacity-50 group-[.destructive]:border-muted/40 group-[.destructive]:hover:border-destructive/30 group-[.destructive]:hover:bg-destructive group-[.destructive]:hover:text-destructive-foreground group-[.destructive]:focus:ring-destructive",
52 |       className
53 |     )}
54 |     {...props} />
55 | ))
56 | ToastAction.displayName = ToastPrimitives.Action.displayName
57 |
58 | const ToastClose = React.forwardRef(({ className, ...props }, ref) => (
59 |   <ToastPrimitives.Close
60 |     ref={ref}
61 |     className={cn(
62 |       "absolute right-1 top-1 rounded-md p-1 text-foreground/50 opacity-0 transition-opacity hover:text-foreground focus:opacity-100 focus:outline-none focus:ring-1 group-hover:opacity-100 group-[.destructive]:text-red-300 group-[.destructive]:hover:text-red-50 group-[.destructive]:focus:ring-red-400 group-[.destructive]:focus:ring-offset-red-600",
63 |       className
64 |     )}
65 |     toast-close=""
66 |     {...props}>
67 |     <Cross2Icon className="h-4 w-4" />
68 |   </ToastPrimitives.Close>
69 | ))
70 | ToastClose.displayName = ToastPrimitives.Close.displayName
71 |
72 | const ToastTitle = React.forwardRef(({ className, ...props }, ref) => (
73 |   <ToastPrimitives.Title
74 |     ref={ref}
75 |     className={cn("text-sm font-semibold [&+div]:text-xs", className)}
76 |     {...props} />
77 | ))
78 | ToastTitle.displayName = ToastPrimitives.Title.displayName
79 |
80 | const ToastDescription = React.forwardRef(({ className, ...props }, ref) => (
81 |   <ToastPrimitives.Description ref={ref} className={cn("text-sm opacity-90", className)} {...props} />
82 | ))
83 | ToastDescription.displayName = ToastPrimitives.Description.displayName
84 |
85 | export { ToastProvider, ToastViewport, Toast, ToastTitle, ToastDescription, ToastClose, ToastAction };
86 |
--------------------------------------------------------------------------------
/src/components/ui/toaster.jsx:
--------------------------------------------------------------------------------
1 | import { useToast } from "./use-toast.js";
2 | import {
3 | Toast,
4 | ToastClose,
5 | ToastDescription,
6 | ToastProvider,
7 | ToastTitle,
8 | ToastViewport,
9 | } from "@/components/ui/toast";
10 |
11 | export function Toaster() {
12 | const { toasts } = useToast();
13 |
14 | return (
15 |     <ToastProvider>
16 |       {toasts.map(function ({ id, title, description, action, ...props }) {
17 |         return (
18 |           <Toast key={id} {...props}>
19 |             <div className="grid gap-1">
20 |               {title && <ToastTitle>{title}</ToastTitle>}
21 |               {description && <ToastDescription>{description}</ToastDescription>}
22 |             </div>
23 |             {action}
24 |             <ToastClose />
25 |           </Toast>
26 |         );
27 |       })}
28 |       <ToastViewport />
29 |     </ToastProvider>
30 | );
31 | }
32 |
--------------------------------------------------------------------------------
/src/components/ui/toggle-group.jsx:
--------------------------------------------------------------------------------
1 | "use client";
2 | import * as React from "react"
3 | import * as ToggleGroupPrimitive from "@radix-ui/react-toggle-group"
4 |
5 | import { cn } from "@/lib/utils"
6 | import { toggleVariants } from "@/components/ui/toggle"
7 |
8 | const ToggleGroupContext = React.createContext({
9 | size: "default",
10 | variant: "default",
11 | })
12 |
13 | const ToggleGroup = React.forwardRef(({ className, variant, size, children, ...props }, ref) => (
14 |   <ToggleGroupPrimitive.Root
15 |     ref={ref}
16 |     className={cn("flex items-center justify-center gap-1", className)}
17 |     {...props}>
18 |     <ToggleGroupContext.Provider value={{ variant, size }}>
19 |       {children}
20 |     </ToggleGroupContext.Provider>
21 |   </ToggleGroupPrimitive.Root>
22 | ))
23 |
24 | ToggleGroup.displayName = ToggleGroupPrimitive.Root.displayName
25 |
26 | const ToggleGroupItem = React.forwardRef(({ className, children, variant, size, ...props }, ref) => {
27 | const context = React.useContext(ToggleGroupContext)
28 |
29 | return (
30 |     <ToggleGroupPrimitive.Item
31 |       ref={ref}
32 |       className={cn(toggleVariants({
33 |         variant: context.variant || variant,
34 |         size: context.size || size,
35 |       }), className)}
36 |       {...props}>
37 |       {children}
38 |     </ToggleGroupPrimitive.Item>
39 | );
40 | })
41 |
42 | ToggleGroupItem.displayName = ToggleGroupPrimitive.Item.displayName
43 |
44 | export { ToggleGroup, ToggleGroupItem }
45 |
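A minimal usage sketch; `type="single"` comes from the underlying Radix ToggleGroup API, and the item values and wrapper component are hypothetical. The `variant` and `size` props flow to each item through ToggleGroupContext above:

import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group";

function OutputTypePicker() {
  return (
    <ToggleGroup type="single" defaultValue="text" variant="outline" size="sm">
      <ToggleGroupItem value="text">Text</ToggleGroupItem>
      <ToggleGroupItem value="json">JSON</ToggleGroupItem>
    </ToggleGroup>
  );
}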
--------------------------------------------------------------------------------
/src/components/ui/toggle.jsx:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 | import * as TogglePrimitive from "@radix-ui/react-toggle"
3 | import { cva } from "class-variance-authority";
4 |
5 | import { cn } from "@/lib/utils"
6 |
7 | const toggleVariants = cva(
8 | "inline-flex items-center justify-center gap-2 rounded-md text-sm font-medium transition-colors hover:bg-muted hover:text-muted-foreground focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring disabled:pointer-events-none disabled:opacity-50 data-[state=on]:bg-accent data-[state=on]:text-accent-foreground [&_svg]:pointer-events-none [&_svg]:size-4 [&_svg]:shrink-0",
9 | {
10 | variants: {
11 | variant: {
12 | default: "bg-transparent",
13 | outline:
14 | "border border-input bg-transparent shadow-sm hover:bg-accent hover:text-accent-foreground",
15 | },
16 | size: {
17 | default: "h-9 px-2 min-w-9",
18 | sm: "h-8 px-1.5 min-w-8",
19 | lg: "h-10 px-2.5 min-w-10",
20 | },
21 | },
22 | defaultVariants: {
23 | variant: "default",
24 | size: "default",
25 | },
26 | }
27 | )
28 |
29 | const Toggle = React.forwardRef(({ className, variant, size, ...props }, ref) => (
30 |   <TogglePrimitive.Root
31 |     ref={ref}
32 |     className={cn(toggleVariants({ variant, size, className }))}
33 |     {...props} />
34 | ))
35 |
36 | Toggle.displayName = TogglePrimitive.Root.displayName
37 |
38 | export { Toggle, toggleVariants }
39 |
--------------------------------------------------------------------------------
/src/components/ui/tooltip.jsx:
--------------------------------------------------------------------------------
1 | "use client"
2 |
3 | import * as React from "react"
4 | import * as TooltipPrimitive from "@radix-ui/react-tooltip"
5 |
6 | import { cn } from "@/lib/utils"
7 |
8 | const TooltipProvider = TooltipPrimitive.Provider
9 |
10 | const Tooltip = TooltipPrimitive.Root
11 |
12 | const TooltipTrigger = TooltipPrimitive.Trigger
13 |
14 | const TooltipContent = React.forwardRef(({ className, sideOffset = 4, ...props }, ref) => (
15 |   <TooltipPrimitive.Portal>
16 |     <TooltipPrimitive.Content
17 |       ref={ref}
18 |       sideOffset={sideOffset}
19 |       className={cn(
20 |         "z-50 overflow-hidden rounded-md bg-primary px-3 py-1.5 text-xs text-primary-foreground animate-in fade-in-0 zoom-in-95 data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2",
21 |         className
22 |       )}
23 |       {...props} />
24 |   </TooltipPrimitive.Portal>
25 | ))
26 | TooltipContent.displayName = TooltipPrimitive.Content.displayName
27 |
28 | export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }
29 |
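A minimal usage sketch (the trigger label, tooltip text, and wrapper component are hypothetical); TooltipProvider must wrap any tree that uses Tooltip:

import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip";

function ThreadsHint() {
  return (
    <TooltipProvider>
      <Tooltip>
        <TooltipTrigger asChild>
          <button type="button">Threads</button>
        </TooltipTrigger>
        <TooltipContent>Number of CPU threads to use.</TooltipContent>
      </Tooltip>
    </TooltipProvider>
  );
}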
--------------------------------------------------------------------------------
/src/components/ui/use-toast.js:
--------------------------------------------------------------------------------
1 | // Inspired by react-hot-toast library
2 | import * as React from "react"
3 |
4 | const TOAST_LIMIT = 1
5 | const TOAST_REMOVE_DELAY = 1000000
6 |
7 | const actionTypes = {
8 | ADD_TOAST: "ADD_TOAST",
9 | UPDATE_TOAST: "UPDATE_TOAST",
10 | DISMISS_TOAST: "DISMISS_TOAST",
11 | REMOVE_TOAST: "REMOVE_TOAST"
12 | }
13 |
14 | let count = 0
15 |
16 | function genId() {
17 | count = (count + 1) % Number.MAX_SAFE_INTEGER
18 | return count.toString();
19 | }
20 |
21 | const toastTimeouts = new Map()
22 |
23 | const addToRemoveQueue = (toastId) => {
24 | if (toastTimeouts.has(toastId)) {
25 | return
26 | }
27 |
28 | const timeout = setTimeout(() => {
29 | toastTimeouts.delete(toastId)
30 | dispatch({
31 | type: "REMOVE_TOAST",
32 | toastId: toastId,
33 | })
34 | }, TOAST_REMOVE_DELAY)
35 |
36 | toastTimeouts.set(toastId, timeout)
37 | }
38 |
39 | export const reducer = (state, action) => {
40 | switch (action.type) {
41 | case "ADD_TOAST":
42 | return {
43 | ...state,
44 | toasts: [action.toast, ...state.toasts].slice(0, TOAST_LIMIT),
45 | };
46 |
47 | case "UPDATE_TOAST":
48 | return {
49 | ...state,
50 | toasts: state.toasts.map((t) =>
51 | t.id === action.toast.id ? { ...t, ...action.toast } : t),
52 | };
53 |
54 | case "DISMISS_TOAST": {
55 | const { toastId } = action
56 |
57 | // ! Side effects ! - This could be extracted into a dismissToast() action,
58 | // but I'll keep it here for simplicity
59 | if (toastId) {
60 | addToRemoveQueue(toastId)
61 | } else {
62 | state.toasts.forEach((toast) => {
63 | addToRemoveQueue(toast.id)
64 | })
65 | }
66 |
67 | return {
68 | ...state,
69 | toasts: state.toasts.map((t) =>
70 | t.id === toastId || toastId === undefined
71 | ? {
72 | ...t,
73 | open: false,
74 | }
75 | : t),
76 | };
77 | }
78 | case "REMOVE_TOAST":
79 | if (action.toastId === undefined) {
80 | return {
81 | ...state,
82 | toasts: [],
83 | }
84 | }
85 | return {
86 | ...state,
87 | toasts: state.toasts.filter((t) => t.id !== action.toastId),
88 | };
89 | }
90 | }
91 |
92 | const listeners = []
93 |
94 | let memoryState = { toasts: [] }
95 |
96 | function dispatch(action) {
97 | memoryState = reducer(memoryState, action)
98 | listeners.forEach((listener) => {
99 | listener(memoryState)
100 | })
101 | }
102 |
103 | function toast({
104 | ...props
105 | }) {
106 | const id = genId()
107 |
108 | const update = (props) =>
109 | dispatch({
110 | type: "UPDATE_TOAST",
111 | toast: { ...props, id },
112 | })
113 | const dismiss = () => dispatch({ type: "DISMISS_TOAST", toastId: id })
114 |
115 | dispatch({
116 | type: "ADD_TOAST",
117 | toast: {
118 | ...props,
119 | id,
120 | open: true,
121 | onOpenChange: (open) => {
122 | if (!open) dismiss()
123 | },
124 | },
125 | })
126 |
127 | return {
128 | id: id,
129 | dismiss,
130 | update,
131 | }
132 | }
133 |
134 | function useToast() {
135 | const [state, setState] = React.useState(memoryState)
136 |
137 | React.useEffect(() => {
138 | listeners.push(setState)
139 | return () => {
140 | const index = listeners.indexOf(setState)
141 | if (index > -1) {
142 | listeners.splice(index, 1)
143 | }
144 | };
145 | }, [state])
146 |
147 | return {
148 | ...state,
149 | toast,
150 | dismiss: (toastId) => dispatch({ type: "DISMISS_TOAST", toastId }),
151 | };
152 | }
153 |
154 | export { useToast, toast }
155 |
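A minimal sketch of how this module-level store is typically consumed; the `App` component and toast text are hypothetical, and `<Toaster />` (from toaster.jsx above) must be mounted once for toasts to render:

import { Toaster } from "@/components/ui/toaster";
import { toast } from "@/components/ui/use-toast";

function App() {
  return (
    <>
      {/* toast() dispatches ADD_TOAST; every mounted useToast() subscriber re-renders */}
      <button onClick={() => toast({ title: "Benchmark complete" })}>Notify</button>
      <Toaster />
    </>
  );
}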
--------------------------------------------------------------------------------
/src/data/locales/da/Benchmark.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Electron BitNet Benchmarking Tool",
3 | "description": "Benchmark din Microsoft BitNet 1-bit LLM-model nedenfor, idet du husker på, at større testvariabler tager længere tid at køre.",
4 | "commandOptions": "Kommandoindstillinger",
5 | "numberOfTokens": "Antal tokens",
6 | "numberOfTokensInfo": "Angiv antallet af tokens, der skal genereres under benchmark.",
7 | "model": "Model",
8 | "modelInfo": "Indtast stien til den kvantiserede '{{fileFormat}}' modelfil genereret ved hjælp af Microsofts BitNet '{{script}}' script.",
9 | "threads": "Tråde",
10 | "threadsInfo": "Angiv antallet af tråde, der skal bruges til benchmark.",
11 | "promptLength": "Spørglængde",
12 | "promptLengthInfo": "Angiv længden af den prompt, der skal genereres tekst fra.",
13 | "runBenchmark": "Kør Benchmark",
14 | "stopBenchmark": "Stop Benchmark",
15 | "log": "Benchmark log",
16 | "license": "Licenseret under {{license}}",
17 | "builtWith": "Bygget med"
18 | }
--------------------------------------------------------------------------------
/src/data/locales/da/InstructionModel.json:
--------------------------------------------------------------------------------
1 | {
2 | "loading": "Indlæser ...",
3 | "delete": "Slet",
4 | "title": "Instruktionsfølgende tilstand",
5 | "description": "Deltag i en samtale med 1-bit LLM. ",
6 | "commandOptions": "Kommandoindstillinger",
7 | "numberOfTokens": "Max -tokens pr. Tur",
8 | "model": "Model",
9 | "threads": "Tråde",
10 | "contextSize": "Kontekststørrelse",
11 | "temperature": "Temperatur",
12 | "prompt": "Hurtig",
13 | "numberOfTokensInfo": "Det maksimale antal tokens (ord) AI genererer i hvert svar.",
14 | "modelInfo": "Vælg den kvantiserede '{{fileFormat}}'Modelfil genereret ved hjælp af Microsofts Bitnet'{{script}}'Script.",
15 | "threadsInfo": "Antallet af tråde, der skal bruges til at køre inferensen, begrænset til antallet af tilgængelige tråde på CPU'en.",
16 | "contextSizeInfo": "Størrelsen på den hurtige kontekst bestemmer, hvor meget af samtalens historie der overvejes under inferens.",
17 | "temperatureInfo": "Kontrollerer tilfældigheden af den genererede tekst. ",
18 | "promptInfo": "Dette er den indledende tekst, som modellen vil bruge til at begynde at generere output.",
19 | "runInference": "Kør inferens",
20 | "stopInference": "Stop inferensen",
21 | "response": "Svar",
22 | "license": "{{license}} Licenseret kode",
23 | "builtWith": "bygget med",
24 | "noModelSelected": "Ingen model valgt",
25 | "systemPrompt": "Systemprompt",
26 | "systemPromptInfo": "Definer AI's rolle eller personlighed (f.eks. 'Du er en hjælpsom assistent.', 'Du er en pirat.'). ",
27 | "systemPromptPlaceholder": "Indtast systemprompt her ...",
28 | "startConversation": "Start samtale",
29 | "starting": "Start ...",
30 | "stopConversation": "Stop samtale",
31 | "chat": "Snak",
32 | "typing": "AI skriver ...",
33 | "typeMessagePlaceholder": "Skriv din besked her ...",
34 | "sendMessage": "Send besked",
35 | "you": "Du",
36 | "ai": "Ai",
37 | "youAvatar": "Du",
38 | "aiAvatar": "Ai",
39 | "regenerate": "Regenerer svar",
40 | "copy": "Kopiér svar",
41 | "copied": "Kopieret!",
42 | "expand": "Udvid svar",
43 | "collapse": "Skjul svar"
44 | }
--------------------------------------------------------------------------------
/src/data/locales/da/PageHeader.json:
--------------------------------------------------------------------------------
1 | {
2 | "commandSearchPlaceholder": "Skriv en kommando eller søg...",
3 | "noResultsFound": "Ingen resultater fundet.",
4 | "english": "engelsk ({{locale}})",
5 | "danish": "dansk ({{locale}})",
6 | "german": "tysk ({{locale}})",
7 | "spanish": "spansk ({{locale}})",
8 | "french": "fransk ({{locale}})",
9 | "italian": "italiensk ({{locale}})",
10 | "japanese": "japansk ({{locale}})",
11 | "korean": "koreansk ({{locale}})",
12 | "portuguese": "portugisisk ({{locale}})",
13 | "thai": "Thai ({{locale}})",
14 | "taiwanese": "taiwansk ({{locale}})",
15 | "index": "Inferens Dashboard",
16 | "benchmark": "Benchmark modeller",
17 | "perplexity": "Beregn forvirring",
18 | "back": "Gå tilbage",
19 | "llmFunctionality": "LLM funktionalitet",
20 | "about": "Om"
21 | }
--------------------------------------------------------------------------------
/src/data/locales/da/Perplexity.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Electron BitNet Perplexity Tool",
3 | "description": "Beregn forvirringen (modellens tillid til at forudsige det næste ord) af din BitNet-model nedenfor.",
4 | "commandOptions": "Kommandoindstillinger",
5 | "prompt": "Hurtig",
6 | "promptInfo": "Dette er den indledende tekst, som modellen vil bruge til at begynde at generere output.",
7 | "model": "Model",
8 | "modelInfo": "Indtast stien til den kvantiserede '{{fileFormat}}' modelfil genereret ved hjælp af Microsofts '{{script}}' script.",
9 | "threads": "Tråde",
10 | "threadsInfo": "Antallet af tråde, der skal bruges til at køre forvirringsberegningen.",
11 | "contextSize": "Kontekststørrelse",
12 | "contextSizeInfo": "Størrelsen af promptkonteksten bestemmer, hvor meget af prompten, der tages i betragtning under forvirringsberegningen.",
13 | "pplStride": "Forvirring skridt",
14 | "pplStrideInfo": "Skridt for forvirring beregning.",
15 | "pplOutputType": "Perplexity Output Type",
16 | "pplOutputTypeInfo": "Outputtype til forvirringsberegning.",
17 | "runPerplexity": "Beregn forvirring",
18 | "stopPerplexity": "Stop beregningen",
19 | "log": "Beregnet perplexitetsresultat",
20 | "error": "Fejl",
21 | "license": "{{license}} Licenseret kode",
22 | "builtWith": "bygget med",
23 | "insufficientPromptTokens": "Utilstrækkelige prompt-tokens, dobbelt kontekststørrelse i tokens er påkrævet for at fortsætte."
24 | }
--------------------------------------------------------------------------------
/src/data/locales/de/Benchmark.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Electron BitNet Benchmarking-Tool",
3 | "description": "Führen Sie unten einen Vergleich Ihres Microsoft BitNet 1-Bit-LLM-Modells durch. Beachten Sie dabei, dass die Ausführung größerer Testvariablen länger dauert.",
4 | "commandOptions": "Befehlsoptionen",
5 | "numberOfTokens": "Anzahl der Token",
6 | "numberOfTokensInfo": "Geben Sie die Anzahl der Token an, die während des Benchmarks generiert werden sollen.",
7 | "model": "Modell",
8 | "modelInfo": "Geben Sie den Pfad zum quantisierten ' ein.{{fileFormat}}' Modelldatei, generiert mit Microsofts BitNet '{{script}}' Skript.",
9 | "threads": "Themen",
10 | "threadsInfo": "Geben Sie die Anzahl der Threads an, die für den Benchmark verwendet werden sollen.",
11 | "promptLength": "Schnelle Länge",
12 | "promptLengthInfo": "Geben Sie die Länge der Eingabeaufforderung an, aus der Text generiert werden soll.",
13 | "runBenchmark": "Benchmark ausführen",
14 | "stopBenchmark": "Benchmark stoppen",
15 | "log": "Benchmark-Protokoll",
16 | "license": "Lizenziert unter {{license}}",
17 | "builtWith": "Gebaut mit"
18 | }
--------------------------------------------------------------------------------
/src/data/locales/de/InstructionModel.json:
--------------------------------------------------------------------------------
1 | {
2 | "loading": "Laden...",
3 | "delete": "Löschen",
4 | "title": "Befehlsmodus",
5 | "description": "Ein Gespräch mit dem 1-Bit-LLM führen. ",
6 | "commandOptions": "Befehlsoptionen",
7 | "numberOfTokens": "Max -Token pro Runde",
8 | "model": "Modell",
9 | "threads": "Themen",
10 | "contextSize": "Kontextgröße",
11 | "temperature": "Temperatur",
12 | "prompt": "Prompt",
13 | "numberOfTokensInfo": "Die maximale Anzahl von Token (Wörtern) Die KI erzeugt in jeder Antwort.",
14 | "modelInfo": "Wählen Sie das quantisierte '{{fileFormat}}\"Modelldatei mit Microsofts BitNet generiert\"{{script}}'Skript.",
15 | "threadsInfo": "Die Anzahl der Threads, die zum Ausführen der Inferenz verwendet werden sollen, beschränkt sich auf die Anzahl der auf der CPU verfügbaren Threads.",
16 | "contextSizeInfo": "Die Größe des schnellen Kontextes bestimmt, wie viel der Gesprächsgeschichte während der Inferenz berücksichtigt wird.",
17 | "temperatureInfo": "Steuert die Zufälligkeit des generierten Textes. ",
18 | "promptInfo": "Dies ist der anfängliche Text, mit dem das Modell die Ausgabe generiert.",
19 | "runInference": "Inferenz laufen",
20 | "stopInference": "Stoppen Sie den Inferenz",
21 | "response": "Antwort",
22 | "license": "{{license}} Lizenzierter Code",
23 | "builtWith": "gebaut mit",
24 | "noModelSelected": "Kein Modell ausgewählt",
25 | "systemPrompt": "Systemaufforderung",
26 | "systemPromptInfo": "Definieren Sie die Rolle oder Persönlichkeit der KI (z. B. \"Sie sind ein hilfreicher Assistent.\", \"Sie sind ein Pirat\"). ",
27 | "systemPromptPlaceholder": "Geben Sie hier die Systemaufforderung ein ...",
28 | "startConversation": "Gespräch beginnen",
29 | "starting": "Start ...",
30 | "stopConversation": "Beendigung des Gesprächs",
31 | "chat": "Chat",
32 | "typing": "KI tippt ...",
33 | "typeMessagePlaceholder": "Geben Sie Ihre Nachricht hier ein ...",
34 | "sendMessage": "Nachricht senden",
35 | "you": "Du",
36 | "ai": "Ai",
37 | "youAvatar": "Du",
38 | "aiAvatar": "Ai",
39 | "regenerate": "Antwort neu generieren",
40 | "copy": "Antwort kopieren",
41 | "copied": "Kopiert!",
42 | "expand": "Antwort erweitern",
43 | "collapse": "Antwort reduzieren"
44 | }
--------------------------------------------------------------------------------
/src/data/locales/de/PageHeader.json:
--------------------------------------------------------------------------------
1 | {
2 | "commandSearchPlaceholder": "Geben Sie einen Befehl ein oder suchen Sie...",
3 | "noResultsFound": "Keine Ergebnisse gefunden.",
4 | "english": "Englisch ({{locale}})",
5 | "danish": "Dänisch ({{locale}})",
6 | "german": "Deutsch ({{locale}})",
7 | "spanish": "Spanisch ({{locale}})",
8 | "french": "Französisch ({{locale}})",
9 | "italian": "Italienisch ({{locale}})",
10 | "japanese": "Japanisch ({{locale}})",
11 | "korean": "Koreanisch ({{locale}})",
12 | "portuguese": "Portugiesisch ({{locale}})",
13 | "thai": "Thailändisch ({{locale}})",
14 | "taiwanese": "Taiwanesisch ({{locale}})",
15 | "index": "Inferenz-Dashboard",
16 | "benchmark": "Benchmark-Modelle",
17 | "perplexity": "Berechnen Sie die Ratlosigkeit",
18 | "back": "Geh zurück",
19 | "llmFunctionality": "LLM-Funktionalität",
20 | "about": "Um"
21 | }
--------------------------------------------------------------------------------
/src/data/locales/de/Perplexity.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Electron BitNet Perplexity Tool",
3 | "description": "Berechnen Sie unten die Perplexität (das Vertrauen des Modells bei der Vorhersage des nächsten Wortes) Ihres BitNet-Modells.",
4 | "commandOptions": "Befehlsoptionen",
5 | "prompt": "Prompt",
6 | "promptInfo": "Dies ist der Anfangstext, den das Modell verwendet, um mit der Generierung der Ausgabe zu beginnen.",
7 | "model": "Modell",
8 | "modelInfo": "Geben Sie den Pfad zum quantisierten ' ein.{{fileFormat}}' Modelldatei, generiert mit Microsofts '{{script}}' Skript.",
9 | "threads": "Themen",
10 | "threadsInfo": "Die Anzahl der Threads, die zum Ausführen der Perplexitätsberechnung verwendet werden sollen.",
11 | "contextSize": "Kontextgröße",
12 | "contextSizeInfo": "Die Größe des Eingabeaufforderungskontexts bestimmt, wie viel von der Eingabeaufforderung bei der Perplexitätsberechnung berücksichtigt wird.",
13 | "pplStride": "Ratlosigkeitsschritt",
14 | "pplStrideInfo": "Schritt für Schritt zur Ratlosigkeitsberechnung.",
15 | "pplOutputType": "Perplexity-Ausgabetyp",
16 | "pplOutputTypeInfo": "Ausgabetyp für Ratlosigkeitsberechnung.",
17 | "runPerplexity": "Berechnen Sie die Ratlosigkeit",
18 | "stopPerplexity": "Berechnung stoppen",
19 | "log": "Berechnetes Ratlosigkeitsergebnis",
20 | "error": "Fehler",
21 | "license": "{{license}} Lizenzierter Code",
22 | "builtWith": "gebaut mit",
23 | "insufficientPromptTokens": "Nicht genügend Eingabeaufforderungstoken. Zum Fortfahren ist die doppelte Kontextgröße in Token erforderlich."
24 | }
--------------------------------------------------------------------------------
/src/data/locales/en/Benchmark.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Electron BitNet Benchmarking Tool",
3 | "description": "Benchmark your Microsoft BitNet 1-bit LLM model below, bearing in mind that larger test variables take longer to run.",
4 | "commandOptions": "Command Options",
5 | "numberOfTokens": "Number of Tokens",
6 | "numberOfTokensInfo": "Specify the number of tokens to generate during the benchmark.",
7 | "model": "Model",
8 | "modelInfo": "Input the path to the quantized '{{fileFormat}}' model file generated using Microsoft's BitNet '{{script}}' script.",
9 | "threads": "Threads",
10 | "threadsInfo": "Specify the number of threads to use for the benchmark.",
11 | "promptLength": "Prompt Length",
12 | "promptLengthInfo": "Specify the length of the prompt to generate text from.",
13 | "runBenchmark": "Run Benchmark",
14 | "stopBenchmark": "Stop Benchmark",
15 | "log": "Benchmark Log",
16 | "license": "Licensed under {{license}}",
17 | "builtWith": "Built with"
18 | }
--------------------------------------------------------------------------------
/src/data/locales/en/InstructionModel.json:
--------------------------------------------------------------------------------
1 | {
2 | "loading": "Loading...",
3 | "delete": "Delete",
4 | "title": "Instruction Following Mode",
5 | "description": "Engage in a conversation with the 1-bit LLM. Provide a system prompt and then chat interactively.",
6 | "commandOptions": "Command Options",
7 | "numberOfTokens": "Max Tokens Per Turn",
8 | "model": "Model",
9 | "threads": "Threads",
10 | "contextSize": "Context size",
11 | "temperature": "Temperature",
12 | "prompt": "Prompt",
13 | "numberOfTokensInfo": "The maximum number of tokens (words) the AI will generate in each response.",
14 | "modelInfo": "Select the quantized '{{fileFormat}}' model file generated using Microsoft's BitNet '{{script}}' script.",
15 | "threadsInfo": "The number of threads to use for running the inference, limited to the number of threads available on the CPU.",
16 | "contextSizeInfo": "The size of the prompt context determines how much of the conversation history is considered during inference.",
17 | "temperatureInfo": "Controls the randomness of the generated text. Lower values (e.g., 0.2) make it more focused, higher values (e.g., 1.0) make it more creative.",
18 | "promptInfo": "This is the initial text that the model will use to start generating the output.",
19 | "runInference": "Run Inference",
20 | "stopInference": "Stop Inference",
21 | "response": "Response",
22 | "license": "{{license}} Licensed code",
23 | "builtWith": "built with",
24 | "noModelSelected": "No model selected",
25 | "systemPrompt": "System Prompt",
26 | "systemPromptInfo": "Define the AI's role or personality (e.g., 'You are a helpful assistant.', 'You are a pirate.'). This is sent once at the start.",
27 | "systemPromptPlaceholder": "Enter the system prompt here...",
28 | "startConversation": "Start Conversation",
29 | "starting": "Starting...",
30 | "stopConversation": "Stop Conversation",
31 | "chat": "Chat",
32 | "typing": "AI is typing...",
33 | "typeMessagePlaceholder": "Type your message here...",
34 | "sendMessage": "Send Message",
35 | "you": "You",
36 | "ai": "AI",
37 | "youAvatar": "You",
38 | "aiAvatar": "AI",
39 | "regenerate": "Regenerate Response",
40 | "copy": "Copy Response",
41 | "copied": "Copied!",
42 | "expand": "Expand Response",
43 | "collapse": "Collapse Response"
44 | }
--------------------------------------------------------------------------------
/src/data/locales/en/PageHeader.json:
--------------------------------------------------------------------------------
1 | {
2 | "commandSearchPlaceholder": "Type a command or search...",
3 | "noResultsFound": "No results found.",
4 | "english": "English ({{locale}})",
5 | "danish": "Danish ({{locale}})",
6 | "german": "German ({{locale}})",
7 | "spanish": "Spanish ({{locale}})",
8 | "french": "French ({{locale}})",
9 | "italian": "Italian ({{locale}})",
10 | "japanese": "Japanese ({{locale}})",
11 | "korean": "Korean ({{locale}})",
12 | "portuguese": "Portuguese ({{locale}})",
13 | "thai": "Thai ({{locale}})",
14 | "taiwanese": "Taiwanese ({{locale}})",
15 | "index": "Inference Dashboard",
16 | "benchmark": "Benchmark Models",
17 | "perplexity": "Calculate Perplexity",
18 | "back": "Go back",
19 | "llmFunctionality": "LLM Functionality",
20 | "about": "About"
21 | }
22 |
--------------------------------------------------------------------------------
/src/data/locales/en/Perplexity.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Electron BitNet Perplexity Tool",
3 | "description": "Calculate the perplexity (the model's confidence in predicting the next word) of your BitNet model below.",
4 | "commandOptions": "Command Options",
5 | "prompt": "Prompt",
6 | "promptInfo": "This is the initial text that the model will use to start generating the output.",
7 | "model": "Model",
8 | "modelInfo": "Input the path to the quantized '{{fileFormat}}' model file generated using Microsoft's '{{script}}' script.",
9 | "threads": "Threads",
10 | "threadsInfo": "The number of threads to use for running the perplexity calculation.",
11 | "contextSize": "Context size",
12 | "contextSizeInfo": "The size of the prompt context determines how much of the prompt is considered during perplexity calculation.",
13 | "pplStride": "Perplexity Stride",
14 | "pplStrideInfo": "Stride for perplexity calculation.",
15 | "pplOutputType": "Perplexity Output Type",
16 | "pplOutputTypeInfo": "Output type for perplexity calculation.",
17 | "runPerplexity": "Calculate Perplexity",
18 | "stopPerplexity": "Stop calculation",
19 | "log": "Calculated Perplexity Result",
20 | "error": "Error",
21 | "license": "{{license}} Licensed code",
22 | "builtWith": "built with",
23 | "insufficientPromptTokens": "Insufficient prompt tokens, double the context size in tokens is required to proceed."
24 | }
--------------------------------------------------------------------------------
/src/data/locales/es/Benchmark.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Herramienta de evaluación comparativa Electron BitNet",
3 | "description": "Compare su modelo LLM de 1 bit de Microsoft BitNet a continuación, teniendo en cuenta que las variables de prueba más grandes tardan más en ejecutarse.",
4 | "commandOptions": "Opciones de comando",
5 | "numberOfTokens": "Número de fichas",
6 | "numberOfTokensInfo": "Especifique la cantidad de tokens que se generarán durante la prueba comparativa.",
7 | "model": "Modelo",
8 | "modelInfo": "Ingrese la ruta al 'cuantizado'{{fileFormat}}'archivo de modelo generado utilizando BitNet de Microsoft'{{script}}' guion.",
9 | "threads": "Trapos",
10 | "threadsInfo": "Especifique el número de subprocesos que se utilizarán para la prueba comparativa.",
11 | "promptLength": "Longitud del mensaje",
12 | "promptLengthInfo": "Especifique la longitud del mensaje a partir del cual generar texto.",
13 | "runBenchmark": "Ejecutar punto de referencia",
14 | "stopBenchmark": "Detener punto de referencia",
15 | "log": "Registro de referencia",
16 | "license": "Licenciado bajo {{license}}",
17 | "builtWith": "Construido con"
18 | }
--------------------------------------------------------------------------------
/src/data/locales/es/InstructionModel.json:
--------------------------------------------------------------------------------
1 | {
2 | "loading": "Cargando...",
3 | "delete": "Borrar",
4 | "title": "Modo de instrucción siguiente",
5 | "description": "Participe en una conversación con el LLM de 1 bits. ",
6 | "commandOptions": "Opciones de comando",
7 | "numberOfTokens": "Tokens máximo por turno",
8 | "model": "Modelo",
9 | "threads": "Trapos",
10 | "contextSize": "Tamaño de contexto",
11 | "temperature": "Temperatura",
12 | "prompt": "Inmediato",
13 | "numberOfTokensInfo": "El número máximo de tokens (palabras) que la IA generará en cada respuesta.",
14 | "modelInfo": "Seleccione el cuantizado '{{fileFormat}}'Archivo de modelo generado utilizando BitNet de Microsoft'{{script}}' guion.",
15 | "threadsInfo": "El número de subprocesos a usar para ejecutar la inferencia, limitado al número de subprocesos disponibles en la CPU.",
16 | "contextSizeInfo": "El tamaño del contexto rápido determina cuánto del historial de conversación se considera durante la inferencia.",
17 | "temperatureInfo": "Controla la aleatoriedad del texto generado. ",
18 | "promptInfo": "Este es el texto inicial que el modelo usará para comenzar a generar la salida.",
19 | "runInference": "Inferencia de ejecución",
20 | "stopInference": "Detener la inferencia",
21 | "response": "Respuesta",
22 | "license": "{{license}} Código con licencia",
23 | "builtWith": "construido con",
24 | "noModelSelected": "Ningún modelo seleccionado",
25 | "systemPrompt": "Aviso del sistema",
26 | "systemPromptInfo": "Defina el papel o la personalidad de la IA (por ejemplo, 'usted es un asistente útil', 'usted es un pirata'). ",
27 | "systemPromptPlaceholder": "Ingrese la solicitud del sistema aquí ...",
28 | "startConversation": "Iniciar conversación",
29 | "starting": "A partir de...",
30 | "stopConversation": "Detener la conversación",
31 | "chat": "Charlar",
32 | "typing": "Ai está escribiendo ...",
33 | "typeMessagePlaceholder": "Escriba su mensaje aquí ...",
34 | "sendMessage": "Enviar un mensaje",
35 | "you": "Tú",
36 | "ai": "AI",
37 | "youAvatar": "Tú",
38 | "aiAvatar": "AI",
39 | "regenerate": "Regenerar respuesta",
40 | "copy": "Copiar respuesta",
41 | "copied": "¡Copiado!",
42 | "expand": "Expandir respuesta",
43 | "collapse": "Contraer respuesta"
44 | }
--------------------------------------------------------------------------------
/src/data/locales/es/PageHeader.json:
--------------------------------------------------------------------------------
1 | {
2 | "commandSearchPlaceholder": "Escribe un comando o busca...",
3 | "noResultsFound": "No se encontraron resultados.",
4 | "english": "Inglés ({{locale}})",
5 | "danish": "danés ({{locale}})",
6 | "german": "Alemán ({{locale}})",
7 | "spanish": "Español ({{locale}})",
8 | "french": "Francés ({{locale}})",
9 | "italian": "italiano ({{locale}})",
10 | "japanese": "japonés ({{locale}})",
11 | "korean": "coreano ({{locale}})",
12 | "portuguese": "portugués ({{locale}})",
13 | "thai": "tailandés ({{locale}})",
14 | "taiwanese": "taiwanés ({{locale}})",
15 | "index": "Panel de inferencia",
16 | "benchmark": "Modelos de referencia",
17 | "perplexity": "Calcular la perplejidad",
18 | "back": "Volver",
19 | "llmFunctionality": "Funcionalidad del Máster en Derecho",
20 | "about": "Acerca de"
21 | }
--------------------------------------------------------------------------------
/src/data/locales/es/Perplexity.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Herramienta de perplejidad Electron BitNet",
3 | "description": "Calcule la perplejidad (la confianza del modelo para predecir la siguiente palabra) de su modelo BitNet a continuación.",
4 | "commandOptions": "Opciones de comando",
5 | "prompt": "Inmediato",
6 | "promptInfo": "Este es el texto inicial que el modelo utilizará para comenzar a generar el resultado.",
7 | "model": "Modelo",
8 | "modelInfo": "Ingrese la ruta al 'cuantizado'{{fileFormat}}'archivo de modelo generado usando Microsoft'{{script}}' guion.",
9 | "threads": "Trapos",
10 | "threadsInfo": "El número de subprocesos que se utilizarán para ejecutar el cálculo de perplejidad.",
11 | "contextSize": "Tamaño del contexto",
12 | "contextSizeInfo": "El tamaño del contexto del mensaje determina qué parte del mensaje se considera durante el cálculo de perplejidad.",
13 | "pplStride": "Paso de perplejidad",
14 | "pplStrideInfo": "Paso para el cálculo de la perplejidad.",
15 | "pplOutputType": "Tipo de salida de perplejidad",
16 | "pplOutputTypeInfo": "Tipo de salida para el cálculo de perplejidad.",
17 | "runPerplexity": "Calcular la perplejidad",
18 | "stopPerplexity": "Detener cálculo",
19 | "log": "Resultado de perplejidad calculado",
20 | "error": "Error",
21 | "license": "{{license}} Código con licencia",
22 | "builtWith": "construido con",
23 | "insufficientPromptTokens": "Tokens de aviso insuficientes; se requiere duplicar el tamaño del contexto en tokens para continuar."
24 | }
--------------------------------------------------------------------------------
/src/data/locales/fr/Benchmark.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Outil d'analyse comparative Electron BitNet",
3 | "description": "Comparez votre modèle LLM Microsoft BitNet 1 bit ci-dessous, en gardant à l'esprit que les variables de test plus importantes prennent plus de temps à s'exécuter.",
4 | "commandOptions": "Options de commande",
5 | "numberOfTokens": "Nombre de jetons",
6 | "numberOfTokensInfo": "Spécifiez le nombre de jetons à générer lors du benchmark.",
7 | "model": "Modèle",
8 | "modelInfo": "Entrez le chemin vers le ' quantifié{{fileFormat}}'fichier modèle généré à l'aide de BitNet de Microsoft'{{script}}' scénario.",
9 | "threads": "Sujets",
10 | "threadsInfo": "Spécifiez le nombre de threads à utiliser pour le test de performance.",
11 | "promptLength": "Longueur de l'invite",
12 | "promptLengthInfo": "Spécifiez la longueur de l'invite à partir de laquelle générer le texte.",
13 | "runBenchmark": "Exécuter une analyse comparative",
14 | "stopBenchmark": "Arrêter le benchmark",
15 | "log": "Journal de référence",
16 | "license": "Autorisé sous {{license}}",
17 | "builtWith": "Construit avec"
18 | }
--------------------------------------------------------------------------------
/src/data/locales/fr/InstructionModel.json:
--------------------------------------------------------------------------------
1 | {
2 | "loading": "Chargement...",
3 | "delete": "Supprimer",
4 | "title": "Mode suivant instruction",
5 | "description": "Engagez une conversation avec le LLM 1 bits. ",
6 | "commandOptions": "Options de commande",
7 | "numberOfTokens": "Jetons max par tour",
8 | "model": "Modèle",
9 | "threads": "Threads",
10 | "contextSize": "Taille de contexte",
11 | "temperature": "Température",
12 | "prompt": "Rapide",
13 | "numberOfTokensInfo": "Le nombre maximum de jetons (mots) que l'IA générera dans chaque réponse.",
14 | "modelInfo": "Sélectionnez le quantifié '{{fileFormat}}'Fichier modèle généré à l'aide de Bitnet de Microsoft'{{script}}' scénario.",
15 | "threadsInfo": "Le nombre de threads à utiliser pour exécuter l'inférence, limité au nombre de threads disponibles sur le CPU.",
16 | "contextSizeInfo": "La taille du contexte rapide détermine la quantité de l'historique de la conversation considérée pendant l'inférence.",
17 | "temperatureInfo": "Contrôle l'aléatoire du texte généré. ",
18 | "promptInfo": "Il s'agit du texte initial que le modèle utilisera pour commencer à générer la sortie.",
19 | "runInference": "Courir l'inférence",
20 | "stopInference": "Arrêter l'inférence",
21 | "response": "Réponse",
22 | "license": "{{license}} Code sous licence",
23 | "builtWith": "construit avec",
24 | "noModelSelected": "Aucun modèle sélectionné",
25 | "systemPrompt": "Invite du système",
26 | "systemPromptInfo": "Définissez le rôle ou la personnalité de l'IA (par exemple, «vous êtes un assistant utile.», «Vous êtes un pirate».). ",
27 | "systemPromptPlaceholder": "Entrez l'invite système ici ...",
28 | "startConversation": "Commencer la conversation",
29 | "starting": "Départ...",
30 | "stopConversation": "Stop Conversation",
31 | "chat": "Chat",
32 | "typing": "L'IA tape ...",
33 | "typeMessagePlaceholder": "Tapez votre message ici ...",
34 | "sendMessage": "Envoyer un message",
35 | "you": "Toi",
36 | "ai": "IA",
37 | "youAvatar": "Toi",
38 | "aiAvatar": "IA",
39 | "regenerate": "Régénérer la réponse",
40 | "copy": "Copier la réponse",
41 | "copied": "Copié !",
42 | "expand": "Développer la réponse",
43 | "collapse": "Réduire la réponse"
44 | }
--------------------------------------------------------------------------------
/src/data/locales/fr/PageHeader.json:
--------------------------------------------------------------------------------
1 | {
2 | "commandSearchPlaceholder": "Tapez une commande ou recherchez...",
3 | "noResultsFound": "Aucun résultat trouvé.",
4 | "english": "Anglais ({{locale}})",
5 | "danish": "danois ({{locale}})",
6 | "german": "Allemand ({{locale}})",
7 | "spanish": "Espagnol ({{locale}})",
8 | "french": "Français ({{locale}})",
9 | "italian": "italien ({{locale}})",
10 | "japanese": "Japonais ({{locale}})",
11 | "korean": "coréen ({{locale}})",
12 | "portuguese": "Portugais ({{locale}})",
13 | "thai": "Thaï ({{locale}})",
14 | "taiwanese": "Taïwanais ({{locale}})",
15 | "index": "Tableau de bord d'inférence",
16 | "benchmark": "Modèles de référence",
17 | "perplexity": "Calculer la perplexité",
18 | "back": "Retourner",
19 | "llmFunctionality": "Fonctionnalité LLM",
20 | "about": "À propos"
21 | }
--------------------------------------------------------------------------------
/src/data/locales/fr/Perplexity.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Outil de perplexité Electron BitNet",
3 | "description": "Calculez la perplexité (la confiance du modèle dans la prédiction du mot suivant) de votre modèle BitNet ci-dessous.",
4 | "commandOptions": "Options de commande",
5 | "prompt": "Rapide",
6 | "promptInfo": "Il s'agit du texte initial que le modèle utilisera pour commencer à générer la sortie.",
7 | "model": "Modèle",
8 | "modelInfo": "Entrez le chemin vers le ' quantifié{{fileFormat}}'fichier modèle généré à l'aide de Microsoft'{{script}}' scénario.",
9 | "threads": "Sujets",
10 | "threadsInfo": "Le nombre de threads à utiliser pour exécuter le calcul de perplexité.",
11 | "contextSize": "Taille du contexte",
12 | "contextSizeInfo": "La taille du contexte d'invite détermine la quantité d'invite prise en compte lors du calcul de la perplexité.",
13 | "pplStride": "Pas de perplexité",
14 | "pplStrideInfo": "Pas à pas pour le calcul de la perplexité.",
15 | "pplOutputType": "Type de sortie Perplexité",
16 | "pplOutputTypeInfo": "Type de sortie pour le calcul de perplexité.",
17 | "runPerplexity": "Calculer la perplexité",
18 | "stopPerplexity": "Arrêter le calcul",
19 | "log": "Résultat de perplexité calculé",
20 | "error": "Erreur",
21 | "license": "{{license}} Code sous licence",
22 | "builtWith": "construit avec",
23 | "insufficientPromptTokens": "Jetons d'invite insuffisants ; le double de la taille du contexte en jetons est nécessaire pour continuer."
24 | }
--------------------------------------------------------------------------------
/src/data/locales/it/Benchmark.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Strumento di benchmarking Electron BitNet",
3 | "description": "Confronta di seguito il tuo modello LLM Microsoft BitNet a 1 bit, tenendo presente che le variabili di test più grandi richiedono più tempo per l'esecuzione.",
4 | "commandOptions": "Opzioni di comando",
5 | "numberOfTokens": "Numero di token",
6 | "numberOfTokensInfo": "Specificare il numero di token da generare durante il benchmark.",
7 | "model": "Modello",
8 | "modelInfo": "Inserisci il percorso del \"quantizzato\"{{fileFormat}}'file modello generato utilizzando BitNet di Microsoft'{{script}}' copione.",
9 | "threads": "Discussioni",
10 | "threadsInfo": "Specificare il numero di thread da utilizzare per il benchmark.",
11 | "promptLength": "Lunghezza richiesta",
12 | "promptLengthInfo": "Specificare la lunghezza della richiesta da cui generare il testo.",
13 | "runBenchmark": "Esegui benchmark",
14 | "stopBenchmark": "Ferma il benchmark",
15 | "log": "Registro dei benchmark",
16 | "license": "Concesso in licenza sotto {{license}}",
17 | "builtWith": "Costruito con"
18 | }
--------------------------------------------------------------------------------
/src/data/locales/it/InstructionModel.json:
--------------------------------------------------------------------------------
1 | {
2 | "loading": "Caricamento...",
3 | "delete": "Eliminare",
4 | "title": "Modalità seguente istruzioni",
5 | "description": "Impegnarsi in una conversazione con l'LLM a 1 bit. ",
6 | "commandOptions": "Opzioni di comando",
7 | "numberOfTokens": "Token massimi per turno",
8 | "model": "Modello",
9 | "threads": "Discussioni",
10 | "contextSize": "Dimensione del contesto",
11 | "temperature": "Temperatura",
12 | "prompt": "Richiesta",
13 | "numberOfTokensInfo": "Il numero massimo di token (parole) l'IA genererà in ogni risposta.",
14 | "modelInfo": "Seleziona il quantizzato '{{fileFormat}}\"File modello generato utilizzando BitNet di Microsoft\"{{script}}'Script.",
15 | "threadsInfo": "Il numero di thread da utilizzare per l'esecuzione dell'inferenza, limitato al numero di thread disponibili sulla CPU.",
16 | "contextSizeInfo": "La dimensione del contesto rapido determina la quantità di cronologia della conversazione durante l'inferenza.",
17 | "temperatureInfo": "Controlla la casualità del testo generato. ",
18 | "promptInfo": "Questo è il testo iniziale che il modello utilizzerà per iniziare a generare l'output.",
19 | "runInference": "Eseguire l'inferenza",
20 | "stopInference": "Fermare l'inferenza",
21 | "response": "Risposta",
22 | "license": "{{license}} Codice autorizzato",
23 | "builtWith": "costruito con",
24 | "noModelSelected": "Nessun modello selezionato",
25 | "systemPrompt": "Prompt del sistema",
26 | "systemPromptInfo": "Definisci il ruolo o la personalità dell'intelligenza artificiale (ad esempio, \"sei un assistente utile\", sei un pirata. \"). ",
27 | "systemPromptPlaceholder": "Immettere il prompt del sistema qui ...",
28 | "startConversation": "Inizia la conversazione",
29 | "starting": "Di partenza...",
30 | "stopConversation": "Fermare la conversazione",
31 | "chat": "Chiacchierata",
32 | "typing": "Ai sta digitando ...",
33 | "typeMessagePlaceholder": "Digita il tuo messaggio qui ...",
34 | "sendMessage": "Invia messaggio",
35 | "you": "Voi",
36 | "ai": "AI",
37 | "youAvatar": "Voi",
38 | "aiAvatar": "AI",
39 | "regenerate": "Rigenera risposta",
40 | "copy": "Copia risposta",
41 | "copied": "Copiato!",
42 | "expand": "Espandi risposta",
43 | "collapse": "Comprimi risposta"
44 | }
--------------------------------------------------------------------------------
/src/data/locales/it/PageHeader.json:
--------------------------------------------------------------------------------
1 | {
2 | "commandSearchPlaceholder": "Digita un comando o cerca...",
3 | "noResultsFound": "Nessun risultato trovato",
4 | "english": "Inglese ({{locale}})",
5 | "danish": "danese ({{locale}})",
6 | "german": "tedesco ({{locale}})",
7 | "spanish": "spagnolo ({{locale}})",
8 | "french": "francese ({{locale}})",
9 | "italian": "Italiano ({{locale}})",
10 | "japanese": "giapponese ({{locale}})",
11 | "korean": "coreano ({{locale}})",
12 | "portuguese": "portoghese ({{locale}})",
13 | "thai": "tailandese ({{locale}})",
14 | "taiwanese": "Taiwanese ({{locale}})",
15 | "index": "Dashboard di inferenza",
16 | "benchmark": "Modelli di riferimento",
17 | "perplexity": "Calcola la perplessità",
18 | "back": "Torna indietro",
19 | "llmFunctionality": "Funzionalità LLM",
20 | "about": "Di"
21 | }
--------------------------------------------------------------------------------
/src/data/locales/it/Perplexity.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Strumento per la perplessità di Electron BitNet",
3 | "description": "Calcola la perplessità (la confidenza del modello nel prevedere la parola successiva) del tuo modello BitNet di seguito.",
4 | "commandOptions": "Opzioni di comando",
5 | "prompt": "Richiesta",
6 | "promptInfo": "Questo è il testo iniziale che il modello utilizzerà per iniziare a generare l'output.",
7 | "model": "Modello",
8 | "modelInfo": "Inserisci il percorso del \"quantizzato\"{{fileFormat}}'file modello generato utilizzando Microsoft'{{script}}' copione.",
9 | "threads": "Discussioni",
10 | "threadsInfo": "Il numero di thread da utilizzare per eseguire il calcolo della perplessità.",
11 | "contextSize": "Dimensione del contesto",
12 | "contextSizeInfo": "La dimensione del contesto del prompt determina quanta parte del prompt viene considerata durante il calcolo della perplessità.",
13 | "pplStride": "Passo di perplessità",
14 | "pplStrideInfo": "Passo per il calcolo delle perplessità.",
15 | "pplOutputType": "Tipo di output perplessità",
16 | "pplOutputTypeInfo": "Tipo di output per il calcolo della perplessità.",
17 | "runPerplexity": "Calcola la perplessità",
18 | "stopPerplexity": "Interrompere il calcolo",
19 | "log": "Risultato della perplessità calcolata",
20 | "error": "Errore",
21 | "license": "{{license}} Codice concesso in licenza",
22 | "builtWith": "costruito con",
23 | "insufficientPromptTokens": "Token prompt insufficienti, per procedere è necessaria il doppio della dimensione del contesto in token."
24 | }
--------------------------------------------------------------------------------
/src/data/locales/ja/Benchmark.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Electron BitNet ベンチマーク ツール",
3 | "description": "以下で Microsoft BitNet 1 ビット LLM モデルのベンチマークを行います。テスト変数が大きいほど実行に時間がかかることに留意してください。",
4 | "commandOptions": "コマンドオプション",
5 | "numberOfTokens": "トークンの数",
6 | "numberOfTokensInfo": "ベンチマーク中に生成するトークンの数を指定します。",
7 | "model": "モデル",
8 | "modelInfo": "量子化された ' へのパスを入力します。{{fileFormat}}' Microsoft の BitNet を使用して生成されたモデル ファイル '{{script}}' スクリプト。",
9 | "threads": "スレッド",
10 | "threadsInfo": "ベンチマークに使用するスレッドの数を指定します。",
11 | "promptLength": "プロンプトの長さ",
12 | "promptLengthInfo": "テキストを生成するプロンプトの長さを指定します。",
13 | "runBenchmark": "ベンチマークの実行",
14 | "stopBenchmark": "停止ベンチマーク",
15 | "log": "ベンチマークログ",
16 | "license": "以下に基づいてライセンスを取得 {{license}}",
17 | "builtWith": "で構築"
18 | }
--------------------------------------------------------------------------------
/src/data/locales/ja/InstructionModel.json:
--------------------------------------------------------------------------------
1 | {
2 | "loading": "読み込み...",
3 | "delete": "消去",
4 | "title": "次のモードの命令",
5 | "description": "1ビットLLMとの会話に参加します。",
6 | "commandOptions": "コマンドオプション",
7 | "numberOfTokens": "ターンあたりのマックストークン",
8 | "model": "モデル",
9 | "threads": "スレッド",
10 | "contextSize": "コンテキストサイズ",
11 | "temperature": "温度",
12 | "prompt": "プロンプト",
13 | "numberOfTokensInfo": "AIが各応答で生成するトークン(単語)の最大数。",
14 | "modelInfo": "Quantizedを選択します」{{fileFormat}}「Microsoftのビットネットを使用して生成されたモデルファイル」{{script}}'スクリプト。",
15 | "threadsInfo": "CPUで利用可能なスレッドの数に制限された、推論の実行に使用するスレッドの数。",
16 | "contextSizeInfo": "迅速なコンテキストのサイズは、推論中に会話履歴のどれだけが考慮されるかを決定します。",
17 | "temperatureInfo": "生成されたテキストのランダム性を制御します。",
18 | "promptInfo": "これは、モデルが出力の生成を開始するために使用する初期テキストです。",
19 | "runInference": "推論を実行します",
20 | "stopInference": "推論を停止します",
21 | "response": "応答",
22 | "license": "{{license}} ライセンスコード",
23 | "builtWith": "で構築されています",
24 | "noModelSelected": "選択されたモデルはありません",
25 | "systemPrompt": "システムプロンプト",
26 | "systemPromptInfo": "AIの役割または人格を定義します(たとえば、「あなたは親切なアシスタントです」、「あなたは海賊です。」)。",
27 | "systemPromptPlaceholder": "ここにシステムプロンプトを入力してください...",
28 | "startConversation": "会話を始めます",
29 | "starting": "起動...",
30 | "stopConversation": "会話をやめなさい",
31 | "chat": "チャット",
32 | "typing": "AIはタイピングしています...",
33 | "typeMessagePlaceholder": "ここにメッセージを入力してください...",
34 | "sendMessage": "メッセージを送信します",
35 | "you": "あなた",
36 | "ai": "ai",
37 | "youAvatar": "あなた",
38 | "aiAvatar": "ai",
39 | "regenerate": "レスポンスを再生成",
40 | "copy": "レスポンスをコピー",
41 | "copied": "コピーしました!",
42 | "expand": "レスポンスを展開",
43 | "collapse": "レスポンスを折りたたむ"
44 | }
--------------------------------------------------------------------------------
/src/data/locales/ja/PageHeader.json:
--------------------------------------------------------------------------------
1 | {
2 | "commandSearchPlaceholder": "コマンドを入力するか検索...",
3 | "noResultsFound": "結果が見つかりませんでした。",
4 | "english": "英語 ({{locale}})",
5 | "danish": "デンマーク語 ({{locale}})",
6 | "german": "ドイツ語 ({{locale}})",
7 | "spanish": "スペイン語 ({{locale}})",
8 | "french": "フランス語 ({{locale}})",
9 | "italian": "イタリア語 ({{locale}})",
10 | "japanese": "日本語 ({{locale}})",
11 | "korean": "韓国語 ({{locale}})",
12 | "portuguese": "ポルトガル語 ({{locale}})",
13 | "thai": "タイ語 ({{locale}})",
14 | "taiwanese": "台湾語({{locale}})",
15 | "index": "推論ダッシュボード",
16 | "benchmark": "ベンチマークモデル",
17 | "perplexity": "複雑さを計算する",
18 | "back": "戻る",
19 | "llmFunctionality": "LLM の機能",
20 | "about": "について"
21 | }
--------------------------------------------------------------------------------
/src/data/locales/ja/Perplexity.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Electron BitNet パープレキシティ ツール",
3 | "description": "以下の BitNet モデルのパープレキシティ (次の単語を予測するモデルの信頼度) を計算します。",
4 | "commandOptions": "コマンドオプション",
5 | "prompt": "プロンプト",
6 | "promptInfo": "これは、モデルが出力の生成を開始するために使用する最初のテキストです。",
7 | "model": "モデル",
8 | "modelInfo": "量子化された ' へのパスを入力します。{{fileFormat}}' Microsoft の ' を使用して生成されたモデル ファイル{{script}}' スクリプト。",
9 | "threads": "スレッド",
10 | "threadsInfo": "複雑さの計算を実行するために使用するスレッドの数。",
11 | "contextSize": "コンテキストサイズ",
12 | "contextSizeInfo": "プロンプト コンテキストのサイズによって、パープレキシティの計算中にどの程度のプロンプトが考慮されるかが決まります。",
13 | "pplStride": "パープレキシティ ストライド",
14 | "pplStrideInfo": "パープレキシティ計算のストライド。",
15 | "pplOutputType": "パープレキシティ出力タイプ",
16 | "pplOutputTypeInfo": "複雑度計算の出力タイプ。",
17 | "runPerplexity": "複雑さを計算する",
18 | "stopPerplexity": "計算を停止する",
19 | "log": "計算された複雑さの結果",
20 | "error": "エラー",
21 | "license": "{{license}} ライセンスコード",
22 | "builtWith": "で構築された",
23 | "insufficientPromptTokens": "プロンプト トークンが不十分です。続行するには、トークンのコンテキスト サイズの 2 倍が必要です。"
24 | }
--------------------------------------------------------------------------------
/src/data/locales/ko/Benchmark.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Electron BitNet 벤치마킹 도구",
3 | "description": "아래의 Microsoft BitNet 1비트 LLM 모델을 벤치마킹하세요. 테스트 변수가 클수록 실행하는 데 시간이 더 오래 걸린다는 점을 염두에 두세요.",
4 | "commandOptions": "명령 옵션",
5 | "numberOfTokens": "토큰 수",
6 | "numberOfTokensInfo": "벤치마크 중에 생성할 토큰 수를 지정합니다.",
7 | "model": "모델",
8 | "modelInfo": "양자화된 '' 경로를 입력하세요.{{fileFormat}}' Microsoft의 BitNet을 사용하여 생성된 모델 파일 '{{script}}' 스크립트.",
9 | "threads": "스레드",
10 | "threadsInfo": "벤치마크에 사용할 스레드 수를 지정합니다.",
11 | "promptLength": "프롬프트 길이",
12 | "promptLengthInfo": "텍스트를 생성할 프롬프트의 길이를 지정합니다.",
13 | "runBenchmark": "벤치마크 실행",
14 | "stopBenchmark": "벤치마크 중지",
15 | "log": "벤치마크 로그",
16 | "license": "아래 라이선스 {{license}}",
17 | "builtWith": "다음으로 제작됨"
18 | }
--------------------------------------------------------------------------------
/src/data/locales/ko/InstructionModel.json:
--------------------------------------------------------------------------------
1 | {
2 | "loading": "로딩 ...",
3 | "delete": "삭제",
4 | "title": "다음 모드 다음 모드",
5 | "description": "1 비트 LLM과 대화하십시오. ",
6 | "commandOptions": "명령 옵션",
7 | "numberOfTokens": "턴당 최대 토큰",
8 | "model": "모델",
9 | "threads": "스레드",
10 | "contextSize": "컨텍스트 크기",
11 | "temperature": "온도",
12 | "prompt": "즉각적인",
13 | "numberOfTokensInfo": "AI가 각 응답에서 생성되는 최대 토큰 (단어) 수.",
14 | "modelInfo": "양자화 된 선택 ''{{fileFormat}}'Microsoft's Bitnet을 사용하여 생성 된 모델 파일 '{{script}}'대본.",
15 | "threadsInfo": "추론 실행에 사용할 스레드 수는 CPU에서 사용 가능한 스레드 수로 제한됩니다.",
16 | "contextSizeInfo": "신속한 컨텍스트의 크기는 추론 중에 대화 기록의 양을 결정합니다.",
17 | "temperatureInfo": "생성 된 텍스트의 무작위성을 제어합니다. ",
18 | "promptInfo": "이것은 모델이 출력 생성을 시작하는 데 사용할 초기 텍스트입니다.",
19 | "runInference": "추론을 실행하십시오",
20 | "stopInference": "추론을 중지하십시오",
21 | "response": "응답",
22 | "license": "{{license}} 라이센스 코드",
23 | "builtWith": "구축",
24 | "noModelSelected": "선택된 모델이 없습니다",
25 | "systemPrompt": "시스템 프롬프트",
26 |     "systemPromptInfo": "AI의 역할이나 성격을 정의하십시오(예: '당신은 도움이 되는 조수입니다.', '당신은 해적입니다.'). 이는 시작 시 한 번 전송됩니다.",
27 | "systemPromptPlaceholder": "여기에 시스템 프롬프트를 입력하십시오 ...",
28 | "startConversation": "대화를 시작하십시오",
29 | "starting": "시작 ...",
30 | "stopConversation": "대화를 중지하십시오",
31 | "chat": "채팅",
32 | "typing": "AI가 입력하고 있습니다 ...",
33 | "typeMessagePlaceholder": "여기에 메시지를 입력하십시오 ...",
34 | "sendMessage": "메시지 보내기",
35 | "you": "너",
36 |     "ai": "AI",
37 |     "youAvatar": "너",
38 |     "aiAvatar": "AI",
39 | "regenerate": "응답 재생성",
40 | "copy": "응답 복사",
41 | "copied": "복사됨!",
42 | "expand": "응답 확장",
43 | "collapse": "응답 축소"
44 | }
--------------------------------------------------------------------------------
/src/data/locales/ko/PageHeader.json:
--------------------------------------------------------------------------------
1 | {
2 | "commandSearchPlaceholder": "명령을 입력하거나 검색하세요...",
3 | "noResultsFound": "검색된 결과가 없습니다.",
4 | "english": "영어 ({{locale}})",
5 | "danish": "덴마크어({{locale}})",
6 | "german": "독일어({{locale}})",
7 | "spanish": "스페인어({{locale}})",
8 | "french": "프랑스어({{locale}})",
9 | "italian": "이탈리아어({{locale}})",
10 | "japanese": "일본어({{locale}})",
11 |     "korean": "한국어 ({{locale}})",
12 | "portuguese": "포르투갈어({{locale}})",
13 | "thai": "태국어({{locale}})",
14 | "taiwanese": "대만어({{locale}})",
15 | "index": "추론 대시보드",
16 | "benchmark": "벤치마크 모델",
17 |     "perplexity": "퍼플렉서티 계산",
18 | "back": "돌아가기",
19 | "llmFunctionality": "LLM 기능",
20 |     "about": "정보"
21 | }
--------------------------------------------------------------------------------
/src/data/locales/ko/Perplexity.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Electron BitNet Perplexity 도구",
3 |     "description": "아래 BitNet 모델의 퍼플렉서티(다음 단어 예측에 대한 모델의 신뢰도)를 계산하세요.",
4 |     "commandOptions": "명령 옵션",
5 |     "prompt": "프롬프트",
6 | "promptInfo": "이는 모델이 출력 생성을 시작하는 데 사용할 초기 텍스트입니다.",
7 | "model": "모델",
8 |     "modelInfo": "Microsoft의 '{{script}}' 스크립트를 사용하여 생성된 양자화된 '{{fileFormat}}' 모델 파일의 경로를 입력하세요.",
9 | "threads": "스레드",
10 |     "threadsInfo": "퍼플렉서티 계산을 실행하는 데 사용할 스레드 수입니다.",
11 |     "contextSize": "컨텍스트 크기",
12 |     "contextSizeInfo": "프롬프트 컨텍스트의 크기에 따라 퍼플렉서티 계산 중에 고려되는 프롬프트의 양이 결정됩니다.",
13 |     "pplStride": "퍼플렉서티 스트라이드",
14 |     "pplStrideInfo": "퍼플렉서티 계산을 위한 스트라이드입니다.",
15 |     "pplOutputType": "퍼플렉서티 출력 유형",
16 |     "pplOutputTypeInfo": "퍼플렉서티 계산을 위한 출력 유형입니다.",
17 |     "runPerplexity": "퍼플렉서티 계산",
18 |     "stopPerplexity": "계산 중지",
19 |     "log": "계산된 퍼플렉서티 결과",
20 | "error": "오류",
21 | "license": "{{license}} 라이센스 코드",
22 | "builtWith": "으로 구축",
23 |     "insufficientPromptTokens": "프롬프트 토큰이 부족합니다. 계속하려면 컨텍스트 크기의 2배에 해당하는 토큰이 필요합니다."
24 | }
--------------------------------------------------------------------------------
/src/data/locales/pt/Benchmark.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Ferramenta de benchmarking Electron BitNet",
3 | "description": "Compare seu modelo LLM de 1 bit Microsoft BitNet abaixo, tendo em mente que variáveis de teste maiores demoram mais para serem executadas.",
4 | "commandOptions": "Opções de comando",
5 | "numberOfTokens": "Número de tokens",
6 | "numberOfTokensInfo": "Especifique o número de tokens a serem gerados durante o benchmark.",
7 | "model": "Modelo",
8 |     "modelInfo": "Insira o caminho para o arquivo de modelo quantizado '{{fileFormat}}' gerado com o script '{{script}}' do BitNet da Microsoft.",
9 |     "threads": "Threads",
10 |     "threadsInfo": "Especifique o número de threads a serem usados para o benchmark.",
11 |     "promptLength": "Comprimento do prompt",
12 |     "promptLengthInfo": "Especifique o comprimento do prompt para gerar o texto.",
13 |     "runBenchmark": "Executar benchmark",
14 |     "stopBenchmark": "Parar benchmark",
15 |     "log": "Log do benchmark",
16 | "license": "Licenciado sob {{license}}",
17 | "builtWith": "Construído com"
18 | }
--------------------------------------------------------------------------------
/src/data/locales/pt/InstructionModel.json:
--------------------------------------------------------------------------------
1 | {
2 | "loading": "Carregando...",
3 | "delete": "Excluir",
4 |     "title": "Modo de Seguir Instruções",
5 |     "description": "Participe de uma conversa com o LLM de 1 bit. Forneça um prompt de sistema e, em seguida, converse de forma interativa.",
6 | "commandOptions": "Opções de comando",
7 | "numberOfTokens": "Tokens máximos por turno",
8 | "model": "Modelo",
9 |     "threads": "Threads",
10 | "contextSize": "Tamanho do contexto",
11 | "temperature": "Temperatura",
12 |     "prompt": "Prompt",
13 |     "numberOfTokensInfo": "O número máximo de tokens (palavras) que a IA gerará em cada resposta.",
14 |     "modelInfo": "Selecione o arquivo de modelo quantizado '{{fileFormat}}' gerado com o script '{{script}}' do BitNet da Microsoft.",
15 |     "threadsInfo": "O número de threads a serem usados para executar a inferência, limitado ao número de threads disponíveis na CPU.",
16 | "contextSizeInfo": "O tamanho do contexto imediato determina quanto do histórico de conversas é considerado durante a inferência.",
17 |     "temperatureInfo": "Controla a aleatoriedade do texto gerado. Valores mais baixos (ex.: 0,2) o tornam mais focado; valores mais altos (ex.: 1,0), mais criativo.",
18 | "promptInfo": "Este é o texto inicial que o modelo usará para começar a gerar a saída.",
19 | "runInference": "Executar inferência",
20 |     "stopInference": "Parar inferência",
21 | "response": "Resposta",
22 | "license": "{{license}} Código licenciado",
23 | "builtWith": "construído com",
24 | "noModelSelected": "Nenhum modelo selecionado",
25 | "systemPrompt": "Prompt de sistema",
26 |     "systemPromptInfo": "Defina o papel ou a personalidade da IA (por exemplo, 'Você é um assistente útil.', 'Você é um pirata.'). Isso é enviado uma única vez no início.",
27 |     "systemPromptPlaceholder": "Insira o prompt do sistema aqui...",
28 | "startConversation": "Inicie a conversa",
29 |     "starting": "Começando...",
30 |     "stopConversation": "Parar conversa",
31 |     "chat": "Bate-papo",
32 |     "typing": "A IA está digitando...",
33 |     "typeMessagePlaceholder": "Digite sua mensagem aqui...",
34 |     "sendMessage": "Enviar mensagem",
35 |     "you": "Você",
36 |     "ai": "IA",
37 |     "youAvatar": "Você",
38 |     "aiAvatar": "IA",
39 | "regenerate": "Regenerar Resposta",
40 | "copy": "Copiar Resposta",
41 | "copied": "Copiado!",
42 | "expand": "Expandir Resposta",
43 | "collapse": "Recolher Resposta"
44 | }
--------------------------------------------------------------------------------
/src/data/locales/pt/PageHeader.json:
--------------------------------------------------------------------------------
1 | {
2 | "commandSearchPlaceholder": "Digite um comando ou pesquise...",
3 | "noResultsFound": "Nenhum resultado encontrado.",
4 | "english": "Inglês ({{locale}})",
5 | "danish": "Dinamarquês ({{locale}})",
6 | "german": "Alemão ({{locale}})",
7 | "spanish": "Espanhol ({{locale}})",
8 | "french": "Francês ({{locale}})",
9 | "italian": "Italiano ({{locale}})",
10 | "japanese": "Japonês ({{locale}})",
11 | "korean": "Coreano ({{locale}})",
12 | "portuguese": "Português ({{locale}})",
13 | "thai": "Tailandês ({{locale}})",
14 | "taiwanese": "Taiwanês ({{locale}})",
15 | "index": "Painel de inferência",
16 | "benchmark": "Modelos de referência",
17 | "perplexity": "Calcular Perplexidade",
18 |     "back": "Voltar",
19 | "llmFunctionality": "Funcionalidade LLM",
20 | "about": "Sobre"
21 | }
--------------------------------------------------------------------------------
/src/data/locales/pt/Perplexity.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Ferramenta de perplexidade Electron BitNet",
3 | "description": "Calcule a perplexidade (a confiança do modelo em prever a próxima palavra) do seu modelo BitNet abaixo.",
4 | "commandOptions": "Opções de comando",
5 |     "prompt": "Prompt",
6 | "promptInfo": "Este é o texto inicial que o modelo usará para começar a gerar a saída.",
7 | "model": "Modelo",
8 |     "modelInfo": "Insira o caminho para o arquivo de modelo quantizado '{{fileFormat}}' gerado com o script '{{script}}' da Microsoft.",
9 |     "threads": "Threads",
10 | "threadsInfo": "O número de threads a serem usados para executar o cálculo de perplexidade.",
11 | "contextSize": "Tamanho do contexto",
12 | "contextSizeInfo": "O tamanho do contexto do prompt determina quanto do prompt é considerado durante o cálculo da perplexidade.",
13 | "pplStride": "Passo de Perplexidade",
14 | "pplStrideInfo": "Passo para cálculo de perplexidade.",
15 | "pplOutputType": "Tipo de saída de perplexidade",
16 | "pplOutputTypeInfo": "Tipo de saída para cálculo de perplexidade.",
17 | "runPerplexity": "Calcular Perplexidade",
18 | "stopPerplexity": "Parar cálculo",
19 | "log": "Resultado de perplexidade calculada",
20 | "error": "Erro",
21 | "license": "{{license}} Código licenciado",
22 | "builtWith": "construído com",
23 | "insufficientPromptTokens": "Tokens de prompt insuficientes, é necessário o dobro do tamanho do contexto em tokens para prosseguir."
24 | }
--------------------------------------------------------------------------------
/src/data/locales/th/Benchmark.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "เครื่องมือเปรียบเทียบอิเล็กตรอน BitNet",
3 | "description": "เปรียบเทียบโมเดล Microsoft BitNet 1-bit LLM ของคุณด้านล่างนี้ โดยคำนึงว่าตัวแปรทดสอบที่ใหญ่กว่าจะใช้เวลารันนานกว่า",
4 | "commandOptions": "ตัวเลือกคำสั่ง",
5 | "numberOfTokens": "จำนวนโทเค็น",
6 | "numberOfTokensInfo": "ระบุจำนวนโทเค็นที่จะสร้างระหว่างการวัดประสิทธิภาพ",
7 | "model": "แบบอย่าง",
8 |     "modelInfo": "ป้อนเส้นทางไปยังไฟล์โมเดล quantized '{{fileFormat}}' ที่สร้างโดยใช้สคริปต์ '{{script}}' ของ BitNet ของ Microsoft",
9 |     "threads": "เธรด",
10 | "threadsInfo": "ระบุจำนวนเธรดที่จะใช้สำหรับการวัดประสิทธิภาพ",
11 | "promptLength": "ความยาวพร้อมท์",
12 | "promptLengthInfo": "ระบุความยาวของพรอมต์เพื่อสร้างข้อความ",
13 | "runBenchmark": "เรียกใช้เกณฑ์มาตรฐาน",
14 | "stopBenchmark": "หยุดเกณฑ์มาตรฐาน",
15 | "log": "บันทึกมาตรฐาน",
16 | "license": "ได้รับอนุญาตภายใต้ {{license}}",
17 | "builtWith": "สร้างด้วย"
18 | }
--------------------------------------------------------------------------------
/src/data/locales/th/InstructionModel.json:
--------------------------------------------------------------------------------
1 | {
2 | "loading": "กำลังโหลด ...",
3 | "delete": "ลบ",
4 |     "title": "โหมดทำตามคำสั่ง",
5 |     "description": "มีส่วนร่วมในการสนทนากับ LLM 1 บิต ระบุพรอมต์ระบบ แล้วแชทแบบโต้ตอบ",
6 | "commandOptions": "ตัวเลือกคำสั่ง",
7 | "numberOfTokens": "โทเค็นสูงสุดต่อเทิร์น",
8 | "model": "แบบอย่าง",
9 |     "threads": "เธรด",
10 | "contextSize": "ขนาดบริบท",
11 | "temperature": "อุณหภูมิ",
12 | "prompt": "แจ้ง",
13 | "numberOfTokensInfo": "จำนวนโทเค็นสูงสุด (คำ) AI จะสร้างในการตอบสนองแต่ละครั้ง",
14 |     "modelInfo": "เลือกไฟล์โมเดล quantized '{{fileFormat}}' ที่สร้างขึ้นโดยใช้สคริปต์ '{{script}}' ของ BitNet ของ Microsoft",
15 | "threadsInfo": "จำนวนเธรดที่จะใช้สำหรับการเรียกใช้การอนุมาน จำกัด จำนวนเธรดที่มีอยู่ใน CPU",
16 | "contextSizeInfo": "ขนาดของบริบทที่รวดเร็วกำหนดจำนวนประวัติการสนทนาที่ได้รับการพิจารณาในระหว่างการอนุมาน",
17 |     "temperatureInfo": "ควบคุมการสุ่มของข้อความที่สร้างขึ้น ค่าที่ต่ำกว่า (เช่น 0.2) จะให้ผลลัพธ์ที่เจาะจงขึ้น ค่าที่สูงกว่า (เช่น 1.0) จะให้ผลลัพธ์ที่สร้างสรรค์ขึ้น",
18 | "promptInfo": "นี่คือข้อความเริ่มต้นที่โมเดลจะใช้เพื่อเริ่มสร้างเอาต์พุต",
19 |     "runInference": "เรียกใช้การอนุมาน",
20 | "stopInference": "หยุดการอนุมาน",
21 | "response": "การตอบสนอง",
22 | "license": "{{license}} รหัสที่ได้รับอนุญาต",
23 | "builtWith": "สร้างขึ้นด้วย",
24 | "noModelSelected": "ไม่มีการเลือกแบบจำลอง",
25 | "systemPrompt": "ระบบแจ้ง",
26 |     "systemPromptInfo": "กำหนดบทบาทหรือบุคลิกภาพของ AI (เช่น 'คุณเป็นผู้ช่วยที่เป็นประโยชน์', 'คุณเป็นโจรสลัด') ข้อความนี้จะถูกส่งครั้งเดียวเมื่อเริ่มต้น",
27 | "systemPromptPlaceholder": "ป้อนพรอมต์ระบบที่นี่ ...",
28 | "startConversation": "เริ่มการสนทนา",
29 | "starting": "เริ่มต้น ...",
30 | "stopConversation": "หยุดการสนทนา",
31 | "chat": "แชท",
32 | "typing": "AI กำลังพิมพ์ ...",
33 | "typeMessagePlaceholder": "พิมพ์ข้อความของคุณที่นี่ ...",
34 | "sendMessage": "ส่งข้อความ",
35 | "you": "คุณ",
36 | "ai": "AI",
37 | "youAvatar": "คุณ",
38 | "aiAvatar": "AI",
39 | "regenerate": "สร้างการตอบกลับใหม่",
40 | "copy": "คัดลอกการตอบกลับ",
41 | "copied": "คัดลอกแล้ว!",
42 | "expand": "ขยายการตอบกลับ",
43 | "collapse": "ยุบการตอบกลับ"
44 | }
--------------------------------------------------------------------------------
/src/data/locales/th/PageHeader.json:
--------------------------------------------------------------------------------
1 | {
2 | "commandSearchPlaceholder": "พิมพ์คำสั่งหรือค้นหา...",
3 | "noResultsFound": "ไม่พบผลลัพธ์",
4 |     "english": "ภาษาอังกฤษ ({{locale}})",
5 |     "danish": "ภาษาเดนมาร์ก ({{locale}})",
6 |     "german": "เยอรมัน ({{locale}})",
7 |     "spanish": "สเปน ({{locale}})",
8 |     "french": "ภาษาฝรั่งเศส ({{locale}})",
9 |     "italian": "ภาษาอิตาลี ({{locale}})",
10 |     "japanese": "ญี่ปุ่น ({{locale}})",
11 |     "korean": "เกาหลี ({{locale}})",
12 |     "portuguese": "โปรตุเกส ({{locale}})",
13 |     "thai": "ภาษาไทย ({{locale}})",
14 |     "taiwanese": "ภาษาไต้หวัน ({{locale}})",
15 | "index": "แดชบอร์ดการอนุมาน",
16 | "benchmark": "โมเดลมาตรฐาน",
17 | "perplexity": "คำนวณความฉงนสนเท่ห์",
18 | "back": "กลับไป",
19 | "llmFunctionality": "ฟังก์ชั่น LLM",
20 | "about": "เกี่ยวกับ"
21 | }
--------------------------------------------------------------------------------
/src/data/locales/th/Perplexity.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "เครื่องมือความฉงนสนเท่ห์ของอิเล็กตรอน BitNet",
3 | "description": "คำนวณความฉงนสนเท่ห์ (ความมั่นใจของโมเดลในการทำนายคำถัดไป) ของโมเดล BitNet ของคุณด้านล่าง",
4 | "commandOptions": "ตัวเลือกคำสั่ง",
5 | "prompt": "พรอมต์",
6 | "promptInfo": "นี่คือข้อความเริ่มต้นที่โมเดลจะใช้เพื่อเริ่มสร้างเอาต์พุต",
7 | "model": "แบบอย่าง",
8 | "modelInfo": "ป้อนเส้นทางไปยัง quantized '{{fileFormat}}' ไฟล์โมเดลที่สร้างโดยใช้ของ Microsoft '{{script}}' สคริปต์",
9 | "threads": "กระทู้",
10 |     "threadsInfo": "จำนวนเธรดที่จะใช้สำหรับการคำนวณความฉงนสนเท่ห์",
11 |     "contextSize": "ขนาดบริบท",
12 |     "contextSizeInfo": "ขนาดของบริบทพร้อมท์จะกำหนดว่าพร้อมท์ส่วนใดบ้างที่ถูกนำมาพิจารณาระหว่างการคำนวณความฉงนสนเท่ห์",
13 |     "pplStride": "สไตรด์ของความฉงนสนเท่ห์",
14 |     "pplStrideInfo": "สไตรด์ (stride) สำหรับการคำนวณความฉงนสนเท่ห์",
15 | "pplOutputType": "ประเภทเอาต์พุตความฉงนสนเท่ห์",
16 | "pplOutputTypeInfo": "ประเภทเอาต์พุตสำหรับการคำนวณความฉงนสนเท่ห์",
17 | "runPerplexity": "คำนวณความฉงนสนเท่ห์",
18 | "stopPerplexity": "หยุดการคำนวณ",
19 | "log": "ผลลัพธ์ความฉงนสนเท่ห์ที่คำนวณได้",
20 | "error": "ข้อผิดพลาด",
21 | "license": "{{license}} รหัสใบอนุญาต",
22 | "builtWith": "สร้างด้วย",
23 | "insufficientPromptTokens": "โทเค็นพร้อมท์ไม่เพียงพอ ต้องใช้ขนาดบริบทเป็นสองเท่าในโทเค็นเพื่อดำเนินการต่อ"
24 | }
--------------------------------------------------------------------------------
/src/data/locales/tw/Benchmark.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Electron BitNet 基準測試工具",
3 | "description": "在下面對您的 Microsoft BitNet 1 位 LLM 模型進行基準測試,請記住,較大的測試變數需要更長的時間來運行。",
4 | "commandOptions": "命令選項",
5 |     "numberOfTokens": "詞元數量",
6 |     "numberOfTokensInfo": "指定基準測試期間要生成的詞元數量。",
7 | "model": "模型",
8 | "modelInfo": "輸入使用 Microsoft 的 BitNet '{{script}}' 腳本生成的量化 '{{fileFormat}}' 模型文件的路徑。",
9 | "threads": "線程",
10 | "threadsInfo": "指定要用於基準測試的線程數。",
11 | "promptLength": "提示長度",
12 | "promptLengthInfo": "指定要從中生成文本的提示的長度。",
13 | "runBenchmark": "運行基準測試",
14 | "stopBenchmark": "停止基準測試",
15 | "log": "基準測試日誌",
16 | "license": "根據 {{license}} 獲得許可",
17 |     "builtWith": "構建於"
18 | }
--------------------------------------------------------------------------------
/src/data/locales/tw/InstructionModel.json:
--------------------------------------------------------------------------------
1 | {
2 | "loading": "載入中...",
3 | "delete": "刪除",
4 | "title": "指令跟隨模式",
5 | "description": "與 1 位 LLM 進行對話。提供系統提示,然後以交互方式聊天。",
6 | "commandOptions": "命令選項",
7 |     "numberOfTokens": "每回合最大詞元數",
8 | "model": "模型",
9 | "threads": "線程",
10 | "contextSize": "上下文大小",
11 | "temperature": "溫度",
12 | "prompt": "提示",
13 |     "numberOfTokensInfo": "AI 將在每個回應中生成的最大詞元(單詞)數量。",
14 | "modelInfo": "選擇使用 Microsoft 的 BitNet '{{script}}' 腳本生成的量化 '{{fileFormat}}' 模型檔。",
15 | "threadsInfo": "用於運行推理的線程數,限制為 CPU 上可用的線程數。",
16 | "contextSizeInfo": "提示上下文的大小決定了在推理過程中考慮了多少對話歷史記錄。",
17 | "temperatureInfo": "控制生成文本的隨機性。較低的值(例如 0.2)使其更集中,較高的值(例如 1.0)使其更具創意。",
18 | "promptInfo": "這是模型將用於開始生成輸出的初始文本。",
19 | "runInference": "運行推理",
20 |     "stopInference": "停止推理",
21 |     "response": "回應",
22 |     "license": "{{license}} 許可代碼",
23 |     "builtWith": "構建於",
24 | "noModelSelected": "未選擇模型",
25 | "systemPrompt": "系統提示符",
26 | "systemPromptInfo": "定義 AI 的角色或個性(例如,'You are a helpful assistant.', 'You are a pirate.')。這在開始時發送一次。",
27 | "systemPromptPlaceholder": "在此處輸入系統提示符...",
28 | "startConversation": "開始對話",
29 | "starting": "正在啟動...",
30 | "stopConversation": "停止對話",
31 | "chat": "聊天",
32 | "typing": "AI 正在輸入...",
33 | "typeMessagePlaceholder": "在此處輸入您的資訊...",
34 | "sendMessage": "發送消息",
35 | "you": "您",
36 | "ai": "AI",
37 | "youAvatar": "你",
38 | "aiAvatar": "AI",
39 | "regenerate": "重新生成回應",
40 | "copy": "複製回應",
41 | "copied": "已複製!",
42 | "expand": "展開回應",
43 | "collapse": "摺疊回應"
44 | }
--------------------------------------------------------------------------------
/src/data/locales/tw/PageHeader.json:
--------------------------------------------------------------------------------
1 | {
2 | "commandSearchPlaceholder": "鍵入命令或搜索...",
3 | "noResultsFound": "未找到結果。",
4 | "english": "英語 ({{locale}})",
5 |     "danish": "丹麥語 ({{locale}})",
6 |     "german": "德語 ({{locale}})",
7 |     "spanish": "西班牙語 ({{locale}})",
8 |     "french": "法語 ({{locale}})",
9 |     "italian": "意大利語 ({{locale}})",
10 |     "japanese": "日語 ({{locale}})",
11 |     "korean": "韓語 ({{locale}})",
12 |     "portuguese": "葡萄牙語 ({{locale}})",
13 |     "thai": "泰語 ({{locale}})",
14 |     "taiwanese": "臺灣語 ({{locale}})",
15 | "index": "推理控制面板",
16 | "benchmark": "基準測試模型",
17 | "perplexity": "計算困惑度",
18 | "back": "返回",
19 | "llmFunctionality": "LLM 功能",
20 | "about": "關於"
21 | }
--------------------------------------------------------------------------------
/src/data/locales/tw/Perplexity.json:
--------------------------------------------------------------------------------
1 | {
2 |     "title": "Electron BitNet 困惑度工具",
3 |     "description": "計算下面的 BitNet 模型的困惑度(模型預測下一個單詞的置信度)。",
4 |     "commandOptions": "命令選項",
5 |     "prompt": "提示",
6 | "promptInfo": "這是模型將用於開始生成輸出的初始文本。",
7 | "model": "模型",
8 | "modelInfo": "輸入使用 Microsoft 的 '{{script}}' 腳本生成的量化 '{{fileFormat}}' 模型文件的路徑。",
9 | "threads": "線程",
10 | "threadsInfo": "用於運行困惑度計算的線程數。",
11 | "contextSize": "上下文大小",
12 | "contextSizeInfo": "提示上下文的大小決定了在困惑度計算過程中考慮了多少提示。",
13 |     "pplStride": "困惑度步幅",
14 |     "pplStrideInfo": "用於困惑度計算的步幅。",
15 |     "pplOutputType": "困惑度輸出類型",
16 | "pplOutputTypeInfo": "用於困惑度計算的輸出類型。",
17 | "runPerplexity": "計算困惑度",
18 | "stopPerplexity": "停止計算",
19 | "log": "計算的困惑度結果",
20 | "error": "錯誤",
21 |     "license": "{{license}} 授權代碼",
22 |     "builtWith": "構建於",
23 |     "insufficientPromptTokens": "提示詞元不足,需要雙倍上下文大小的詞元才能繼續。"
24 | }
--------------------------------------------------------------------------------
/src/env.d.ts:
--------------------------------------------------------------------------------
1 | /// <reference path="../.astro/types.d.ts" />
2 | /// <reference types="astro/client" />
3 |
--------------------------------------------------------------------------------
/src/hooks/use-mobile.jsx:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 |
3 | const MOBILE_BREAKPOINT = 768
4 |
5 | export function useIsMobile() {
6 | const [isMobile, setIsMobile] = React.useState(undefined)
7 |
8 | React.useEffect(() => {
9 | const mql = window.matchMedia(`(max-width: ${MOBILE_BREAKPOINT - 1}px)`)
10 | const onChange = () => {
11 | setIsMobile(window.innerWidth < MOBILE_BREAKPOINT)
12 | }
13 | mql.addEventListener("change", onChange)
14 | setIsMobile(window.innerWidth < MOBILE_BREAKPOINT)
15 | return () => mql.removeEventListener("change", onChange);
16 | }, [])
17 |
18 | return !!isMobile
19 | }
20 |
--------------------------------------------------------------------------------
/src/hooks/use-mobile.tsx:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 |
3 | const MOBILE_BREAKPOINT = 768
4 |
5 | export function useIsMobile() {
6 | const [isMobile, setIsMobile] = React.useState(undefined)
7 |
8 | React.useEffect(() => {
9 | const mql = window.matchMedia(`(max-width: ${MOBILE_BREAKPOINT - 1}px)`)
10 | const onChange = () => {
11 | setIsMobile(window.innerWidth < MOBILE_BREAKPOINT)
12 | }
13 | mql.addEventListener("change", onChange)
14 | setIsMobile(window.innerWidth < MOBILE_BREAKPOINT)
15 | return () => mql.removeEventListener("change", onChange)
16 | }, [])
17 |
18 | return !!isMobile
19 | }
20 |
--------------------------------------------------------------------------------
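
Usage sketch (not part of the repo; component name and copy are illustrative): useIsMobile returns false on the first render (the state starts undefined and is coerced by the trailing !!), then tracks the 768px breakpoint via the media-query listener.

import { useIsMobile } from "@/hooks/use-mobile"

export function LayoutHint() {
  // Re-renders whenever the viewport crosses MOBILE_BREAKPOINT.
  const isMobile = useIsMobile()
  return <span>{isMobile ? "compact layout" : "desktop layout"}</span>
}
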
/src/hooks/use-toast.ts:
--------------------------------------------------------------------------------
1 | "use client"
2 |
3 | // Inspired by react-hot-toast library
4 | import * as React from "react"
5 |
6 | import type {
7 | ToastActionElement,
8 | ToastProps,
9 | } from "@/components/ui/toast"
10 |
11 | const TOAST_LIMIT = 1
12 | const TOAST_REMOVE_DELAY = 1000000
13 |
14 | type ToasterToast = ToastProps & {
15 | id: string
16 | title?: React.ReactNode
17 | description?: React.ReactNode
18 | action?: ToastActionElement
19 | }
20 |
21 | const actionTypes = {
22 | ADD_TOAST: "ADD_TOAST",
23 | UPDATE_TOAST: "UPDATE_TOAST",
24 | DISMISS_TOAST: "DISMISS_TOAST",
25 | REMOVE_TOAST: "REMOVE_TOAST",
26 | } as const
27 |
28 | let count = 0
29 |
30 | function genId() {
31 | count = (count + 1) % Number.MAX_SAFE_INTEGER
32 | return count.toString()
33 | }
34 |
35 | type ActionType = typeof actionTypes
36 |
37 | type Action =
38 | | {
39 | type: ActionType["ADD_TOAST"]
40 | toast: ToasterToast
41 | }
42 | | {
43 | type: ActionType["UPDATE_TOAST"]
44 |       toast: Partial<ToasterToast>
45 | }
46 | | {
47 | type: ActionType["DISMISS_TOAST"]
48 | toastId?: ToasterToast["id"]
49 | }
50 | | {
51 | type: ActionType["REMOVE_TOAST"]
52 | toastId?: ToasterToast["id"]
53 | }
54 |
55 | interface State {
56 | toasts: ToasterToast[]
57 | }
58 |
59 | const toastTimeouts = new Map<string, ReturnType<typeof setTimeout>>()
60 |
61 | const addToRemoveQueue = (toastId: string) => {
62 | if (toastTimeouts.has(toastId)) {
63 | return
64 | }
65 |
66 | const timeout = setTimeout(() => {
67 | toastTimeouts.delete(toastId)
68 | dispatch({
69 | type: "REMOVE_TOAST",
70 | toastId: toastId,
71 | })
72 | }, TOAST_REMOVE_DELAY)
73 |
74 | toastTimeouts.set(toastId, timeout)
75 | }
76 |
77 | export const reducer = (state: State, action: Action): State => {
78 | switch (action.type) {
79 | case "ADD_TOAST":
80 | return {
81 | ...state,
82 | toasts: [action.toast, ...state.toasts].slice(0, TOAST_LIMIT),
83 | }
84 |
85 | case "UPDATE_TOAST":
86 | return {
87 | ...state,
88 | toasts: state.toasts.map((t) =>
89 | t.id === action.toast.id ? { ...t, ...action.toast } : t
90 | ),
91 | }
92 |
93 | case "DISMISS_TOAST": {
94 | const { toastId } = action
95 |
96 | // ! Side effects ! - This could be extracted into a dismissToast() action,
97 | // but I'll keep it here for simplicity
98 | if (toastId) {
99 | addToRemoveQueue(toastId)
100 | } else {
101 | state.toasts.forEach((toast) => {
102 | addToRemoveQueue(toast.id)
103 | })
104 | }
105 |
106 | return {
107 | ...state,
108 | toasts: state.toasts.map((t) =>
109 | t.id === toastId || toastId === undefined
110 | ? {
111 | ...t,
112 | open: false,
113 | }
114 | : t
115 | ),
116 | }
117 | }
118 | case "REMOVE_TOAST":
119 | if (action.toastId === undefined) {
120 | return {
121 | ...state,
122 | toasts: [],
123 | }
124 | }
125 | return {
126 | ...state,
127 | toasts: state.toasts.filter((t) => t.id !== action.toastId),
128 | }
129 | }
130 | }
131 |
132 | const listeners: Array<(state: State) => void> = []
133 |
134 | let memoryState: State = { toasts: [] }
135 |
136 | function dispatch(action: Action) {
137 | memoryState = reducer(memoryState, action)
138 | listeners.forEach((listener) => {
139 | listener(memoryState)
140 | })
141 | }
142 |
143 | type Toast = Omit<ToasterToast, "id">
144 |
145 | function toast({ ...props }: Toast) {
146 | const id = genId()
147 |
148 | const update = (props: ToasterToast) =>
149 | dispatch({
150 | type: "UPDATE_TOAST",
151 | toast: { ...props, id },
152 | })
153 | const dismiss = () => dispatch({ type: "DISMISS_TOAST", toastId: id })
154 |
155 | dispatch({
156 | type: "ADD_TOAST",
157 | toast: {
158 | ...props,
159 | id,
160 | open: true,
161 | onOpenChange: (open) => {
162 | if (!open) dismiss()
163 | },
164 | },
165 | })
166 |
167 | return {
168 | id: id,
169 | dismiss,
170 | update,
171 | }
172 | }
173 |
174 | function useToast() {
175 |   const [state, setState] = React.useState<State>(memoryState)
176 |
177 | React.useEffect(() => {
178 | listeners.push(setState)
179 | return () => {
180 | const index = listeners.indexOf(setState)
181 | if (index > -1) {
182 | listeners.splice(index, 1)
183 | }
184 | }
185 | }, [state])
186 |
187 | return {
188 | ...state,
189 | toast,
190 | dismiss: (toastId?: string) => dispatch({ type: "DISMISS_TOAST", toastId }),
191 | }
192 | }
193 |
194 | export { useToast, toast }
195 |
--------------------------------------------------------------------------------
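
Usage sketch for the toast store above (component name is illustrative). toast() dispatches into the module-level memoryState, so it can be called outside React; useToast() subscribes a component to that state. The returned handle is used here to close the toast early, since actual removal only happens TOAST_REMOVE_DELAY after dismissal.

import { useToast } from "@/hooks/use-toast"

export function SaveSettingsButton() {
  const { toast } = useToast()

  const onSave = () => {
    // toast() returns { id, dismiss, update } for controlling this toast later.
    const handle = toast({ title: "Saved", description: "Model settings stored." })
    // Close after 3s; TOAST_LIMIT = 1 means a newer toast would replace this one anyway.
    setTimeout(handle.dismiss, 3000)
  }

  return <button onClick={onSave}>Save</button>
}
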
/src/layouts/Layout.astro:
--------------------------------------------------------------------------------
1 | ---
2 | import { ViewTransitions } from 'astro:transitions';
3 |
4 | interface Props {
5 | title: string;
6 | }
7 |
8 | const { title } = Astro.props;
9 | ---
10 | 
11 | <!doctype html>
12 | <html lang="en">
13 |   <head>
14 |     <meta charset="UTF-8" />
15 |     <ViewTransitions />
16 |     <!-- remaining <head> markup lost in extraction -->
17 | 
18 | 
19 |     <title>{title}</title>
20 |   </head>
21 |   <body>
22 |     <slot />
23 |   </body>
24 | </html>
25 | 
46 | <!-- global <style> block (original lines 26-45) lost in extraction -->
--------------------------------------------------------------------------------
/src/lib/applicationMenu.js:
--------------------------------------------------------------------------------
1 | import {app, Menu} from 'electron';
2 |
3 | /**
4 | * For configuring the electron window menu
5 | */
6 | export function initApplicationMenu(mainWindow) {
7 | const template = [
8 | {
9 | label: 'View',
10 | submenu: [
11 | {
12 | label: 'Send to tray',
13 | click() {
14 | mainWindow.minimize();
15 | }
16 | },
17 | { label: 'Reload', role: 'reload' },
18 | { label: 'Dev tools', role: 'toggleDevTools' }
19 | ]
20 | }
21 | ];
22 | const menu = Menu.buildFromTemplate(template);
23 | Menu.setApplicationMenu(menu);
24 | }
25 |
--------------------------------------------------------------------------------
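
Main-process wiring sketch (file name, window options, and dev URL are assumptions; only initApplicationMenu comes from the file above):

// main.ts (hypothetical) - create the window, then install the custom menu.
import { app, BrowserWindow } from "electron";
import { initApplicationMenu } from "./lib/applicationMenu";

app.whenReady().then(() => {
  const mainWindow = new BrowserWindow({ width: 1280, height: 800 });
  initApplicationMenu(mainWindow); // "Send to tray" minimizes this window
  mainWindow.loadURL("http://localhost:4321"); // dev server URL assumed
});
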
/src/lib/i18n.js:
--------------------------------------------------------------------------------
1 | import i18n from "i18next";
2 | import { initReactI18next } from "react-i18next";
3 | import { persistentAtom } from "@nanostores/persistent";
4 |
5 | const languages = ["en", "da", "de", "es", "fr", "it", "ja", "ko", "pt", "th", "tw"];
6 | const pages = [
7 | "InstructionModel",
8 | "PageHeader",
9 | "Benchmark",
10 | "Perplexity",
11 | ];
12 |
13 | const locale = persistentAtom("locale", "en");
14 |
15 | async function fetchTranslations() {
16 | const _locale = locale.get();
17 |
18 | const translations = {};
19 | const localPages = {};
20 | for (const page of pages) {
21 | let response;
22 | if (window && window.electron) {
23 | response = await fetch(`/locales/${_locale}/${page}.json`);
24 | } else {
25 | response = await fetch(`../src/data/locales/${_locale}/${page}.json`);
26 | }
27 | if (response) {
28 | const jsonContents = await response.json();
29 | localPages[page] = jsonContents;
30 | }
31 | }
32 |
33 | translations[_locale] = localPages;
34 |
35 | return translations;
36 | }
37 |
38 | async function initialize() {
39 | const resources = await fetchTranslations();
40 |
41 | i18n.use(initReactI18next).init(
42 | {
43 | resources,
44 |       lng: locale.get(),
45 | defaultNS: pages,
46 | fallbackLng: languages,
47 | ns: pages,
48 | },
49 | (err, t) => {
50 | if (err) {
51 |           console.error("i18next failed to load translations", err);
52 | }
53 | }
54 | );
55 | }
56 |
57 | initialize();
58 |
59 | export { i18n, locale };
60 |
--------------------------------------------------------------------------------
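
Consumption sketch (component name is illustrative): each locale JSON file above is registered as an i18next namespace via the pages array, and the {{locale}} placeholders seen in those files are filled through t()'s interpolation argument.

import { useTranslation } from "react-i18next";
import "@/lib/i18n"; // side-effect import: runs initialize()

export function LanguageLabel() {
  // "PageHeader" selects PageHeader.json for the active locale.
  const { t } = useTranslation("PageHeader");
  // Renders e.g. "English (en)" via the {{locale}} placeholder.
  return <span>{t("english", { locale: "en" })}</span>;
}
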
/src/lib/utils.js:
--------------------------------------------------------------------------------
1 | import { clsx } from "clsx";
2 | import { twMerge } from "tailwind-merge";
3 |
4 | export function cn(...inputs) {
5 | return twMerge(clsx(inputs));
6 | }
7 |
--------------------------------------------------------------------------------
/src/lib/utils.ts:
--------------------------------------------------------------------------------
1 | import { clsx, type ClassValue } from "clsx"
2 | import { twMerge } from "tailwind-merge"
3 |
4 | export function cn(...inputs: ClassValue[]) {
5 | return twMerge(clsx(inputs))
6 | }
7 |
--------------------------------------------------------------------------------
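
A quick illustration of what cn() buys over plain concatenation: clsx flattens conditional inputs, then twMerge drops conflicting Tailwind utilities in favor of the last one.

import { cn } from "@/lib/utils";

// Conditional classes are flattened, and the later "p-4" wins over "p-2".
const classes = cn("p-2 text-sm", { "font-bold": true }, "p-4");
// => "text-sm font-bold p-4"
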
/src/pages/benchmark.astro:
--------------------------------------------------------------------------------
1 | ---
2 | import Layout from '../layouts/Layout.astro';
3 | import BenchmarkUI from '../components/BenchmarkUI.jsx';
4 | import PageHeader from '@/components/PageHeader';
5 | import '@/styles/globals.css'
6 | ---
7 |
8 | <Layout title="Electron BitNet">
9 |   <PageHeader client:only="react" />
10 |   <BenchmarkUI client:only="react" />
11 | </Layout>
12 | <!-- markup reconstructed; title and hydration directives assumed; trailing <style> block (original lines 15-26) lost in extraction -->
--------------------------------------------------------------------------------
/src/pages/index.astro:
--------------------------------------------------------------------------------
1 | ---
2 | import Layout from '../layouts/Layout.astro';
3 | import Home from '../components/Home.jsx';
4 | import PageHeader from '@/components/PageHeader';
5 | import '@/styles/globals.css'
6 | ---
7 |
8 | <Layout title="Electron BitNet">
9 |   <PageHeader client:only="react" />
10 |   <Home client:only="react" />
11 | </Layout>
12 | <!-- markup reconstructed; title and hydration directives assumed; trailing <style> block (original lines 15-26) lost in extraction -->
--------------------------------------------------------------------------------
/src/pages/perplexity.astro:
--------------------------------------------------------------------------------
1 | ---
2 | import Layout from '../layouts/Layout.astro';
3 | import PerplexityUI from '../components/PerplexityUI.jsx';
4 | import PageHeader from '@/components/PageHeader';
5 | import '@/styles/globals.css'
6 | ---
7 |
8 | <Layout title="Electron BitNet">
9 |   <PageHeader client:only="react" />
10 |   <PerplexityUI client:only="react" />
11 | </Layout>
12 | <!-- markup reconstructed; title and hydration directives assumed; trailing <style> block (original lines 15-26) lost in extraction -->
--------------------------------------------------------------------------------
/src/preload.js:
--------------------------------------------------------------------------------
1 | import { ipcRenderer, contextBridge } from "electron";
2 |
3 | contextBridge.exposeInMainWorld("electron", {
4 | // --- General ---
5 | openURL: async (target) => ipcRenderer.send("openURL", target),
6 | openFileDialog: async () => ipcRenderer.invoke("openFileDialog"),
7 | getMaxThreads: async () => ipcRenderer.invoke("getMaxThreads"),
8 |
9 | // --- Standard Inference (Non-Interactive) ---
10 | runInference: async (args) => ipcRenderer.send("runInference", args),
11 | stopInference: async () => ipcRenderer.send("stopInference"), // Used by both modes
12 | onAiResponse: (func) => {
13 | const listener = (event, data) => func(data);
14 | ipcRenderer.on("aiResponse", listener);
15 | return () => ipcRenderer.removeListener("aiResponse", listener); // Return cleanup function
16 | },
17 | onAiError: (func) => {
18 | const listener = (event, errorMsg) => func(errorMsg); // Pass error message
19 | ipcRenderer.on("aiError", listener);
20 | return () => ipcRenderer.removeListener("aiError", listener); // Return cleanup function
21 | },
22 | onAiComplete: (func) => {
23 | const listener = (event) => func();
24 | ipcRenderer.on("aiComplete", listener);
25 | return () => ipcRenderer.removeListener("aiComplete", listener); // Return cleanup function
26 | },
27 |
28 | // --- Instruction/Conversational Inference (Interactive) ---
29 | initInstructInference: async (args) => ipcRenderer.send("initInstructInference", args),
30 | sendInstructPrompt: async (promptText) => ipcRenderer.send("sendInstructPrompt", promptText),
31 | interjectInference: async () => ipcRenderer.send("interjectInference"), // Added interject
32 | onAiInstructStarted: (func) => {
33 | const listener = (event) => func();
34 | ipcRenderer.on("aiInstructStarted", listener);
35 | return () => ipcRenderer.removeListener("aiInstructStarted", listener); // Return cleanup function
36 | },
37 | onAiResponseChunk: (func) => {
38 | const listener = (event, chunk) => func(chunk);
39 | ipcRenderer.on("aiResponseChunk", listener);
40 | return () => ipcRenderer.removeListener("aiResponseChunk", listener); // Return cleanup function
41 | },
42 | onAiInstructComplete: (func) => {
43 | const listener = (event) => func();
44 | ipcRenderer.on("aiInstructComplete", listener);
45 | return () => ipcRenderer.removeListener("aiInstructComplete", listener); // Return cleanup function
46 | },
53 |
54 | // --- Benchmark ---
55 | onBenchmarkLog: (func) => {
56 | const listener = (event, data) => func(data);
57 | ipcRenderer.on("benchmarkLog", listener);
58 | return () => ipcRenderer.removeListener("benchmarkLog", listener); // Return cleanup function
59 | },
60 | onBenchmarkComplete: (func) => {
61 | const listener = (event) => func();
62 | ipcRenderer.on("benchmarkComplete", listener);
63 | return () => ipcRenderer.removeListener("benchmarkComplete", listener); // Return cleanup function
64 | },
65 | runBenchmark: async (args) => ipcRenderer.send("runBenchmark", args),
66 | stopBenchmark: async (args) => ipcRenderer.send("stopBenchmark", args),
67 |
68 | // --- Perplexity ---
69 | onPerplexityLog: (func) => {
70 | const listener = (event, data) => func(data);
71 | ipcRenderer.on("perplexityLog", listener);
72 | return () => ipcRenderer.removeListener("perplexityLog", listener); // Return cleanup function
73 | },
74 | onPerplexityComplete: (func) => {
75 | const listener = (event) => func();
76 | ipcRenderer.on("perplexityComplete", listener);
77 | return () => ipcRenderer.removeListener("perplexityComplete", listener); // Return cleanup function
78 | },
79 | runPerplexity: async (args) => ipcRenderer.send("runPerplexity", args),
80 | stopPerplexity: async (args) => ipcRenderer.send("stopPerplexity", args),
81 | });
--------------------------------------------------------------------------------
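
Renderer-side sketch (hook name and the loose Window typing are assumptions; the repo ships no declaration for window.electron). Every on* subscription above returns a cleanup function, which is what makes the useEffect teardown work:

import { useEffect, useState } from "react";

declare global {
  interface Window {
    electron: any; // loose typing assumed; see src/preload.js for the real surface
  }
}

export function useInstructStream() {
  const [text, setText] = useState("");

  useEffect(() => {
    // Accumulate streamed chunks from the interactive instruct session.
    const offChunk = window.electron.onAiResponseChunk((chunk: string) =>
      setText((prev) => prev + chunk)
    );
    const offError = window.electron.onAiError((msg: string) =>
      console.error("inference error:", msg)
    );
    return () => {
      offChunk(); // removes the ipcRenderer listeners on unmount
      offError();
    };
  }, []);

  return text;
}
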
/src/styles/globals.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 |
5 |
6 | @layer base {
7 | :root {
8 | --background: 0 0% 100%;
9 | --foreground: 20 14.3% 4.1%;
9 | --card: 0 0% 100%;
10 | --card-foreground: 20 14.3% 4.1%;
11 | --popover: 0 0% 100%;
12 | --popover-foreground: 20 14.3% 4.1%;
13 | --primary: 24 9.8% 10%;
14 | --primary-foreground: 60 9.1% 97.8%;
15 | --secondary: 60 4.8% 95.9%;
16 | --secondary-foreground: 24 9.8% 10%;
17 | --muted: 60 4.8% 95.9%;
18 | --muted-foreground: 25 5.3% 44.7%;
19 | --accent: 60 4.8% 95.9%;
20 | --accent-foreground: 24 9.8% 10%;
19 | --destructive: 0 84.2% 60.2%;
20 | --destructive-foreground: 60 9.1% 97.8%;
21 | --border: 20 5.9% 90%;
22 | --input: 20 5.9% 90%;
23 | --ring: 20 14.3% 4.1%;
24 | --radius: 0.5rem;
25 | --chart-1: 12 76% 61%;
26 | --chart-2: 173 58% 39%;
27 | --chart-3: 197 37% 24%;
28 | --chart-4: 43 74% 66%;
29 | --chart-5: 27 87% 67%;
30 | }
31 |
32 | .dark {
33 | --background: 20 14.3% 4.1%;
34 | --foreground: 60 9.1% 97.8%;
34 | --card: 20 14.3% 4.1%;
35 | --card-foreground: 60 9.1% 97.8%;
36 | --popover: 20 14.3% 4.1%;
37 | --popover-foreground: 60 9.1% 97.8%;
38 | --primary: 60 9.1% 97.8%;
39 | --primary-foreground: 24 9.8% 10%;
40 | --secondary: 12 6.5% 15.1%;
41 | --secondary-foreground: 60 9.1% 97.8%;
42 | --muted: 12 6.5% 15.1%;
43 | --muted-foreground: 24 5.4% 63.9%;
44 | --accent: 12 6.5% 15.1%;
45 | --accent-foreground: 60 9.1% 97.8%;
44 | --destructive: 0 62.8% 30.6%;
45 | --destructive-foreground: 60 9.1% 97.8%;
46 | --border: 12 6.5% 15.1%;
47 | --input: 12 6.5% 15.1%;
48 | --ring: 24 5.7% 82.9%;
49 | --chart-1: 220 70% 50%;
50 | --chart-2: 160 60% 45%;
51 | --chart-3: 30 80% 55%;
52 | --chart-4: 280 65% 60%;
53 | --chart-5: 340 75% 55%;
54 | }
55 | }
56 |
57 |
58 | @layer base {
59 | * {
60 | @apply border-border;
61 | }
62 | body {
63 | @apply bg-background text-foreground;
64 | }
65 | }
--------------------------------------------------------------------------------
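
Both Tailwind configs below use darkMode: ['class'], so switching themes is just toggling the .dark class that scopes the second variable set above; a minimal sketch:

// Flipping the class on <html> swaps the CSS variable set defined in globals.css.
export function toggleDarkMode(): boolean {
  return document.documentElement.classList.toggle("dark");
}
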
/tailwind.config.cjs:
--------------------------------------------------------------------------------
1 | /** @type {import('tailwindcss').Config} */
2 | module.exports = {
3 | darkMode: ['class'],
4 | content: ['./src/**/*.{astro,html,js,jsx,md,mdx,svelte,ts,tsx,vue}'],
5 | theme: {
6 | extend: {
7 | borderRadius: {
8 | lg: 'var(--radius)',
9 | md: 'calc(var(--radius) - 2px)',
10 | sm: 'calc(var(--radius) - 4px)'
11 | },
12 | colors: {
13 | background: 'hsl(var(--background))',
14 | foreground: 'hsl(var(--foreground))',
15 | card: {
16 | DEFAULT: 'hsl(var(--card))',
17 | foreground: 'hsl(var(--card-foreground))'
18 | },
19 | popover: {
20 | DEFAULT: 'hsl(var(--popover))',
21 | foreground: 'hsl(var(--popover-foreground))'
22 | },
23 | primary: {
24 | DEFAULT: 'hsl(var(--primary))',
25 | foreground: 'hsl(var(--primary-foreground))'
26 | },
27 | secondary: {
28 | DEFAULT: 'hsl(var(--secondary))',
29 | foreground: 'hsl(var(--secondary-foreground))'
30 | },
31 | muted: {
32 | DEFAULT: 'hsl(var(--muted))',
33 | foreground: 'hsl(var(--muted-foreground))'
34 | },
35 | accent: {
36 | DEFAULT: 'hsl(var(--accent))',
37 | foreground: 'hsl(var(--accent-foreground))'
38 | },
39 | destructive: {
40 | DEFAULT: 'hsl(var(--destructive))',
41 | foreground: 'hsl(var(--destructive-foreground))'
42 | },
43 | border: 'hsl(var(--border))',
44 | input: 'hsl(var(--input))',
45 | ring: 'hsl(var(--ring))',
46 | chart: {
47 | '1': 'hsl(var(--chart-1))',
48 | '2': 'hsl(var(--chart-2))',
49 | '3': 'hsl(var(--chart-3))',
50 | '4': 'hsl(var(--chart-4))',
51 | '5': 'hsl(var(--chart-5))'
52 | },
53 | sidebar: {
54 | DEFAULT: 'hsl(var(--sidebar-background))',
55 | foreground: 'hsl(var(--sidebar-foreground))',
56 | primary: 'hsl(var(--sidebar-primary))',
57 | 'primary-foreground': 'hsl(var(--sidebar-primary-foreground))',
58 | accent: 'hsl(var(--sidebar-accent))',
59 | 'accent-foreground': 'hsl(var(--sidebar-accent-foreground))',
60 | border: 'hsl(var(--sidebar-border))',
61 | ring: 'hsl(var(--sidebar-ring))'
62 | }
63 | },
64 | keyframes: {
65 | 'accordion-down': {
66 | from: {
67 | height: '0'
68 | },
69 | to: {
70 | height: 'var(--radix-accordion-content-height)'
71 | }
72 | },
73 | 'accordion-up': {
74 | from: {
75 | height: 'var(--radix-accordion-content-height)'
76 | },
77 | to: {
78 | height: '0'
79 | }
80 | }
81 | },
82 | animation: {
83 | 'accordion-down': 'accordion-down 0.2s ease-out',
84 | 'accordion-up': 'accordion-up 0.2s ease-out'
85 | }
86 | }
87 | },
88 | plugins: [require("tailwindcss-animate")],
89 | }
90 |
--------------------------------------------------------------------------------
/tailwind.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('tailwindcss').Config} */
2 | module.exports = {
3 | darkMode: ["class"],
4 | content: [
5 | './pages/**/*.{js,jsx,astro}',
6 | './components/**/*.{js,jsx}',
7 | './app/**/*.{js,jsx}',
8 | './src/**/*.{js,jsx}',
9 | ],
10 | theme: {
11 | container: {
12 |       center: true,
13 | padding: '2rem',
14 | screens: {
15 | '2xl': '1400px'
16 | }
17 | },
18 | extend: {
19 | colors: {
20 | border: 'hsl(var(--border))',
21 | input: 'hsl(var(--input))',
22 | ring: 'hsl(var(--ring))',
23 | background: 'hsl(var(--background))',
24 | foreground: 'hsl(var(--foreground))',
25 | primary: {
26 | DEFAULT: 'hsl(var(--primary))',
27 | foreground: 'hsl(var(--primary-foreground))'
28 | },
29 | secondary: {
30 | DEFAULT: 'hsl(var(--secondary))',
31 | foreground: 'hsl(var(--secondary-foreground))'
32 | },
33 | destructive: {
34 | DEFAULT: 'hsl(var(--destructive))',
35 | foreground: 'hsl(var(--destructive-foreground))'
36 | },
37 | muted: {
38 | DEFAULT: 'hsl(var(--muted))',
39 | foreground: 'hsl(var(--muted-foreground))'
40 | },
41 | accent: {
42 | DEFAULT: 'hsl(var(--accent))',
43 | foreground: 'hsl(var(--accent-foreground))'
44 | },
45 | popover: {
46 | DEFAULT: 'hsl(var(--popover))',
47 | foreground: 'hsl(var(--popover-foreground))'
48 | },
49 | card: {
50 | DEFAULT: 'hsl(var(--card))',
51 | foreground: 'hsl(var(--card-foreground))'
52 | }
53 | },
54 | borderRadius: {
55 | lg: 'var(--radius)',
56 | md: 'calc(var(--radius) - 2px)',
57 | sm: 'calc(var(--radius) - 4px)'
58 | },
59 | keyframes: {
60 | 'accordion-down': {
61 | from: {
62 | height: '0'
63 | },
64 | to: {
65 | height: 'var(--radix-accordion-content-height)'
66 | }
67 | },
68 | 'accordion-up': {
69 | from: {
70 | height: 'var(--radix-accordion-content-height)'
71 | },
72 | to: {
73 | height: '0'
74 | }
75 | },
92 | },
93 | animation: {
94 | 'accordion-down': 'accordion-down 0.2s ease-out',
95 |       'accordion-up': 'accordion-up 0.2s ease-out'
98 | }
99 | }
100 | },
101 | plugins: [require("tailwindcss-animate")],
102 | }
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "astro/tsconfigs/base",
3 | "compilerOptions": {
4 | "jsx": "react-jsx",
5 | "jsxImportSource": "react",
6 | "baseUrl": ".",
7 | "paths": {
8 | "@/*": [
9 | "./src/*"
10 | ]
11 | }
12 | }
13 | }
--------------------------------------------------------------------------------
|