├── tests ├── outputs │ └── .gitkeep ├── inputs │ ├── regexes.json │ ├── llgs.json │ ├── rlgs.json │ ├── nfas.json │ ├── pdas.json │ ├── tms.json │ ├── lbas.json │ ├── cfgs.json │ └── dfas.json ├── config.py ├── pda.py ├── rlgToNfa.py ├── tmtest.py ├── llgToRlg.py ├── lbatest.py ├── nfaRegex.py ├── nfaToRlg.py ├── postfixToNfa.py ├── nfaToDfa.py ├── regexTree.py ├── regexTreeToNfa.py ├── dfaIsomorphism.py ├── dfaSubset.py ├── dfaEquivalenceIsomorphism.py ├── cfg.py └── dfaOper.py ├── src └── pykleene │ ├── __init__.py │ ├── symbols.py │ ├── _helpers.py │ ├── _config.py │ ├── utils.py │ ├── lba.py │ ├── pda.py │ ├── tm.py │ ├── re.py │ ├── grammar.py │ ├── nfa.py │ └── dfa.py ├── requirements.txt ├── docs ├── .eslintrc.json ├── public │ ├── 1.png │ ├── 2.png │ ├── public-og.png │ └── fonts │ │ ├── CourierPrime-Bold.ttf │ │ ├── CourierPrime-Italic.ttf │ │ ├── CourierPrime-Regular.ttf │ │ └── CourierPrime-BoldItalic.ttf ├── app │ ├── favicon.ico │ ├── docs │ │ ├── layout.tsx │ │ └── [[...slug]] │ │ │ └── page.tsx │ ├── not-found.tsx │ ├── error.tsx │ ├── page.tsx │ ├── layout.tsx │ └── globals.css ├── next.config.mjs ├── postcss.config.js ├── contents │ └── docs │ │ ├── examples │ │ └── code-examples │ │ │ └── index.mdx │ │ ├── miscellaneous │ │ ├── symbols │ │ │ └── index.mdx │ │ ├── helpers │ │ │ └── index.mdx │ │ └── utils │ │ │ └── index.mdx │ │ ├── about │ │ ├── me │ │ │ └── index.mdx │ │ ├── whats-more │ │ │ └── index.mdx │ │ └── acknowledgements │ │ │ └── index.mdx │ │ ├── getting-started │ │ ├── installation │ │ │ ├── react │ │ │ │ └── index.mdx │ │ │ ├── gatsby │ │ │ │ └── index.mdx │ │ │ ├── laravel │ │ │ │ └── index.mdx │ │ │ └── index.mdx │ │ └── introduction │ │ │ └── index.mdx │ │ ├── regular-expression │ │ └── regex │ │ │ └── index.mdx │ │ ├── state-machines │ │ ├── pda │ │ │ └── index.mdx │ │ ├── tm │ │ │ └── index.mdx │ │ ├── lba │ │ │ └── index.mdx │ │ ├── nfa │ │ │ └── index.mdx │ │ └── dfa │ │ │ └── index.mdx │ │ └── grammars │ │ └── grammar │ │ └── index.mdx ├── lib │ ├── utils.ts │ ├── markdown.ts │ └── routes-config.ts ├── components │ ├── theme-provider.tsx │ ├── ui │ │ ├── collapsible.tsx │ │ ├── scroll-area.tsx │ │ ├── button.tsx │ │ ├── table.tsx │ │ ├── sheet.tsx │ │ └── dropdown-menu.tsx │ ├── docs-menu.tsx │ ├── typography.tsx │ ├── anchor.tsx │ ├── note.tsx │ ├── toc.tsx │ ├── leftbar.tsx │ ├── theme-toggle.tsx │ ├── navbar.tsx │ └── sublink.tsx ├── components.json ├── .gitignore ├── tsconfig.json ├── LICENSE ├── package.json ├── README.md └── tailwind.config.ts ├── .gitignore ├── setup.py └── readme.md /tests/outputs/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/pykleene/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | graphviz 2 | setuptools 3 | wheel 4 | twine -------------------------------------------------------------------------------- /docs/.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "next/core-web-vitals" 3 | } 4 | -------------------------------------------------------------------------------- /tests/inputs/regexes.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"regex_5": "b*", 3 | "regex_6": "a+b*" 4 | } -------------------------------------------------------------------------------- /docs/public/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/krishnachandran-u/pykleene/HEAD/docs/public/1.png -------------------------------------------------------------------------------- /docs/public/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/krishnachandran-u/pykleene/HEAD/docs/public/2.png -------------------------------------------------------------------------------- /docs/app/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/krishnachandran-u/pykleene/HEAD/docs/app/favicon.ico -------------------------------------------------------------------------------- /docs/public/public-og.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/krishnachandran-u/pykleene/HEAD/docs/public/public-og.png -------------------------------------------------------------------------------- /docs/next.config.mjs: -------------------------------------------------------------------------------- 1 | /** @type {import('next').NextConfig} */ 2 | const nextConfig = {}; 3 | 4 | export default nextConfig; 5 | -------------------------------------------------------------------------------- /docs/postcss.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | plugins: { 3 | tailwindcss: {}, 4 | autoprefixer: {}, 5 | }, 6 | }; 7 | -------------------------------------------------------------------------------- /docs/public/fonts/CourierPrime-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/krishnachandran-u/pykleene/HEAD/docs/public/fonts/CourierPrime-Bold.ttf -------------------------------------------------------------------------------- /docs/public/fonts/CourierPrime-Italic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/krishnachandran-u/pykleene/HEAD/docs/public/fonts/CourierPrime-Italic.ttf -------------------------------------------------------------------------------- /docs/public/fonts/CourierPrime-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/krishnachandran-u/pykleene/HEAD/docs/public/fonts/CourierPrime-Regular.ttf -------------------------------------------------------------------------------- /docs/public/fonts/CourierPrime-BoldItalic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/krishnachandran-u/pykleene/HEAD/docs/public/fonts/CourierPrime-BoldItalic.ttf -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | tests/outputs/* 2 | !tests/outputs/.gitkeep 3 | .venv 4 | __pycache__ 5 | build 6 | dist 7 | pykleene.egg-info 8 | Digraph.gv 9 | Digraph.gv.pdf -------------------------------------------------------------------------------- /docs/contents/docs/examples/code-examples/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Code Examples 3 | 
--- 4 | See the [tests directory](https://github.com/krishnachandran-u/pykleene/tree/master/tests) for code examples. -------------------------------------------------------------------------------- /docs/lib/utils.ts: -------------------------------------------------------------------------------- 1 | import { type ClassValue, clsx } from "clsx"; 2 | import { twMerge } from "tailwind-merge"; 3 | 4 | export function cn(...inputs: ClassValue[]) { 5 | return twMerge(clsx(inputs)); 6 | } 7 | -------------------------------------------------------------------------------- /tests/config.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | TESTSINPUTDIR = 'inputs' 4 | TESTSOUTPUTDIR = 'outputs' 5 | 6 | INPUTPATH = lambda filename: f'./{TESTSINPUTDIR}/{filename}' 7 | OUTPUTDIR = lambda: f'./{TESTSOUTPUTDIR}/{sys.argv[0][:-3]}/' -------------------------------------------------------------------------------- /src/pykleene/symbols.py: -------------------------------------------------------------------------------- 1 | class Symbols: 2 | DELTA: str = "δ" 3 | EPS: str = "ε" 4 | PHI: str = "φ" 5 | GAMMA: str = "Γ" 6 | SIGMA: str = "Σ" 7 | FLAT: str = "♭" 8 | BOT: str = "⊥" 9 | VDASH: str = "⊢" 10 | RVDASH: str = "⊣" -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name='pykleene', 5 | version='0.1.5', 6 | packages=find_packages(where='src'), 7 | package_dir={'': 'src'}, 8 | install_requires=[ 9 | 'graphviz==0.20.3' 10 | ] 11 | ) -------------------------------------------------------------------------------- /tests/inputs/llgs.json: -------------------------------------------------------------------------------- 1 | { 2 | "llg1": { 3 | "nonTerminals": ["S", "A"], 4 | "terminals": ["a", "b", "c"], 5 | "productions": { 6 | "S": ["Sa", "Ab"], 7 | "A": ["Ac", "ε"] 8 | }, 9 | "startSymbol": "S" 10 | } 11 | } -------------------------------------------------------------------------------- /tests/inputs/rlgs.json: -------------------------------------------------------------------------------- 1 | { 2 | "a*bc*": { 3 | "nonTerminals": ["S", "A"], 4 | "terminals": ["a", "b", "c"], 5 | "productions": { 6 | "S": ["aS", "bA"], 7 | "A": ["cA", "ε"] 8 | }, 9 | "startSymbol": "S" 10 | } 11 | } -------------------------------------------------------------------------------- /docs/contents/docs/miscellaneous/symbols/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Symbols 3 | description: Special symbols class in pykleene 4 | --- 5 | 6 | ```python showLineNumbers 7 | DELTA: str = "δ" 8 | EPS: str = "ε" 9 | PHI: str = "φ" 10 | GAMMA: str = "Γ" 11 | SIGMA: str = "Σ" 12 | FLAT: str = "♭" 13 | BOT: str = "⊥" 14 | VDASH: str = "⊢" 15 | RVDASH: str = "⊣" 16 | ``` -------------------------------------------------------------------------------- /docs/app/docs/layout.tsx: -------------------------------------------------------------------------------- 1 | import { Leftbar } from "@/components/leftbar"; 2 | 3 | export default function DocsLayout({ 4 | children, 5 | }: Readonly<{ 6 | children: React.ReactNode; 7 | }>) { 8 | return ( 9 |
10 | 11 |
{children}
{" "} 12 |
13 | ); 14 | } 15 | -------------------------------------------------------------------------------- /docs/components/theme-provider.tsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import * as React from "react"; 4 | import { ThemeProvider as NextThemesProvider } from "next-themes"; 5 | import { type ThemeProviderProps } from "next-themes/dist/types"; 6 | 7 | export function ThemeProvider({ children, ...props }: ThemeProviderProps) { 8 | return {children}; 9 | } 10 | -------------------------------------------------------------------------------- /src/pykleene/_helpers.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | class BinaryTreeNode: 4 | leftChild: 'BinaryTreeNode' 5 | data: Any 6 | rightChild: 'BinaryTreeNode' 7 | 8 | def __init__(self, leftChild: 'BinaryTreeNode' = None, data: Any = None, rightChild: 'BinaryTreeNode' = None): 9 | self.leftChild = leftChild 10 | self.data = data 11 | self.rightChild = rightChild -------------------------------------------------------------------------------- /docs/components/ui/collapsible.tsx: -------------------------------------------------------------------------------- 1 | "use client" 2 | 3 | import * as CollapsiblePrimitive from "@radix-ui/react-collapsible" 4 | 5 | const Collapsible = CollapsiblePrimitive.Root 6 | 7 | const CollapsibleTrigger = CollapsiblePrimitive.CollapsibleTrigger 8 | 9 | const CollapsibleContent = CollapsiblePrimitive.CollapsibleContent 10 | 11 | export { Collapsible, CollapsibleTrigger, CollapsibleContent } 12 | -------------------------------------------------------------------------------- /docs/components.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://ui.shadcn.com/schema.json", 3 | "style": "default", 4 | "rsc": true, 5 | "tsx": true, 6 | "tailwind": { 7 | "config": "tailwind.config.ts", 8 | "css": "app/globals.css", 9 | "baseColor": "zinc", 10 | "cssVariables": true, 11 | "prefix": "" 12 | }, 13 | "aliases": { 14 | "components": "@/components", 15 | "utils": "@/lib/utils" 16 | } 17 | } -------------------------------------------------------------------------------- /tests/inputs/nfas.json: -------------------------------------------------------------------------------- 1 | { 2 | "ax,xb": { 3 | "states": ["A0", "A1", "A2", "A3", "A4"], 4 | "alphabet": ["a", "b"], 5 | "transitions": [ 6 | ["A0", "ε", ["A1", "A3"]], 7 | ["A1", "a", ["A2"]], 8 | ["A2", "a", ["A2"]], 9 | ["A2", "b", ["A2"]], 10 | ["A3", "a", ["A3"]], 11 | ["A3", "b", ["A3", "A4"]] 12 | ], 13 | "startStates": ["A0"], 14 | "finalStates": ["A2", "A4"] 15 | } 16 | } -------------------------------------------------------------------------------- /docs/app/not-found.tsx: -------------------------------------------------------------------------------- 1 | import { buttonVariants } from "@/components/ui/button"; 2 | import Link from "next/link"; 3 | 4 | export default function NotFound() { 5 | return ( 6 |
7 |
8 |

404

9 |

Page not found

10 |
11 | 12 | 13 | Back to homepage 14 | 15 |
16 | ); 17 | } 18 | -------------------------------------------------------------------------------- /tests/pda.py: -------------------------------------------------------------------------------- 1 | from pykleene.pda import PDA 2 | from typing import Dict 3 | from config import INPUTPATH, OUTPUTDIR 4 | import json 5 | 6 | FILENAME = 'pdas.json' 7 | 8 | if __name__ == '__main__': 9 | PDAs: Dict[str, PDA] = {} 10 | with open(INPUTPATH(FILENAME), 'r') as file: 11 | PDAs = json.load(file) 12 | 13 | for pdaName, pdaData in PDAs.items(): 14 | pda = PDA() 15 | pda.loadFromJSONDict(pdaData) 16 | pda.image(dir=OUTPUTDIR(), save=True) 17 | print(pda.accepts('bbaba')) 18 | PDAs[pdaName] = pda -------------------------------------------------------------------------------- /tests/inputs/pdas.json: -------------------------------------------------------------------------------- 1 | { 2 | "PDA_NAME": { 3 | "states": ["q0", "q1", "q2"], 4 | "inputAlphabet": ["a", "b"], 5 | "stackAlphabet": ["A", "B", "⊥"], 6 | "transitions": [ 7 | ["q0", "a", "⊥", "q0", "A⊥"], 8 | ["q0", "a", "A", "q0", "AA"], 9 | ["q0", "b", "A", "q1", "ε"], 10 | ["q1", "b", "A", "q1", "ε"], 11 | ["q1", "a", "⊥", "q2", "⊥"] 12 | ], 13 | "startState": "q0", 14 | "initialStackSymbol": "⊥", 15 | "finalStates": ["q2"] 16 | } 17 | } -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | .yarn/install-state.gz 8 | 9 | # testing 10 | /coverage 11 | 12 | # next.js 13 | /.next/ 14 | /out/ 15 | 16 | # production 17 | /build 18 | 19 | # misc 20 | .DS_Store 21 | *.pem 22 | 23 | # debug 24 | npm-debug.log* 25 | yarn-debug.log* 26 | yarn-error.log* 27 | 28 | # local env files 29 | .env*.local 30 | 31 | # vercel 32 | .vercel 33 | 34 | # typescript 35 | *.tsbuildinfo 36 | next-env.d.ts 37 | -------------------------------------------------------------------------------- /docs/components/docs-menu.tsx: -------------------------------------------------------------------------------- 1 | import { ROUTES } from "@/lib/routes-config"; 2 | import SubLink from "./sublink"; 3 | 4 | export default function DocsMenu({ isSheet = false }) { 5 | return ( 6 |
7 | {ROUTES.map((item, index) => { 8 | const modifiedItems = { 9 | ...item, 10 | href: `/docs${item.href}`, 11 | level: 0, 12 | isSheet, 13 | }; 14 | return ; 15 | })} 16 |
17 | ); 18 | } 19 | -------------------------------------------------------------------------------- /src/pykleene/_config.py: -------------------------------------------------------------------------------- 1 | from pykleene.utils import randomDarkColor 2 | 3 | graphvizConfig = { 4 | 'node_attr': {'shape': 'circle', 'fontname': 'Courier Prime Bold'}, 5 | 'edge_attr': {'fontname': 'Courier Prime Bold'}, 6 | 'graph_attr': {'dpi': '300'} 7 | } 8 | 9 | graphvizAttrConfig = { 10 | 'dpi': '300', 11 | 'size': '10,10', 12 | 'splines': 'true', 13 | 'overlap': 'false', 14 | 'nodesep': '1', 15 | 'ranksep': '2' 16 | } 17 | 18 | graphvizEdgeConfig = { 19 | 'labelloc': 'c', 20 | 'labeljust': 'c', 21 | 'margin': '0.1', 22 | } -------------------------------------------------------------------------------- /tests/rlgToNfa.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | from config import INPUTPATH, OUTPUTDIR 3 | import json 4 | from pykleene.grammar import Grammar 5 | 6 | FILENAME = 'rlgs.json' 7 | 8 | if __name__ == '__main__': 9 | rlgs: Dict[str, str] 10 | with open(INPUTPATH(FILENAME), 'r') as file: 11 | rlgs = json.load(file) 12 | 13 | for rlgName, rlg in rlgs.items(): 14 | print(f"RLG for {rlgName}: {rlg}") 15 | grammar = Grammar() 16 | grammar.loadFromJSONDict(rlg) 17 | nfa = grammar.nfa() 18 | nfa.image(dir=OUTPUTDIR(), save=True) 19 | -------------------------------------------------------------------------------- /tests/tmtest.py: -------------------------------------------------------------------------------- 1 | from pykleene.tm import TM 2 | from typing import Dict 3 | from config import INPUTPATH, OUTPUTDIR 4 | import json 5 | from pprint import pprint 6 | 7 | FILENAME = 'tms.json' 8 | 9 | if __name__ == '__main__': 10 | TMs: Dict[str, TM] = {} 11 | with open(INPUTPATH(FILENAME), 'r') as file: 12 | TMs = json.load(file) 13 | # pprint(TMs) 14 | 15 | for tmName, tmData in TMs.items(): 16 | tm = TM() 17 | tm.loadFromJSONDict(tmData) 18 | tm.image(dir=OUTPUTDIR(), save=True) 19 | print(tm.accepts('1111#11111', verbose=False)) -------------------------------------------------------------------------------- /tests/llgToRlg.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | from config import INPUTPATH, OUTPUTDIR 3 | import json 4 | from pykleene.grammar import Grammar 5 | from pprint import pprint 6 | 7 | FILENAME = 'llgs.json' 8 | 9 | if __name__ == '__main__': 10 | rlgs: Dict[str, str] 11 | with open(INPUTPATH(FILENAME), 'r') as file: 12 | rlgs = json.load(file) 13 | 14 | for rlgName, rlg in rlgs.items(): 15 | print(f"RLG for {rlgName}: {rlg}") 16 | grammar = Grammar() 17 | grammar.loadFromJSONDict(rlg) 18 | grammar = grammar.toRightLinear() 19 | pprint(grammar.__dict__) 20 | 21 | -------------------------------------------------------------------------------- /tests/lbatest.py: -------------------------------------------------------------------------------- 1 | from pykleene.lba import LBA 2 | from typing import Dict 3 | from config import INPUTPATH, OUTPUTDIR 4 | import json 5 | from pprint import pprint 6 | 7 | FILENAME = 'lbas.json' 8 | 9 | if __name__ == '__main__': 10 | LBAs: Dict[str, LBA] = {} 11 | with open(INPUTPATH(FILENAME), 'r') as file: 12 | LBAs = json.load(file) 13 | # pprint(TMs) 14 | 15 | for lbaName, lbaData in LBAs.items(): 16 | lba = LBA() 17 | lba.loadFromJSONDict(lbaData) 18 | lba.image(dir=OUTPUTDIR(), save=True) 19 | 
print(lba.accepts('1111#11111', verbose=True, tapeLenFunc=lambda x: x+10)) -------------------------------------------------------------------------------- /docs/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "lib": ["dom", "dom.iterable", "esnext"], 4 | "allowJs": true, 5 | "skipLibCheck": true, 6 | "strict": true, 7 | "noEmit": true, 8 | "esModuleInterop": true, 9 | "module": "esnext", 10 | "moduleResolution": "bundler", 11 | "resolveJsonModule": true, 12 | "isolatedModules": true, 13 | "jsx": "preserve", 14 | "incremental": true, 15 | "plugins": [ 16 | { 17 | "name": "next" 18 | } 19 | ], 20 | "paths": { 21 | "@/*": ["./*"] 22 | } 23 | }, 24 | "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"], 25 | "exclude": ["node_modules"] 26 | } 27 | -------------------------------------------------------------------------------- /tests/nfaRegex.py: -------------------------------------------------------------------------------- 1 | from pykleene.nfa import NFA 2 | from typing import Dict 3 | from config import INPUTPATH, OUTPUTDIR 4 | import json 5 | 6 | FILENAME = 'nfas.json' 7 | 8 | if __name__ == '__main__': 9 | NFAs: Dict[str, NFA] 10 | with open(INPUTPATH(FILENAME), 'r') as file: 11 | NFAs = json.load(file) 12 | 13 | for nfaName, nfaData in NFAs.items(): 14 | dfa = NFA() 15 | dfa.loadFromJSONDict(nfaData) 16 | dfa.image(dir=OUTPUTDIR(), save=True) 17 | NFAs[nfaName] = dfa 18 | 19 | for nfaName, nfa in NFAs.items(): 20 | nfa.image(dir=OUTPUTDIR(), save=True) 21 | regex = nfa.regex() 22 | print(f"Regex for {nfaName}: {regex}") 23 | -------------------------------------------------------------------------------- /docs/components/typography.tsx: -------------------------------------------------------------------------------- 1 | import { PropsWithChildren } from "react"; 2 | 3 | export function Typography({ children }: PropsWithChildren) { 4 | return ( 5 |
6 | {children} 7 |
8 | ); 9 | } 10 | -------------------------------------------------------------------------------- /tests/nfaToRlg.py: -------------------------------------------------------------------------------- 1 | from pykleene.nfa import NFA 2 | from typing import Dict 3 | from config import INPUTPATH, OUTPUTDIR 4 | import json 5 | import pprint 6 | 7 | FILENAME = 'nfas.json' 8 | 9 | if __name__ == '__main__': 10 | NFAs: Dict[str, NFA] 11 | with open(INPUTPATH(FILENAME), 'r') as file: 12 | NFAs = json.load(file) 13 | 14 | for nfaName, nfaData in NFAs.items(): 15 | dfa = NFA() 16 | dfa.loadFromJSONDict(nfaData) 17 | dfa.image(dir=OUTPUTDIR(), save=True) 18 | NFAs[nfaName] = dfa 19 | 20 | for nfaName, nfa in NFAs.items(): 21 | nfa.image(dir=OUTPUTDIR(), save=True) 22 | Rlg = nfa.grammar() 23 | pprint.pprint(Rlg.__dict__) 24 | -------------------------------------------------------------------------------- /tests/postfixToNfa.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | from config import INPUTPATH, OUTPUTDIR 3 | from pykleene.re import RE 4 | import json 5 | 6 | FILENAME = 'regexes.json' 7 | 8 | if __name__ == '__main__': 9 | regexes: Dict[str, str] 10 | with open(INPUTPATH(FILENAME), 'r') as file: 11 | regexes = json.load(file) 12 | 13 | for regexName, regex in regexes.items(): 14 | print(f"Regex for {regexName}: {regex}") 15 | formattedRegex = RE.format(regex) 16 | print(f"Formatted regex for {regexName}: {formattedRegex}") 17 | postfixRegex = RE.postfix(formattedRegex) 18 | nfa = RE.nfa(regex, method='postfix') 19 | nfa.image(dir=OUTPUTDIR(), save=True) 20 | -------------------------------------------------------------------------------- /tests/inputs/tms.json: -------------------------------------------------------------------------------- 1 | { 2 | "TM_ADD": { 3 | "states": ["q0", "q1", "q2", "t", "r"], 4 | "inputAlphabet": ["0", "1"], 5 | "tapeAlphabet": ["0", "1", "#", "♭", "⊢"], 6 | "startState": "q0", 7 | "transitions": [ 8 | ["q0", "⊢", "q0", "⊢", "R"], 9 | ["q0", "1", "q0", "1", "R"], 10 | ["q0", "#", "q0", "1", "R"], 11 | ["q0", "♭", "q1", "♭", "L"], 12 | ["q1", "1", "q2", "♭", "L"], 13 | ["q2", "1", "q2", "1", "L"], 14 | ["q2", "⊢", "t", "⊢", "S"] 15 | ], 16 | "leftEndMarker": "⊢", 17 | "blankSymbol": "♭", 18 | "acceptState": "t", 19 | "rejectState": "r" 20 | } 21 | } -------------------------------------------------------------------------------- /tests/nfaToDfa.py: -------------------------------------------------------------------------------- 1 | from pykleene.nfa import NFA 2 | from typing import Dict 3 | from config import INPUTPATH, OUTPUTDIR 4 | import json 5 | 6 | FILENAME = 'nfas.json' 7 | 8 | if __name__ == '__main__': 9 | NFAs: Dict[str, NFA] 10 | with open(INPUTPATH(FILENAME), 'r') as file: 11 | NFAs = json.load(file) 12 | 13 | for nfaName, nfaData in NFAs.items(): 14 | nfa = NFA() 15 | nfa.loadFromJSONDict(nfaData) 16 | # print(nfa.__dict__) 17 | nfa.image(dir=OUTPUTDIR(), save=True) 18 | NFAs[nfaName] = nfa 19 | 20 | for nfaName, nfa in NFAs.items(): 21 | nfa.image(dir=OUTPUTDIR(), save=True) 22 | dfa = nfa.dfa() 23 | dfa.image(dir=OUTPUTDIR(), save=True) -------------------------------------------------------------------------------- /tests/regexTree.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | from config import INPUTPATH, OUTPUTDIR 3 | from pykleene.re import RE 4 | from pykleene._helpers import BinaryTreeNode 5 | import json 6 
| 7 | FILENAME = 'regexes.json' 8 | 9 | if __name__ == '__main__': 10 | regexes: Dict[str, str] 11 | with open(INPUTPATH(FILENAME), 'r') as file: 12 | regexes = json.load(file) 13 | 14 | for regexName, regex in regexes.items(): 15 | print(f"Regex for {regexName}: {regex}") 16 | formattedRegex = RE.format(regex) 17 | print(f"Formatted regex for {regexName}: {formattedRegex}") 18 | regexTree = RE.expressionTree(regex) 19 | RE.image(param=regexTree, type='regexTree', dir=OUTPUTDIR(), save=True) 20 | -------------------------------------------------------------------------------- /tests/inputs/lbas.json: -------------------------------------------------------------------------------- 1 | { 2 | "LBA_ADD": { 3 | "states": ["q0", "q1", "q2", "t", "r"], 4 | "inputAlphabet": ["0", "1"], 5 | "tapeAlphabet": ["0", "1", "#", "♭", "⊢", "⊣"], 6 | "startState": "q0", 7 | "transitions": [ 8 | ["q0", "⊢", "q0", "⊢", "R"], 9 | ["q0", "1", "q0", "1", "R"], 10 | ["q0", "#", "q0", "1", "R"], 11 | ["q0", "♭", "q1", "♭", "L"], 12 | ["q1", "1", "q2", "♭", "L"], 13 | ["q2", "1", "q2", "1", "L"], 14 | ["q2", "⊢", "t", "⊢", "S"] 15 | ], 16 | "leftEndMarker": "⊢", 17 | "rightEndMarker": "⊣", 18 | "blankSymbol": "♭", 19 | "acceptState": "t", 20 | "rejectState": "r" 21 | } 22 | } -------------------------------------------------------------------------------- /tests/regexTreeToNfa.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | from config import INPUTPATH, OUTPUTDIR 3 | from pykleene.re import RE 4 | from pykleene.nfa import NFA 5 | import json 6 | 7 | FILENAME = 'regexes.json' 8 | 9 | if __name__ == '__main__': 10 | regexes: Dict[str, str] 11 | with open(INPUTPATH(FILENAME), 'r') as file: 12 | regexes = json.load(file) 13 | 14 | for regexName, regex in regexes.items(): 15 | print(f"Regex for {regexName}: {regex}") 16 | formattedRegex = RE.format(regex) 17 | print(f"Formatted regex for {regexName}: {formattedRegex}") 18 | postfixRegex = RE.postfix(formattedRegex) 19 | nfa: NFA = RE.nfa(regex, method='regexTree') 20 | # nfa.image(dir=OUTPUTDIR(), save=True) 21 | nfa.dfa().minimal().image(dir=OUTPUTDIR(), save=True) 22 | -------------------------------------------------------------------------------- /docs/contents/docs/about/me/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: About Me 3 | --- 4 | 5 | Hi there! I'm **Krishnachandran U**, a CS undergrad at the College of Engineering, Trivandrum. 6 | 7 | I’ve worked on a couple of interesting projects and with companies in **Frontend**, **Backend**, and **AI**. I have a deep love for **automata** and **cryptography**, and I’ve also dabbled in **competitive programming**. 8 | 9 | You can reach me at: 10 | - Email: krishnachandran.u@outlook.com 11 | - GitHub: [github.com/krishnachandran-u](https://github.com/krishnachandran-u) 12 | - LinkedIn: [linkedin.com/in/krishnachandran-u](https://linkedin.com/in/krishnachandran-u) 13 | - Codeforces: [codeforces.com/profile/krishnachandran](https://codeforces.com/profile/krishnachandran) 14 | 15 | Thanks for checking out **Pykleene**. I hope you enjoy playing with automata as much as I do! 
-------------------------------------------------------------------------------- /tests/dfaIsomorphism.py: -------------------------------------------------------------------------------- 1 | from pykleene.dfa import DFA 2 | from typing import Dict 3 | from config import INPUTPATH, OUTPUTDIR 4 | import json 5 | 6 | FILENAME = 'dfas.json' 7 | 8 | if __name__ == '__main__': 9 | DFAs: Dict[str, DFA] 10 | with open(INPUTPATH(FILENAME), 'r') as file: 11 | DFAs = json.load(file) 12 | 13 | for dfaName, dfaData in DFAs.items(): 14 | dfa = DFA() 15 | dfa.loadFromJSONDict(dfaData) 16 | dfa.image(dir=OUTPUTDIR(), save=True) 17 | DFAs[dfaName] = dfa 18 | 19 | for dfaName1, dfa1 in DFAs.items(): 20 | for dfaName2, dfa2 in DFAs.items(): 21 | if dfaName1 != dfaName2: 22 | if dfa1.isomorphic(dfa2): 23 | print(f"{dfaName1} is equivalent to {dfaName2}") 24 | else: 25 | print(f"{dfaName1} is not equivalent to {dfaName2}") -------------------------------------------------------------------------------- /docs/contents/docs/miscellaneous/helpers/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Helpers 3 | description: Some useful stuff :) 4 | --- 5 | 6 | ## BinaryTreeNode 7 | 8 | ```python showLineNumbers 9 | leftChild: BinaryTreeNode # Left child node 10 | data: Any # Node data of any type 11 | rightChild: BinaryTreeNode # Right child node 12 | ``` 13 | 14 | ### CONSTRUCTOR 15 | Initialize a new binary tree node. 16 | #### Parameters 17 | - `leftChild`: Reference to left child node (default: None) 18 | - `data`: Data to be stored in the node (default: None) 19 | - `rightChild`: Reference to right child node (default: None) 20 | #### Return Value 21 | Creates a new BinaryTreeNode object with specified configuration 22 | 23 | ### Usage Notes 24 | - Although part of internal helpers (`_helpers.py`), the class can be used externally 25 | - Supports any data type through Python's `typing.Any` -------------------------------------------------------------------------------- /tests/dfaSubset.py: -------------------------------------------------------------------------------- 1 | from pykleene.dfa import DFA 2 | from typing import Dict 3 | from config import INPUTPATH, OUTPUTDIR 4 | import json 5 | 6 | FILENAME = 'dfas.json' 7 | 8 | if __name__ == '__main__': 9 | DFAs: Dict[str, DFA] 10 | with open(INPUTPATH(FILENAME), 'r') as file: 11 | DFAs = json.load(file) 12 | 13 | for dfaName, dfaData in DFAs.items(): 14 | dfa = DFA() 15 | dfa.loadFromJSONDict(dfaData) 16 | dfa.image(dir=OUTPUTDIR(), save=True) 17 | DFAs[dfaName] = dfa 18 | 19 | for dfaName1, dfa1 in DFAs.items(): 20 | for dfaName2, dfa2 in DFAs.items(): 21 | if dfaName1 != dfaName2: 22 | if dfa1.isLangSubset(dfa2) and dfa2.isLangSubset(dfa1): 23 | print(f"{dfaName1} is equivalent to {dfaName2}") 24 | else: 25 | print(f"{dfaName1} is not equivalent to {dfaName2}") -------------------------------------------------------------------------------- /tests/dfaEquivalenceIsomorphism.py: -------------------------------------------------------------------------------- 1 | from pykleene.dfa import DFA 2 | from typing import Dict 3 | import sys 4 | from tests.config import INPUTPATH, OUTPUTDIR 5 | import json 6 | 7 | FILENAME = 'dfas.json' 8 | 9 | if __name__ == '__main__': 10 | DFAs: Dict[str, DFA] 11 | with open(INPUTPATH(FILENAME), 'r') as file: 12 | DFAs = json.load(file) 13 | 14 | for dfaName, dfaData in DFAs.items(): 15 | dfa = DFA() 16 | dfa.loadFromJSONDict(dfaData) 17 | dfa.image(dir=OUTPUTDIR, 
save=True) 18 | DFAs[dfaName] = dfa 19 | 20 | for dfaName1, dfa1 in DFAs.items(): 21 | for dfaName2, dfa2 in DFAs.items(): 22 | if dfaName1 != dfaName2: 23 | if dfa1.isomorphic(dfa2): 24 | print(f"{dfaName1} is equivalent to {dfaName2}") 25 | else: 26 | print(f"{dfaName1} is not equivalent to {dfaName2}") -------------------------------------------------------------------------------- /docs/contents/docs/about/whats-more/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: What's more 3 | --- 4 | 5 | There’s still a lot to be done in **Pykleene**, and I’m excited to continue expanding its capabilities. Here are some of the features and functionalities that are yet to be implemented, and I welcome anyone who’d like to contribute: 6 | 7 | - Writing some functions for emptiness, finiteness, and other important properties of regular languages. 8 | 9 | - Adding support for simulating **Mealy** and **Moore** machines. 10 | 11 | - PDA Membership, Emptiness, and Finiteness Tests 12 | 13 | - Membership Problems for Context-Sensitive and Recursive Languages 14 | 15 | If you’re interested in contributing or have suggestions for additional features, feel free to open an issue or submit a pull request. Let’s continue making **Pykleene** the go-to library for automata and formal language simulations! 16 | 17 | Thank you for being part of this journey. 18 | -------------------------------------------------------------------------------- /docs/app/error.tsx: -------------------------------------------------------------------------------- 1 | "use client"; // Error components must be Client Components 2 | 3 | import { Button } from "@/components/ui/button"; 4 | import { useEffect } from "react"; 5 | 6 | export default function Error({ 7 | error, 8 | reset, 9 | }: { 10 | error: Error & { digest?: string }; 11 | reset: () => void; 12 | }) { 13 | useEffect(() => { 14 | // Log the error to an error reporting service 15 | console.error(error); 16 | }, [error]); 17 | 18 | return ( 19 |
20 |
21 |

Oops!

22 |

Something went wrong!

23 |
24 | 32 |
33 | ); 34 | } 35 | -------------------------------------------------------------------------------- /tests/cfg.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | from config import INPUTPATH, OUTPUTDIR 3 | import json 4 | from pykleene.grammar import Grammar 5 | from typing import Any 6 | 7 | FILENAME = 'cfgs.json' 8 | 9 | if __name__ == '__main__': 10 | CFGs: Dict[str, Any] 11 | with open(INPUTPATH(FILENAME), 'r') as file: 12 | CFGs = json.load(file) 13 | 14 | for cfgName, cfg in CFGs.items(): 15 | grammar = Grammar() 16 | grammar.loadFromJSONDict(cfg) 17 | if grammar.isRegular(): 18 | print(f"CFG for {cfgName} is regular") 19 | if grammar.isContextFree(): 20 | print(f"CFG for {cfgName} is context-free") 21 | if grammar.inCNF(): 22 | print(f"CFG for {cfgName} is in Chomsky Normal Form") 23 | if grammar.inGNF(): 24 | print(f"CFG for {cfgName} is in Greibach Normal Form") 25 | if grammar.isContextSensitive(): 26 | print(f"CFG for {cfgName} is context-sensitive") -------------------------------------------------------------------------------- /src/pykleene/utils.py: -------------------------------------------------------------------------------- 1 | def getAllStrings(alphabets: list, length: int) -> list[str]: 2 | if length < 0: 3 | raise Exception(f"Inside get_all_strings: variable length cannot be negative") 4 | result = [""] 5 | all_results = [""] 6 | for current_length in range(1, length + 1): 7 | new_strings = [] 8 | for string in result: 9 | for alphabet in alphabets: 10 | new_strings.append(string + alphabet) 11 | result = new_strings 12 | all_results.extend(result) 13 | return all_results 14 | 15 | def _getNextLetter(char: str) -> str: 16 | if char == 'Z': 17 | return 'A' 18 | if char == 'z': 19 | return 'a' 20 | if char == '9': 21 | return '0' 22 | return chr(ord(char) + 1) 23 | 24 | def randomDarkColor() -> str: 25 | from random import randint 26 | r = randint(50, 150) 27 | g = randint(50, 150) 28 | b = randint(50, 150) 29 | return f'#{r:02x}{g:02x}{b:02x}' -------------------------------------------------------------------------------- /docs/components/anchor.tsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import { cn } from "@/lib/utils"; 4 | import Link from "next/link"; 5 | import { usePathname } from "next/navigation"; 6 | import { ComponentProps } from "react"; 7 | 8 | type AnchorProps = ComponentProps & { 9 | absolute?: boolean; 10 | activeClassName?: string; 11 | disabled?: boolean; 12 | }; 13 | 14 | export default function Anchor({ 15 | absolute, 16 | className = "", 17 | activeClassName = "", 18 | disabled, 19 | children, 20 | ...props 21 | }: AnchorProps) { 22 | const path = usePathname(); 23 | let isMatch = absolute 24 | ? props.href.toString().split("/")[1] == path.split("/")[1] 25 | : path === props.href; 26 | 27 | if (props.href.toString().includes("http")) isMatch = false; 28 | 29 | if (disabled) 30 | return ( 31 |
{children}
32 | ); 33 | return ( 34 | 35 | {children} 36 | 37 | ); 38 | } 39 | -------------------------------------------------------------------------------- /tests/inputs/cfgs.json: -------------------------------------------------------------------------------- 1 | { 2 | "a^nb^m:m!=n": { 3 | "nonTerminals": ["S", "T", "U", "V"], 4 | "terminals": ["a", "b"], 5 | "productions": { 6 | "S": ["T", "U"], 7 | "T": ["VaT", "VaV", "TaV"], 8 | "U": ["VbU", "VbV", "UbV"], 9 | "V": ["aVbV", "bVaV", "ε"] 10 | }, 11 | "startSymbol": "S" 12 | }, 13 | "chomsky_1": { 14 | "nonTerminals": ["S", "A", "B", "C"], 15 | "terminals": ["a", "b"], 16 | "productions": { 17 | "S": ["AB", "BC"], 18 | "A": ["BA", "a"], 19 | "B": ["CC", "b"], 20 | "C": ["AB", "a"] 21 | }, 22 | "startSymbol": "S" 23 | }, 24 | "greibach_1": { 25 | "nonTerminals": ["S", "A", "B", "C"], 26 | "terminals": ["a", "b"], 27 | "productions": { 28 | "S": ["aAB", "bBC"], 29 | "A": ["aBA", "a"], 30 | "B": ["bCC", "b"], 31 | "C": ["aAB", "a"] 32 | }, 33 | "startSymbol": "S" 34 | } 35 | } -------------------------------------------------------------------------------- /docs/components/note.tsx: -------------------------------------------------------------------------------- 1 | import { cn } from "@/lib/utils"; 2 | import clsx from "clsx"; 3 | import { PropsWithChildren } from "react"; 4 | 5 | type NoteProps = PropsWithChildren & { 6 | title?: string; 7 | type?: "note" | "danger" | "warning" | "success"; 8 | }; 9 | 10 | export default function Note({ 11 | children, 12 | title = "Note", 13 | type = "note", 14 | }: NoteProps) { 15 | const noteClassNames = clsx({ 16 | "dark:bg-neutral-900 bg-neutral-100": type == "note", 17 | "dark:bg-red-950 bg-red-100 border-red-200 dark:border-red-900": 18 | type === "danger", 19 | "dark:bg-orange-950 bg-orange-100 border-orange-200 dark:border-orange-900": 20 | type === "warning", 21 | "dark:bg-green-950 bg-green-100 border-green-200 dark:border-green-900": 22 | type === "success", 23 | }); 24 | 25 | return ( 26 |
32 |

{title}:

{children} 33 |
34 | ); 35 | } 36 | -------------------------------------------------------------------------------- /docs/contents/docs/about/acknowledgements/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Acknowledgements 3 | --- 4 | 5 | I would like to express my deepest gratitude to the following individuals and resources that helped shape this project. 6 | 7 | A special thanks to Professor **Dr. Sumesh Divakaran**, whose classes were the sole reason I got interested in Formal Languages and Automata Theory. His teaching was both insightful and engaging, and his explanations of concepts like soundness and completeness truly inspired me to dive deeper into the subject. 8 | 9 | - **Peter Linz's** _An Introduction to Formal Languages and Automata_ 10 | - **Michael Sipser's** _Introduction to the Theory of Computation_ 11 | 12 | These two resources provided invaluable help in understanding the theoretical foundations that were essential for this project. 13 | 14 | Thanks to [**AriaDocs**](https://vercel.com/templates/next.js/documentation-template), a Next.js documentation template. It saved me a lot of time and effort, as I didn’t need to design a documentation site from scratch. 15 | 16 | Thanks to everyone who supported and encouraged me. -------------------------------------------------------------------------------- /docs/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Mohd. Nisab 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /docs/app/page.tsx: -------------------------------------------------------------------------------- 1 | import { buttonVariants } from "@/components/ui/button"; 2 | import { page_routes } from "@/lib/routes-config"; 3 | import Link from "next/link"; 4 | 5 | export default function Home() { 6 | return ( 7 |
8 |

9 | pykleene{" "} 10 |

11 |

12 | Simulate any automaton, master any grammar. 13 |

14 |

15 | Pykleene is a python library for building and simulating various types of automata and formal grammars, from finite state machines to Turing machines, as well as regular to unrestricted grammars. 16 |

17 |
18 | 25 | Get Started 26 | 27 |
28 |
29 | ); 30 | } 31 | -------------------------------------------------------------------------------- /docs/components/toc.tsx: -------------------------------------------------------------------------------- 1 | import { getDocsTocs } from "@/lib/markdown"; 2 | import { ScrollArea } from "@/components/ui/scroll-area"; 3 | import Link from "next/link"; 4 | import clsx from "clsx"; 5 | 6 | export default async function Toc({ path }: { path: string }) { 7 | const tocs = await getDocsTocs(path).then((tocs) => tocs.filter((toc) => toc.level <= 2)); 8 | 9 | return ( 10 |
11 |
12 |

On this page

13 | 14 |
15 | {tocs.map(({ href, level, text }) => ( 16 | 25 | {text} 26 | 27 | ))} 28 |
29 |
30 |
31 |
32 | ); 33 | } 34 | -------------------------------------------------------------------------------- /docs/contents/docs/getting-started/installation/react/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: React 3 | description: React guide for our application. 4 | --- 5 | 6 | ## Overview 7 | 8 | Welcome to the React guide. Here, you'll find all the information you need to get started. 9 | 10 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla ut metus ligula. Proin vehicula velit quis justo facilisis, in facilisis dolor interdum. 11 | 12 | ## Prerequisites 13 | 14 | Before proceeding, ensure you have the following: 15 | 16 | - Node.js installed 17 | - Basic knowledge of command-line interface (CLI) 18 | - A code editor (e.g., VSCode) 19 | 20 | ## Installation Steps 21 | 22 | 1. **Clone the Repository**: 23 | Clone the repository using the following command: 24 | 25 | ```bash 26 | git clone https://github.com/your-repo/your-project.git 27 | ``` 28 | 29 | 2. **Navigate to the Project Directory**: 30 | Use the `cd` command to navigate to your project directory: 31 | 32 | ```bash 33 | cd your-project 34 | ``` 35 | 36 | 3. **Install Dependencies**: 37 | Install the required dependencies using npm or yarn: 38 | 39 | ```bash 40 | npm install 41 | # or 42 | yarn install 43 | ``` 44 | 45 | ## Additional Information 46 | 47 | For more details, please refer to our official [documentation](#). 48 | -------------------------------------------------------------------------------- /docs/contents/docs/getting-started/installation/gatsby/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Gatsby 3 | description: Gatsby guide for our application. 4 | --- 5 | 6 | ## Overview 7 | 8 | Welcome to the Gatsby guide. Here, you'll find all the information you need to get started. 9 | 10 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla ut metus ligula. Proin vehicula velit quis justo facilisis, in facilisis dolor interdum. 11 | 12 | ## Prerequisites 13 | 14 | Before proceeding, ensure you have the following: 15 | 16 | - Node.js installed 17 | - Basic knowledge of command-line interface (CLI) 18 | - A code editor (e.g., VSCode) 19 | 20 | ## Installation Steps 21 | 22 | 1. **Clone the Repository**: 23 | Clone the repository using the following command: 24 | 25 | ```bash 26 | git clone https://github.com/your-repo/your-project.git 27 | ``` 28 | 29 | 2. **Navigate to the Project Directory**: 30 | Use the `cd` command to navigate to your project directory: 31 | 32 | ```bash 33 | cd your-project 34 | ``` 35 | 36 | 3. **Install Dependencies**: 37 | Install the required dependencies using npm or yarn: 38 | 39 | ```bash 40 | npm install 41 | # or 42 | yarn install 43 | ``` 44 | 45 | ## Additional Information 46 | 47 | For more details, please refer to our official [documentation](#). 48 | -------------------------------------------------------------------------------- /docs/contents/docs/getting-started/installation/laravel/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Laravel 3 | description: Laravel guide for our application. 4 | --- 5 | 6 | ## Overview 7 | 8 | Welcome to the Laravel guide. Here, you'll find all the information you need to get started. 9 | 10 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla ut metus ligula. Proin vehicula velit quis justo facilisis, in facilisis dolor interdum. 
11 | 12 | ## Prerequisites 13 | 14 | Before proceeding, ensure you have the following: 15 | 16 | - Node.js installed 17 | - Basic knowledge of command-line interface (CLI) 18 | - A code editor (e.g., VSCode) 19 | 20 | ## Installation Steps 21 | 22 | 1. **Clone the Repository**: 23 | Clone the repository using the following command: 24 | 25 | ```bash 26 | git clone https://github.com/your-repo/your-project.git 27 | ``` 28 | 29 | 2. **Navigate to the Project Directory**: 30 | Use the `cd` command to navigate to your project directory: 31 | 32 | ```bash 33 | cd your-project 34 | ``` 35 | 36 | 3. **Install Dependencies**: 37 | Install the required dependencies using npm or yarn: 38 | 39 | ```bash 40 | npm install 41 | # or 42 | yarn install 43 | ``` 44 | 45 | ## Additional Information 46 | 47 | For more details, please refer to our official [documentation](#). 48 | -------------------------------------------------------------------------------- /docs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "aria-docs", 3 | "version": "1.0.0", 4 | "private": true, 5 | "scripts": { 6 | "dev": "next dev", 7 | "build": "next build", 8 | "start": "next start", 9 | "lint": "next lint" 10 | }, 11 | "dependencies": { 12 | "@radix-ui/react-collapsible": "^1.1.0", 13 | "@radix-ui/react-dialog": "^1.1.1", 14 | "@radix-ui/react-dropdown-menu": "^2.1.1", 15 | "@radix-ui/react-scroll-area": "^1.1.0", 16 | "@radix-ui/react-slot": "^1.1.0", 17 | "class-variance-authority": "^0.7.0", 18 | "clsx": "^2.1.1", 19 | "lucide-react": "^0.435.0", 20 | "next": "^14.2.6", 21 | "next-mdx-remote": "^5.0.0", 22 | "next-themes": "^0.3.0", 23 | "react": "^18.3.1", 24 | "react-dom": "^18.3.1", 25 | "rehype-autolink-headings": "^7.1.0", 26 | "rehype-code-titles": "^1.2.0", 27 | "rehype-prism-plus": "^2.0.0", 28 | "rehype-slug": "^6.0.0", 29 | "remark-gfm": "^4.0.0", 30 | "tailwind-merge": "^2.5.2", 31 | "tailwindcss-animate": "^1.0.7" 32 | }, 33 | "devDependencies": { 34 | "@tailwindcss/typography": "^0.5.14", 35 | "@types/node": "^20", 36 | "@types/react": "^18", 37 | "@types/react-dom": "^18", 38 | "autoprefixer": "^10.4.20", 39 | "eslint": "^8", 40 | "eslint-config-next": "^14.2.6", 41 | "postcss": "^8", 42 | "tailwindcss": "^3.4.10", 43 | "typescript": "^5" 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /docs/components/leftbar.tsx: -------------------------------------------------------------------------------- 1 | import { ScrollArea } from "@/components/ui/scroll-area"; 2 | import { 3 | Sheet, 4 | SheetContent, 5 | SheetHeader, 6 | SheetTitle, 7 | SheetTrigger, 8 | } from "@/components/ui/sheet"; 9 | import { Button } from "./ui/button"; 10 | import { AlignLeftIcon } from "lucide-react"; 11 | import DocsMenu from "./docs-menu"; 12 | 13 | export function Leftbar() { 14 | return ( 15 | 20 | ); 21 | } 22 | 23 | export function SheetLeftbar() { 24 | return ( 25 | 26 | 27 | 30 | 31 | 32 | Menu 33 | 34 |

Menu

35 |
36 | 37 |
38 | 39 |
40 |
41 |
42 |
43 | ); 44 | } 45 | -------------------------------------------------------------------------------- /docs/components/theme-toggle.tsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import * as React from "react"; 4 | import { Moon, Sun } from "lucide-react"; 5 | import { useTheme } from "next-themes"; 6 | 7 | import { Button } from "@/components/ui/button"; 8 | import { 9 | DropdownMenu, 10 | DropdownMenuContent, 11 | DropdownMenuItem, 12 | DropdownMenuTrigger, 13 | } from "@/components/ui/dropdown-menu"; 14 | 15 | export function ModeToggle() { 16 | const { setTheme } = useTheme(); 17 | 18 | return ( 19 | 20 | 21 | 26 | 27 | 28 | setTheme("light")}> 29 | Light 30 | 31 | setTheme("dark")}> 32 | Dark 33 | 34 | setTheme("system")}> 35 | System 36 | 37 | 38 | 39 | ); 40 | } 41 | -------------------------------------------------------------------------------- /tests/dfaOper.py: -------------------------------------------------------------------------------- 1 | from pykleene.dfa import DFA 2 | from typing import Dict 3 | from config import INPUTPATH, OUTPUTDIR 4 | import json 5 | 6 | FILENAME = 'dfas.json' 7 | NEWOUTPUTDIR = lambda x: f"{OUTPUTDIR()}{x}/" 8 | 9 | if __name__ == '__main__': 10 | DFAs: Dict[str, DFA] 11 | with open(INPUTPATH(FILENAME), 'r') as file: 12 | DFAs = json.load(file) 13 | 14 | for dfaName, dfaData in DFAs.items(): 15 | dfa = DFA() 16 | dfa.loadFromJSONDict(dfaData) 17 | dfa.image(dir=OUTPUTDIR(), save=True) 18 | DFAs[dfaName] = dfa 19 | 20 | for dfaName1, dfa1 in DFAs.items(): 21 | print(f"Complement of {dfaName1} is:") 22 | dfa1.complement().image(dir=NEWOUTPUTDIR(f"complement/{dfaName1}"), save=True) 23 | for dfaName2, dfa2 in DFAs.items(): 24 | print(f"Intersection of {dfaName1} and {dfaName2} is:") 25 | dfa1.intersection(dfa2).image(dir=NEWOUTPUTDIR(f"intersection/{dfaName1 + dfaName2}"), save=True) 26 | print(f"Union of {dfaName1} and {dfaName2} is:") 27 | dfa1.union(dfa2).image(dir=NEWOUTPUTDIR(f"union/{dfaName1 + dfaName2}"), save=True) 28 | print(f"Difference of {dfaName1} and {dfaName2} is:") 29 | dfa1.difference(dfa2).image(dir=NEWOUTPUTDIR(f"difference/{dfaName1 + dfaName2}"), save=True) 30 | print(f"Symmetric Difference of {dfaName1} and {dfaName2} is:") 31 | dfa1.symmetricDifference(dfa2).image(dir=NEWOUTPUTDIR(f"symmetricDifference/{dfaName1 + dfaName2}"), save=True) -------------------------------------------------------------------------------- /tests/inputs/dfas.json: -------------------------------------------------------------------------------- 1 | { 2 | "DFA 1": { 3 | "states": ["A0", "A1", "A2", "A3", "A4", "A5"], 4 | "alphabet": ["0", "1"], 5 | "transitions": [ 6 | ["A0", "0", "A3"], 7 | ["A0", "1", "A1"], 8 | ["A1", "0", "A2"], 9 | ["A1", "1", "A5"], 10 | ["A2", "0", "A2"], 11 | ["A2", "1", "A5"], 12 | ["A3", "0", "A0"], 13 | ["A3", "1", "A4"], 14 | ["A4", "0", "A2"], 15 | ["A4", "1", "A5"], 16 | ["A5", "0", "A5"], 17 | ["A5", "1", "A5"] 18 | ], 19 | "startState": "A0", 20 | "finalStates": ["A1", "A4", "A2"] 21 | }, 22 | "DFA 2": { 23 | "states": ["B0", "B1", "B2"], 24 | "alphabet": ["0", "1"], 25 | "transitions": [ 26 | ["B0", "0", "B0"], 27 | ["B0", "1", "B1"], 28 | ["B1", "0", "B1"], 29 | ["B1", "1", "B2"], 30 | ["B2", "0", "B2"], 31 | ["B2", "1", "B2"] 32 | ], 33 | "startState": "B0", 34 | "finalStates": ["B1"] 35 | }, 36 | "DFA 3": { 37 | "states": ["C0", "C1", "C2"], 38 | "alphabet": ["0", "1"], 39 | "transitions": [ 40 | ["C0", "0", "C1"], 41 | ["C0", "1", "C2"], 42 | ["C1", 
"0", "C1"], 43 | ["C1", "1", "C0"], 44 | ["C2", "0", "C1"], 45 | ["C2", "1", "C2"] 46 | ], 47 | "startState": "C0", 48 | "finalStates": ["C0"] 49 | } 50 | } -------------------------------------------------------------------------------- /docs/app/docs/[[...slug]]/page.tsx: -------------------------------------------------------------------------------- 1 | import Toc from "@/components/toc"; 2 | import { page_routes } from "@/lib/routes-config"; 3 | import { notFound } from "next/navigation"; 4 | import { getDocsForSlug } from "@/lib/markdown"; 5 | import { Typography } from "@/components/typography"; 6 | 7 | type PageProps = { 8 | params: { slug: string[] }; 9 | }; 10 | 11 | export default async function DocsPage({ params: { slug = [] } }: PageProps) { 12 | const pathName = slug.join("/"); 13 | const res = await getDocsForSlug(pathName); 14 | 15 | if (!res) notFound(); 16 | return ( 17 |
18 |
19 | 20 |

{res.frontmatter.title}

21 |

22 | {res.frontmatter.description} 23 |

24 |
{res.content}
25 |
26 |
27 | 28 |
29 | ); 30 | } 31 | 32 | export async function generateMetadata({ params: { slug = [] } }: PageProps) { 33 | const pathName = slug.join("/"); 34 | const res = await getDocsForSlug(pathName); 35 | if (!res) return null; 36 | const { frontmatter } = res; 37 | return { 38 | title: frontmatter.title, 39 | description: frontmatter.description, 40 | }; 41 | } 42 | 43 | export function generateStaticParams() { 44 | return page_routes.map((item) => ({ 45 | slug: item.href.split("/").slice(1), 46 | })); 47 | } 48 | -------------------------------------------------------------------------------- /docs/contents/docs/miscellaneous/utils/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: utils 3 | description: A collection of utility functions for string manipulation, character processing, and color generation. 4 | --- 5 | 6 | ## getAllStrings 7 | Generates all possible strings of a given length from a set of alphabets. 8 | May become computationally expensive for large alphabets or lengths 9 | #### Parameters 10 | - `alphabets`: List of characters to generate strings from 11 | - `length`: Desired length of output strings 12 | #### Return Value 13 | List of all possible strings of specified length using given alphabets 14 | #### Example 15 | ```python showLineNumbers 16 | alphabets = ['a', 'b'] 17 | length = 2 18 | strings = getAllStrings(alphabets, length) 19 | # Returns: ['aa', 'ab', 'ba', 'bb'] 20 | ``` 21 | 22 | ## _getNextLetter 23 | Returns the next letter in sequence, with wraparound for 'Z' to 'A', 'z' to 'a', and '9' to '0'. 24 | #### Parameters 25 | - `char`: Single character input 26 | #### Return Value 27 | Next character in sequence 28 | #### Example 29 | ```python showLineNumbers 30 | next_char = _getNextLetter('Z') # Returns: 'A' 31 | next_char = _getNextLetter('z') # Returns: 'a' 32 | next_char = _getNextLetter('9') # Returns: '0' 33 | next_char = _getNextLetter('B') # Returns: 'C' 34 | ``` 35 | 36 | ## randomDarkColor 37 | Generates a random dark color in hexadecimal format. 38 | #### Parameters 39 | None 40 | #### Return Value 41 | String representing hexadecimal color code (e.g., '#4B6A8C') 42 | #### Example 43 | ```python showLineNumbers 44 | color = randomDarkColor() # Returns: e.g., '#4B6A8C' 45 | ``` -------------------------------------------------------------------------------- /docs/contents/docs/regular-expression/regex/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: RE 3 | description: Regular Expression (RE) class in pykleene 4 | --- 5 | 6 | ```python showLineNumbers 7 | OPERATORS = ['+', '.', '*'] 8 | PARENTHESES = ['(', ')'] 9 | PRECEDENCE = { 10 | '+': 1, 11 | '.': 2, 12 | '*': 3, 13 | '(': 0, 14 | ')': 0 15 | } 16 | ``` 17 | ## format 18 | Formats a raw regular expression by inserting implicit concatenation operators. 19 | #### Parameters 20 | - `regex`: Input regular expression string 21 | #### Returns 22 | Formatted regular expression string 23 | 24 | ## postfix 25 | Converts infix regular expression to postfix notation using the Shunting Yard algorithm. 26 | #### Parameters 27 | - `regex`: Input regular expression string 28 | #### Returns 29 | Postfix notation of the regular expression 30 | 31 | ## expressionTree 32 | Generates a binary tree representation of the regular expression. 
33 | #### Parameters 34 | - `regex`: Input regular expression string 35 | #### Returns 36 | Binary tree node representing the expression structure 37 | 38 | ## nfa 39 | Converts a regular expression to a Nondeterministic Finite Automaton (NFA). 40 | #### Parameters 41 | - `regex`: Input regular expression string 42 | - `method`: Conversion method (`'regexTree'` or `'postfix'`) 43 | #### Returns 44 | NFA representing the regular expression 45 | 46 | ## image 47 | Generates a graphical visualization of the regular expression. 48 | #### Parameters 49 | - `param`: Expression tree or postfix expression (type: Class `BinaryTreeNode`) 50 | - `type`: Visualization type (currently only supports `'regexTree'`) 51 | - `dir`: Directory to save the image 52 | - `save`: Boolean to determine if image should be saved -------------------------------------------------------------------------------- /docs/components/ui/scroll-area.tsx: -------------------------------------------------------------------------------- 1 | "use client" 2 | 3 | import * as React from "react" 4 | import * as ScrollAreaPrimitive from "@radix-ui/react-scroll-area" 5 | 6 | import { cn } from "@/lib/utils" 7 | 8 | const ScrollArea = React.forwardRef< 9 | React.ElementRef, 10 | React.ComponentPropsWithoutRef 11 | >(({ className, children, ...props }, ref) => ( 12 | 17 | 18 | {children} 19 | 20 | 21 | 22 | 23 | )) 24 | ScrollArea.displayName = ScrollAreaPrimitive.Root.displayName 25 | 26 | const ScrollBar = React.forwardRef< 27 | React.ElementRef, 28 | React.ComponentPropsWithoutRef 29 | >(({ className, orientation = "vertical", ...props }, ref) => ( 30 | 43 | 44 | 45 | )) 46 | ScrollBar.displayName = ScrollAreaPrimitive.ScrollAreaScrollbar.displayName 47 | 48 | export { ScrollArea, ScrollBar } 49 | -------------------------------------------------------------------------------- /docs/components/navbar.tsx: -------------------------------------------------------------------------------- 1 | import { ModeToggle } from "@/components/theme-toggle"; 2 | import { HexagonIcon } from "lucide-react"; 3 | import Link from "next/link"; 4 | import Anchor from "./anchor"; 5 | import { SheetLeftbar } from "./leftbar"; 6 | import { page_routes } from "@/lib/routes-config"; 7 | 8 | export const NAVLINKS = [ 9 | { 10 | title: "Documentation", 11 | href: `/docs${page_routes[0].href}`, 12 | }, 13 | { 14 | title: "GitHub", 15 | href: "https://github.com/krishnachandran-u/pykleene", 16 | }, 17 | ]; 18 | 19 | export function Navbar() { 20 | return ( 21 | 46 | ); 47 | } 48 | 49 | function Logo() { 50 | return ( 51 | 52 | 53 |

pykleene

54 | 55 | ); 56 | } 57 | -------------------------------------------------------------------------------- /docs/contents/docs/getting-started/installation/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Installation 3 | description: Installation guide for pykleene - A Python library for automata theory 4 | --- 5 | 6 | To get started with pykleene, follow these simple steps to set up the library in your Python environment. 7 | 8 | ## Prerequisites 9 | 10 | Before installing pykleene, make sure you have the following: 11 | - Python 3.8 or higher installed on your machine 12 | - pip package manager (usually comes with Python) 13 | - Virtual environment (recommended) 14 | - Graphviz (for visualizing automata) 15 | 16 | 17 | pykleene requires Python 3.8+ due to its dependency on modern Python features. 18 | Using older versions may result in compatibility issues or unexpected behavior. 19 | 20 | 21 | ## Installation Steps 22 | 23 | 1. Install Graphviz: 24 | Before proceeding with the installation of pykleene, make sure you have Graphviz installed on your system. Graphviz is required for visualizing automata. 25 | You can download and install Graphviz from [Graphviz's official website](https://graphviz.org/download/). 26 | 27 | 2. Create and activate a virtual environment (recommended): 28 | ```bash 29 | python -m venv pykleene-env 30 | source pykleene-env/bin/activate # On Windows: pykleene-env\Scripts\activate 31 | ``` 32 | 33 | 3. Install pykleene using pip: 34 | ```bash 35 | pip install pykleene 36 | ``` 37 | 38 | 4. For development installation, clone the repository: 39 | ```bash 40 | git clone https://github.com/your-repo/pykleene.git 41 | cd pykleene 42 | pip install -e . 43 | ``` 44 | 45 | ## Additional Information 46 | 47 | For more detailed instructions and troubleshooting tips, refer to our [documentation](#). 
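As a quick sanity check that everything is wired up, a minimal script along the following lines should run without errors once pykleene and Graphviz are installed. The two-state DFA here is purely illustrative:

```python
# Quick post-install check: build a tiny illustrative DFA and test a string.
from pykleene.dfa import DFA

dfa = DFA()
dfa.loadFromJSONDict({
    "states": ["q0", "q1"],
    "alphabet": ["a"],
    "transitions": [["q0", "a", "q1"], ["q1", "a", "q0"]],
    "startState": "q0",
    "finalStates": ["q0"]
})
print(dfa.accepts("aa"))  # should print True (even number of a's)
```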
48 | 49 | Common installation issues and solutions: 50 | - If you encounter permission errors, try using `pip install --user pykleene` 51 | - For system-wide installation (not recommended), use `sudo pip install pykleene` 52 | - To upgrade an existing installation: `pip install --upgrade pykleene` -------------------------------------------------------------------------------- /docs/components/ui/button.tsx: -------------------------------------------------------------------------------- 1 | import * as React from "react"; 2 | import { Slot } from "@radix-ui/react-slot"; 3 | import { cva, type VariantProps } from "class-variance-authority"; 4 | 5 | import { cn } from "@/lib/utils"; 6 | 7 | const buttonVariants = cva( 8 | "inline-flex items-center justify-center whitespace-nowrap rounded-md text-sm font-medium ring-offset-background transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50", 9 | { 10 | variants: { 11 | variant: { 12 | default: "bg-primary text-primary-foreground hover:bg-primary/90", 13 | destructive: 14 | "bg-destructive text-destructive-foreground hover:bg-destructive/90", 15 | outline: 16 | "border border-input bg-background hover:bg-accent hover:text-accent-foreground", 17 | secondary: 18 | "bg-secondary text-secondary-foreground hover:bg-secondary/80", 19 | ghost: "hover:bg-accent hover:text-accent-foreground", 20 | link: "text-primary underline-offset-4 hover:underline", 21 | }, 22 | size: { 23 | default: "h-10 px-4 py-2", 24 | sm: "h-9 rounded-md px-3", 25 | lg: "h-11 rounded-md px-8", 26 | icon: "h-9 w-9", 27 | xs: "h-7 rounded-md px-2", 28 | }, 29 | }, 30 | defaultVariants: { 31 | variant: "default", 32 | size: "default", 33 | }, 34 | } 35 | ); 36 | 37 | export interface ButtonProps 38 | extends React.ButtonHTMLAttributes, 39 | VariantProps { 40 | asChild?: boolean; 41 | } 42 | 43 | const Button = React.forwardRef( 44 | ({ className, variant, size, asChild = false, ...props }, ref) => { 45 | const Comp = asChild ? 
Slot : "button"; 46 | return ( 47 | 52 | ); 53 | } 54 | ); 55 | Button.displayName = "Button"; 56 | 57 | export { Button, buttonVariants }; 58 | -------------------------------------------------------------------------------- /docs/app/layout.tsx: -------------------------------------------------------------------------------- 1 | import type { Metadata } from "next"; 2 | import { ThemeProvider } from "@/components/theme-provider"; 3 | import { Navbar } from "@/components/navbar"; 4 | import localFont from "next/font/local"; 5 | import "./globals.css"; 6 | import { Courier_Prime } from "next/font/google"; 7 | 8 | const regularFont = localFont({ 9 | src: [ 10 | { 11 | path: "./../public/fonts/CourierPrime-Regular.ttf", 12 | weight: "400", 13 | style: "normal" 14 | }, 15 | { 16 | path: "./../public/fonts/CourierPrime-Italic.ttf", 17 | weight: "400", 18 | style: "italic" 19 | }, 20 | { 21 | path: "./../public/fonts/CourierPrime-Bold.ttf", 22 | weight: "700", 23 | style: "normal" 24 | }, 25 | { 26 | path: "./../public/fonts/CourierPrime-BoldItalic.ttf", 27 | weight: "700", 28 | style: "italic" 29 | } 30 | ], 31 | variable: "--font-regular", 32 | }); 33 | 34 | const codeFont = Courier_Prime({ 35 | subsets: ["latin"], 36 | variable: "--font-code", 37 | display: "swap", 38 | weight: ["400", "700"], 39 | }); 40 | 41 | 42 | export const metadata: Metadata = { 43 | title: "pykleene", 44 | description: 45 | "pykleene is a python library for building and simulating various types of automata and formal grammars, from finite state machines to Turing machines, as well as Type 0 to Type 3 grammars.", 46 | }; 47 | 48 | export default function RootLayout({ 49 | children, 50 | }: Readonly<{ 51 | children: React.ReactNode; 52 | }>) { 53 | return ( 54 | 55 | 59 | 65 | 66 |
67 | {children} 68 |
69 |
70 | 71 | 72 | ); 73 | } -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | ## AriaDocsLite - Documentation Template (Branch - `minimal-docs`) 2 | 3 | This branch has feature for document versioning 4 | 5 | This feature-packed documentation template, built with Next.js, offers a sleek and responsive design, perfect for all your project documentation needs. 6 | 7 | 8 | 9 | 10 |
11 | 12 | This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app). 13 | 14 | ## Getting Started 15 | 16 | First, run the development server: 17 | 18 | ```bash 19 | npm run dev 20 | # or 21 | yarn dev 22 | # or 23 | pnpm dev 24 | # or 25 | bun dev 26 | ``` 27 | 28 | Open [http://localhost:3000](http://localhost:3000) with your browser to see the result. 29 | 30 | You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file. 31 | 32 | This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font. 33 | 34 | ## Learn More 35 | 36 | To learn more about Next.js, take a look at the following resources: 37 | 38 | - [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API. 39 | - [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial. 40 | 41 | You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js/) - your feedback and contributions are welcome! 42 | 43 | ## Deploy on Vercel 44 | 45 | The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js. 46 | 47 | Check out our [Next.js deployment documentation](https://nextjs.org/docs/deployment) for more details. 48 | 49 | [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https://github.com/nisabmohd/Aria-Docs) 50 | 51 | Got it! Here's a way to present the features in a more structured and visually appealing way using a table: 52 | 53 | 54 | 55 | 56 | ## Features 57 | 58 | ```plaintext 59 | Features 60 | ├── MDX supported 61 | ├── Nested pages support 62 | ├── Syntax highlighting 63 | ├── Table of contents 64 | ├── Code line highlight & code title 65 | ├── Static site generation 66 | ├── Custom components 67 | └── Light mode & dark mode 68 | ``` 69 | 70 | 71 | -------------------------------------------------------------------------------- /docs/lib/markdown.ts: -------------------------------------------------------------------------------- 1 | import path from "path"; 2 | import { promises as fs } from "fs"; 3 | import { compileMDX } from "next-mdx-remote/rsc"; 4 | import remarkGfm from "remark-gfm"; 5 | import rehypePrism from "rehype-prism-plus"; 6 | import rehypeAutolinkHeadings from "rehype-autolink-headings"; 7 | import rehypeSlug from "rehype-slug"; 8 | import rehypeCodeTitles from "rehype-code-titles"; 9 | 10 | // custom components imports 11 | import Note from "@/components/note"; 12 | 13 | // add custom components 14 | const components = { 15 | Note, 16 | }; 17 | 18 | // can be used for other pages like blogs, Guides etc 19 | async function parseMdx(rawMdx: string) { 20 | return await compileMDX({ 21 | source: rawMdx, 22 | options: { 23 | parseFrontmatter: true, 24 | mdxOptions: { 25 | rehypePlugins: [ 26 | rehypeCodeTitles, 27 | rehypePrism, 28 | rehypeSlug, 29 | rehypeAutolinkHeadings, 30 | ], 31 | remarkPlugins: [remarkGfm], 32 | }, 33 | }, 34 | components, 35 | }); 36 | } 37 | 38 | // logic for docs 39 | 40 | type BaseMdxFrontmatter = { 41 | title: string; 42 | description: string; 43 | }; 44 | 45 | export async function getDocsForSlug(slug: string) { 46 | try { 47 | const contentPath = getDocsContentPath(slug); 48 
| const rawMdx = await fs.readFile(contentPath, "utf-8"); 49 | return await parseMdx(rawMdx); 50 | } catch (err) { 51 | console.log(err); 52 | } 53 | } 54 | 55 | export async function getDocsTocs(slug: string) { 56 | const contentPath = getDocsContentPath(slug); 57 | const rawMdx = await fs.readFile(contentPath, "utf-8"); 58 | // captures between ## - #### can modify accordingly 59 | const headingsRegex = /^(#{2,4})\s(.+)$/gm; 60 | let match; 61 | const extractedHeadings = []; 62 | while ((match = headingsRegex.exec(rawMdx)) !== null) { 63 | const headingLevel = match[1].length; 64 | const headingText = match[2].trim(); 65 | const slug = sluggify(headingText); 66 | extractedHeadings.push({ 67 | level: headingLevel, 68 | text: headingText, 69 | href: `#${slug}`, 70 | }); 71 | } 72 | return extractedHeadings; 73 | } 74 | 75 | function sluggify(text: string) { 76 | const slug = text.toLowerCase().replace(/\s+/g, "-"); 77 | return slug.replace(/[^a-z0-9-]/g, ""); 78 | } 79 | 80 | function getDocsContentPath(slug: string) { 81 | return path.join(process.cwd(), "/contents/docs/", `${slug}/index.mdx`); 82 | } 83 | -------------------------------------------------------------------------------- /docs/tailwind.config.ts: -------------------------------------------------------------------------------- 1 | import type { Config } from "tailwindcss"; 2 | 3 | const config = { 4 | darkMode: ["class"], 5 | content: [ 6 | "./pages/**/*.{ts,tsx}", 7 | "./components/**/*.{ts,tsx}", 8 | "./app/**/*.{ts,tsx}", 9 | "./src/**/*.{ts,tsx}", 10 | ], 11 | prefix: "", 12 | theme: { 13 | container: { 14 | center: true, 15 | padding: "2rem", 16 | screens: { 17 | "2xl": "1250px", 18 | }, 19 | }, 20 | extend: { 21 | colors: { 22 | border: "hsl(var(--border))", 23 | input: "hsl(var(--input))", 24 | ring: "hsl(var(--ring))", 25 | background: "hsl(var(--background))", 26 | foreground: "hsl(var(--foreground))", 27 | primary: { 28 | DEFAULT: "hsl(var(--primary))", 29 | foreground: "hsl(var(--primary-foreground))", 30 | }, 31 | secondary: { 32 | DEFAULT: "hsl(var(--secondary))", 33 | foreground: "hsl(var(--secondary-foreground))", 34 | }, 35 | destructive: { 36 | DEFAULT: "hsl(var(--destructive))", 37 | foreground: "hsl(var(--destructive-foreground))", 38 | }, 39 | muted: { 40 | DEFAULT: "hsl(var(--muted))", 41 | foreground: "hsl(var(--muted-foreground))", 42 | }, 43 | accent: { 44 | DEFAULT: "hsl(var(--accent))", 45 | foreground: "hsl(var(--accent-foreground))", 46 | }, 47 | popover: { 48 | DEFAULT: "hsl(var(--popover))", 49 | foreground: "hsl(var(--popover-foreground))", 50 | }, 51 | card: { 52 | DEFAULT: "hsl(var(--card))", 53 | foreground: "hsl(var(--card-foreground))", 54 | }, 55 | }, 56 | borderRadius: { 57 | lg: "var(--radius)", 58 | md: "calc(var(--radius) - 2px)", 59 | sm: "calc(var(--radius) - 4px)", 60 | }, 61 | fontFamily: { 62 | code: ["var(--font-code)"], 63 | regular: ["var(--font-regular)"], 64 | }, 65 | keyframes: { 66 | "accordion-down": { 67 | from: { height: "0" }, 68 | to: { height: "var(--radix-accordion-content-height)" }, 69 | }, 70 | "accordion-up": { 71 | from: { height: "var(--radix-accordion-content-height)" }, 72 | to: { height: "0" }, 73 | }, 74 | }, 75 | animation: { 76 | "accordion-down": "accordion-down 0.2s ease-out", 77 | "accordion-up": "accordion-up 0.2s ease-out", 78 | }, 79 | }, 80 | }, 81 | plugins: [require("tailwindcss-animate"), require("@tailwindcss/typography")], 82 | } satisfies Config; 83 | 84 | export default config; 85 | 
-------------------------------------------------------------------------------- /docs/contents/docs/state-machines/pda/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: PDA 3 | description: Pushdown Automaton (PDA) class in pykleene 4 | --- 5 | 6 | ```python showLineNumbers 7 | states: set[str] 8 | inputAlphabet: set[str] 9 | stackAlphabet: set[str] 10 | transitions: dict[tuple[str, str, str], set[tuple[str, str]]] 11 | startState: str 12 | initialStackSymbol: str 13 | finalStates: set[str] 14 | ``` 15 | ## CONSTRUCTOR 16 | Initialize a new Pushdown Automaton with specified parameters. 17 | #### Parameters 18 | - `states`: Set of states in the PDA 19 | - `inputAlphabet`: Set of input symbols 20 | - `stackAlphabet`: Set of stack symbols 21 | - `transitions`: Mapping of state-input-stack triples to sets of next state and stack string configurations 22 | - `startState`: Initial state of the PDA 23 | - `initialStackSymbol`: Initial symbol on the stack 24 | - `finalStates`: Set of accepting states 25 | #### Return Value 26 | Creates a new PDA object with specified configuration 27 | 28 | ## loadFromJSONDict 29 | Load PDA configuration from a JSON dictionary. 30 | ```python showLineNumbers 31 | # Example 32 | { 33 | "PDA_NAME": { 34 | "states": ["q0", "q1", "q2"], 35 | "inputAlphabet": ["a", "b"], 36 | "stackAlphabet": ["A", "B", "⊥"], 37 | "transitions": [ 38 | ["q0", "a", "⊥", "q0", "A⊥"], 39 | ["q0", "a", "A", "q0", "AA"], 40 | ["q0", "b", "A", "q1", "ε"], 41 | ["q1", "b", "A", "q1", "ε"], 42 | ["q1", "a", "⊥", "q2", "⊥"] 43 | ], 44 | "startState": "q0", 45 | "initialStackSymbol": "⊥", 46 | "finalStates": ["q2"] 47 | } 48 | } 49 | ``` 50 | #### Parameters 51 | - `jsonDict`: Dictionary containing PDA configuration 52 | #### Return Value 53 | Populates the PDA object with configuration from the dictionary 54 | 55 | ## isValid 56 | Validate the PDA configuration. 57 | #### Parameters 58 | None 59 | #### Return Value 60 | Boolean indicating whether the PDA configuration is valid 61 | 62 | ## accepts 63 | Determine if a given string is accepted by the PDA. 64 | #### Parameters 65 | - `inputString`: Input string to be processed 66 | #### Return Value 67 | Boolean indicating whether the input string is accepted by the PDA 68 | 69 | ## isDeterministic 70 | Check if the PDA is deterministic. 71 | #### Parameters 72 | None 73 | #### Return Value 74 | Boolean indicating whether the PDA is deterministic 75 | 76 | ## image 77 | Generate a graphical representation of the PDA. 
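For example, the PDA defined in the `loadFromJSONDict` example above can be loaded, queried, and rendered roughly as follows. This is a sketch: the output directory `out` is hypothetical, and the comments describe what the transition table suggests rather than verified output.

```python showLineNumbers
# Sketch: load the example PDA from this page, test a string, and render it.
from pykleene.pda import PDA

pda = PDA()
pda.loadFromJSONDict({
    "states": ["q0", "q1", "q2"],
    "inputAlphabet": ["a", "b"],
    "stackAlphabet": ["A", "B", "⊥"],
    "transitions": [
        ["q0", "a", "⊥", "q0", "A⊥"],
        ["q0", "a", "A", "q0", "AA"],
        ["q0", "b", "A", "q1", "ε"],
        ["q1", "b", "A", "q1", "ε"],
        ["q1", "a", "⊥", "q2", "⊥"]
    ],
    "startState": "q0",
    "initialStackSymbol": "⊥",
    "finalStates": ["q2"]
})

print(pda.isValid())
print(pda.isDeterministic())
print(pda.accepts("aaabbba"))       # a^n b^n followed by 'a' should reach the final state q2
pda.image(dir="out", save=True)     # Graphviz rendering; 'out' is a hypothetical directory
```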
78 | #### Parameters 79 | - `dir`: Directory to save the image 80 | - `save`: Boolean to determine if image should be saved 81 | - `monochrome`: Boolean to generate a black and white visualization 82 | #### Return Value 83 | Graphviz Digraph object representing the PDA 84 | -------------------------------------------------------------------------------- /docs/components/sublink.tsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import { EachRoute } from "@/lib/routes-config"; 4 | import Anchor from "./anchor"; 5 | import { 6 | Collapsible, 7 | CollapsibleContent, 8 | CollapsibleTrigger, 9 | } from "@/components/ui/collapsible"; 10 | import { cn } from "@/lib/utils"; 11 | import { SheetClose } from "@/components/ui/sheet"; 12 | import { Button } from "./ui/button"; 13 | import { ChevronDown, ChevronRight } from "lucide-react"; 14 | import { useState } from "react"; 15 | 16 | export default function SubLink({ 17 | title, 18 | href, 19 | items, 20 | noLink, 21 | level, 22 | isSheet, 23 | }: EachRoute & { level: number; isSheet: boolean }) { 24 | const [isOpen, setIsOpen] = useState(level == 0); 25 | 26 | const Comp = ( 27 | 28 | {title} 29 | 30 | ); 31 | 32 | const titleOrLink = !noLink ? ( 33 | isSheet ? ( 34 | {Comp} 35 | ) : ( 36 | Comp 37 | ) 38 | ) : ( 39 |

{title}

40 | ); 41 | 42 | if (!items) { 43 | return
{titleOrLink}
; 44 | } 45 | 46 | return ( 47 |
48 | 49 |
50 | {titleOrLink} 51 | 52 | 64 | 65 |
66 | 67 |
0 && "pl-4 border-l ml-1" 71 | )} 72 | > 73 | {items?.map((innerLink) => { 74 | const modifiedItems = { 75 | ...innerLink, 76 | href: `${href + innerLink.href}`, 77 | level: level + 1, 78 | isSheet, 79 | }; 80 | return ; 81 | })} 82 |
83 |
84 |
85 |
86 | ); 87 | } 88 | -------------------------------------------------------------------------------- /docs/contents/docs/state-machines/tm/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: TM 3 | description: Turing Machine (TM) class in pykleene 4 | --- 5 | 6 | ```python showLineNumbers 7 | states: set[str] 8 | inputAlphabet: set[str] 9 | tapeAlphabet: set[str] 10 | startState: str 11 | transitions: dict[tuple[str, str], tuple[str, str, str]] 12 | leftEndMarker: str 13 | blankSymbol: str 14 | acceptState: str 15 | rejectState: str 16 | ``` 17 | ## CONSTRUCTOR 18 | Initialize a new Turing Machine with specified parameters. 19 | #### Parameters 20 | - `states`: Set of states in the TM 21 | - `inputAlphabet`: Set of input symbols 22 | - `tapeAlphabet`: Set of tape symbols 23 | - `startState`: Initial state of the TM 24 | - `transitions`: Mapping of state-symbol pairs to next state, write symbol, and direction 25 | - `leftEndMarker`: Marker for the left end of the tape 26 | - `blankSymbol`: Symbol used to represent empty tape cells 27 | - `acceptState`: State indicating successful computation 28 | - `rejectState`: State indicating failed computation 29 | #### Return Value 30 | Creates a new TM object with specified configuration 31 | 32 | ## loadFromJSONDict 33 | Load TM configuration from a JSON dictionary. 34 | ```python showLineNumbers 35 | # Example 36 | { 37 | "TM_ADD": { 38 | "states": ["q0", "q1", "q2", "t", "r"], 39 | "inputAlphabet": ["0", "1"], 40 | "tapeAlphabet": ["0", "1", "#", "♭", "⊢"], 41 | "startState": "q0", 42 | "transitions": [ 43 | ["q0", "⊢", "q0", "⊢", "R"], 44 | ["q0", "1", "q0", "1", "R"], 45 | ["q0", "#", "q0", "1", "R"], 46 | ["q0", "♭", "q1", "♭", "L"], 47 | ["q1", "1", "q2", "♭", "L"], 48 | ["q2", "1", "q2", "1", "L"], 49 | ["q2", "⊢", "t", "⊢", "S"] 50 | ], 51 | "leftEndMarker": "⊢", 52 | "blankSymbol": "♭", 53 | "acceptState": "t", 54 | "rejectState": "r" 55 | } 56 | } 57 | ``` 58 | #### Parameters 59 | - `jsonDict`: Dictionary containing TM configuration 60 | #### Return Value 61 | Populates the TM object with configuration from the dictionary 62 | 63 | ## isValid 64 | Validate the TM configuration. 65 | #### Parameters 66 | None 67 | #### Return Value 68 | Boolean indicating whether the TM configuration is valid 69 | 70 | ## accepts 71 | Determine if a given string is accepted by the TM. 72 | #### Parameters 73 | - `inputString`: Input string to be processed 74 | - `verbose`: Boolean to enable detailed computation tracing 75 | #### Return Value 76 | Tuple of (acceptance boolean, final tape contents) 77 | 78 | ## image 79 | Generate a graphical representation of the TM. 
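For example, the `TM_ADD` machine from the `loadFromJSONDict` example above can be loaded and run on a small unary-addition instance roughly as follows (a sketch; `out` is a hypothetical output directory):

```python showLineNumbers
# Sketch: the "TM_ADD" machine from the example above, run on '11#1'.
from pykleene.tm import TM

tm_add = {
    "states": ["q0", "q1", "q2", "t", "r"],
    "inputAlphabet": ["0", "1"],
    "tapeAlphabet": ["0", "1", "#", "♭", "⊢"],
    "startState": "q0",
    "transitions": [
        ["q0", "⊢", "q0", "⊢", "R"],
        ["q0", "1", "q0", "1", "R"],
        ["q0", "#", "q0", "1", "R"],
        ["q0", "♭", "q1", "♭", "L"],
        ["q1", "1", "q2", "♭", "L"],
        ["q2", "1", "q2", "1", "L"],
        ["q2", "⊢", "t", "⊢", "S"]
    ],
    "leftEndMarker": "⊢",
    "blankSymbol": "♭",
    "acceptState": "t",
    "rejectState": "r"
}

tm = TM()
tm.loadFromJSONDict(tm_add)
accepted, tape = tm.accepts("11#1", verbose=True)   # unary addition of 2 and 1
print(accepted)   # expected: True
print(tape)       # the tape should now hold three consecutive 1s
tm.image(dir="out", save=True)   # 'out' is a hypothetical output directory
```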
80 | #### Parameters 81 | - `dir`: Directory to save the image 82 | - `save`: Boolean to determine if image should be saved 83 | - `monochrome`: Boolean to generate a black and white visualization 84 | #### Return Value 85 | Graphviz Digraph object representing the Turing Machine -------------------------------------------------------------------------------- /docs/lib/routes-config.ts: -------------------------------------------------------------------------------- 1 | // for page navigation & to sort on leftbar 2 | 3 | export type EachRoute = { 4 | title: string; 5 | href: string; 6 | noLink?: true; 7 | items?: EachRoute[]; 8 | }; 9 | 10 | export const ROUTES: EachRoute[] = [ 11 | { 12 | title: "Getting Started", 13 | href: "/getting-started", 14 | noLink: true, 15 | items: [ 16 | { title: "Introduction", href: "/introduction" }, 17 | { title: "Installation", href: "/installation" }, 18 | /* 19 | { 20 | title: "Installation", 21 | href: "/installation", 22 | items: [ 23 | { title: "Laravel", href: "/laravel" }, 24 | { title: "React", href: "/react" }, 25 | { title: "Gatsby", href: "/gatsby" }, 26 | ], 27 | }, 28 | */ 29 | ], 30 | }, 31 | { 32 | title: "State Machines", 33 | href: "/state-machines", 34 | noLink: true, 35 | items: [ 36 | { title: "Deterministic Finite Automaton", href: "/dfa" }, 37 | { title: "Nondeterministic Finite Automaton", href: "/nfa" }, 38 | { title: "Pushdown Automaton", href: "/pda" }, 39 | { title: "Linear Bounded Automaton", href: "/lba" }, 40 | { title: "Turing Machine", href: "/tm" }, 41 | ], 42 | }, 43 | { 44 | title: "Grammars", 45 | href: "/grammars", 46 | noLink: true, 47 | items: [ 48 | { title: "Grammar", href: "/grammar" }, 49 | ], 50 | }, 51 | { 52 | title: "Regular Expression", 53 | href: "/regular-expression", 54 | noLink: true, 55 | items: [ 56 | { title: "Regular Expression", href: "/regex" }, 57 | ], 58 | }, 59 | { 60 | title: "Miscellaneous", 61 | href: "/miscellaneous", 62 | noLink: true, 63 | items: [ 64 | { title: "Utils", href: "/utils" }, 65 | { title: "Symbols", href: "/symbols" }, 66 | { title: "Helpers", href: "/helpers" }, 67 | ], 68 | }, 69 | { 70 | title: "Examples", 71 | href: "/examples", 72 | noLink: true, 73 | items: [ 74 | { title: "Code Examples", href: "/code-examples" }, 75 | ], 76 | }, 77 | { 78 | title: "About", 79 | href: "/about", 80 | noLink: true, 81 | items: [ 82 | { title: "Acknowledegments", href: "/acknowledgements" }, 83 | { title: "About Me", href: "/me" }, 84 | { title: "What's More", href: "/whats-more" }, 85 | ], 86 | }, 87 | ]; 88 | 89 | type Page = { title: string; href: string }; 90 | 91 | function getRecurrsiveAllLinks(node: EachRoute) { 92 | const ans: Page[] = []; 93 | if (!node.noLink) { 94 | ans.push({ title: node.title, href: node.href }); 95 | } 96 | node.items?.forEach((subNode) => { 97 | const temp = { ...subNode, href: `${node.href}${subNode.href}` }; 98 | ans.push(...getRecurrsiveAllLinks(temp)); 99 | }); 100 | return ans; 101 | } 102 | 103 | export const page_routes = ROUTES.map((it) => getRecurrsiveAllLinks(it)).flat(); 104 | -------------------------------------------------------------------------------- /docs/contents/docs/state-machines/lba/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: LBA 3 | description: Linear Bounded Automaton (LBA) class in pykleene 4 | --- 5 | 6 | ```python showLineNumbers 7 | states: set[str] 8 | inputAlphabet: set[str] 9 | tapeAlphabet: set[str] 10 | startState: str 11 | transitions: dict[tuple[str, str], 
tuple[str, str, str]] 12 | leftEndMarker: str 13 | rightEndMarker: str 14 | blankSymbol: str 15 | acceptState: str 16 | rejectState: str 17 | ``` 18 | 19 | ## CONSTRUCTOR 20 | Initialize a new Linear Bounded Automaton with specified parameters. 21 | #### Parameters 22 | - `states`: Set of states in the LBA 23 | - `inputAlphabet`: Set of input symbols 24 | - `tapeAlphabet`: Set of tape symbols 25 | - `startState`: Initial state of the LBA 26 | - `transitions`: Mapping of state-symbol pairs to next state, write symbol, and direction 27 | - `leftEndMarker`: Marker for the left end of the tape 28 | - `rightEndMarker`: Marker for the right end of the tape 29 | - `blankSymbol`: Symbol used to represent empty tape cells 30 | - `acceptState`: State indicating successful computation 31 | - `rejectState`: State indicating failed computation 32 | #### Return Value 33 | Creates a new LBA object with specified configuration 34 | 35 | ## loadFromJSONDict 36 | Load LBA configuration from a JSON dictionary. 37 | ```python showLineNumbers 38 | # Example 39 | { 40 | "LBA_ADD": { 41 | "states": ["q0", "q1", "q2", "t", "r"], 42 | "inputAlphabet": ["0", "1"], 43 | "tapeAlphabet": ["0", "1", "#", "♭", "⊢", "⊣"], 44 | "startState": "q0", 45 | "transitions": [ 46 | ["q0", "⊢", "q0", "⊢", "R"], 47 | ["q0", "1", "q0", "1", "R"], 48 | ["q0", "#", "q0", "1", "R"], 49 | ["q0", "♭", "q1", "♭", "L"], 50 | ["q1", "1", "q2", "♭", "L"], 51 | ["q2", "1", "q2", "1", "L"], 52 | ["q2", "⊢", "t", "⊢", "S"] 53 | ], 54 | "leftEndMarker": "⊢", 55 | "rightEndMarker": "⊣", 56 | "blankSymbol": "♭", 57 | "acceptState": "t", 58 | "rejectState": "r" 59 | } 60 | } 61 | ``` 62 | #### Parameters 63 | - `jsonDict`: Dictionary containing LBA configuration 64 | #### Return Value 65 | Populates the LBA object with configuration from the dictionary 66 | 67 | ## isValid 68 | Validate the LBA configuration. 69 | #### Parameters 70 | None 71 | #### Return Value 72 | Boolean indicating whether the LBA configuration is valid 73 | 74 | ## accepts 75 | Determine if a given string is accepted by the LBA. 76 | #### Parameters 77 | - `inputString`: Input string to be processed 78 | - `verbose`: Boolean to enable detailed computation tracing 79 | - `tapeLenFunc`: Function to determine tape length based on input length 80 | #### Return Value 81 | Tuple of (acceptance boolean, final tape contents) 82 | 83 | ## image 84 | Generate a graphical representation of the LBA. 85 | #### Parameters 86 | - `dir`: Directory to save the image 87 | - `save`: Boolean to determine if image should be saved 88 | - `monochrome`: Boolean to generate a black and white visualization 89 | #### Return Value 90 | Graphviz Digraph object representing the Linear Bounded Automaton -------------------------------------------------------------------------------- /docs/components/ui/table.tsx: -------------------------------------------------------------------------------- 1 | import * as React from "react" 2 | 3 | import { cn } from "@/lib/utils" 4 | 5 | const Table = React.forwardRef< 6 | HTMLTableElement, 7 | React.HTMLAttributes 8 | >(({ className, ...props }, ref) => ( 9 |
10 | 15 | 16 | )) 17 | Table.displayName = "Table" 18 | 19 | const TableHeader = React.forwardRef< 20 | HTMLTableSectionElement, 21 | React.HTMLAttributes 22 | >(({ className, ...props }, ref) => ( 23 | 24 | )) 25 | TableHeader.displayName = "TableHeader" 26 | 27 | const TableBody = React.forwardRef< 28 | HTMLTableSectionElement, 29 | React.HTMLAttributes 30 | >(({ className, ...props }, ref) => ( 31 | 36 | )) 37 | TableBody.displayName = "TableBody" 38 | 39 | const TableFooter = React.forwardRef< 40 | HTMLTableSectionElement, 41 | React.HTMLAttributes 42 | >(({ className, ...props }, ref) => ( 43 | tr]:last:border-b-0", 47 | className 48 | )} 49 | {...props} 50 | /> 51 | )) 52 | TableFooter.displayName = "TableFooter" 53 | 54 | const TableRow = React.forwardRef< 55 | HTMLTableRowElement, 56 | React.HTMLAttributes 57 | >(({ className, ...props }, ref) => ( 58 | 66 | )) 67 | TableRow.displayName = "TableRow" 68 | 69 | const TableHead = React.forwardRef< 70 | HTMLTableCellElement, 71 | React.ThHTMLAttributes 72 | >(({ className, ...props }, ref) => ( 73 |
81 | )) 82 | TableHead.displayName = "TableHead" 83 | 84 | const TableCell = React.forwardRef< 85 | HTMLTableCellElement, 86 | React.TdHTMLAttributes 87 | >(({ className, ...props }, ref) => ( 88 | 93 | )) 94 | TableCell.displayName = "TableCell" 95 | 96 | const TableCaption = React.forwardRef< 97 | HTMLTableCaptionElement, 98 | React.HTMLAttributes 99 | >(({ className, ...props }, ref) => ( 100 |
105 | )) 106 | TableCaption.displayName = "TableCaption" 107 | 108 | export { 109 | Table, 110 | TableHeader, 111 | TableBody, 112 | TableFooter, 113 | TableHead, 114 | TableRow, 115 | TableCell, 116 | TableCaption, 117 | } 118 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # Pykleene 2 | 3 | Pykleene is a powerful Python library designed to make working with automata theory and formal languages accessible and intuitive. Whether you're a student learning theoretical computer science, a researcher working on language processing, or a developer implementing formal verification systems, pykleene provides the tools you need to work with various types of automata and grammars. 4 | 5 | See [pykleene documentation](https://pykleene.vercel.app/). 6 | 7 | ## Getting Started 8 | 9 | To begin using pykleene, follow these simple steps: 10 | - make sure you have `Graphviz` installed on your system. 11 | - Install pykleene using pip: `pip install pykleene` 12 | - Import the necessary components into your Python script 13 | - Start building and simulating automata! 14 | 15 | Pykleene supports a wide range of formal language concepts, from basic finite automata to complex Turing machines. The library is designed with both educational and practical applications in mind, featuring clear APIs and comprehensive documentation to help you get started quickly. 16 | 17 | ## Key Features 18 | 19 | > "Automata theory is a fundamental pillar of computer science, and pykleene aims to make these concepts tangible and practical through clean, intuitive Python implementations." 20 | 21 | Our library includes support for: 22 | - Deterministic Finite Automata (DFA) 23 | - Non-deterministic Finite Automata (NFA) 24 | - Regular Expressions 25 | - Pushdown Automata 26 | - Linear Bounded Automata and Turing Machines 27 | - Various Grammar Types (Regular to Unrestricted) 28 | - Regular Expressions 29 | 30 | ## Code Example 31 | 32 | Here's a simple example of checking the isomorphism of two DFAs: 33 | 34 | ```python showLineNumbers 35 | from pykleene.dfa import DFA 36 | from typing import Dict 37 | import json 38 | import os 39 | 40 | # Define input and output paths directly 41 | INPUT_FILE_PATH = 'path/to/your/input/dfas.json' # Replace with actual path to the input file 42 | OUTPUT_DIR_PATH = 'path/to/output/directory' # Replace with actual output directory path 43 | 44 | FILENAME = 'dfas.json' 45 | 46 | if __name__ == '__main__': 47 | DFAs: Dict[str, DFA] 48 | 49 | # Load the DFAs from the JSON file 50 | with open(INPUT_FILE_PATH, 'r') as file: 51 | DFAs = json.load(file) 52 | 53 | # Process each DFA 54 | for dfaName, dfaData in DFAs.items(): 55 | dfa = DFA() 56 | dfa.loadFromJSONDict(dfaData) 57 | 58 | # Generate the DFA image and save it in the output directory 59 | output_file_path = os.path.join(OUTPUT_DIR_PATH, f'{dfaName}_dfa.png') 60 | dfa.image(dir=output_file_path, save=True) 61 | 62 | # Update the DFAs dictionary with the processed DFA 63 | DFAs[dfaName] = dfa 64 | 65 | # Compare the DFAs for isomorphism (equivalence) 66 | for dfaName1, dfa1 in DFAs.items(): 67 | for dfaName2, dfa2 in DFAs.items(): 68 | if dfaName1 != dfaName2: 69 | if dfa1.isomorphic(dfa2): 70 | print(f"{dfaName1} is equivalent to {dfaName2}") 71 | else: 72 | print(f"{dfaName1} is not equivalent to {dfaName2}") 73 | ``` 74 | 75 | ## Conclusion 76 | 77 | Pykleene strives to be your go-to library for working with 
formal languages and automata in Python. Whether you're teaching these concepts, learning them, or applying them in practice, our library provides the flexibility and functionality you need. Check out our other documentation sections to dive deeper into specific features and use cases. -------------------------------------------------------------------------------- /docs/contents/docs/getting-started/introduction/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Introduction 3 | description: An introduction to pykleene - A comprehensive Python library for automata theory and formal languages 4 | --- 5 | 6 | Pykleene is a powerful Python library designed to make working with automata theory and formal languages accessible and intuitive. Whether you're a student learning theoretical computer science, a researcher working on language processing, or a developer implementing formal verification systems, pykleene provides the tools you need to work with various types of automata and grammars. 7 | 8 | ## Getting Started 9 | 10 | To begin using pykleene, follow these simple steps: 11 | - Make sure you have `Graphviz` installed on your system. 12 | - Install pykleene using pip: `pip install pykleene` 13 | - Import the necessary components into your Python script 14 | - Start building and simulating automata! 15 | 16 | Pykleene supports a wide range of formal language concepts, from basic finite automata to complex Turing machines. The library is designed with both educational and practical applications in mind, featuring clear APIs and comprehensive documentation to help you get started quickly. 17 | 18 | ## Key Features 19 | 20 | > "Automata theory is a fundamental pillar of computer science, and pykleene aims to make these concepts tangible and practical through clean, intuitive Python implementations." 
21 | 22 | Our library includes support for: 23 | - Deterministic Finite Automata (DFA) 24 | - Non-deterministic Finite Automata (NFA) 25 | - Regular Expressions 26 | - Pushdown Automata 27 | - Linear Bounded Automata and Turing Machines 28 | - Various Grammar Types (Regular to Unrestricted) 29 | - Regular Expressions 30 | 31 | ## Code Example 32 | 33 | Here's a simple example of checking the isomorphism of two DFAs: 34 | 35 | ```python showLineNumbers 36 | from pykleene.dfa import DFA 37 | from typing import Dict 38 | import json 39 | import os 40 | 41 | # Define input and output paths directly 42 | INPUT_FILE_PATH = 'path/to/your/input/dfas.json' # Replace with actual path to the input file 43 | OUTPUT_DIR_PATH = 'path/to/output/directory' # Replace with actual output directory path 44 | 45 | FILENAME = 'dfas.json' 46 | 47 | if __name__ == '__main__': 48 | DFAs: Dict[str, DFA] 49 | 50 | # Load the DFAs from the JSON file 51 | with open(INPUT_FILE_PATH, 'r') as file: 52 | DFAs = json.load(file) 53 | 54 | # Process each DFA 55 | for dfaName, dfaData in DFAs.items(): 56 | dfa = DFA() 57 | dfa.loadFromJSONDict(dfaData) 58 | 59 | # Generate the DFA image and save it in the output directory 60 | output_file_path = os.path.join(OUTPUT_DIR_PATH, f'{dfaName}_dfa.png') 61 | dfa.image(dir=output_file_path, save=True) 62 | 63 | # Update the DFAs dictionary with the processed DFA 64 | DFAs[dfaName] = dfa 65 | 66 | # Compare the DFAs for isomorphism (equivalence) 67 | for dfaName1, dfa1 in DFAs.items(): 68 | for dfaName2, dfa2 in DFAs.items(): 69 | if dfaName1 != dfaName2: 70 | if dfa1.isomorphic(dfa2): 71 | print(f"{dfaName1} is equivalent to {dfaName2}") 72 | else: 73 | print(f"{dfaName1} is not equivalent to {dfaName2}") 74 | ``` 75 | 76 | ## Conclusion 77 | 78 | Pykleene strives to be your go-to library for working with formal languages and automata in Python. Whether you're teaching these concepts, learning them, or applying them in practice, our library provides the flexibility and functionality you need. Check out our other documentation sections to dive deeper into specific features and use cases. -------------------------------------------------------------------------------- /docs/contents/docs/grammars/grammar/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Grammar 3 | description: Grammar class in pykleene 4 | --- 5 | 6 | ```python showLineNumbers 7 | nonTerminals: set[str] 8 | terminals: set[str] 9 | productions: dict[str, set[str]] 10 | startSymbol: str 11 | ``` 12 | 13 | ## CONSTRUCTOR 14 | Initialize a new Grammar with specified parameters. 15 | #### Parameters 16 | - `nonTerminals`: Set of non-terminal symbols 17 | - `terminals`: Set of terminal symbols 18 | - `productions`: Dictionary mapping left-hand sides to sets of right-hand sides 19 | - `startSymbol`: Starting symbol of the grammar 20 | #### Return Value 21 | Creates a new Grammar object with specified configuration 22 | 23 | ## loadFromJSONDict 24 | Load grammar configuration from a JSON dictionary. 
25 | ```python showLineNumbers 26 | # Example 27 | { 28 | "nonTerminals": ["S", "A", "B"], 29 | "terminals": ["a", "b"], 30 | "productions": { 31 | "S": ["aA", "bB"], 32 | "A": ["aA", "a"], 33 | "B": ["bB", "b"] 34 | }, 35 | "startSymbol": "S" 36 | } 37 | ``` 38 | #### Parameters 39 | - `data`: Dictionary containing grammar configuration 40 | #### Return Value 41 | Populates the Grammar object with configuration from the dictionary 42 | 43 | ## isValid 44 | Validate the grammar configuration. 45 | #### Parameters 46 | None 47 | #### Return Value 48 | Boolean indicating whether the grammar configuration is valid 49 | 50 | ## isLeftLinear 51 | Check if the grammar is left-linear. 52 | #### Parameters 53 | None 54 | #### Return Value 55 | Boolean indicating whether the grammar is left-linear 56 | 57 | ## isRightLinear 58 | Check if the grammar is right-linear. 59 | #### Parameters 60 | None 61 | #### Return Value 62 | Boolean indicating whether the grammar is right-linear 63 | 64 | ## isRegular 65 | Check if the grammar is regular (either left-linear or right-linear). 66 | #### Parameters 67 | None 68 | #### Return Value 69 | Boolean indicating whether the grammar is regular 70 | 71 | ## reverse 72 | Create a reversed version of the grammar. 73 | #### Parameters 74 | None 75 | #### Return Value 76 | A new Grammar with reversed production rules 77 | 78 | ## nfa 79 | Convert the grammar to an equivalent NFA (for regular grammars). 80 | #### Parameters 81 | None 82 | #### Return Value 83 | NFA object representing the regular grammar 84 | 85 | ## toRightLinear 86 | Convert a regular grammar to right-linear form. 87 | #### Parameters 88 | None 89 | #### Return Value 90 | Equivalent right-linear grammar 91 | 92 | ## toLeftLinear 93 | Convert a regular grammar to left-linear form. 94 | #### Parameters 95 | None 96 | #### Return Value 97 | Equivalent left-linear grammar 98 | 99 | ## isContextFree 100 | Check if the grammar is context-free. 101 | #### Parameters 102 | None 103 | #### Return Value 104 | Boolean indicating whether the grammar is context-free 105 | 106 | ## isContextSensitive 107 | Check if the grammar is context-sensitive. 108 | #### Parameters 109 | None 110 | #### Return Value 111 | Boolean indicating whether the grammar is context-sensitive 112 | 113 | ## isUnrestricted 114 | Check if the grammar is unrestricted. 115 | #### Parameters 116 | None 117 | #### Return Value 118 | Boolean indicating whether the grammar is unrestricted 119 | 120 | ## inCNF 121 | Check if the grammar is in Chomsky Normal Form. 122 | #### Parameters 123 | None 124 | #### Return Value 125 | Boolean indicating whether the grammar is in CNF 126 | 127 | ## inGNF 128 | Check if the grammar is in Greibach Normal Form. 
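A short end-to-end sketch using the example grammar and the query methods described on this page is shown below. The module path `pykleene.grammar` and class name `Grammar` are assumed from the project layout, and the printed values are not asserted here:

```python showLineNumbers
# Sketch: load the example grammar from this page and exercise the query methods.
from pykleene.grammar import Grammar

g = Grammar()
g.loadFromJSONDict({
    "nonTerminals": ["S", "A", "B"],
    "terminals": ["a", "b"],
    "productions": {
        "S": ["aA", "bB"],
        "A": ["aA", "a"],
        "B": ["bB", "b"]
    },
    "startSymbol": "S"
})

print(g.isValid(), g.isRightLinear(), g.isRegular())  # the example grammar is right-linear
left = g.toLeftLinear()      # equivalent left-linear grammar
nfa = g.nfa()                # equivalent NFA (regular grammars only)
print(g.inCNF(), g.inGNF())  # normal-form checks
```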
129 | #### Parameters 130 | None 131 | #### Return Value 132 | Boolean indicating whether the grammar is in GNF -------------------------------------------------------------------------------- /src/pykleene/lba.py: -------------------------------------------------------------------------------- 1 | from pykleene.tm import TM 2 | from typing import Callable 3 | class LBA(TM): 4 | rightEndMarker: str 5 | 6 | def __init__(self, 7 | states: set[str] = set(), 8 | inputAlphabet: set[str] = set(), 9 | tapeAlphabet: set[str] = set(), 10 | startState: str = None, 11 | transitions: dict[tuple[str, str], tuple[str, str, str]] = dict(), 12 | leftEndMarker: str = None, 13 | rightEndMarker: str = None, 14 | blankSymbol: str = None, 15 | acceptState: str = None, 16 | rejectState: str = None): 17 | self.rightEndMarker = rightEndMarker 18 | super().__init__(states, 19 | inputAlphabet, 20 | tapeAlphabet, 21 | startState, 22 | transitions, 23 | leftEndMarker, 24 | blankSymbol, 25 | acceptState, 26 | rejectState) 27 | self.tapeLength = None 28 | try: 29 | if self.rightEndMarker: assert self.rightEndMarker in self.tapeAlphabet, f"Right end marker {self.rightEndMarker} not in tape alphabet {self.tapeAlphabet}" 30 | except AssertionError as e: 31 | print(e) 32 | self._setNone() 33 | 34 | def loadFromJSONDict(self, jsonDict: dict) -> None: 35 | self.rightEndMarker = jsonDict['rightEndMarker'] 36 | super().loadFromJSONDict(jsonDict) 37 | try: 38 | if self.rightEndMarker: assert self.rightEndMarker in self.tapeAlphabet, f"Right end marker {self.rightEndMarker} not in tape alphabet {self.tapeAlphabet}" 39 | except AssertionError as e: 40 | print(e) 41 | self._setNone() 42 | 43 | def accepts(self, inputString: str, verbose: bool = False, tapeLenFunc: Callable[[int], int] = None) -> tuple[bool, str]: 44 | assert tapeLenFunc, "tapeLenFunc not provided" 45 | self.tapeLength = tapeLenFunc(len(inputString)) 46 | tape = [self.blankSymbol] * self.tapeLength 47 | assert len(inputString) <= self.tapeLength - 2, f"Input string {inputString} too long for tape length {self.tapeLength}" 48 | tape[1:1+len(inputString)] = list(inputString) 49 | tape[0] = self.leftEndMarker 50 | tape[self.tapeLength-1] = self.rightEndMarker 51 | head = 1 52 | state = self.startState 53 | while state not in [self.acceptState, self.rejectState]: 54 | assert head >= 0 and head < self.tapeLength, f"Read/Write head out of bounds: {head}" 55 | assert tape[head] in self.tapeAlphabet, f"Symbol {tape[head]} not in tape alphabet" 56 | if verbose: 57 | print(f"{''.join(tape)} | ({state}, {head})") 58 | if state == self.acceptState or state == self.rejectState: 59 | break 60 | readSymbol = tape[head] 61 | if (state, readSymbol) in self.transitions: 62 | nextState, writeSymbol, direction = self.transitions[(state, readSymbol)] 63 | tape[head] = writeSymbol 64 | if direction == 'L': 65 | head -= 1 66 | elif direction == 'R': 67 | head += 1 68 | elif direction == 'S': 69 | pass 70 | else: 71 | assert False, f"Invalid direction {direction}" 72 | state = nextState 73 | else: 74 | assert False, f"No transition for state {state} and symbol {readSymbol}" 75 | if state == self.acceptState: 76 | return True, ''.join(tape) 77 | elif state == self.rejectState: 78 | return False, ''.join(tape) 79 | assert False, f"TM halted in undefined state: {state}" 80 | 81 | 82 | -------------------------------------------------------------------------------- /docs/contents/docs/state-machines/nfa/index.mdx: 
-------------------------------------------------------------------------------- 1 | --- 2 | title: NFA 3 | description: Nondeterministic Finite Automaton (NFA) class in pykleene 4 | --- 5 | 6 | ```python showLineNumbers 7 | states: set[str] 8 | alphabet: set[str] 9 | transitions: dict[tuple[str, str], set[str]] 10 | startStates: set[str] 11 | finalStates: set[str] 12 | ``` 13 | 14 | ## CONSTRUCTOR 15 | Initialize a new Nondeterministic Finite Automaton with specified parameters. 16 | #### Parameters 17 | - `states`: Set of states in the NFA 18 | - `alphabet`: Set of input symbols 19 | - `transitions`: Mapping of state-symbol pairs to sets of next states 20 | - `startStates`: Set of initial states of the NFA 21 | - `finalStates`: Set of accepting states 22 | #### Return Value 23 | Creates a new NFA object with specified configuration 24 | 25 | ## loadFromJSONDict 26 | Load NFA configuration from a JSON dictionary. 27 | ```python showLineNumbers 28 | # Example 29 | { 30 | "NFA_NAME": { 31 | "states": ["A0", "A1", "A2", "A3", "A4"], 32 | "alphabet": ["a", "b"], 33 | "transitions": [ 34 | ["A0", "ε", ["A1", "A3"]], 35 | ["A1", "a", ["A2"]], 36 | ["A2", "a", ["A2"]], 37 | ["A2", "b", ["A2"]], 38 | ["A3", "a", ["A3"]], 39 | ["A3", "b", ["A3", "A4"]] 40 | ], 41 | "startStates": ["A0"], 42 | "finalStates": ["A2", "A4"] 43 | } 44 | } 45 | ``` 46 | #### Parameters 47 | - `data`: Dictionary containing NFA configuration 48 | #### Return Value 49 | Populates the NFA object with configuration from the dictionary 50 | 51 | ## isValid 52 | Validate the NFA configuration. 53 | #### Parameters 54 | None 55 | #### Return Value 56 | Boolean indicating whether the NFA configuration is valid 57 | 58 | ## accepts 59 | Determine if a given string is accepted by the NFA. 60 | 61 | #### Parameters 62 | - `string`: Input string to be processed 63 | 64 | ## addTransition 65 | Add a new transition to the NFA. 66 | #### Parameters 67 | - `startState`: Source state of the transition 68 | - `symbol`: Input symbol for the transition 69 | - `endState`: Destination state of the transition 70 | #### Return Value 71 | A new NFA with the added transition 72 | 73 | ## singleStartStateNFA 74 | Convert the NFA to have a single start state. 75 | #### Parameters 76 | None 77 | #### Return Value 78 | A new NFA with a single start state 79 | 80 | ## singleFinalStateNFA 81 | Convert the NFA to have a single final state. 82 | #### Parameters 83 | None 84 | #### Return Value 85 | A new NFA with a single final state 86 | 87 | ## regex 88 | Generate a regular expression equivalent to the NFA. 89 | #### Parameters 90 | None 91 | #### Return Value 92 | String representing the regular expression of the NFA 93 | 94 | ## reverse 95 | Create a reversed version of the NFA. 96 | #### Parameters 97 | None 98 | #### Return Value 99 | A new NFA with reversed transitions and start/final states swapped 100 | 101 | ## grammar 102 | Convert the NFA to an equivalent grammar. 103 | #### Parameters 104 | None 105 | #### Return Value 106 | A Grammar object representing the language of the NFA 107 | 108 | ## image 109 | Generate a graphical representation of the NFA. 110 | #### Parameters 111 | - `dir`: Directory to save the image 112 | - `save`: Boolean to determine if image should be saved 113 | #### Return Value 114 | Graphviz Digraph object representing the NFA 115 | 116 | ## epsilonClosure 117 | Compute the epsilon closure of a given state. 
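For example, with the NFA from the `loadFromJSONDict` example above, the epsilon closure of the start state and a few related calls might look like this (a sketch; the class name `NFA` and module path `pykleene.nfa` are assumed from the project layout):

```python showLineNumbers
# Sketch: the example NFA from this page, loaded and exercised.
from pykleene.nfa import NFA

nfa = NFA()
nfa.loadFromJSONDict({
    "states": ["A0", "A1", "A2", "A3", "A4"],
    "alphabet": ["a", "b"],
    "transitions": [
        ["A0", "ε", ["A1", "A3"]],
        ["A1", "a", ["A2"]],
        ["A2", "a", ["A2"]],
        ["A2", "b", ["A2"]],
        ["A3", "a", ["A3"]],
        ["A3", "b", ["A3", "A4"]]
    ],
    "startStates": ["A0"],
    "finalStates": ["A2", "A4"]
})

print(nfa.epsilonClosure("A0"))   # states reachable by ε-moves, e.g. A0, A1, A3
print(nfa.accepts("aab"))         # this machine should accept strings starting with 'a'
dfa = nfa.dfa()                   # equivalent DFA
```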
118 | #### Parameters 119 | - `state`: State to compute epsilon closure for 120 | #### Return Value 121 | Set of states reachable through epsilon transitions 122 | 123 | ## nextStates 124 | Compute states reachable from a given state with a specific symbol. 125 | #### Parameters 126 | - `state`: Source state 127 | - `symbol`: Input symbol 128 | #### Return Value 129 | Set of states reachable from the source state with the given symbol 130 | 131 | ## dfa 132 | Convert the NFA to an equivalent Deterministic Finite Automaton (DFA). 133 | #### Parameters 134 | None 135 | #### Return Value 136 | A DFA object equivalent to the original NFA -------------------------------------------------------------------------------- /docs/app/globals.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; 4 | 5 | @layer base { 6 | :root { 7 | --background: 0 0% 100%; 8 | --foreground: 240 10% 3.9%; 9 | --card: 0 0% 100%; 10 | --card-foreground: 240 10% 3.9%; 11 | --popover: 0 0% 100%; 12 | --popover-foreground: 240 10% 3.9%; 13 | --primary: 240 5.9% 10%; 14 | --primary-foreground: 0 0% 98%; 15 | --secondary: 240 4.8% 95.9%; 16 | --secondary-foreground: 240 5.9% 10%; 17 | --muted: 240 4.8% 95.9%; 18 | --muted-foreground: 240 3.8% 46.1%; 19 | --accent: 240 4.8% 95.9%; 20 | --accent-foreground: 240 5.9% 10%; 21 | --destructive: 0 84.2% 60.2%; 22 | --destructive-foreground: 0 0% 98%; 23 | --border: 240 5.9% 90%; 24 | --input: 240 5.9% 90%; 25 | --ring: 240 5.9% 10%; 26 | --radius: 0.5rem; 27 | } 28 | 29 | .dark { 30 | --background: 240 10% 3.9%; 31 | --foreground: 0 0% 98%; 32 | --card: 240 10% 3.9%; 33 | --card-foreground: 0 0% 98%; 34 | --popover: 240 10% 3.9%; 35 | --popover-foreground: 0 0% 98%; 36 | --primary: 0 0% 98%; 37 | --primary-foreground: 240 5.9% 10%; 38 | --secondary: 240 3.7% 15.9%; 39 | --secondary-foreground: 0 0% 98%; 40 | --muted: 240 3.7% 15.9%; 41 | --muted-foreground: 240 5% 64.9%; 42 | --accent: 240 3.7% 15.9%; 43 | --accent-foreground: 0 0% 98%; 44 | --destructive: 0 62.8% 30.6%; 45 | --destructive-foreground: 0 0% 98%; 46 | --border: 240 3.7% 15.9%; 47 | --input: 240 3.7% 15.9%; 48 | --ring: 240 4.9% 83.9%; 49 | } 50 | } 51 | 52 | @layer base { 53 | * { 54 | @apply border-border; 55 | } 56 | 57 | body { 58 | @apply bg-background text-foreground; 59 | } 60 | } 61 | 62 | .prose { 63 | margin: 0 !important; 64 | } 65 | 66 | pre { 67 | padding: 0 !important; 68 | width: inherit !important; 69 | overflow-x: auto; 70 | } 71 | 72 | pre>code { 73 | display: grid; 74 | max-width: inherit !important; 75 | padding: 14px 0 !important; 76 | } 77 | 78 | .code-line { 79 | padding: 0.75px 12.5px; 80 | } 81 | 82 | .line-number::before { 83 | display: inline-block; 84 | width: 1rem; 85 | margin-right: 22px; 86 | margin-left: -2px; 87 | color: rgb(110, 110, 110); 88 | content: attr(line); 89 | font-size: 13.5px; 90 | text-align: right; 91 | } 92 | 93 | 94 | .highlight-line { 95 | @apply dark:bg-neutral-800/90; 96 | @apply bg-neutral-200/90; 97 | } 98 | 99 | .punctuation { 100 | color: gray; 101 | } 102 | 103 | .comment { 104 | color: gray; 105 | } 106 | 107 | /* Light Mode */ 108 | .keyword { 109 | color: #e53e3e; 110 | /* Red */ 111 | } 112 | 113 | .function { 114 | color: #c53030; 115 | /* Darker Red */ 116 | } 117 | 118 | .punctuation { 119 | color: #718096; 120 | /* Light Gray */ 121 | } 122 | 123 | .comment { 124 | color: #a0aec0; 125 | /* Gray */ 126 | } 127 | 128 | .string, 129 | .constant, 130 | 
.annotation, 131 | .boolean, 132 | .number { 133 | color: #f56565; 134 | /* Soft Red */ 135 | } 136 | 137 | .tag { 138 | color: #fc8181; 139 | /* Light Red */ 140 | } 141 | 142 | .attr-name { 143 | color: #e53e3e; 144 | /* Red */ 145 | } 146 | 147 | .attr-value { 148 | color: #f6ad55; 149 | /* Soft Orange */ 150 | } 151 | 152 | /* Dark Mode */ 153 | .dark .keyword { 154 | color: #fc8181; 155 | /* Light Red */ 156 | } 157 | 158 | .dark .function { 159 | color: #e53e3e; 160 | /* Red */ 161 | } 162 | 163 | .dark .string, 164 | .dark .constant, 165 | .dark .annotation, 166 | .dark .boolean, 167 | .dark .number { 168 | color: #f56565; 169 | /* Soft Red */ 170 | } 171 | 172 | .dark .tag { 173 | color: #fbb6ce; 174 | /* Soft Pink */ 175 | } 176 | 177 | .dark .attr-name { 178 | color: #e53e3e; 179 | /* Red */ 180 | } 181 | 182 | .dark .attr-value { 183 | color: #f6ad55; 184 | /* Soft Orange */ 185 | } 186 | 187 | 188 | .rehype-code-title { 189 | @apply px-2; 190 | @apply -mb-8; 191 | @apply w-full; 192 | @apply text-sm; 193 | @apply pt-1; 194 | @apply pb-5; 195 | @apply font-normal; 196 | @apply tracking-wider; 197 | @apply font-medium; 198 | font-family: var(--font-code) !important; 199 | } 200 | 201 | .highlight-comp>code { 202 | background-color: transparent !important; 203 | } -------------------------------------------------------------------------------- /docs/contents/docs/state-machines/dfa/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: DFA 3 | description: Deterministic Finite Automaton (DFA) class in pykleene 4 | --- 5 | 6 | ```python showLineNumbers 7 | states: set[str] 8 | alphabet: set[str] 9 | transitions: dict[tuple[str, str], str] 10 | startState: str 11 | finalStates: set[str] 12 | ``` 13 | 14 | ## CONSTRUCTOR 15 | Initialize a new Deterministic Finite Automaton with specified parameters. 16 | 17 | #### Parameters 18 | - `states`: Set of states in the DFA 19 | - `alphabet`: Set of input symbols 20 | - `transitions`: Mapping of state-symbol pairs to next states 21 | - `startState`: Initial state of the DFA 22 | - `finalStates`: Set of accepting states 23 | 24 | #### Return Value 25 | Creates a new DFA object with specified configuration 26 | 27 | ## loadFromJSONDict 28 | Load DFA configuration from a JSON dictionary. 29 | ```python showLineNumbers 30 | # Example 31 | { 32 | "DFA_NAME": { 33 | "states": ["A0", "A1", "A2", "A3", "A4", "A5"], 34 | "alphabet": ["0", "1"], 35 | "transitions": [ 36 | ["A0", "0", "A3"], 37 | ["A0", "1", "A1"], 38 | ["A1", "0", "A2"], 39 | ["A1", "1", "A5"], 40 | ["A2", "0", "A2"], 41 | ["A2", "1", "A5"], 42 | ["A3", "0", "A0"], 43 | ["A3", "1", "A4"], 44 | ["A4", "0", "A2"], 45 | ["A4", "1", "A5"], 46 | ["A5", "0", "A5"], 47 | ["A5", "1", "A5"] 48 | ], 49 | "startState": "A0", 50 | "finalStates": ["A1", "A4", "A2"] 51 | }, 52 | } 53 | ``` 54 | #### Parameters 55 | - `data`: Dictionary containing DFA configuration 56 | 57 | #### Return Value 58 | Populates the DFA object with configuration from the dictionary 59 | 60 | ## accepts 61 | Determine if a given string is accepted by the DFA. 62 | 63 | #### Parameters 64 | - `string`: Input string to be processed 65 | - `verbose`: Boolean to determine whether to print verbose output 66 | 67 | #### Return Value 68 | Boolean indicating whether the string is accepted by the DFA 69 | 70 | ## nextState 71 | Compute the next state for a given current state and input symbol. 
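For instance, with a small illustrative DFA (hypothetical, not one of the shipped examples), `accepts` and `nextState` can be exercised like this:

```python showLineNumbers
# Sketch: a hypothetical DFA accepting binary strings that end in '1'.
from pykleene.dfa import DFA

dfa = DFA()
dfa.loadFromJSONDict({
    "states": ["s0", "s1"],
    "alphabet": ["0", "1"],
    "transitions": [
        ["s0", "0", "s0"],
        ["s0", "1", "s1"],
        ["s1", "0", "s0"],
        ["s1", "1", "s1"]
    ],
    "startState": "s0",
    "finalStates": ["s1"]
})

print(dfa.accepts("101", verbose=True))  # expected: True
print(dfa.nextState("s0", "1"))          # expected: 's1'
minimal = dfa.minimal()                  # already minimal here, but shows the call
```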
72 | 73 | #### Parameters 74 | - `currentState`: Current state in the DFA 75 | - `symbol`: Input symbol 76 | 77 | #### Return Value 78 | Next state after processing the symbol, or None if no transition exists 79 | 80 | ## minimal 81 | Generate a minimized equivalent DFA. 82 | 83 | #### Parameters 84 | None 85 | 86 | #### Return Value 87 | A new, minimized DFA with equivalent language 88 | 89 | ## isomorphic 90 | Check structural equivalence between two DFAs. 91 | 92 | #### Parameters 93 | - `dfa`: Another DFA to compare 94 | 95 | #### Return Value 96 | Boolean indicating whether the DFAs are isomorphic 97 | 98 | ## image 99 | Generate a graphical representation of the DFA. 100 | 101 | #### Parameters 102 | - `dir`: Directory to save the image 103 | - `save`: Boolean to determine if image should be saved 104 | 105 | #### Return Value 106 | Graphviz Digraph object representing the DFA 107 | 108 | ## union 109 | Compute the union of two DFAs. 110 | 111 | #### Parameters 112 | - `dfa`: Another DFA to combine 113 | 114 | #### Return Value 115 | A new DFA representing the union of the two input DFAs 116 | 117 | ## complement 118 | Generate the complement of the current DFA. 119 | 120 | #### Parameters 121 | None 122 | 123 | #### Return Value 124 | A new DFA that accepts strings not in the original DFA's language 125 | 126 | ## intersection 127 | Compute the intersection of two DFAs. 128 | 129 | #### Parameters 130 | - `dfa`: Another DFA to intersect with 131 | 132 | #### Return Value 133 | A new DFA representing the intersection of the two input DFAs 134 | 135 | ## reachable 136 | Generate a DFA containing only reachable states. 137 | 138 | #### Parameters 139 | None 140 | 141 | #### Return Value 142 | A new DFA with only reachable states from the start state 143 | 144 | ## isLangSubset 145 | Check if the current DFA's language is a subset of another DFA's language. 146 | 147 | #### Parameters 148 | - `dfa`: Another DFA to compare 149 | 150 | #### Return Value 151 | Boolean indicating language subset relationship 152 | 153 | ## difference 154 | Compute the difference between two DFAs. 155 | 156 | #### Parameters 157 | - `dfa`: Another DFA to subtract 158 | 159 | #### Return Value 160 | A new DFA representing the difference of the two input DFAs 161 | 162 | ## symmetricDifference 163 | Compute the symmetric difference between two DFAs. 
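The language-level operations above compose naturally. A hedged sketch follows, assuming `dfa1` and `dfa2` are DFAs over the same alphabet, constructed as shown earlier:

```python showLineNumbers
# Sketch: combining two DFAs with the boolean-language operations listed above.
from pykleene.dfa import DFA

def language_checks(dfa1: DFA, dfa2: DFA) -> None:
    both = dfa1.intersection(dfa2)          # strings accepted by both
    either = dfa1.union(dfa2)               # strings accepted by at least one
    only_first = dfa1.difference(dfa2)      # accepted by dfa1 but not by dfa2
    sym = dfa1.symmetricDifference(dfa2)    # accepted by exactly one of the two
    comp = dfa1.complement()                # strings not accepted by dfa1
    print(dfa1.isLangSubset(dfa2))          # True if every string of dfa1 is accepted by dfa2
```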
164 | 165 | #### Parameters 166 | - `dfa`: Another DFA to compute symmetric difference with 167 | 168 | #### Return Value 169 | A new DFA representing the symmetric difference of the two input DFAs -------------------------------------------------------------------------------- /docs/components/ui/sheet.tsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import * as React from "react"; 4 | import * as SheetPrimitive from "@radix-ui/react-dialog"; 5 | import { cva, type VariantProps } from "class-variance-authority"; 6 | import { X } from "lucide-react"; 7 | 8 | import { cn } from "@/lib/utils"; 9 | 10 | const Sheet = SheetPrimitive.Root; 11 | 12 | const SheetTrigger = SheetPrimitive.Trigger; 13 | 14 | const SheetClose = SheetPrimitive.Close; 15 | 16 | const SheetPortal = SheetPrimitive.Portal; 17 | 18 | const SheetOverlay = React.forwardRef< 19 | React.ElementRef, 20 | React.ComponentPropsWithoutRef 21 | >(({ className, ...props }, ref) => ( 22 | 30 | )); 31 | SheetOverlay.displayName = SheetPrimitive.Overlay.displayName; 32 | 33 | const sheetVariants = cva( 34 | "fixed z-50 gap-4 bg-background p-6 shadow-lg transition ease-in-out data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:duration-300 data-[state=open]:duration-500", 35 | { 36 | variants: { 37 | side: { 38 | top: "inset-x-0 top-0 border-b data-[state=closed]:slide-out-to-top data-[state=open]:slide-in-from-top", 39 | bottom: 40 | "inset-x-0 bottom-0 border-t data-[state=closed]:slide-out-to-bottom data-[state=open]:slide-in-from-bottom", 41 | left: "inset-y-0 left-0 h-full w-3/4 border-r data-[state=closed]:slide-out-to-left data-[state=open]:slide-in-from-left sm:max-w-sm", 42 | right: 43 | "inset-y-0 right-0 h-full w-3/4 border-l data-[state=closed]:slide-out-to-right data-[state=open]:slide-in-from-right sm:max-w-sm", 44 | }, 45 | }, 46 | defaultVariants: { 47 | side: "right", 48 | }, 49 | } 50 | ); 51 | 52 | interface SheetContentProps 53 | extends React.ComponentPropsWithoutRef, 54 | VariantProps {} 55 | 56 | const SheetContent = React.forwardRef< 57 | React.ElementRef, 58 | SheetContentProps 59 | >(({ side = "right", className, children, ...props }, ref) => ( 60 | 61 | 62 | 67 | {children} 68 | 69 | 70 | Close 71 | 72 | 73 | 74 | )); 75 | SheetContent.displayName = SheetPrimitive.Content.displayName; 76 | 77 | const SheetHeader = ({ 78 | className, 79 | ...props 80 | }: React.HTMLAttributes) => ( 81 |
88 | ); 89 | SheetHeader.displayName = "SheetHeader"; 90 | 91 | const SheetFooter = ({ 92 | className, 93 | ...props 94 | }: React.HTMLAttributes<HTMLDivElement>) => ( 95 | <div 96 | className={cn( 97 | "flex flex-col-reverse sm:flex-row sm:justify-end sm:space-x-2", 98 | className 99 | )} 100 | {...props} 101 | />
102 | ); 103 | SheetFooter.displayName = "SheetFooter"; 104 | 105 | const SheetTitle = React.forwardRef< 106 | React.ElementRef, 107 | React.ComponentPropsWithoutRef 108 | >(({ className, ...props }, ref) => ( 109 | 114 | )); 115 | SheetTitle.displayName = SheetPrimitive.Title.displayName; 116 | 117 | const SheetDescription = React.forwardRef< 118 | React.ElementRef, 119 | React.ComponentPropsWithoutRef 120 | >(({ className, ...props }, ref) => ( 121 | 126 | )); 127 | SheetDescription.displayName = SheetPrimitive.Description.displayName; 128 | 129 | export { 130 | Sheet, 131 | SheetPortal, 132 | SheetOverlay, 133 | SheetTrigger, 134 | SheetClose, 135 | SheetContent, 136 | SheetHeader, 137 | SheetFooter, 138 | SheetTitle, 139 | SheetDescription, 140 | }; 141 | -------------------------------------------------------------------------------- /src/pykleene/pda.py: -------------------------------------------------------------------------------- 1 | import graphviz 2 | 3 | class PDA: 4 | states: set[str] 5 | inputAlphabet: set[str] 6 | stackAlphabet: set[str] 7 | transitions: dict[tuple[str, str, str], set[tuple[str, str]]] 8 | startState: str 9 | initialStackSymbol: str 10 | finalStates: set[str] 11 | 12 | def _setNone(self) -> None: 13 | for key, _ in self.__annotations__.items(): 14 | setattr(self, key, None) 15 | 16 | def __init__(self, 17 | states: set[str] = set(), 18 | inputAlphabet: set[str] = set(), 19 | stackAlphabet: set[str] = set(), 20 | transitions: dict[tuple[str, str, str], set[tuple[str, str]]] = dict(), 21 | startState: str = None, 22 | initialStackSymbol: str = None, 23 | finalStates: set[str] = set()) -> None: 24 | 25 | self.states = states 26 | self.inputAlphabet = inputAlphabet 27 | self.stackAlphabet = stackAlphabet 28 | self.transitions = transitions 29 | self.startState = startState 30 | self.initialStackSymbol = initialStackSymbol 31 | self.finalStates = finalStates 32 | try: 33 | self.isValid() 34 | except AssertionError as e: 35 | print(e) 36 | self._setNone() 37 | 38 | def isValid(self) -> bool: 39 | if self.startState: assert self.startState in self.states, f"Start state {self.startState} not in states" 40 | if self.initialStackSymbol: assert self.initialStackSymbol in self.stackAlphabet, f"Initial stack symbol {self.initialStackSymbol} not in stack alphabet" 41 | assert self.finalStates <= self.states, f"Final states {self.finalStates} not in states" 42 | for (state, inputSymbol, stackSymbol), nextConfigs in self.transitions.items(): 43 | for nextState, stackString in nextConfigs: 44 | assert state in self.states, f"State {state} not in states" 45 | if inputSymbol and inputSymbol != "ε": assert inputSymbol in self.inputAlphabet, f"Input symbol {inputSymbol} not in input alphabet" 46 | assert stackSymbol in self.stackAlphabet, f"Stack symbol {stackSymbol} not in stack alphabet" 47 | assert nextState in self.states, f"Next state {nextState} not in states" 48 | if stackString != "ε": 49 | for symbol in stackString: 50 | assert symbol in self.stackAlphabet, f"Symbol {symbol} in stack string not in stack alphabet" 51 | return True 52 | 53 | def loadFromJSONDict(self, jsonDict: dict) -> None: 54 | self.states = set(jsonDict['states']) 55 | self.inputAlphabet = set(jsonDict['inputAlphabet']) 56 | self.stackAlphabet = set(jsonDict['stackAlphabet']) 57 | for [state, inputSymbol, stackSymbol, nextState, stackString] in jsonDict['transitions']: 58 | if (state, inputSymbol, stackSymbol) not in self.transitions: 59 | self.transitions[(state, inputSymbol, stackSymbol)] = set() 
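            # Transitions are keyed by (state, inputSymbol, stackSymbol); each key maps to a
            # set of (nextState, stackString) pairs, so nondeterministic alternatives accumulate.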
60 | self.transitions[(state, inputSymbol, stackSymbol)].add((nextState, stackString)) 61 | self.startState = jsonDict['startState'] 62 | self.initialStackSymbol = jsonDict['initialStackSymbol'] 63 | self.finalStates = set(jsonDict['finalStates']) 64 | from pprint import pprint 65 | pprint(self.__dict__) 66 | try: 67 | self.isValid() 68 | except AssertionError as e: 69 | print(e) 70 | self._setNone() 71 | 72 | def image(self, dir: str = None, save: bool = False, monochrome: bool = False) -> graphviz.Digraph: 73 | from pykleene._config import graphvizConfig, graphvizAttrConfig, graphvizEdgeConfig 74 | from pykleene.utils import randomDarkColor 75 | 76 | dot = graphviz.Digraph(**graphvizConfig) 77 | 78 | dot.attr(**graphvizAttrConfig) 79 | 80 | for state in self.states: 81 | if state in self.finalStates: 82 | dot.node(state, shape='doublecircle') 83 | else: 84 | dot.node(state) 85 | 86 | if monochrome: color = 'black' 87 | else: color = randomDarkColor() 88 | dot.node(f'{id(self.startState)}', shape='point', label='', color=color, fontcolor=color) 89 | dot.edge(f'{id(self.startState)}', self.startState, **graphvizEdgeConfig, color=color, fontcolor=color) 90 | 91 | for (state, inputSymbol, stackSymbol), nextConfigs in self.transitions.items(): 92 | for nextState, stackString in nextConfigs: 93 | if monochrome: color = 'black' 94 | else: color = randomDarkColor() 95 | dot.edge(state, nextState, label=f" {inputSymbol}: {stackSymbol} -> {stackString} ", **graphvizEdgeConfig, color=color, fontcolor=color) 96 | 97 | if dir and save: 98 | try: 99 | dot.render(f"{dir}/{id(self)}", format='png', cleanup=True) 100 | except Exception as e: 101 | print(f"Error while saving image: {e}") 102 | 103 | return dot 104 | 105 | def isDeterministic(self) -> bool: 106 | for state in self.states: 107 | for stackSymbol in self.stackAlphabet: 108 | nextConfigs = set() 109 | if (state, "ε", stackSymbol) in self.transitions: 110 | nextConfigs = nextConfigs | self.transitions[(state, "ε", stackSymbol)] 111 | 112 | for inputSymbol in self.inputAlphabet: 113 | newNextConfigs = set() 114 | if (state, inputSymbol, stackSymbol) in self.transitions: 115 | newNextConfigs = nextConfigs | self.transitions[(state, inputSymbol, stackSymbol)] 116 | if len(newNextConfigs) > 1: 117 | return False 118 | return True 119 | 120 | def _hasEpsilonTransitions(self) -> bool: 121 | for (_, inputSymbol, _), _ in self.transitions.items(): 122 | if inputSymbol == "ε": 123 | return True 124 | return False 125 | 126 | def accepts(self, inputString: str) -> bool: 127 | assert self.isDeterministic(), "PDA is not deterministic. Membership checking with NPDA has not been implemented yet" 128 | assert not self._hasEpsilonTransitions(), "PDA has epsilon transitions. 
Membership checking with PDA with epsilon transitions has not been implemented yet" 129 | currentState = self.startState 130 | stack = [self.initialStackSymbol] 131 | inputString = list(inputString) 132 | while inputString: 133 | inputSymbol = inputString.pop(0) 134 | stackSymbol = stack.pop() 135 | if (currentState, inputSymbol, stackSymbol) not in self.transitions: 136 | return False 137 | nextState, stackString = list(self.transitions[(currentState, inputSymbol, stackSymbol)])[0] 138 | if stackString != "ε": 139 | stack.extend(list(stackString)[::-1]) 140 | currentState = nextState 141 | return currentState in self.finalStates or len(stack) == 0 142 | -------------------------------------------------------------------------------- /src/pykleene/tm.py: -------------------------------------------------------------------------------- 1 | from pykleene.symbols import Symbols 2 | import graphviz 3 | 4 | class TM: 5 | states: set[str] 6 | inputAlphabet: set[str] 7 | tapeAlphabet: set[str] 8 | startState: str 9 | transitions: dict[tuple[str, str], tuple[str, str, str]] 10 | leftEndMarker: str 11 | blankSymbol: str 12 | acceptState: str 13 | rejectState: str 14 | 15 | tapeLength: int = int(1e6) 16 | 17 | def _setNone(self) -> None: 18 | for key, _ in self.__annotations__.items(): 19 | setattr(self, key, None) 20 | 21 | def isValid(self) -> bool: 22 | assert self.inputAlphabet <= self.tapeAlphabet, f"Input alphabet {self.inputAlphabet} not a subset of tape alphabet {self.tapeAlphabet}" 23 | if self.startState: assert self.startState in self.states, f"Start state {self.startState} not in states" 24 | if self.acceptState: assert self.acceptState in self.states, f"Accept state {self.acceptState} not in states" 25 | if self.rejectState: assert self.rejectState in self.states, f"Reject state {self.rejectState} not in states" 26 | if self.leftEndMarker: assert self.leftEndMarker in self.tapeAlphabet, f"Left end marker {self.leftEndMarker} not in tape alphabet {self.tapeAlphabet}" 27 | if self.blankSymbol: assert self.blankSymbol in self.tapeAlphabet, f"Blank symbol {self.blankSymbol} not in tape alphabet" 28 | for (state, symbol), (nextState, writeSymbol, direction) in self.transitions.items(): 29 | assert state in self.states, f"State {state} not in states" 30 | assert symbol in self.tapeAlphabet, f"Symbol {symbol} not in tape alphabet" 31 | assert nextState in self.states, f"Next state {nextState} not in states" 32 | assert writeSymbol in self.tapeAlphabet, f"Write symbol {writeSymbol} not in tape alphabet" 33 | assert direction in ['L', 'R', 'S'], f"Direction {direction} not in ['L', 'R', 'S']" 34 | return True 35 | 36 | def __init__(self, 37 | states: set[str] = set(), 38 | inputAlphabet: set[str] = set(), 39 | tapeAlphabet: set[str] = set(), 40 | startState: str = None, 41 | transitions: dict[tuple[str, str], tuple[str, str, str]] = dict(), 42 | leftEndMarker: str = None, 43 | blankSymbol: str = None, 44 | acceptState: str = None, 45 | rejectState: str = None): 46 | 47 | self.states = states 48 | self.inputAlphabet = inputAlphabet 49 | self.tapeAlphabet = tapeAlphabet 50 | self.startState = startState 51 | self.transitions = transitions 52 | self.leftEndMarker = leftEndMarker 53 | self.blankSymbol = blankSymbol 54 | self.acceptState = acceptState 55 | self.rejectState = rejectState 56 | try: 57 | self.isValid() 58 | except AssertionError as e: 59 | print(e) 60 | self._setNone() 61 | 62 | def loadFromJSONDict(self, jsonDict: dict) -> None: 63 | self.states = set(jsonDict['states']) 64 | 
self.inputAlphabet = set(jsonDict['inputAlphabet']) 65 | self.tapeAlphabet = set(jsonDict['tapeAlphabet']) 66 | self.startState = jsonDict['startState'] 67 | self.transitions = dict() 68 | for [state, symbol, nextState, writeSymbol, direction] in jsonDict['transitions']: 69 | assert (state, symbol) not in self.transitions, f"Multiple transitions for state {state} and symbol {symbol}" 70 | self.transitions[(state, symbol)] = (nextState, writeSymbol, direction) 71 | self.leftEndMarker = jsonDict['leftEndMarker'] 72 | self.blankSymbol = jsonDict['blankSymbol'] 73 | self.acceptState = jsonDict['acceptState'] 74 | self.rejectState = jsonDict['rejectState'] 75 | try: 76 | self.isValid() 77 | except AssertionError as e: 78 | print(e) 79 | self._setNone() 80 | 81 | def image(self, dir: str = None, save: bool = False, monochrome: bool = False) -> graphviz.Digraph: 82 | from pykleene._config import graphvizConfig, graphvizAttrConfig, graphvizEdgeConfig 83 | from pykleene.utils import randomDarkColor 84 | from pprint import pprint 85 | 86 | dot = graphviz.Digraph(**graphvizConfig) 87 | 88 | dot.attr(**graphvizAttrConfig) 89 | 90 | for state in self.states: 91 | if state == self.startState: 92 | dot.node(state, shape='circle', color='black', fontcolor='black') 93 | elif state == self.acceptState: 94 | dot.node(state, shape='doublecircle', color='darkgreen', fontcolor='darkgreen') 95 | elif state == self.rejectState: 96 | dot.node(state, shape='doublecircle', color='darkred', fontcolor='darkred') 97 | else: 98 | dot.node(state, shape='circle') 99 | 100 | if monochrome: color = 'black' 101 | else: color = randomDarkColor() 102 | dot.node(f'{id(self.startState)}', shape='point', label='', color=color, fontcolor=color) 103 | dot.edge(f'{id(self.startState)}', self.startState, **graphvizEdgeConfig, color=color, fontcolor=color) 104 | 105 | for (state, readSymbol), (nextState, writeSymbol, direction) in self.transitions.items(): 106 | if monochrome: color = 'black' 107 | else: color = randomDarkColor() 108 | dot.edge(state, nextState, label=f" {readSymbol}|({writeSymbol}, {direction}) ", **graphvizEdgeConfig, color=color, fontcolor=color) 109 | 110 | if dir and save: 111 | try: 112 | dot.render(f"{dir}/{id(self)}", format='png', cleanup=True) 113 | except Exception as e: 114 | print(f"Error while saving image: {e}") 115 | 116 | return dot 117 | 118 | def accepts(self, inputString: str, verbose: bool = False) -> tuple[bool, str]: 119 | # from pprint import pprint 120 | # pprint(self.__dict__) 121 | tape = [self.blankSymbol] * self.tapeLength 122 | tape[1:1+len(inputString)] = list(inputString) 123 | tape[0] = self.leftEndMarker 124 | horizon = (len(inputString) - 1) + 1 125 | head = 0 126 | state = self.startState 127 | while state not in [self.acceptState, self.rejectState]: 128 | assert head >= 0 and head < self.tapeLength, f"Read/Write head out of bounds: {head}" 129 | assert tape[head] in self.tapeAlphabet, f"Symbol {tape[head]} not in tape alphabet" 130 | horizon = max(horizon, head) 131 | if verbose: 132 | print(f"{''.join(tape[:horizon+1])} | ({state}, {head})") 133 | if state == self.acceptState or state == self.rejectState: 134 | break 135 | readSymbol = tape[head] 136 | if (state, readSymbol) in self.transitions: 137 | nextState, writeSymbol, direction = self.transitions[(state, readSymbol)] 138 | tape[head] = writeSymbol 139 | if direction == 'L': 140 | head -= 1 141 | elif direction == 'R': 142 | head += 1 143 | elif direction == 'S': 144 | pass 145 | else: 146 | assert False, f"Invalid direction 
{direction}" 147 | state = nextState 148 | else: 149 | assert False, f"No transition for state {state} and symbol {readSymbol}" 150 | if state == self.acceptState: 151 | return True, ''.join(tape[:horizon+1]) 152 | elif state == self.rejectState: 153 | return False, ''.join(tape[:horizon+1]) 154 | assert False, f"TM halted in undefined state: {state}" 155 | 156 | -------------------------------------------------------------------------------- /docs/components/ui/dropdown-menu.tsx: -------------------------------------------------------------------------------- 1 | "use client" 2 | 3 | import * as React from "react" 4 | import * as DropdownMenuPrimitive from "@radix-ui/react-dropdown-menu" 5 | import { Check, ChevronRight, Circle } from "lucide-react" 6 | 7 | import { cn } from "@/lib/utils" 8 | 9 | const DropdownMenu = DropdownMenuPrimitive.Root 10 | 11 | const DropdownMenuTrigger = DropdownMenuPrimitive.Trigger 12 | 13 | const DropdownMenuGroup = DropdownMenuPrimitive.Group 14 | 15 | const DropdownMenuPortal = DropdownMenuPrimitive.Portal 16 | 17 | const DropdownMenuSub = DropdownMenuPrimitive.Sub 18 | 19 | const DropdownMenuRadioGroup = DropdownMenuPrimitive.RadioGroup 20 | 21 | const DropdownMenuSubTrigger = React.forwardRef< 22 | React.ElementRef, 23 | React.ComponentPropsWithoutRef & { 24 | inset?: boolean 25 | } 26 | >(({ className, inset, children, ...props }, ref) => ( 27 | 36 | {children} 37 | 38 | 39 | )) 40 | DropdownMenuSubTrigger.displayName = 41 | DropdownMenuPrimitive.SubTrigger.displayName 42 | 43 | const DropdownMenuSubContent = React.forwardRef< 44 | React.ElementRef, 45 | React.ComponentPropsWithoutRef 46 | >(({ className, ...props }, ref) => ( 47 | 55 | )) 56 | DropdownMenuSubContent.displayName = 57 | DropdownMenuPrimitive.SubContent.displayName 58 | 59 | const DropdownMenuContent = React.forwardRef< 60 | React.ElementRef, 61 | React.ComponentPropsWithoutRef 62 | >(({ className, sideOffset = 4, ...props }, ref) => ( 63 | 64 | 73 | 74 | )) 75 | DropdownMenuContent.displayName = DropdownMenuPrimitive.Content.displayName 76 | 77 | const DropdownMenuItem = React.forwardRef< 78 | React.ElementRef, 79 | React.ComponentPropsWithoutRef & { 80 | inset?: boolean 81 | } 82 | >(({ className, inset, ...props }, ref) => ( 83 | 92 | )) 93 | DropdownMenuItem.displayName = DropdownMenuPrimitive.Item.displayName 94 | 95 | const DropdownMenuCheckboxItem = React.forwardRef< 96 | React.ElementRef, 97 | React.ComponentPropsWithoutRef 98 | >(({ className, children, checked, ...props }, ref) => ( 99 | 108 | 109 | 110 | 111 | 112 | 113 | {children} 114 | 115 | )) 116 | DropdownMenuCheckboxItem.displayName = 117 | DropdownMenuPrimitive.CheckboxItem.displayName 118 | 119 | const DropdownMenuRadioItem = React.forwardRef< 120 | React.ElementRef, 121 | React.ComponentPropsWithoutRef 122 | >(({ className, children, ...props }, ref) => ( 123 | 131 | 132 | 133 | 134 | 135 | 136 | {children} 137 | 138 | )) 139 | DropdownMenuRadioItem.displayName = DropdownMenuPrimitive.RadioItem.displayName 140 | 141 | const DropdownMenuLabel = React.forwardRef< 142 | React.ElementRef, 143 | React.ComponentPropsWithoutRef & { 144 | inset?: boolean 145 | } 146 | >(({ className, inset, ...props }, ref) => ( 147 | 156 | )) 157 | DropdownMenuLabel.displayName = DropdownMenuPrimitive.Label.displayName 158 | 159 | const DropdownMenuSeparator = React.forwardRef< 160 | React.ElementRef, 161 | React.ComponentPropsWithoutRef 162 | >(({ className, ...props }, ref) => ( 163 | 168 | )) 169 | DropdownMenuSeparator.displayName = 
DropdownMenuPrimitive.Separator.displayName 170 | 171 | const DropdownMenuShortcut = ({ 172 | className, 173 | ...props 174 | }: React.HTMLAttributes) => { 175 | return ( 176 | 180 | ) 181 | } 182 | DropdownMenuShortcut.displayName = "DropdownMenuShortcut" 183 | 184 | export { 185 | DropdownMenu, 186 | DropdownMenuTrigger, 187 | DropdownMenuContent, 188 | DropdownMenuItem, 189 | DropdownMenuCheckboxItem, 190 | DropdownMenuRadioItem, 191 | DropdownMenuLabel, 192 | DropdownMenuSeparator, 193 | DropdownMenuShortcut, 194 | DropdownMenuGroup, 195 | DropdownMenuPortal, 196 | DropdownMenuSub, 197 | DropdownMenuSubContent, 198 | DropdownMenuSubTrigger, 199 | DropdownMenuRadioGroup, 200 | } 201 | -------------------------------------------------------------------------------- /src/pykleene/re.py: -------------------------------------------------------------------------------- 1 | from typing import TYPE_CHECKING 2 | import graphviz 3 | 4 | if TYPE_CHECKING: 5 | from pykleene.nfa import NFA 6 | from pykleene._helpers import BinaryTreeNode 7 | 8 | class RE: 9 | OPERATORS = ['+', '.', '*'] 10 | PARENTHESES = ['(', ')'] 11 | PRECEDENCE = { 12 | '+': 1, 13 | '.': 2, 14 | '*': 3, 15 | '(': 0, 16 | ')': 0 17 | } 18 | 19 | def _isSymbol(char: str) -> bool: 20 | return char not in RE.OPERATORS and char not in RE.PARENTHESES 21 | 22 | def format(regex: str) -> str: 23 | formatted = [] 24 | for i in range(len(regex) - 1): 25 | formatted.append(regex[i]) 26 | if (RE._isSymbol(regex[i]) or regex[i] in [')', '*']) and (RE._isSymbol(regex[i + 1]) or regex[i + 1] == '(' ): 27 | formatted.append('.') 28 | formatted.append(regex[-1]) 29 | return ''.join(formatted) 30 | 31 | def postfix(regex: str) -> str: 32 | stack = [] 33 | postfix = [] 34 | for char in regex: 35 | if char == '(': 36 | stack.append(char) 37 | elif char == ')': 38 | while stack[-1] != '(': 39 | postfix.append(stack.pop()) 40 | stack.pop() 41 | elif char in RE.PRECEDENCE: 42 | while stack and RE.PRECEDENCE[stack[-1]] >= RE.PRECEDENCE[char]: 43 | postfix.append(stack.pop()) 44 | stack.append(char) 45 | else: 46 | postfix.append(char) 47 | while stack: 48 | postfix.append(stack.pop()) 49 | return ''.join(postfix) 50 | 51 | def expressionTree(regex: str) -> 'BinaryTreeNode': 52 | from pykleene._helpers import BinaryTreeNode 53 | postfix = RE.postfix(RE.format(regex)) 54 | stack: list[BinaryTreeNode] = [] 55 | for char in postfix: 56 | if char not in RE.OPERATORS: 57 | stack.append(BinaryTreeNode(leftChild=None, data=char, rightChild=None)) 58 | else: 59 | if char == '*': 60 | leftChild = stack.pop() 61 | if leftChild.data in ['ε', 'φ']: # ε* = ε, φ* = ε 62 | node = BinaryTreeNode(leftChild=None, data='ε', rightChild=None) 63 | else: 64 | node = BinaryTreeNode(leftChild=leftChild, data=char, rightChild=None) 65 | elif char == '.': 66 | rightChild = stack.pop() 67 | leftChild = stack.pop() 68 | if leftChild.data == 'φ' or rightChild.data == 'φ': # φ.anything = φ 69 | node = BinaryTreeNode(leftChild=None, data='φ', rightChild=None) 70 | elif leftChild.data == 'ε': # ε.anything = anything 71 | node = rightChild 72 | elif rightChild.data == 'ε': 73 | node = leftChild 74 | else: 75 | node = BinaryTreeNode(leftChild=leftChild, data=char, rightChild=rightChild) 76 | elif char == '+': 77 | rightChild = stack.pop() 78 | leftChild = stack.pop() 79 | if leftChild.data == 'φ': 80 | node = rightChild 81 | elif rightChild.data == 'φ': 82 | node = leftChild 83 | elif leftChild.data == 'ε' and rightChild.data == 'ε': 84 | node = BinaryTreeNode(leftChild=None, data='ε', 
rightChild=None) 85 | else: 86 | node = BinaryTreeNode(leftChild=leftChild, data=char, rightChild=rightChild) 87 | stack.append(node) 88 | return stack.pop() 89 | 90 | def nfa(regex: str, method: str = 'regexTree') -> 'NFA': 91 | from pykleene.nfa import NFA 92 | from pykleene._helpers import BinaryTreeNode 93 | 94 | def regexTreeToNfa(node: BinaryTreeNode, cnt: int = 0) -> tuple[NFA, int]: 95 | from copy import deepcopy 96 | leftNfa: NFA 97 | rightNfa: NFA 98 | 99 | if node.leftChild is not None: 100 | leftNfa, cnt = regexTreeToNfa(node.leftChild, cnt) 101 | if node.rightChild is not None: 102 | rightNfa, cnt = regexTreeToNfa(node.rightChild, cnt) 103 | 104 | if RE._isSymbol(node.data): 105 | newNfa = NFA( 106 | states = {f"q{cnt}", f"q{cnt + 1}"}, 107 | alphabet= {node.data} if node.data not in ['ε', 'φ'] else set(), 108 | transitions = dict(), 109 | startStates = {f"q{cnt}"}, 110 | finalStates = {f"q{cnt + 1}"} 111 | ) 112 | cnt += 2 113 | if node.data != 'φ': 114 | newNfa = newNfa.addTransition(f"q{cnt - 2}", node.data, f"q{cnt - 1}") 115 | else: 116 | newNfa.transitions = dict() 117 | return newNfa, cnt 118 | 119 | elif node.data == '*': 120 | newNfa = deepcopy(leftNfa) 121 | newNfa = newNfa.addTransition(list(leftNfa.finalStates)[0], 'ε', list(leftNfa.startStates)[0]) 122 | # newNfa = newNfa.addTransition(list(leftNfa.startStates)[0], 'ε', list(leftNfa.finalStates)[0]) 123 | newStartState = f"q{cnt}" 124 | newFinalState = f"q{cnt + 1}" 125 | oldStartStates = leftNfa.startStates 126 | oldFinalStates = leftNfa.finalStates 127 | newNfa = NFA( 128 | states=newNfa.states | {newStartState, newFinalState}, 129 | alphabet=newNfa.alphabet, 130 | transitions=newNfa.transitions, 131 | startStates={newStartState}, 132 | finalStates={newFinalState} 133 | ) 134 | newNfa = newNfa.addTransition(newStartState, 'ε', list(oldStartStates)[0]) 135 | newNfa = newNfa.addTransition(list(oldFinalStates)[0], 'ε', newFinalState) 136 | newNfa = newNfa.addTransition(newStartState, 'ε', newFinalState) 137 | cnt += 2 138 | return newNfa, cnt 139 | 140 | elif node.data == '+': 141 | newStartState = f"q{cnt}" 142 | newFinalState = f"q{cnt + 1}" 143 | oldStartStatesLeftNfa = leftNfa.startStates 144 | oldStartStatesRightNfa = rightNfa.startStates 145 | oldFinalStatesLeftNfa = leftNfa.finalStates 146 | oldFinalStatesRightNfa = rightNfa.finalStates 147 | newNfa = NFA( 148 | states=leftNfa.states | rightNfa.states | {newStartState, newFinalState}, 149 | alphabet=leftNfa.alphabet | rightNfa.alphabet, 150 | transitions=leftNfa.transitions | rightNfa.transitions, 151 | startStates={newStartState}, 152 | finalStates={newFinalState} 153 | ) 154 | newNfa = newNfa.addTransition(newStartState, 'ε', list(oldStartStatesLeftNfa)[0]) 155 | newNfa = newNfa.addTransition(newStartState, 'ε', list(oldStartStatesRightNfa)[0]) 156 | newNfa = newNfa.addTransition(list(oldFinalStatesLeftNfa)[0], 'ε', newFinalState) 157 | newNfa = newNfa.addTransition(list(oldFinalStatesRightNfa)[0], 'ε', newFinalState) 158 | cnt += 2 159 | return newNfa, cnt 160 | 161 | elif node.data == '.': 162 | newNfa = NFA( 163 | states = leftNfa.states | rightNfa.states, 164 | alphabet = leftNfa.alphabet | rightNfa.alphabet, 165 | transitions = leftNfa.transitions | rightNfa.transitions, 166 | startStates = leftNfa.startStates, 167 | finalStates = rightNfa.finalStates 168 | ) 169 | 170 | newNfa = newNfa.addTransition(list(leftNfa.finalStates)[0], 'ε', list(rightNfa.startStates)[0]) 171 | return newNfa, cnt 172 | 173 | else: 174 | raise ValueError(f"Invalid operator 
{node.data}") 175 | 176 | def regexPostfixToNfa(postfix: str) -> NFA: 177 | from copy import deepcopy 178 | stack: list[NFA] = [] 179 | cnt: int = 0 180 | 181 | for char in postfix: 182 | if char not in RE.OPERATORS: 183 | newNfa = NFA( 184 | states={f"q{cnt}", f"q{cnt + 1}"}, 185 | alphabet={char} if char not in ['ε', 'φ'] else set(), 186 | transitions=dict(), 187 | startStates={f"q{cnt}"}, 188 | finalStates={f"q{cnt + 1}"} 189 | ) 190 | cnt += 2 191 | if char != 'φ': 192 | newNfa = newNfa.addTransition(f"q{cnt - 2}", char, f"q{cnt - 1}") 193 | stack.append(newNfa) 194 | 195 | elif char == '*': 196 | leftNfa = stack.pop() 197 | newNfa = deepcopy(leftNfa) 198 | newNfa = newNfa.addTransition(list(leftNfa.startStates)[0], 'ε', list(rightNfa.finalStates)[0]) 199 | newNfa = newNfa.addTransition(list(leftNfa.finalStates)[0], 'ε', list(leftNfa.startStates)[0]) 200 | stack.append(newNfa) 201 | 202 | elif char == '+': 203 | rightNfa = stack.pop() 204 | leftNfa = stack.pop() 205 | newNfa = NFA( 206 | states=leftNfa.states | rightNfa.states, 207 | alphabet=leftNfa.alphabet | rightNfa.alphabet, 208 | transitions=leftNfa.transitions | rightNfa.transitions, 209 | startStates=leftNfa.startStates, 210 | finalStates=rightNfa.finalStates 211 | ) 212 | newNfa = newNfa.addTransition(list(leftNfa.finalStates)[0], 'ε', list(rightNfa.finalStates)[0]) 213 | newNfa = newNfa.addTransition(list(leftNfa.startStates)[0], 'ε', list(rightNfa.startStates)[0]) 214 | stack.append(newNfa) 215 | 216 | elif char == '.': 217 | rightNfa = stack.pop() 218 | leftNfa = stack.pop() 219 | newNfa = NFA( 220 | states=leftNfa.states | rightNfa.states, 221 | alphabet=leftNfa.alphabet | rightNfa.alphabet, 222 | transitions=leftNfa.transitions | rightNfa.transitions, 223 | startStates=leftNfa.startStates, 224 | finalStates=rightNfa.finalStates 225 | ) 226 | newNfa = newNfa.addTransition(list(leftNfa.finalStates)[0], 'ε', list(rightNfa.startStates)[0]) 227 | stack.append(newNfa) 228 | 229 | else: 230 | raise ValueError(f"Invalid operator {char}") 231 | 232 | return stack.pop() 233 | 234 | if method == 'regexTree': 235 | return regexTreeToNfa(RE.expressionTree(RE.format(regex)))[0] 236 | 237 | if method == 'postfix': 238 | return regexPostfixToNfa(RE.postfix(RE.format(regex))) 239 | 240 | else: 241 | raise ValueError(f"Invalid method: {method}") 242 | 243 | def image(param, type: str = 'regexTree', dir: str = None, save: bool = False) -> None: 244 | from pykleene._config import graphvizConfig 245 | from pykleene._helpers import BinaryTreeNode 246 | dot = graphviz.Digraph(**graphvizConfig) 247 | 248 | def drawRegexTree(node: BinaryTreeNode): 249 | dot.node(str(id(node)), node.data) 250 | if node.leftChild is not None: 251 | drawRegexTree(node.leftChild) 252 | dot.edge(str(id(node)), str(id(node.leftChild))) 253 | if node.rightChild is not None: 254 | drawRegexTree(node.rightChild) 255 | dot.edge(str(id(node)), str(id(node.rightChild))) 256 | 257 | if type == 'regexTree': 258 | drawRegexTree(param) 259 | dot.render(f"{dir}/{id(param)}", format='png', cleanup=True) 260 | 261 | else: 262 | raise ValueError(f"Invalid type: {type}") 263 | 264 | -------------------------------------------------------------------------------- /src/pykleene/grammar.py: -------------------------------------------------------------------------------- 1 | from pykleene.nfa import NFA 2 | class Grammar: 3 | nonTerminals: set[str] = set() 4 | terminals: set[str] = set() 5 | productions: dict[str, set[str]] = dict() 6 | startSymbol: str 7 | 8 | def __init__(self, 9 | 
nonTerminals: set[str] = set(), 10 | terminals: set[str] = set(), 11 | productions: dict[str, set[str]] = dict(), 12 | startSymbol: str = None): 13 | self.nonTerminals = nonTerminals 14 | self.terminals = terminals 15 | self.productions = productions 16 | self.startSymbol = startSymbol 17 | 18 | def loadFromJSONDict(self, data: dict) -> None: 19 | try: 20 | newGrammar = Grammar() 21 | newGrammar.nonTerminals = set(data['nonTerminals']) 22 | newGrammar.terminals = set(data['terminals']) 23 | newGrammar.productions = dict() 24 | for lhs, productions in data['productions'].items(): 25 | if lhs not in newGrammar.productions: 26 | newGrammar.productions[lhs] = set() 27 | newGrammar.productions[lhs] = newGrammar.productions[lhs] | set(productions) 28 | newGrammar.startSymbol = data['startSymbol'] 29 | except Exception as e: 30 | print(f"Illegal JSONDict: {e}") 31 | 32 | if newGrammar.isValid(): 33 | self.nonTerminals = newGrammar.nonTerminals 34 | self.terminals = newGrammar.terminals 35 | self.productions = newGrammar.productions 36 | self.startSymbol = newGrammar.startSymbol 37 | else: 38 | raise ValueError("Invalid grammar") 39 | 40 | def isValid(self) -> bool: 41 | for nonTerminal in self.nonTerminals: 42 | if len(nonTerminal) > 1: 43 | return False 44 | for terminal in self.terminals: 45 | if len(terminal) > 1: 46 | return False 47 | if len(self.terminals & self.nonTerminals) > 0: 48 | return False 49 | if self.startSymbol not in self.nonTerminals: 50 | return False 51 | for lhs, productions in self.productions.items(): 52 | if len(lhs) == 0: 53 | return False 54 | for char in lhs: 55 | if char not in self.nonTerminals and char not in self.terminals: 56 | return False 57 | for rhs in productions: 58 | if len(rhs) == 0: 59 | return False 60 | if rhs == 'ε': 61 | continue 62 | for char in rhs: 63 | if char not in self.terminals and char not in self.nonTerminals: 64 | return False 65 | return True 66 | 67 | def isLeftLinear(self) -> bool: 68 | if not self.isValid(): 69 | return False 70 | 71 | for lhs, productions in self.productions.items(): 72 | if lhs not in self.nonTerminals: 73 | return False 74 | for rhs in productions: 75 | if len(rhs) == 0: 76 | return False 77 | if rhs == 'ε': 78 | continue 79 | if rhs[0] not in self.terminals and rhs[0] not in self.nonTerminals: 80 | return False 81 | for char in rhs[1:]: 82 | if char not in self.terminals: 83 | return False 84 | return True 85 | 86 | def isRightLinear(self) -> bool: 87 | if not self.isValid(): 88 | return False 89 | for lhs, productions in self.productions.items(): 90 | if lhs not in self.nonTerminals: 91 | return False 92 | for rhs in productions: 93 | if len(rhs) == 0: 94 | return False 95 | if rhs == 'ε': 96 | continue 97 | if rhs[-1] not in self.terminals and rhs[-1] not in self.nonTerminals: 98 | return False 99 | for char in rhs[:-1]: 100 | if char not in self.terminals: 101 | return False 102 | return True 103 | 104 | def isRegular(self) -> bool: 105 | if not self.isValid(): 106 | return False 107 | return self.isLeftLinear() or self.isRightLinear() 108 | 109 | def _getNewState(self) -> str: 110 | cnt = 0 111 | while f"q{cnt}" in self.nonTerminals: 112 | cnt += 1 113 | return f"q{cnt}" 114 | 115 | def reverse(self) -> 'Grammar': 116 | from copy import deepcopy 117 | grammar = deepcopy(self) 118 | newProductions = dict() 119 | for lhs, productions in self.productions.items(): 120 | newProductions[lhs] = set() 121 | for rhs in productions: 122 | newProductions[lhs].add(rhs[::-1]) 123 | grammar.productions = newProductions 124 | 
return grammar 125 | 126 | def nfa(self) -> NFA: 127 | def rightLinearToNfa() -> NFA: 128 | nfa = NFA( 129 | states={self.startSymbol}, 130 | alphabet=self.terminals, 131 | transitions=dict(), 132 | startStates={self.startSymbol}, 133 | finalStates=set() 134 | ) 135 | 136 | cnt = 0 137 | for lhs, productions in self.productions.items(): 138 | for rhs in productions: 139 | if rhs == 'ε': 140 | nfa.finalStates.add(lhs) 141 | elif rhs in self.terminals: 142 | nextState = self._getNewState() 143 | nfa.states.add(nextState) 144 | nfa = nfa.addTransition(lhs, rhs, nextState) 145 | nfa.finalStates.add(nextState) 146 | elif rhs in self.nonTerminals: 147 | nextState = rhs 148 | nfa = nfa.addTransition(lhs, 'ε', nextState) 149 | else: 150 | currState = lhs 151 | for i, char in enumerate(rhs[:-1]): 152 | if i == len(rhs) - 2 and rhs[i + 1] in self.nonTerminals: 153 | nextState = rhs[i + 1] 154 | nfa.states.add(nextState) 155 | nfa = nfa.addTransition(currState, char, nextState) 156 | break 157 | if i == len(rhs) - 1: 158 | nextState = self._getNewState() 159 | nfa.states.add(nextState) 160 | nfa.addTransition(currState, char, nextState) 161 | nfa = nfa.finalStates.add(nextState) 162 | else: 163 | nextState = self._getNewState() 164 | nfa.states.add(nextState) 165 | nfa = nfa.addTransition(currState, char, nextState) 166 | currState = nextState 167 | return nfa 168 | 169 | def leftLinearToNfa() -> NFA: 170 | reversedRightLinearGrammar = self.reverse() 171 | reversedGrammarNfa = reversedRightLinearGrammar.nfa() 172 | Nfa = reversedGrammarNfa.reverse() 173 | return Nfa 174 | 175 | if not self.isRegular(): 176 | raise ValueError("Grammar is not regular") 177 | if self.isRightLinear(): 178 | return rightLinearToNfa() 179 | if self.isLeftLinear(): 180 | return leftLinearToNfa() 181 | else: 182 | raise ValueError("Error in converting grammar to NFA") 183 | 184 | def toRightLinear(self) -> 'Grammar': 185 | if not self.isRegular(): 186 | raise ValueError("Grammar is not regular") 187 | if self.isRightLinear(): 188 | return self 189 | if self.isLeftLinear(): 190 | nfa = self.nfa() 191 | return nfa.grammar() 192 | else: 193 | raise ValueError("Error in converting grammar to right linear form") 194 | 195 | def toLeftLinear(self) -> 'Grammar': 196 | if not self.isRegular(): 197 | raise ValueError("Grammar is not regular") 198 | if self.isLeftLinear(): 199 | return self 200 | if self.isRightLinear(): 201 | nfa = self.nfa() 202 | reversedNfa = nfa.reverse() 203 | reversedRightLinearGrammar = reversedNfa.grammar() 204 | leftLinearGrammar = reversedRightLinearGrammar.reverse() 205 | return leftLinearGrammar 206 | 207 | def isContextFree(self) -> bool: 208 | if not self.isValid(): 209 | return False 210 | for lhs, productions in self.productions.items(): 211 | if len(lhs) != 1 or lhs not in self.nonTerminals: 212 | return False 213 | for rhs in productions: 214 | if len(rhs) == 0: 215 | return False 216 | if rhs == 'ε': 217 | continue 218 | for char in rhs: 219 | if char not in self.terminals and char not in self.nonTerminals: 220 | return False 221 | return True 222 | 223 | def isContextSensitive(self) -> bool: 224 | if not self.isValid(): 225 | return False 226 | 227 | lhsInEpsilonProduction = False 228 | for lhs, productions in self.productions.items(): 229 | if len(lhs) > len(productions) or len(lhs) == 0: 230 | return False 231 | for char in lhs: 232 | if char not in self.nonTerminals and char not in self.terminals: 233 | return False 234 | for rhs in productions: 235 | if len(rhs) == 0: 236 | return False 237 | 
if rhs == 'ε': 238 | if lhsInEpsilonProduction or lhs != self.startSymbol: 239 | return False 240 | lhsInEpsilonProduction = True 241 | for char in rhs: 242 | if char not in self.nonTerminals and char not in self.terminals: 243 | return False 244 | 245 | if lhsInEpsilonProduction: 246 | for lhs, productions in self.productions.items(): 247 | for rhs in productions: 248 | if self.startSymbol in rhs: 249 | return False 250 | 251 | return True 252 | 253 | def isUnrestricted(self) -> bool: 254 | if not self.isValid(): 255 | return False 256 | for lhs, productions in self.productions.items(): 257 | if len(lhs) == 0: 258 | return False 259 | for char in lhs: 260 | if char not in self.nonTerminals and char not in self.terminals: 261 | return False 262 | for rhs in productions: 263 | if len(rhs) == 0: 264 | return False 265 | if rhs == 'ε': 266 | continue 267 | for char in rhs: 268 | if char not in self.nonTerminals and char not in self.terminals: 269 | return False 270 | return True 271 | 272 | def inCNF(self) -> bool: 273 | if not self.isContextFree(): 274 | return False 275 | for _, productions in self.productions.items(): 276 | for rhs in productions: 277 | if len(rhs) == 0: 278 | return False 279 | if rhs == 'ε': 280 | continue 281 | if len(rhs) == 1 and rhs in self.terminals: 282 | continue 283 | if len(rhs) == 2 and rhs[0] in self.nonTerminals and rhs[1] in self.nonTerminals: 284 | continue 285 | return False 286 | 287 | return True 288 | 289 | def inGNF(self) -> bool: 290 | if not self.isContextFree(): 291 | return False 292 | for _, productions in self.productions.items(): 293 | for rhs in productions: 294 | if len(rhs) == 0: 295 | return False 296 | if rhs == 'ε': 297 | continue 298 | if len(rhs) == 1 and rhs not in self.terminals: 299 | return False 300 | if len(rhs) > 1: 301 | if rhs[0] not in self.terminals: 302 | return False 303 | for char in rhs[1:]: 304 | if char not in self.nonTerminals: 305 | return False 306 | 307 | return True -------------------------------------------------------------------------------- /src/pykleene/nfa.py: -------------------------------------------------------------------------------- 1 | from typing import TYPE_CHECKING 2 | import graphviz 3 | 4 | if TYPE_CHECKING: 5 | from pykleene.grammar import Grammar 6 | from pykleene.dfa import DFA 7 | class NFA: 8 | states: set[str] 9 | alphabet: set[str] 10 | transitions: dict[tuple[str, str], set[str]] 11 | startStates: set[str] 12 | finalStates: set[str] 13 | 14 | def __init__(self, 15 | states: set[str] = set(), 16 | alphabet: set[str] = set(), 17 | transitions: dict[tuple[str, str], set[str]] = dict(), 18 | startStates: set[str] = set(), 19 | finalStates: set[str] = set()): 20 | self.states = states 21 | self.alphabet = alphabet 22 | self.transitions = transitions 23 | self.startStates = startStates 24 | self.finalStates = finalStates 25 | 26 | def isValid(self) -> bool: 27 | for (state, symbol), nextStates in self.transitions.items(): 28 | if state not in self.states: 29 | return False 30 | if symbol not in self.alphabet and symbol != 'ε': 31 | return False 32 | for nextState in nextStates: 33 | if nextState not in self.states: 34 | return False 35 | if not self.startStates.issubset(self.states): 36 | return False 37 | if not self.finalStates.issubset(self.states): 38 | return False 39 | return True 40 | 41 | def accepts(self, string: str = None) -> bool: 42 | def run(state: str, string: str) -> bool: 43 | if len(string) == 0: 44 | return state in self.finalStates 45 | for nextState in 
self.epsilonClosure(state): 46 | for nextNextState in self.nextStates(nextState, string[0]): 47 | if run(nextNextState, string[1:]): 48 | return True 49 | return False 50 | 51 | for startState in self.startStates: 52 | if run(startState, string): 53 | return True 54 | 55 | return False 56 | 57 | def loadFromJSONDict(self, data: dict): 58 | nfa = NFA() 59 | try: 60 | nfa.states = set(data['states']) 61 | nfa.alphabet = set(data['alphabet']) 62 | nfa.transitions = dict() 63 | for transition in data['transitions']: 64 | nfa.transitions[(transition[0], transition[1])] = set(transition[2]) 65 | nfa.startStates = set(data['startStates']) 66 | nfa.finalStates = set(data['finalStates']) 67 | 68 | if nfa.isValid(): 69 | self.states = nfa.states 70 | self.alphabet = nfa.alphabet 71 | self.transitions = nfa.transitions 72 | self.startStates = nfa.startStates 73 | self.finalStates = nfa.finalStates 74 | else: 75 | raise Exception("Invalid NFA") 76 | except Exception as e: 77 | print(f"Error while loading NFA from JSON dict: {e}") 78 | 79 | def addTransition(self, startState: str, symbol: str, endState: str) -> 'NFA': 80 | from copy import deepcopy 81 | nfa = deepcopy(self) 82 | for (state, sym), nextStates in nfa.transitions.items(): 83 | if state == startState and sym == symbol: 84 | nextStates.add(endState) 85 | return nfa 86 | nfa.transitions[(startState, symbol)] = {endState} 87 | return nfa 88 | 89 | def singleStartStateNFA(self) -> 'NFA': 90 | if len(self.startStates) == 1: 91 | return self 92 | from copy import deepcopy 93 | newNfa = deepcopy(self) 94 | cnt = 0 95 | while f"q{cnt}" in newNfa.states: 96 | cnt += 1 97 | newStartState = f"q{cnt}" 98 | newNfa.states.add(newStartState) 99 | for startState in newNfa.startStates: 100 | newNfa.transitions[(newStartState, 'ε')] = {startState} 101 | newNfa.startStates = {newStartState} 102 | return newNfa 103 | 104 | 105 | def singleFinalStateNFA(self) -> 'NFA': 106 | if len(self.finalStates) == 1: 107 | return self 108 | from copy import deepcopy 109 | newNfa = deepcopy(self) 110 | cnt = 0 111 | while f"q{cnt}" in newNfa.states: 112 | cnt += 1 113 | newFinalState = f"q{cnt}" 114 | newNfa.states.add(newFinalState) 115 | for finalState in newNfa.finalStates: 116 | if (finalState, 'ε') in newNfa.transitions: 117 | newNfa.transitions[(finalState, 'ε')].add(newFinalState) 118 | else: 119 | newNfa.transitions[(finalState, 'ε')] = {newFinalState} 120 | newNfa.finalStates = {newFinalState} 121 | return newNfa 122 | 123 | def regex(self) -> str: 124 | nfa = self.singleStartStateNFA().singleFinalStateNFA() 125 | 126 | def R(startState: str, states: set[str], finalState: str) -> str: 127 | if len(states) == 0: 128 | alphabet = set() 129 | for (state, symbol), nextStates in nfa.transitions.items(): 130 | if state == startState and finalState in nextStates: 131 | alphabet.add(symbol) 132 | if startState != finalState: 133 | if len(alphabet) == 0: 134 | return 'φ' 135 | else: 136 | return '+'.join(alphabet) 137 | if startState == finalState: 138 | if 'ε' not in alphabet: 139 | alphabet.add('ε') 140 | return '+'.join(alphabet) 141 | else: 142 | r = states.pop() 143 | X = states 144 | return f"(({R(startState, X, finalState)})+({R(startState, X, r)})({R(r, X, r)})*({R(r, X, finalState)}))" 145 | 146 | return R(list(nfa.startStates)[0], nfa.states, list(nfa.finalStates)[0]) 147 | 148 | def reverse(self) -> 'NFA': 149 | reversedNfa = NFA( 150 | states=self.states, 151 | alphabet=self.alphabet, 152 | transitions=dict(), 153 | startStates=self.finalStates, 154 | 
finalStates=self.startStates 155 | ) 156 | transMap: dict[tuple[str, str], set[str]] = dict() 157 | for (state, symbol), nextStates in self.transitions.items(): 158 | for nextState in nextStates: 159 | if (nextState, symbol) not in transMap: 160 | transMap[(nextState, symbol)] = set() 161 | if state not in transMap[(nextState, symbol)]: 162 | transMap[(nextState, symbol)].add(state) 163 | reversedNfa.transitions = transMap 164 | return reversedNfa 165 | 166 | def grammar(self) -> 'Grammar': 167 | from pykleene.grammar import Grammar 168 | from pykleene.utils import _getNextLetter 169 | from copy import deepcopy 170 | nfa = self.singleStartStateNFA() 171 | grammar = Grammar( 172 | startSymbol=None, 173 | terminals=nfa.alphabet, 174 | nonTerminals=set(), 175 | productions=dict() 176 | ) 177 | stateToSymbol = dict() 178 | currSymbol = 'A' 179 | for (state, symbol), nextStates in nfa.transitions.items(): 180 | if state not in stateToSymbol: 181 | stateToSymbol[state] = currSymbol 182 | currSymbol = _getNextLetter(currSymbol) 183 | for nextState in nextStates: 184 | if nextState not in stateToSymbol: 185 | stateToSymbol[nextState] = currSymbol 186 | currSymbol = _getNextLetter(currSymbol) 187 | for nextState in nextStates: 188 | lhs = stateToSymbol[state] 189 | rhs = (symbol if symbol != 'ε' else '') + stateToSymbol[nextState] 190 | 191 | if lhs not in grammar.productions: 192 | grammar.productions[lhs] = set() 193 | grammar.productions[lhs].add(rhs) 194 | 195 | for _, value in stateToSymbol.items(): 196 | grammar.nonTerminals.add(value) 197 | 198 | nfaStartStates = deepcopy(nfa.startStates) 199 | grammar.startSymbol = stateToSymbol[nfaStartStates.pop()] 200 | 201 | for state in nfa.finalStates: 202 | if stateToSymbol[state] not in grammar.productions: 203 | grammar.productions[stateToSymbol[state]] = set() 204 | grammar.productions[stateToSymbol[state]].add('ε') 205 | 206 | return grammar 207 | 208 | def image(self, dir: str = None, save: bool = False) -> 'graphviz.Digraph': 209 | from pykleene._config import graphvizConfig 210 | 211 | dot = graphviz.Digraph(**graphvizConfig) 212 | 213 | for state in self.states: 214 | if state in self.finalStates: 215 | dot.node(state, shape='doublecircle') 216 | else: 217 | dot.node(state) 218 | 219 | for startState in self.startStates: 220 | dot.node(f'{id(startState)}', shape='point', label='') 221 | dot.edge(f'{id(startState)}', startState) 222 | 223 | for (state, symbol), nextStates in self.transitions.items(): 224 | for nextState in nextStates: 225 | dot.edge(state, nextState, label=symbol) 226 | 227 | if dir and save: 228 | try: 229 | dot.render(f"{dir}/{id(self)}", format='png', cleanup=True) 230 | except Exception as e: 231 | print(f"Error while saving image: {e}") 232 | 233 | return dot 234 | 235 | def epsilonClosure(self, state: str) -> set[str]: 236 | closure = set() 237 | closure.add(state) 238 | queue = [state] 239 | while len(queue) > 0: 240 | currentState = queue.pop(0) 241 | for (s, symbol), nextStates in self.transitions.items(): 242 | if s == currentState and symbol == 'ε': 243 | for nextState in nextStates: 244 | if nextState not in closure: 245 | closure.add(nextState) 246 | queue.append(nextState) 247 | return closure 248 | 249 | def nextStates(self, state: str, symbol: str) -> set[str]: 250 | for (s, sym), nStates in self.transitions.items(): 251 | if s == state and sym == symbol: 252 | return nStates 253 | return set() 254 | 255 | def dfa(self) -> 'DFA': 256 | from pykleene.dfa import DFA 257 | def closure(state: str, symbol: str) -> 
set[str]: 258 | closure = set() 259 | 260 | closure = closure | nfa.nextStates(state, symbol) 261 | 262 | for nextState in nfa.epsilonClosure(state): 263 | closure = closure | nfa.nextStates(nextState, symbol) 264 | 265 | for nextState in nfa.nextStates(state, symbol): 266 | closure = closure | nfa.epsilonClosure(nextState) 267 | 268 | for nextState in nfa.epsilonClosure(state): 269 | for nextNextState in nfa.nextStates(nextState, symbol): 270 | closure = closure | nfa.epsilonClosure(nextNextState) 271 | 272 | return closure 273 | 274 | from pprint import pprint 275 | nfa = self.singleStartStateNFA() 276 | nfa = nfa.singleFinalStateNFA() 277 | 278 | # nfa.image().view() 279 | 280 | # pprint(nfa.__dict__) 281 | 282 | alphabet: set[str] = self.alphabet 283 | transitions: dict[tuple[str, str], str] = dict() 284 | 285 | startState = nfa.epsilonClosure(list(nfa.startStates)[0]) 286 | 287 | states = set() 288 | states.add(str(sorted(startState))) 289 | queue: list[set[str]] = [startState] 290 | 291 | startState = str(sorted(startState)) 292 | 293 | finalStates = set() 294 | 295 | while len(queue) > 0: 296 | dfaState = queue.pop(0) 297 | for symbol in alphabet: 298 | nextDfaState = set() 299 | for state in dfaState: 300 | nextDfaState = nextDfaState | closure(state, symbol) 301 | transitions[(str(sorted(dfaState)), symbol)] = str(sorted(nextDfaState)) 302 | if len(dfaState & nfa.finalStates) > 0 and str(sorted(dfaState)) not in finalStates: 303 | finalStates.add(str(sorted(dfaState))) 304 | if str(sorted(nextDfaState)) not in states: 305 | queue.append(nextDfaState) 306 | states.add(str(sorted(nextDfaState))) 307 | 308 | dfa = DFA( 309 | states=states, 310 | alphabet=alphabet, 311 | transitions=transitions, 312 | startState=startState, 313 | finalStates=finalStates 314 | ) 315 | 316 | return dfa -------------------------------------------------------------------------------- /src/pykleene/dfa.py: -------------------------------------------------------------------------------- 1 | import graphviz 2 | class DFA: 3 | states: set[str] 4 | alphabet: set[str] 5 | transitions: dict[tuple[str, str], str] 6 | startState: str 7 | finalStates: set[str] 8 | 9 | def __init__(self, 10 | states: set[str] = set(), 11 | alphabet: set[str] = set(), 12 | transitions: dict[tuple[str, str], str] = dict(), 13 | startState: str = None, 14 | finalStates: set[str] = set()): 15 | self.states = states 16 | self.alphabet = alphabet 17 | self.transitions = transitions 18 | self.startState = startState 19 | self.finalStates = finalStates 20 | 21 | def loadFromJSONDict(self, data: dict): 22 | try: 23 | dfa = DFA() 24 | dfa.states = set(data['states']) 25 | dfa.alphabet = set(data['alphabet']) 26 | dfa.transitions = {tuple(transition[:2]): transition[2] for transition in data["transitions"]} 27 | dfa.startState = data['startState'] 28 | dfa.finalStates = set(data['finalStates']) 29 | 30 | if dfa.isValid(): 31 | self.states = dfa.states 32 | self.alphabet = dfa.alphabet 33 | self.transitions = dfa.transitions 34 | self.startState = dfa.startState 35 | self.finalStates = dfa.finalStates 36 | else: 37 | raise Exception("Invalid DFA") 38 | except Exception as e: 39 | print(f"Error while loading DFA from JSON: {e}") 40 | 41 | def isValid(self) -> bool: 42 | if self.startState not in self.states: 43 | return False 44 | if not self.finalStates.issubset(self.states): 45 | return False 46 | for state in self.states: 47 | for symbol in self.alphabet: 48 | if (state, symbol) not in self.transitions: 49 | return False 50 | if 
self.transitions[(state, symbol)] not in self.states: 51 | return False 52 | if len(self.transitions) != len(self.states) * len(self.alphabet): 53 | return False 54 | return True 55 | 56 | def __str__(self): 57 | states = ", ".join(self.states) 58 | alphabet = ", ".join(self.alphabet) 59 | transitions = "\n".join([f"δ({q}, {a}) = {self.transitions[(q, a)]}" for (q, a) in self.transitions.items()]) 60 | startState = self.startState 61 | finalStates = ", ".join(self.finalStates) 62 | 63 | return f"Q = {{{states}}}\n\nΣ = {{{alphabet}}}\n\n{{{transitions}}}\n\ns = {startState}\n\nF = {{{finalStates}}}" 64 | 65 | def accepts(self, string: str = None, verbose: str = False) -> bool: 66 | currentState = self.startState 67 | for symbol in string: 68 | currentState = self.transitions[(currentState, symbol)] 69 | print(f"({currentState}, {symbol}) -> {self.transitions[(currentState, symbol)]}") if verbose else None 70 | return currentState in self.finalStates 71 | 72 | def nextState(self, currentState: str, symbol: str) -> str: 73 | if (currentState, symbol) in self.transitions: 74 | return self.transitions[(currentState, symbol)] 75 | else: 76 | return None 77 | 78 | def minimal(self) -> 'DFA': 79 | from pykleene.utils import getAllStrings 80 | import copy 81 | 82 | dfaCopy = copy.deepcopy(self) 83 | 84 | states = list(dfaCopy.states) 85 | alphabet = list(dfaCopy.alphabet) 86 | transitions = dfaCopy.transitions 87 | finalStates = dfaCopy.finalStates 88 | startState = dfaCopy.startState 89 | 90 | grid = [[True for _ in range(len(states))] for _ in range(len(states))] 91 | equivalenceClasses: list[list[str]] = [] 92 | 93 | strings = getAllStrings(alphabet, len(states) - 1) 94 | 95 | for i in range(len(states)): 96 | for j in range(i): 97 | dfa1 = copy.deepcopy(dfaCopy) 98 | dfa2 = copy.deepcopy(dfaCopy) 99 | dfa1.startState = states[i] 100 | dfa2.startState = states[j] 101 | 102 | for string in strings: 103 | if dfa1.accepts(string) != dfa2.accepts(string): 104 | grid[i][j] = False 105 | break 106 | 107 | for i in range(len(states)): 108 | for j in range(i + 1): 109 | if grid[i][j]: 110 | equivalenceClassFound = False 111 | if i != j: 112 | for equivalenceClass in equivalenceClasses: 113 | if states[i] in equivalenceClass: 114 | equivalenceClass.append(states[j]) 115 | equivalenceClassFound = True 116 | break 117 | elif states[j] in equivalenceClass: 118 | equivalenceClass.append(states[i]) 119 | equivalenceClassFound = True 120 | break 121 | if not equivalenceClassFound: 122 | equivalenceClasses.append([states[i], states[j]]) 123 | else: 124 | for equivalenceClass in equivalenceClasses: 125 | if states[i] in equivalenceClass: 126 | equivalenceClassFound = True 127 | break 128 | if not equivalenceClassFound: 129 | equivalenceClasses.append([states[i]]) 130 | 131 | newTransitions = {} 132 | for (state, symbol), nextState in transitions.items(): 133 | for equivalenceClass in equivalenceClasses: 134 | if state in equivalenceClass: 135 | state = str(equivalenceClass) 136 | if nextState in equivalenceClass: 137 | nextState = str(equivalenceClass) 138 | newTransitions[(state, symbol)] = nextState 139 | 140 | newStartState = None 141 | for equivalenceClass in equivalenceClasses: 142 | if startState in equivalenceClass: 143 | newStartState = str(equivalenceClass) 144 | break 145 | 146 | newFinalStates = set() 147 | for finalState in finalStates: 148 | for equivalenceClass in equivalenceClasses: 149 | if finalState in equivalenceClass: 150 | newFinalStates.add(str(equivalenceClass)) 151 | break 152 | 153 | 
newStates = [str(equivalenceClass) for equivalenceClass in equivalenceClasses] 154 | 155 | newDfa = DFA( 156 | states=set(newStates), 157 | alphabet=set(alphabet), 158 | transitions=newTransitions, 159 | startState=newStartState, 160 | finalStates=newFinalStates 161 | ) 162 | 163 | return newDfa 164 | 165 | def isomorphic(self, dfa: 'DFA') -> bool: 166 | minDfa1 = self.minimal() 167 | minDfa2 = dfa.minimal() 168 | 169 | if minDfa1.alphabet != minDfa2.alphabet: 170 | return False 171 | alphabet = list(minDfa1.alphabet) 172 | 173 | if len(minDfa1.states) != len(minDfa2.states): 174 | return False 175 | 176 | if (minDfa1.startState in minDfa1.finalStates) != (minDfa2.startState in minDfa2.finalStates): 177 | return False 178 | 179 | visited = {} 180 | 181 | bfsQueue = [(minDfa1.startState, minDfa2.startState)] 182 | visited[(minDfa1.startState, minDfa2.startState)] = True 183 | 184 | def areStatesNonEquivalent(state1: str, state2: str) -> bool: 185 | if (state1 in minDfa1.finalStates) != (state2 in minDfa2.finalStates): 186 | return True 187 | for visitedState1, visitedState2 in visited: 188 | if visitedState1 == state1 and visitedState2 != state2: 189 | return True 190 | if visitedState1 != state1 and visitedState2 == state2: 191 | return True 192 | return False 193 | 194 | while bfsQueue: 195 | state1, state2 = bfsQueue.pop(0) 196 | 197 | for symbol in alphabet: 198 | nextState1 = minDfa1.nextState(state1, symbol) 199 | nextState2 = minDfa2.nextState(state2, symbol) 200 | 201 | if areStatesNonEquivalent(nextState1, nextState2): 202 | return False 203 | 204 | if (nextState1, nextState2) not in visited: 205 | visited[(nextState1, nextState2)] = True 206 | bfsQueue.append((nextState1, nextState2)) 207 | 208 | return True 209 | 210 | def image(self, dir: str = None, save: bool = False) -> 'graphviz.Digraph': 211 | from pykleene._config import graphvizConfig 212 | 213 | dot = graphviz.Digraph(**graphvizConfig) 214 | 215 | for state in self.states: 216 | if state in self.finalStates: 217 | dot.node(state, shape='doublecircle') 218 | else: 219 | dot.node(state) 220 | 221 | dot.node(f'{id(self.startState)}', shape='point', label='') 222 | dot.edge(f'{id(self.startState)}', self.startState) 223 | 224 | for (state, symbol), nextState in self.transitions.items(): 225 | dot.edge(state, nextState, label=symbol) 226 | 227 | if dir and save: 228 | try: 229 | dot.render(f"{dir}/{id(self)}", format='png', cleanup=True) 230 | except Exception as e: 231 | print(f"Error while saving image: {e}") 232 | return dot 233 | 234 | def union(self, dfa: 'DFA') -> 'DFA': 235 | if self.alphabet != dfa.alphabet: 236 | print("Alphabets of the DFAs do not match.") 237 | return None 238 | 239 | newStates = set((state1, state2) for state1 in self.states for state2 in dfa.states) 240 | newFinalStates = set((state1, state2) for state1 in self.finalStates for state2 in dfa.states) | set((state1, state2) for state1 in self.states for state2 in dfa.finalStates) 241 | newStartState = (self.startState, dfa.startState) 242 | newTransitions = set( 243 | ((state1, state2), symbol, (self.nextState(state1, symbol), dfa.nextState(state2, symbol))) 244 | for state1, state2 in newStates 245 | for symbol in self.alphabet 246 | ) 247 | 248 | unionDfa = DFA( 249 | states = set(str(state) for state in newStates), 250 | alphabet = self.alphabet, 251 | transitions = {(str(state), symbol): str(nextState) for (state, symbol, nextState) in newTransitions}, 252 | startState = str(newStartState), 253 | finalStates = set(str(state) for state in 
newFinalStates) 254 | ) 255 | 256 | return unionDfa.reachable() 257 | 258 | def complement(self) -> 'DFA': 259 | complementDfa = DFA( 260 | states = self.states, 261 | alphabet = self.alphabet, 262 | transitions = self.transitions, 263 | startState = self.startState, 264 | finalStates = self.states - self.finalStates 265 | ) 266 | 267 | return complementDfa 268 | 269 | def intersection(self, dfa: 'DFA') -> 'DFA': 270 | complementSelf = self.complement() 271 | complementDfa = dfa.complement() 272 | unionDfa = complementSelf.union(complementDfa) 273 | complementUnionDfa = unionDfa.complement() 274 | 275 | return complementUnionDfa 276 | 277 | def reachable(self) -> 'DFA': 278 | reachableStates = set() 279 | reachableStates.add(self.startState) 280 | 281 | statesQueue = [self.startState] 282 | 283 | while statesQueue: 284 | state = statesQueue.pop(0) 285 | for symbol in self.alphabet: 286 | nextState = self.nextState(state, symbol) 287 | if nextState not in reachableStates: 288 | reachableStates.add(nextState) 289 | statesQueue.append(nextState) 290 | 291 | newFinalStates = self.finalStates & reachableStates 292 | newTransitions = {(state, symbol): nextState 293 | for (state, symbol), nextState in self.transitions.items() 294 | if state in reachableStates and nextState in reachableStates} 295 | 296 | reachableDfa = DFA( 297 | states = reachableStates, 298 | alphabet = self.alphabet, 299 | transitions = newTransitions, 300 | startState = self.startState, 301 | finalStates = newFinalStates 302 | ) 303 | 304 | return reachableDfa 305 | 306 | def isLangSubset(self, dfa: 'DFA') -> bool: 307 | intersectionDfa = self.intersection(dfa) 308 | minimalIntersectionDfa = intersectionDfa.minimal() 309 | minSelf = self.minimal() 310 | return minSelf.isomorphic(minimalIntersectionDfa) 311 | 312 | def difference(self, dfa: 'DFA') -> 'DFA': 313 | return self.intersection(dfa.complement()) 314 | 315 | def symmetricDifference(self, dfa: 'DFA') -> 'DFA': 316 | return self.union(dfa).difference(self.intersection(dfa)) 317 | 318 | --------------------------------------------------------------------------------
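A brief, hedged usage sketch of the automata classes collected above (the variable `nfa`, the states `s`/`t`, and the toy language "strings ending in b" are illustrative and not taken from the repository's test inputs): construct a small NFA directly, check membership, and convert it to an equivalent DFA with `dfa()`:

```python
from pykleene.nfa import NFA

# Toy NFA over {a, b} that accepts strings ending in "b".
nfa = NFA(
    states={"s", "t"},
    alphabet={"a", "b"},
    transitions={("s", "a"): {"s"}, ("s", "b"): {"s", "t"}},
    startStates={"s"},
    finalStates={"t"},
)

print(nfa.accepts("ab"))    # True
print(nfa.accepts("ba"))    # False

# Subset construction: the resulting DFA should accept the same language.
dfa = nfa.dfa()
print(dfa.accepts("ab"))    # True
print(dfa.accepts("ba"))    # False
```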