├── .gitignore ├── LICENSE ├── NFA-to-DFA └── nfa-to-dfa.py ├── README.md ├── ex-10 intermediate-code-gen ├── 689-ex10-intermediate-CodeGen.docx └── intermediate-codeGen.py ├── ex-11 quadruples ├── cd-exp-11-689.docx └── quadruples.py ├── ex-5 first & follow └── first&follow.py ├── ex-6 predictive parsing & shift reduce ├── 689-lab6-predictiveParsing.docx └── __pycache__ │ ├── predictive.cpython-38.pyc │ ├── predictive_parser.py │ ├── tempCodeRunnerFile.py │ └── trial_predictive_parser.py ├── ex-7 shift reduce ├── 689-ex7-shift_reduce.docx └── shift_reduce.py ├── ex-8 leading&trailing sets ├── 689-ex8-leading&trailing.docx └── leading_trailing.py ├── ex-9 lr(0) ├── 689-ex9-lr0.docx └── lr_0.py ├── ex2-LexicalAnalysis ├── C program lexical.docx ├── ex2.c ├── ex2.exe └── lexical-analyzer.py ├── ex3a-regex to nfa ├── regex-to-nfa.docx ├── regextonfa.py └── tempCodeRunnerFile.py ├── ex3b- nfa to dfa ├── CD-683-exp4.docx ├── ex-4.docx └── nfa2dfa.py └── ex4- left recursion & factoring ├── ex4-689-elr&lf.docx ├── leftFactoring.py ├── leftRecursion.py └── tempCodeRunnerFile.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Devangi Purkayastha 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /NFA-to-DFA/nfa-to-dfa.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | 3 | # Taking NFA input from User 4 | 5 | nfa = {} 6 | n = int(input("No. of states : ")) #Enter total no. of states 7 | t = int(input("No. of transitions : ")) #Enter total no. 
of transitions/paths eg: a,b so input 2 for a,b,c input 3 8 | for i in range(n): 9 | state = input("state name : ") #Enter state name eg: A, B, C, q1, q2 ..etc 10 | nfa[state] = {} #Creating a nested dictionary 11 | for j in range(t): 12 | path = input("path : ") #Enter path eg : a or b in {a,b} 0 or 1 in {0,1} 13 | print("Enter end state from state {} travelling through path {} : ".format(state,path)) 14 | reaching_state = [x for x in input().split()] #Enter all the end states that 15 | nfa[state][path] = reaching_state #Assigning the end states to the paths in dictionary 16 | 17 | print("\nNFA :- \n") 18 | print(nfa) #Printing NFA 19 | print("\nPrinting NFA table :- ") 20 | nfa_table = pd.DataFrame(nfa) 21 | print(nfa_table.transpose()) 22 | 23 | print("Enter final state of NFA : ") 24 | nfa_final_state = [x for x in input().split()] # Enter final state/states of NFA 25 | ################################################### 26 | 27 | new_states_list = [] #holds all the new states created in dfa 28 | dfa = {} #dfa dictionary/table or the output structure we needed 29 | keys_list = list(list(nfa.keys())[0]) #conatins all the states in nfa plus the states created in dfa are also appended further 30 | path_list = list(nfa[keys_list[0]].keys()) #list of all the paths eg: [a,b] or [0,1] 31 | 32 | ################################################### 33 | 34 | # Computing first row of DFA transition table 35 | 36 | dfa[keys_list[0]] = {} #creating a nested dictionary in dfa 37 | for y in range(t): 38 | var = "".join(nfa[keys_list[0]][path_list[y]]) #creating a single string from all the elements of the list which is a new state 39 | dfa[keys_list[0]][path_list[y]] = var #assigning the state in DFA table 40 | if var not in keys_list: #if the state is newly created 41 | new_states_list.append(var) #then append it to the new_states_list 42 | keys_list.append(var) #as well as to the keys_list which contains all the states 43 | 44 | ################################################### 45 | 46 | # Computing the other rows of DFA transition table 47 | 48 | while len(new_states_list) != 0: #consition is true only if the new_states_list is not empty 49 | dfa[new_states_list[0]] = {} #taking the first element of the new_states_list and examining it 50 | for _ in range(len(new_states_list[0])): 51 | for i in range(len(path_list)): 52 | temp = [] #creating a temporay list 53 | for j in range(len(new_states_list[0])): 54 | temp += nfa[new_states_list[0][j]][path_list[i]] #taking the union of the states 55 | s = "" 56 | s = s.join(temp) #creating a single string(new state) from all the elements of the list 57 | if s not in keys_list: #if the state is newly created 58 | new_states_list.append(s) #then append it to the new_states_list 59 | keys_list.append(s) #as well as to the keys_list which contains all the states 60 | dfa[new_states_list[0]][path_list[i]] = s #assigning the new state in the DFA table 61 | 62 | new_states_list.remove(new_states_list[0]) #Removing the first element in the new_states_list 63 | 64 | print("\nDFA :- \n") 65 | print(dfa) #Printing the DFA created 66 | print("\nPrinting DFA table :- ") 67 | dfa_table = pd.DataFrame(dfa) 68 | print(dfa_table.transpose()) 69 | 70 | dfa_states_list = list(dfa.keys()) 71 | dfa_final_states = [] 72 | for x in dfa_states_list: 73 | for i in x: 74 | if i in nfa_final_state: 75 | dfa_final_states.append(x) 76 | break 77 | 78 | print("\nFinal states of the DFA are : ",dfa_final_states) #Printing Final states of DFA 79 | 80 | 
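Added note: nfa-to-dfa.py above reads the NFA interactively and fills the DFA table row by row. The snippet below is a minimal, non-interactive sketch of the same subset construction for quick reference; the function name, the hard-coded example NFA, and its state names are illustrative assumptions and are not part of the script.

def subset_construction(nfa, start, finals, alphabet):
    # nfa: {state: {symbol: [next states]}}; epsilon moves are not handled,
    # matching the interactive script above.
    start_set = frozenset([start])
    dfa = {}                      # frozenset of NFA states -> {symbol: frozenset}
    work = [start_set]
    while work:
        current = work.pop()
        if current in dfa:
            continue
        dfa[current] = {}
        for sym in alphabet:
            # union of the NFA moves of every state in the current subset
            target = frozenset(s for q in current for s in nfa.get(q, {}).get(sym, []))
            dfa[current][sym] = target
            if target not in dfa:
                work.append(target)
    dfa_finals = [s for s in dfa if s & set(finals)]
    return dfa, dfa_finals

if __name__ == "__main__":
    example_nfa = {"A": {"a": ["A", "B"], "b": ["A"]},   # assumed example, not user input
                   "B": {"a": [], "b": ["C"]},
                   "C": {"a": [], "b": []}}
    table, finals = subset_construction(example_nfa, "A", ["C"], ["a", "b"])
    for subset, moves in table.items():
        print(set(subset) or "{}", {sym: set(t) or "{}" for sym, t in moves.items()})
    print("DFA final states:", [set(f) for f in finals])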
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Compiler-Design 2 | A repository containing all experiments for the course Compiler Design. 3 | -------------------------------------------------------------------------------- /ex-10 intermediate-code-gen/689-ex10-intermediate-CodeGen.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devangi2000/Compiler-Design/66bcc4a1f59b8319819fdab03278017a615e7944/ex-10 intermediate-code-gen/689-ex10-intermediate-CodeGen.docx -------------------------------------------------------------------------------- /ex-10 intermediate-code-gen/intermediate-codeGen.py: -------------------------------------------------------------------------------- 1 | OPERATORS = set(['+', '-', '*', '/', '(', ')']) 2 | PRI = {'+':1, '-':1, '*':2, '/':2} 3 | 4 | ### INFIX ===> POSTFIX ### 5 | def infix_to_postfix(formula): 6 | stack = [] # only pop when the coming op has priority 7 | output = '' 8 | for ch in formula: 9 | if ch not in OPERATORS: 10 | output += ch 11 | elif ch == '(': 12 | stack.append('(') 13 | elif ch == ')': 14 | while stack and stack[-1] != '(': 15 | output += stack.pop() 16 | stack.pop() # pop '(' 17 | else: 18 | while stack and stack[-1] != '(' and PRI[ch] <= PRI[stack[-1]]: 19 | output += stack.pop() 20 | stack.append(ch) 21 | # leftover 22 | while stack: 23 | output += stack.pop() 24 | print(f'POSTFIX: {output}') 25 | return output 26 | 27 | ### INFIX ===> PREFIX ### 28 | def infix_to_prefix(formula): 29 | op_stack = [] 30 | exp_stack = [] 31 | for ch in formula: 32 | if not ch in OPERATORS: 33 | exp_stack.append(ch) 34 | elif ch == '(': 35 | op_stack.append(ch) 36 | elif ch == ')': 37 | while op_stack[-1] != '(': 38 | op = op_stack.pop() 39 | a = exp_stack.pop() 40 | b = exp_stack.pop() 41 | exp_stack.append( op+b+a ) 42 | op_stack.pop() # pop '(' 43 | else: 44 | while op_stack and op_stack[-1] != '(' and PRI[ch] <= PRI[op_stack[-1]]: 45 | op = op_stack.pop() 46 | a = exp_stack.pop() 47 | b = exp_stack.pop() 48 | exp_stack.append( op+b+a ) 49 | op_stack.append(ch) 50 | 51 | # leftover 52 | while op_stack: 53 | op = op_stack.pop() 54 | a = exp_stack.pop() 55 | b = exp_stack.pop() 56 | exp_stack.append( op+b+a ) 57 | print(f'PREFIX: {exp_stack[-1]}') 58 | return exp_stack[-1] 59 | 60 | ### THREE ADDRESS CODE GENERATION ### 61 | def generate3AC(pos): 62 | print("### THREE ADDRESS CODE GENERATION ###") 63 | exp_stack = [] 64 | t = 1 65 | 66 | for i in pos: 67 | if i not in OPERATORS: 68 | exp_stack.append(i) 69 | else: 70 | print(f't{t} := {exp_stack[-2]} {i} {exp_stack[-1]}') 71 | exp_stack=exp_stack[:-2] 72 | exp_stack.append(f't{t}') 73 | t+=1 74 | 75 | expres = input("INPUT THE EXPRESSION: ") 76 | pre = infix_to_prefix(expres) 77 | pos = infix_to_postfix(expres) 78 | generate3AC(pos) -------------------------------------------------------------------------------- /ex-11 quadruples/cd-exp-11-689.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devangi2000/Compiler-Design/66bcc4a1f59b8319819fdab03278017a615e7944/ex-11 quadruples/cd-exp-11-689.docx -------------------------------------------------------------------------------- /ex-11 quadruples/quadruples.py: -------------------------------------------------------------------------------- 1 | OPERATORS = set(['+', '-', '*', '/', '(', ')']) 
2 | PRI = {'+':1, '-':1, '*':2, '/':2} 3 | 4 | ### INFIX ===> POSTFIX ### 5 | def infix_to_postfix(formula): 6 | stack = [] # only pop when the coming op has priority 7 | output = '' 8 | for ch in formula: 9 | if ch not in OPERATORS: 10 | output += ch 11 | elif ch == '(': 12 | stack.append('(') 13 | elif ch == ')': 14 | while stack and stack[-1] != '(': 15 | output += stack.pop() 16 | stack.pop() # pop '(' 17 | else: 18 | while stack and stack[-1] != '(' and PRI[ch] <= PRI[stack[-1]]: 19 | output += stack.pop() 20 | stack.append(ch) 21 | # leftover 22 | while stack: 23 | output += stack.pop() 24 | print(f'POSTFIX: {output}') 25 | return output 26 | 27 | ### INFIX ===> PREFIX ### 28 | def infix_to_prefix(formula): 29 | op_stack = [] 30 | exp_stack = [] 31 | for ch in formula: 32 | if not ch in OPERATORS: 33 | exp_stack.append(ch) 34 | elif ch == '(': 35 | op_stack.append(ch) 36 | elif ch == ')': 37 | while op_stack[-1] != '(': 38 | op = op_stack.pop() 39 | a = exp_stack.pop() 40 | b = exp_stack.pop() 41 | exp_stack.append( op+b+a ) 42 | op_stack.pop() # pop '(' 43 | else: 44 | while op_stack and op_stack[-1] != '(' and PRI[ch] <= PRI[op_stack[-1]]: 45 | op = op_stack.pop() 46 | a = exp_stack.pop() 47 | b = exp_stack.pop() 48 | exp_stack.append( op+b+a ) 49 | op_stack.append(ch) 50 | 51 | # leftover 52 | while op_stack: 53 | op = op_stack.pop() 54 | a = exp_stack.pop() 55 | b = exp_stack.pop() 56 | exp_stack.append( op+b+a ) 57 | print(f'PREFIX: {exp_stack[-1]}') 58 | return exp_stack[-1] 59 | 60 | ### THREE ADDRESS CODE GENERATION ### 61 | def generate3AC(pos): 62 | print("### THREE ADDRESS CODE GENERATION ###") 63 | exp_stack = [] 64 | t = 1 65 | 66 | for i in pos: 67 | if i not in OPERATORS: 68 | exp_stack.append(i) 69 | else: 70 | print(f't{t} := {exp_stack[-2]} {i} {exp_stack[-1]}') 71 | exp_stack=exp_stack[:-2] 72 | exp_stack.append(f't{t}') 73 | t+=1 74 | 75 | expres = input("INPUT THE EXPRESSION: ") 76 | pre = infix_to_prefix(expres) 77 | pos = infix_to_postfix(expres) 78 | generate3AC(pos) 79 | def Quadruple(pos): 80 | stack = [] 81 | op = [] 82 | x = 1 83 | for i in pos: 84 | if i not in OPERATORS: 85 | stack.append(i) 86 | elif i == '-': 87 | op1 = stack.pop() 88 | stack.append("t(%s)" %x) 89 | print("{0:^4s} | {1:^4s} | {2:^4s}|{3:4s}".format(i,op1,"(-)"," t(%s)" %x)) 90 | x = x+1 91 | if stack != []: 92 | op2 = stack.pop() 93 | op1 = stack.pop() 94 | print("{0:^4s} | {1:^4s} | {2:^4s}|{3:4s}".format("+",op1,op2," t(%s)" %x)) 95 | stack.append("t(%s)" %x) 96 | x = x+1 97 | elif i == '=': 98 | op2 = stack.pop() 99 | op1 = stack.pop() 100 | print("{0:^4s} | {1:^4s} | {2:^4s}|{3:4s}".format(i,op2,"(-)",op1)) 101 | else: 102 | op1 = stack.pop() 103 | op2 = stack.pop() 104 | print("{0:^4s} | {1:^4s} | {2:^4s}|{3:4s}".format(i,op2,op1," t(%s)" %x)) 105 | stack.append("t(%s)" %x) 106 | x = x+1 107 | print("The quadruple for the expression ") 108 | print(" OP | ARG 1 |ARG 2 |RESULT ") 109 | Quadruple(pos) 110 | 111 | def Triple(pos): 112 | stack = [] 113 | op = [] 114 | x = 0 115 | for i in pos: 116 | if i not in OPERATORS: 117 | stack.append(i) 118 | elif i == '-': 119 | op1 = stack.pop() 120 | stack.append("(%s)" %x) 121 | print("{0:^4s} | {1:^4s} | {2:^4s}".format(i,op1,"(-)")) 122 | x = x+1 123 | if stack != []: 124 | op2 = stack.pop() 125 | op1 = stack.pop() 126 | print("{0:^4s} | {1:^4s} | {2:^4s}".format("+",op1,op2)) 127 | stack.append("(%s)" %x) 128 | x = x+1 129 | elif i == '=': 130 | op2 = stack.pop() 131 | op1 = stack.pop() 132 | print("{0:^4s} | {1:^4s} | 
{2:^4s}".format(i,op1,op2)) 133 | else: 134 | op1 = stack.pop() 135 | if stack != []: 136 | op2 = stack.pop() 137 | print("{0:^4s} | {1:^4s} | {2:^4s}".format(i,op2,op1)) 138 | stack.append("(%s)" %x) 139 | x = x+1 140 | print("The triple for given expression") 141 | print(" OP | ARG 1 |ARG 2 ") 142 | Triple(pos) -------------------------------------------------------------------------------- /ex-5 first & follow/first&follow.py: -------------------------------------------------------------------------------- 1 | gram = { 2 | "E":["E+T","T"], 3 | "T":["T*F","F"], 4 | "F":["(E)","id"] 5 | } 6 | 7 | def removeDirectLR(gramA, A): 8 | temp = gramA[A] 9 | tempCr = [] 10 | tempInCr = [] 11 | for i in temp: 12 | if i[0] == A: 13 | #tempInCr.append(i[1:]) 14 | tempInCr.append(i[1:]+[A+"'"]) 15 | else: 16 | #tempCr.append(i) 17 | tempCr.append(i+[A+"'"]) 18 | tempInCr.append(["e"]) 19 | gramA[A] = tempCr 20 | gramA[A+"'"] = tempInCr 21 | return gramA 22 | 23 | 24 | def checkForIndirect(gramA, a, ai): 25 | if ai not in gramA: 26 | return False 27 | if a == ai: 28 | return True 29 | for i in gramA[ai]: 30 | if i[0] == ai: 31 | return False 32 | if i[0] in gramA: 33 | return checkForIndirect(gramA, a, i[0]) 34 | return False 35 | 36 | def rep(gramA, A): 37 | temp = gramA[A] 38 | newTemp = [] 39 | for i in temp: 40 | if checkForIndirect(gramA, A, i[0]): 41 | t = [] 42 | for k in gramA[i[0]]: 43 | t=[] 44 | t+=k 45 | t+=i[1:] 46 | newTemp.append(t) 47 | 48 | else: 49 | newTemp.append(i) 50 | gramA[A] = newTemp 51 | return gramA 52 | 53 | def rem(gram): 54 | c = 1 55 | conv = {} 56 | gramA = {} 57 | revconv = {} 58 | for j in gram: 59 | conv[j] = "A"+str(c) 60 | gramA["A"+str(c)] = [] 61 | c+=1 62 | 63 | for i in gram: 64 | for j in gram[i]: 65 | temp = [] 66 | for k in j: 67 | if k in conv: 68 | temp.append(conv[k]) 69 | else: 70 | temp.append(k) 71 | gramA[conv[i]].append(temp) 72 | 73 | 74 | for i in range(c-1,0,-1): 75 | ai = "A"+str(i) 76 | for j in range(0,i): 77 | aj = gramA[ai][0][0] 78 | if ai!=aj : 79 | if aj in gramA and checkForIndirect(gramA,ai,aj): 80 | gramA = rep(gramA, ai) 81 | 82 | for i in range(1,c): 83 | ai = "A"+str(i) 84 | for j in gramA[ai]: 85 | if ai==j[0]: 86 | gramA = removeDirectLR(gramA, ai) 87 | break 88 | 89 | op = {} 90 | for i in gramA: 91 | a = str(i) 92 | for j in conv: 93 | a = a.replace(conv[j],j) 94 | revconv[i] = a 95 | 96 | for i in gramA: 97 | l = [] 98 | for j in gramA[i]: 99 | k = [] 100 | for m in j: 101 | if m in revconv: 102 | k.append(m.replace(m,revconv[m])) 103 | else: 104 | k.append(m) 105 | l.append(k) 106 | op[revconv[i]] = l 107 | 108 | return op 109 | 110 | result = rem(gram) 111 | 112 | 113 | def first(gram, term): 114 | a = [] 115 | if term not in gram: 116 | return [term] 117 | for i in gram[term]: 118 | if i[0] not in gram: 119 | a.append(i[0]) 120 | elif i[0] in gram: 121 | a += first(gram, i[0]) 122 | return a 123 | 124 | firsts = {} 125 | for i in result: 126 | firsts[i] = first(result,i) 127 | print(f'First({i}):',firsts[i]) 128 | 129 | 130 | def follow(gram, term): 131 | a = [] 132 | for rule in gram: 133 | for i in gram[rule]: 134 | if term in i: 135 | temp = i 136 | indx = i.index(term) 137 | if indx+1!=len(i): 138 | if i[-1] in firsts: 139 | a+=firsts[i[-1]] 140 | else: 141 | a+=[i[-1]] 142 | else: 143 | a+=["e"] 144 | if rule != term and "e" in a: 145 | a+= follow(gram,rule) 146 | return a 147 | 148 | follows = {} 149 | for i in result: 150 | follows[i] = list(set(follow(result,i))) 151 | if "e" in follows[i]: 152 | 
follows[i].pop(follows[i].index("e")) 153 | follows[i]+=["$"] 154 | print(f'Follow({i}):',follows[i]) -------------------------------------------------------------------------------- /ex-6 predictive parsing & shift reduce/689-lab6-predictiveParsing.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devangi2000/Compiler-Design/66bcc4a1f59b8319819fdab03278017a615e7944/ex-6 predictive parsing & shift reduce/689-lab6-predictiveParsing.docx -------------------------------------------------------------------------------- /ex-6 predictive parsing & shift reduce/__pycache__/predictive.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devangi2000/Compiler-Design/66bcc4a1f59b8319819fdab03278017a615e7944/ex-6 predictive parsing & shift reduce/__pycache__/predictive.cpython-38.pyc -------------------------------------------------------------------------------- /ex-6 predictive parsing & shift reduce/__pycache__/predictive_parser.py: -------------------------------------------------------------------------------- 1 | # #example for direct left recursion 2 | # gram = {"A":["Aa","Ab","c","d"] 3 | # } 4 | #example for indirect left recursion 5 | gram = { 6 | "E":["E+T","T"], 7 | "T":["T*F","F"], 8 | "F":["(E)","i"] 9 | } 10 | 11 | def removeDirectLR(gramA, A): 12 | """gramA is dictonary""" 13 | temp = gramA[A] 14 | tempCr = [] 15 | tempInCr = [] 16 | for i in temp: 17 | if i[0] == A: 18 | #tempInCr.append(i[1:]) 19 | tempInCr.append(i[1:]+[A+"'"]) 20 | else: 21 | #tempCr.append(i) 22 | tempCr.append(i+[A+"'"]) 23 | tempInCr.append(["e"]) 24 | gramA[A] = tempCr 25 | gramA[A+"'"] = tempInCr 26 | return gramA 27 | 28 | 29 | def checkForIndirect(gramA, a, ai): 30 | if ai not in gramA: 31 | return False 32 | if a == ai: 33 | return True 34 | for i in gramA[ai]: 35 | if i[0] == ai: 36 | return False 37 | if i[0] in gramA: 38 | return checkForIndirect(gramA, a, i[0]) 39 | return False 40 | 41 | def rep(gramA, A): 42 | temp = gramA[A] 43 | newTemp = [] 44 | for i in temp: 45 | if checkForIndirect(gramA, A, i[0]): 46 | t = [] 47 | for k in gramA[i[0]]: 48 | t=[] 49 | t+=k 50 | t+=i[1:] 51 | newTemp.append(t) 52 | 53 | else: 54 | newTemp.append(i) 55 | gramA[A] = newTemp 56 | return gramA 57 | 58 | def rem(gram): 59 | c = 1 60 | conv = {} 61 | gramA = {} 62 | revconv = {} 63 | for j in gram: 64 | conv[j] = "A"+str(c) 65 | gramA["A"+str(c)] = [] 66 | c+=1 67 | 68 | for i in gram: 69 | for j in gram[i]: 70 | temp = [] 71 | for k in j: 72 | if k in conv: 73 | temp.append(conv[k]) 74 | else: 75 | temp.append(k) 76 | gramA[conv[i]].append(temp) 77 | 78 | 79 | #print(gramA) 80 | for i in range(c-1,0,-1): 81 | ai = "A"+str(i) 82 | for j in range(0,i): 83 | aj = gramA[ai][0][0] 84 | if ai!=aj : 85 | if aj in gramA and checkForIndirect(gramA,ai,aj): 86 | gramA = rep(gramA, ai) 87 | 88 | for i in range(1,c): 89 | ai = "A"+str(i) 90 | for j in gramA[ai]: 91 | if ai==j[0]: 92 | gramA = removeDirectLR(gramA, ai) 93 | break 94 | 95 | op = {} 96 | for i in gramA: 97 | a = str(i) 98 | for j in conv: 99 | a = a.replace(conv[j],j) 100 | revconv[i] = a 101 | 102 | for i in gramA: 103 | l = [] 104 | for j in gramA[i]: 105 | k = [] 106 | for m in j: 107 | if m in revconv: 108 | k.append(m.replace(m,revconv[m])) 109 | else: 110 | k.append(m) 111 | l.append(k) 112 | op[revconv[i]] = l 113 | 114 | return op 115 | 116 | result = rem(gram) 117 | terminals = [] 118 | for i in result: 119 | 
for j in result[i]: 120 | for k in j: 121 | if k not in result: 122 | terminals+=[k] 123 | terminals = list(set(terminals)) 124 | #print(terminals) 125 | 126 | def first(gram, term): 127 | a = [] 128 | if term not in gram: 129 | return [term] 130 | for i in gram[term]: 131 | if i[0] not in gram: 132 | a.append(i[0]) 133 | elif i[0] in gram: 134 | a += first(gram, i[0]) 135 | return a 136 | 137 | firsts = {} 138 | for i in result: 139 | firsts[i] = first(result,i) 140 | # print(f'First({i}):',firsts[i]) 141 | 142 | def follow(gram, term): 143 | a = [] 144 | for rule in gram: 145 | for i in gram[rule]: 146 | if term in i: 147 | temp = i 148 | indx = i.index(term) 149 | if indx+1!=len(i): 150 | if i[-1] in firsts: 151 | a+=firsts[i[-1]] 152 | else: 153 | a+=[i[-1]] 154 | else: 155 | a+=["e"] 156 | if rule != term and "e" in a: 157 | a+= follow(gram,rule) 158 | return a 159 | 160 | follows = {} 161 | for i in result: 162 | follows[i] = list(set(follow(result,i))) 163 | if "e" in follows[i]: 164 | follows[i].pop(follows[i].index("e")) 165 | follows[i]+=["$"] 166 | # print(f'Follow({i}):',follows[i]) 167 | 168 | resMod = {} 169 | for i in result: 170 | l = [] 171 | for j in result[i]: 172 | temp = "" 173 | for k in j: 174 | temp+=k 175 | l.append(temp) 176 | resMod[i] = l 177 | 178 | # create predictive parsing table 179 | tterm = list(terminals) 180 | tterm.pop(tterm.index("e")) 181 | tterm+=["$"] 182 | pptable = {} 183 | for i in result: 184 | for j in tterm: 185 | if j in firsts[i]: 186 | pptable[(i,j)]=resMod[i[0]][0] 187 | else: 188 | pptable[(i,j)]="" 189 | if "e" in firsts[i]: 190 | for j in tterm: 191 | if j in follows[i]: 192 | pptable[(i,j)]="e" 193 | pptable[("F","i")] = "i" 194 | toprint = f'{"": <10}' 195 | for i in tterm: 196 | toprint+= f'|{i: <10}' 197 | print(toprint) 198 | for i in result: 199 | toprint = f'{i: <10}' 200 | for j in tterm: 201 | if pptable[(i,j)]!="": 202 | toprint+=f'|{i+"->"+pptable[(i,j)]: <10}' 203 | else: 204 | toprint+=f'|{pptable[(i,j)]: <10}' 205 | print(f'{"-":-<76}') 206 | print(toprint) -------------------------------------------------------------------------------- /ex-6 predictive parsing & shift reduce/__pycache__/tempCodeRunnerFile.py: -------------------------------------------------------------------------------- 1 | # #example for direct left recursion 2 | # gram = {"A":["Aa","Ab","c","d"] 3 | # } 4 | #example for indirect left recursion 5 | gram = { 6 | "E":["E+T","T"], 7 | "T":["T*F","F"], 8 | "F":["(E)","i"] 9 | } 10 | 11 | def removeDirectLR(gramA, A): 12 | """gramA is dictonary""" 13 | temp = gramA[A] 14 | tempCr = [] 15 | tempInCr = [] 16 | for i in temp: 17 | if i[0] == A: 18 | #tempInCr.append(i[1:]) 19 | tempInCr.append(i[1:]+[A+"'"]) 20 | else: 21 | #tempCr.append(i) 22 | tempCr.append(i+[A+"'"]) 23 | tempInCr.append(["e"]) 24 | gramA[A] = tempCr 25 | gramA[A+"'"] = tempInCr 26 | return gramA 27 | 28 | 29 | def checkForIndirect(gramA, a, ai): 30 | if ai not in gramA: 31 | return False 32 | if a == ai: 33 | return True 34 | for i in gramA[ai]: 35 | if i[0] == ai: 36 | return False 37 | if i[0] in gramA: 38 | return checkForIndirect(gramA, a, i[0]) 39 | return False 40 | 41 | def rep(gramA, A): 42 | temp = gramA[A] 43 | newTemp = [] 44 | for i in temp: 45 | if checkForIndirect(gramA, A, i[0]): 46 | t = [] 47 | for k in gramA[i[0]]: 48 | t=[] 49 | t+=k 50 | t+=i[1:] 51 | newTemp.append(t) 52 | 53 | else: 54 | newTemp.append(i) 55 | gramA[A] = newTemp 56 | return gramA 57 | 58 | def rem(gram): 59 | c = 1 60 | conv = {} 61 | gramA = {} 62 
| revconv = {} 63 | for j in gram: 64 | conv[j] = "A"+str(c) 65 | gramA["A"+str(c)] = [] 66 | c+=1 67 | 68 | for i in gram: 69 | for j in gram[i]: 70 | temp = [] 71 | for k in j: 72 | if k in conv: 73 | temp.append(conv[k]) 74 | else: 75 | temp.append(k) 76 | gramA[conv[i]].append(temp) 77 | 78 | 79 | #print(gramA) 80 | for i in range(c-1,0,-1): 81 | ai = "A"+str(i) 82 | for j in range(0,i): 83 | aj = gramA[ai][0][0] 84 | if ai!=aj : 85 | if aj in gramA and checkForIndirect(gramA,ai,aj): 86 | gramA = rep(gramA, ai) 87 | 88 | for i in range(1,c): 89 | ai = "A"+str(i) 90 | for j in gramA[ai]: 91 | if ai==j[0]: 92 | gramA = removeDirectLR(gramA, ai) 93 | break 94 | 95 | op = {} 96 | for i in gramA: 97 | a = str(i) 98 | for j in conv: 99 | a = a.replace(conv[j],j) 100 | revconv[i] = a 101 | 102 | for i in gramA: 103 | l = [] 104 | for j in gramA[i]: 105 | k = [] 106 | for m in j: 107 | if m in revconv: 108 | k.append(m.replace(m,revconv[m])) 109 | else: 110 | k.append(m) 111 | l.append(k) 112 | op[revconv[i]] = l 113 | 114 | return op 115 | 116 | result = rem(gram) 117 | terminals = [] 118 | for i in result: 119 | for j in result[i]: 120 | for k in j: 121 | if k not in result: 122 | terminals+=[k] 123 | terminals = list(set(terminals)) 124 | #print(terminals) 125 | 126 | def first(gram, term): 127 | a = [] 128 | if term not in gram: 129 | return [term] 130 | for i in gram[term]: 131 | if i[0] not in gram: 132 | a.append(i[0]) 133 | elif i[0] in gram: 134 | a += first(gram, i[0]) 135 | return a 136 | 137 | firsts = {} 138 | for i in result: 139 | firsts[i] = first(result,i) 140 | # print(f'First({i}):',firsts[i]) 141 | 142 | def follow(gram, term): 143 | a = [] 144 | for rule in gram: 145 | for i in gram[rule]: 146 | if term in i: 147 | temp = i 148 | indx = i.index(term) 149 | if indx+1!=len(i): 150 | if i[-1] in firsts: 151 | a+=firsts[i[-1]] 152 | else: 153 | a+=[i[-1]] 154 | else: 155 | a+=["e"] 156 | if rule != term and "e" in a: 157 | a+= follow(gram,rule) 158 | return a 159 | 160 | follows = {} 161 | for i in result: 162 | follows[i] = list(set(follow(result,i))) 163 | if "e" in follows[i]: 164 | follows[i].pop(follows[i].index("e")) 165 | follows[i]+=["$"] 166 | # print(f'Follow({i}):',follows[i]) 167 | 168 | resMod = {} 169 | for i in result: 170 | l = [] 171 | for j in result[i]: 172 | temp = "" 173 | for k in j: 174 | temp+=k 175 | l.append(temp) 176 | resMod[i] = l 177 | 178 | # create predictive parsing table 179 | tterm = list(terminals) 180 | tterm.pop(tterm.index("e")) 181 | tterm+=["$"] 182 | pptable = {} 183 | for i in result: 184 | for j in tterm: 185 | if j in firsts[i]: 186 | pptable[(i,j)]=resMod[i[0]][0] 187 | else: 188 | pptable[(i,j)]="" 189 | if "e" in firsts[i]: 190 | for j in tterm: 191 | if j in follows[i]: 192 | pptable[(i,j)]="e" 193 | pptable[("F","i")] = "i" 194 | toprint = f'{"": <10}' 195 | for i in tterm: 196 | toprint+= f'|{i: <10}' 197 | print(toprint) 198 | for i in result: 199 | toprint = f'{i: <10}' 200 | for j in tterm: 201 | if pptable[(i,j)]!="": 202 | toprint+=f'|{i+"->"+pptable[(i,j)]: <10}' 203 | else: 204 | toprint+=f'|{pptable[(i,j)]: <10}' 205 | print(f'{"-":-<76}') 206 | print(toprint) -------------------------------------------------------------------------------- /ex-6 predictive parsing & shift reduce/__pycache__/trial_predictive_parser.py: -------------------------------------------------------------------------------- 1 | #Some helper functions 2 | def print_iter(Matched,Stack,Input,Action,verbose=True): 3 | if verbose==True: 4 | 
print(".".join(Matched).ljust(30)," | ",".".join(Stack).ljust(25)," | ",".".join(Input).ljust(30)," | ",Action) 5 | #The predictive parsing algorithm 6 | def predictive_parsing(sentence,parsingtable,terminals,start_state="S",verbose=True): #Set verbose to false to not see the stages of the algorithm 7 | status = None 8 | match = [] 9 | stack = [start_state,"$"] 10 | Inp = sentence.split(".") 11 | if verbose==True: 12 | print_iter(["Matched"],["Stack"],["Input"],"Action") 13 | print_iter(match,stack,Inp,"Initial",verbose) 14 | action=[] 15 | while(len(sentence)>0 and status!=False): 16 | top_of_input = Inp[0] 17 | pos = top_of_input 18 | if stack[0] =="$" and pos == "$" : 19 | print_iter(match,stack,Inp,"Accepted",verbose) 20 | return "Accepted" 21 | if stack[0] == pos: 22 | print_iter(match,stack,Inp,"Pop",verbose) 23 | match.append(stack[0]) 24 | del(stack[0]) 25 | del(Inp[0]) 26 | continue 27 | if stack[0]=="epsilon": 28 | print_iter(match,stack,Inp,"Poping Epsilon",verbose) 29 | del(stack[0]) 30 | continue 31 | try: 32 | production=parsingtable[stack[0]][pos] 33 | print_iter(match,stack,Inp,stack[0]+" -> "+production,verbose) 34 | except: 35 | return "error for "+str(stack[0])+" on "+str(pos),"Not Accepted" 36 | 37 | new = production.split(".") 38 | stack=new+stack[1:] 39 | return "Not Accepted" 40 | 41 | if __name__=="__main__": 42 | #Example for the working of the predictive parsing :- 43 | #input for the grammar : E->TE1;E1->+TE1|epsilon;T->FT1 ... 44 | parsingtable = { 45 | "E" : {"id" : "T.E1", "(" : "T.E1"}, 46 | "E1" : {"+":"+.T.E1", ")":"epsilon", "$" : "epsilon"}, 47 | "T" : {"id" : "F.T1", "(" : "F.T1" }, 48 | "T1" : {"+" : "epsilon", "*" : "*.F.T1", ")" : "epsilon", "$" : "epsilon"}, 49 | "F":{"id":"id","(":"(.E.)"} 50 | } 51 | terminals = ["id","(",")","+","*"] 52 | print(predictive_parsing(sentence="id.+.(.id.+.id.).$",parsingtable=parsingtable,terminals=terminals,start_state="E",verbose=True)) 53 | #Another Example done in class:- 54 | print(predictive_parsing(sentence="c.c.c.c.d.d.$",parsingtable={"S" : {"c":"C.C","d":"C.C"},"C":{"c":"c.C","d":"d"}},terminals=["c,d"],start_state="S")) 55 | -------------------------------------------------------------------------------- /ex-7 shift reduce/689-ex7-shift_reduce.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devangi2000/Compiler-Design/66bcc4a1f59b8319819fdab03278017a615e7944/ex-7 shift reduce/689-ex7-shift_reduce.docx -------------------------------------------------------------------------------- /ex-7 shift reduce/shift_reduce.py: -------------------------------------------------------------------------------- 1 | gram = { 2 | "E":["2E2","3E3","4"] 3 | } 4 | starting_terminal = "E" 5 | inp = "2324232$" 6 | """ 7 | # example 2 8 | gram = { 9 | "S":["S+S","S*S","i"] 10 | } 11 | starting_terminal = "S" 12 | inp = "i+i*i" 13 | """ 14 | stack = "$" 15 | print(f'{"Stack": <15}'+"|"+f'{"Input Buffer": <15}'+"|"+f'Parsing Action') 16 | print(f'{"-":-<50}') 17 | 18 | while True: 19 | action = True 20 | i = 0 21 | while i{gram[starting_terminal][i]}') 25 | i=-1 26 | action = False 27 | i+=1 28 | if len(inp)>1: 29 | stack+=inp[0] 30 | inp=inp[1:] 31 | print(f'{stack: <15}'+"|"+f'{inp: <15}'+"|"+f'Shift') 32 | action = False 33 | 34 | if inp == "$" and stack == ("$"+starting_terminal): 35 | print(f'{stack: <15}'+"|"+f'{inp: <15}'+"|"+f'Accepted') 36 | break 37 | 38 | if action: 39 | print(f'{stack: <15}'+"|"+f'{inp: <15}'+"|"+f'Rejected') 40 | break 
-------------------------------------------------------------------------------- /ex-8 leading&trailing sets/689-ex8-leading&trailing.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devangi2000/Compiler-Design/66bcc4a1f59b8319819fdab03278017a615e7944/ex-8 leading&trailing sets/689-ex8-leading&trailing.docx -------------------------------------------------------------------------------- /ex-8 leading&trailing sets/leading_trailing.py: -------------------------------------------------------------------------------- 1 | 2 | a = ["E=E+T", 3 | "E=T", 4 | "T=T*F", 5 | "T=F", 6 | "F=(E)", 7 | "F=i"] 8 | 9 | rules = {} 10 | terms = [] 11 | for i in a: 12 | temp = i.split("=") 13 | terms.append(temp[0]) 14 | try: 15 | rules[temp[0]] += [temp[1]] 16 | except: 17 | rules[temp[0]] = [temp[1]] 18 | 19 | terms = list(set(terms)) 20 | print(rules,terms) 21 | 22 | def leading(gram, rules, term, start): 23 | s = [] 24 | if gram[0] not in terms: 25 | return gram[0] 26 | elif len(gram) == 1: 27 | return [0] 28 | elif gram[1] not in terms and gram[-1] is not start: 29 | for i in rules[gram[-1]]: 30 | s+= leading(i, rules, gram[-1], start) 31 | s+= [gram[1]] 32 | return s 33 | 34 | def trailing(gram, rules, term, start): 35 | s = [] 36 | if gram[-1] not in terms: 37 | return gram[-1] 38 | elif len(gram) == 1: 39 | return [0] 40 | elif gram[-2] not in terms and gram[-1] is not start: 41 | 42 | for i in rules[gram[-1]]: 43 | s+= trailing(i, rules, gram[-1], start) 44 | s+= [gram[-2]] 45 | return s 46 | 47 | leads = {} 48 | trails = {} 49 | for i in terms: 50 | s = [0] 51 | for j in rules[i]: 52 | s+=leading(j,rules,i,i) 53 | s = set(s) 54 | s.remove(0) 55 | leads[i] = s 56 | s = [0] 57 | for j in rules[i]: 58 | s+=trailing(j,rules,i,i) 59 | s = set(s) 60 | s.remove(0) 61 | trails[i] = s 62 | 63 | for i in terms: 64 | print("LEADING("+i+"):",leads[i]) 65 | for i in terms: 66 | print("TRAILING("+i+"):",trails[i]) -------------------------------------------------------------------------------- /ex-9 lr(0)/689-ex9-lr0.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devangi2000/Compiler-Design/66bcc4a1f59b8319819fdab03278017a615e7944/ex-9 lr(0)/689-ex9-lr0.docx -------------------------------------------------------------------------------- /ex-9 lr(0)/lr_0.py: -------------------------------------------------------------------------------- 1 | gram = { 2 | "S":["CC"], 3 | "C":["aC","d"] 4 | } 5 | start = "S" 6 | terms = ["a","d","$"] 7 | 8 | non_terms = [] 9 | for i in gram: 10 | non_terms.append(i) 11 | gram["S'"]= [start] 12 | 13 | 14 | new_row = {} 15 | for i in terms+non_terms: 16 | new_row[i]="" 17 | 18 | 19 | non_terms += ["S'"] 20 | # each row in state table will be dictionary {nonterms ,term,$} 21 | stateTable = [] 22 | # I = [(terminal, closure)] 23 | # I = [("S","A.A")] 24 | 25 | def Closure(term, I): 26 | if term in non_terms: 27 | for i in gram[term]: 28 | I+=[(term,"."+i)] 29 | I = list(set(I)) 30 | for i in I: 31 | # print("." != i[1][-1],i[1][i[1].index(".")+1]) 32 | if "." 
!= i[1][-1] and i[1][i[1].index(".")+1] in non_terms and i[1][i[1].index(".")+1] != term: 33 | I += Closure(i[1][i[1].index(".")+1], []) 34 | return I 35 | 36 | Is = [] 37 | Is+=set(Closure("S'", [])) 38 | 39 | 40 | countI = 0 41 | omegaList = [set(Is)] 42 | while countI 2 | #include 3 | #include 4 | #include 5 | int isKeyword(char buffer[]){ 6 | char keywords[32][10] = {"auto","break","case","char","const","continue","default", 7 | "do","double","else","enum","extern","float","for","goto", 8 | "if","int","long","register","return","short","signed", 9 | "sizeof","static","struct","switch","typedef","union", 10 | "unsigned","void","volatile","while"}; 11 | int i, flag = 0; 12 | for(i = 0; i < 32; ++i){ 13 | if(strcmp(keywords[i], buffer) == 0){ 14 | flag = 1; 15 | break; 16 | } 17 | } 18 | return flag; 19 | } 20 | int main(){ 21 | char ch, buffer[15], operators[] = "+-*/%="; 22 | FILE *fp; 23 | int i,j=0; 24 | 25 | fp = fopen("C:/Users/nikki/Desktop/study material/Compiler Design/Lab/ex2-LexicalAnalysis/test.txt","r"); 26 | if(fp == NULL){ 27 | printf("error while opening the file\n"); 28 | exit(0); 29 | } 30 | while((ch = fgetc(fp)) != EOF){ 31 | for(i = 0; i < 6; ++i){ 32 | if(ch == operators[i]) 33 | printf("%c is operator\n", ch); 34 | } 35 | if(isalnum(ch)){ 36 | buffer[j++] = ch; 37 | } 38 | else if((ch == ' ' || ch == '\n') && (j != 0)){ 39 | buffer[j] = '\0'; 40 | j = 0; 41 | if(isKeyword(buffer) == 1) 42 | printf("%s is keyword\n", buffer); 43 | else 44 | printf("%s is indentifier\n", buffer); 45 | } 46 | } 47 | fclose(fp); 48 | return 0; 49 | } -------------------------------------------------------------------------------- /ex2-LexicalAnalysis/ex2.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devangi2000/Compiler-Design/66bcc4a1f59b8319819fdab03278017a615e7944/ex2-LexicalAnalysis/ex2.exe -------------------------------------------------------------------------------- /ex2-LexicalAnalysis/lexical-analyzer.py: -------------------------------------------------------------------------------- 1 | f=open(r"C:\Users\nikki\Desktop\study material\Compiler Design\Lab\ex2-LexicalAnalysis\ex2.c") 2 | key=['int','float','string','include','stdio.h','char','break','if','else','switch','return','void' 3 | ,'while','struct','for'] 4 | iden=[] 5 | sp={"(",")","{","}",";","&","#","$","\n",'"',","} 6 | spec=["%d","%f","%c","%s"] 7 | num="012345678910" 8 | n=[] 9 | k=[] 10 | o=[] 11 | l=[] 12 | io=['scanf','printf','cin','cout'] 13 | op="+-%*=/^><" 14 | dl=[] 15 | F=[] 16 | for lines in f: 17 | words=lines.split(" ") 18 | for i in range(len(words)): 19 | if words[i] in key: 20 | k.append(words[i]) 21 | elif words[i] in io: 22 | l.append(words[i]) 23 | elif words[i] in op: 24 | o.append(words[i]) 25 | elif words[i] in sp: 26 | dl.append(words[i]) 27 | elif words[i] in spec: 28 | F.append(words[i]) 29 | elif words[i] in num: 30 | n.append(words[i]) 31 | else: 32 | iden.append(words[i]) 33 | print("Keywords are: ") 34 | print(set(k)) 35 | print("input/output are: ") 36 | print(set(l)) 37 | 38 | print("Operators are: ") 39 | print(set(o)) 40 | print("Special Symbols are: ") 41 | print(set(dl)) 42 | print("Identifiers are: " ) 43 | print(set(iden)) 44 | print("Format Specifier are:") 45 | print(set(F)) 46 | print("Constants are:") 47 | print(set(n)) -------------------------------------------------------------------------------- /ex3a-regex to nfa/regex-to-nfa.docx: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/devangi2000/Compiler-Design/66bcc4a1f59b8319819fdab03278017a615e7944/ex3a-regex to nfa/regex-to-nfa.docx -------------------------------------------------------------------------------- /ex3a-regex to nfa/regextonfa.py: -------------------------------------------------------------------------------- 1 | if __name__ == '__main__': 2 | states = { 3 | "1": "0 a 1", 4 | "2": "0 e 1 a 2 e 5 0 e 3 b 4 e 5", 5 | "3": "0 a 1 b", 6 | "4": "0 e 1 a 2 e 3 0 e 3 2 e 1" 7 | } 8 | choices = ["1. a", "2. a/b", "3. ab", "4. a*"] 9 | while(True): 10 | print("enter your choice") 11 | for i in choices: 12 | print(i) 13 | 14 | choice = input() 15 | if(choice == "1"): 16 | print(states["1"]) 17 | elif(choice == "2"): 18 | print(states["2"]) 19 | elif(choice == "3"): 20 | print(states["3"]) 21 | elif(choice == "4"): 22 | print(states["4"]) 23 | 24 | print("enter Q to exit") 25 | if input() == "Q": 26 | break -------------------------------------------------------------------------------- /ex3a-regex to nfa/tempCodeRunnerFile.py: -------------------------------------------------------------------------------- 1 | choice = input() -------------------------------------------------------------------------------- /ex3b- nfa to dfa/CD-683-exp4.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devangi2000/Compiler-Design/66bcc4a1f59b8319819fdab03278017a615e7944/ex3b- nfa to dfa/CD-683-exp4.docx -------------------------------------------------------------------------------- /ex3b- nfa to dfa/ex-4.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devangi2000/Compiler-Design/66bcc4a1f59b8319819fdab03278017a615e7944/ex3b- nfa to dfa/ex-4.docx -------------------------------------------------------------------------------- /ex3b- nfa to dfa/nfa2dfa.py: -------------------------------------------------------------------------------- 1 | NFA=[] 2 | DFA1=[] 3 | DFA=[] 4 | while 1: 5 | st = input("Enter the Regular Expression and Q to exit: ") 6 | if(st=='0'): 7 | break 8 | if(len(st)==1): 9 | print("NFA : "+"0 "+st+" 1") 10 | print("DFA : "+"0 "+st+" 1") 11 | if(len(st)==2): 12 | if(st[1]=='*'): 13 | NFA = ("0 e 1 "+st[0]+" 2 e 3 0 e 3 2 e 1") 14 | print("NFA : "+NFA) 15 | # print(NFA[0]) 16 | for i in range (0, len(NFA)): 17 | # print(i) 18 | if(NFA[i]==st[0]): 19 | DFA1 = (NFA[i-2]+" "+NFA[i]+" "+NFA[i+2]) 20 | print("DFA : "+DFA1) 21 | break 22 | else: 23 | print("NFA : "+"0 "+st[0]+" 1 "+st[1]) 24 | print("DFA : "+"0 "+st[0]+" 1 "+st[1]) 25 | if(len(st)==3): 26 | NFA=("0 e 1 "+st[0]+" 2 e 5 0 e 3 "+st[2]+" 4 e 5") 27 | print("NFA : "+NFA) 28 | for i in range(0, len(NFA)): 29 | if (NFA[i]==st[0]) or (NFA[i]==st[2]): 30 | DFA1 = (NFA[i-2]+" "+NFA[i]+" "+NFA[i+2]) 31 | DFA.append(DFA1) 32 | print('DFA : '+' '.join(DFA)) 33 | DFA=[] 34 | if st=='Q': 35 | break -------------------------------------------------------------------------------- /ex4- left recursion & factoring/ex4-689-elr&lf.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devangi2000/Compiler-Design/66bcc4a1f59b8319819fdab03278017a615e7944/ex4- left recursion & factoring/ex4-689-elr&lf.docx -------------------------------------------------------------------------------- /ex4- left recursion & factoring/leftFactoring.py: 
-------------------------------------------------------------------------------- 1 | from itertools import takewhile 2 | def groupby(ls): 3 | d = {} 4 | ls = [ y[0] for y in rules ] 5 | initial = list(set(ls)) 6 | for y in initial: 7 | for i in rules: 8 | if i.startswith(y): 9 | if y not in d: 10 | d[y] = [] 11 | d[y].append(i) 12 | return d 13 | 14 | def prefix(x): 15 | return len(set(x)) == 1 16 | 17 | 18 | starting="" 19 | rules=[] 20 | common=[] 21 | alphabetset=["A'","B'","C'","D'","E'","F'","G'","H'","I'","J'","K'","L'","M'","N'","O'","P'","Q'","R'","S'","T'","U'","V'","W'","X'","Y'","Z'"] 22 | 23 | 24 | s= "S->iEtS|iEtSeS|a" 25 | while(True): 26 | rules=[] 27 | common=[] 28 | split=s.split("->") 29 | starting=split[0] 30 | for i in split[1].split("|"): 31 | rules.append(i) 32 | 33 | for k, l in groupby(rules).items(): 34 | r = [l[0] for l in takewhile(prefix, zip(*l))] 35 | common.append(''.join(r)) 36 | for i in common: 37 | newalphabet=alphabetset.pop() 38 | print(starting+"->"+i+newalphabet) 39 | index=[] 40 | for k in rules: 41 | if(k.startswith(i)): 42 | index.append(k) 43 | print(newalphabet+"->",end="") 44 | for j in index[:-1]: 45 | stringtoprint=j.replace(i,"", 1)+"|" 46 | if stringtoprint=="|": 47 | print("\u03B5","|",end="") 48 | else: 49 | print(j.replace(i,"", 1)+"|",end="") 50 | stringtoprint=index[-1].replace(i,"", 1)+"|" 51 | if stringtoprint=="|": 52 | print("\u03B5","",end="") 53 | else: 54 | print(index[-1].replace(i,"", 1)+"",end="") 55 | print("") 56 | break -------------------------------------------------------------------------------- /ex4- left recursion & factoring/leftRecursion.py: -------------------------------------------------------------------------------- 1 | gram = { 2 | "E":["E+T","T"], 3 | "T":["T*F","F"], 4 | "F":["(E)","i"] 5 | } 6 | 7 | def removeDirectLR(gramA, A): 8 | temp = gramA[A] 9 | tempCr = [] 10 | tempInCr = [] 11 | for i in temp: 12 | if i[0] == A: 13 | tempInCr.append(i[1:]+[A+"'"]) 14 | else: 15 | tempCr.append(i+[A+"'"]) 16 | tempInCr.append(["e"]) 17 | gramA[A] = tempCr 18 | gramA[A+"'"] = tempInCr 19 | return gramA 20 | 21 | 22 | def checkForIndirect(gramA, a, ai): 23 | if ai not in gramA: 24 | return False 25 | if a == ai: 26 | return True 27 | for i in gramA[ai]: 28 | if i[0] == ai: 29 | return False 30 | if i[0] in gramA: 31 | return checkForIndirect(gramA, a, i[0]) 32 | return False 33 | 34 | def rep(gramA, A): 35 | temp = gramA[A] 36 | newTemp = [] 37 | for i in temp: 38 | if checkForIndirect(gramA, A, i[0]): 39 | t = [] 40 | for k in gramA[i[0]]: 41 | t=[] 42 | t+=k 43 | t+=i[1:] 44 | newTemp.append(t) 45 | 46 | else: 47 | newTemp.append(i) 48 | gramA[A] = newTemp 49 | return gramA 50 | 51 | def rem(gram): 52 | c = 1 53 | conv = {} 54 | gramA = {} 55 | revconv = {} 56 | for j in gram: 57 | conv[j] = "A"+str(c) 58 | gramA["A"+str(c)] = [] 59 | c+=1 60 | 61 | for i in gram: 62 | for j in gram[i]: 63 | temp = [] 64 | for k in j: 65 | if k in conv: 66 | temp.append(conv[k]) 67 | else: 68 | temp.append(k) 69 | gramA[conv[i]].append(temp) 70 | 71 | for i in range(c-1,0,-1): 72 | ai = "A"+str(i) 73 | for j in range(0,i): 74 | aj = gramA[ai][0][0] 75 | if ai!=aj : 76 | if aj in gramA and checkForIndirect(gramA,ai,aj): 77 | gramA = rep(gramA, ai) 78 | 79 | for i in range(1,c): 80 | ai = "A"+str(i) 81 | for j in gramA[ai]: 82 | if ai==j[0]: 83 | gramA = removeDirectLR(gramA, ai) 84 | break 85 | 86 | op = {} 87 | for i in gramA: 88 | a = str(i) 89 | for j in conv: 90 | a = a.replace(conv[j],j) 91 | revconv[i] = a 92 | 93 | for i in 
gramA: 94 | l = [] 95 | for j in gramA[i]: 96 | k = [] 97 | for m in j: 98 | if m in revconv: 99 | k.append(m.replace(m,revconv[m])) 100 | else: 101 | k.append(m) 102 | l.append(k) 103 | op[revconv[i]] = l 104 | 105 | return op 106 | 107 | result = rem(gram) 108 | 109 | for i in result: 110 | print(f'{i}->{result[i]}') -------------------------------------------------------------------------------- /ex4- left recursion & factoring/tempCodeRunnerFile.py: -------------------------------------------------------------------------------- 1 | gram = { 2 | "E":["E+T","T"], 3 | "T":["T*F","F"], 4 | "F":["(E)","i"] 5 | } 6 | 7 | def removeDirectLR(gramA, A): 8 | temp = gramA[A] 9 | tempCr = [] 10 | tempInCr = [] 11 | for i in temp: 12 | if i[0] == A: 13 | tempInCr.append(i[1:]+[A+"'"]) 14 | else: 15 | tempCr.append(i+[A+"'"]) 16 | tempInCr.append(["e"]) 17 | gramA[A] = tempCr 18 | gramA[A+"'"] = tempInCr 19 | return gramA 20 | 21 | 22 | def checkForIndirect(gramA, a, ai): 23 | if ai not in gramA: 24 | return False 25 | if a == ai: 26 | return True 27 | for i in gramA[ai]: 28 | if i[0] == ai: 29 | return False 30 | if i[0] in gramA: 31 | return checkForIndirect(gramA, a, i[0]) 32 | return False 33 | 34 | def rep(gramA, A): 35 | temp = gramA[A] 36 | newTemp = [] 37 | for i in temp: 38 | if checkForIndirect(gramA, A, i[0]): 39 | t = [] 40 | for k in gramA[i[0]]: 41 | t=[] 42 | t+=k 43 | t+=i[1:] 44 | newTemp.append(t) 45 | 46 | else: 47 | newTemp.append(i) 48 | gramA[A] = newTemp 49 | return gramA 50 | 51 | def rem(gram): 52 | c = 1 53 | conv = {} 54 | gramA = {} 55 | revconv = {} 56 | for j in gram: 57 | conv[j] = "A"+str(c) 58 | gramA["A"+str(c)] = [] 59 | c+=1 60 | 61 | for i in gram: 62 | for j in gram[i]: 63 | temp = [] 64 | for k in j: 65 | if k in conv: 66 | temp.append(conv[k]) 67 | else: 68 | temp.append(k) 69 | gramA[conv[i]].append(temp) 70 | 71 | for i in range(c-1,0,-1): 72 | ai = "A"+str(i) 73 | for j in range(0,i): 74 | aj = gramA[ai][0][0] 75 | if ai!=aj : 76 | if aj in gramA and checkForIndirect(gramA,ai,aj): 77 | gramA = rep(gramA, ai) 78 | 79 | for i in range(1,c): 80 | ai = "A"+str(i) 81 | for j in gramA[ai]: 82 | if ai==j[0]: 83 | gramA = removeDirectLR(gramA, ai) 84 | break 85 | 86 | op = {} 87 | for i in gramA: 88 | a = str(i) 89 | for j in conv: 90 | a = a.replace(conv[j],j) 91 | revconv[i] = a 92 | 93 | for i in gramA: 94 | l = [] 95 | for j in gramA[i]: 96 | k = [] 97 | for m in j: 98 | if m in revconv: 99 | k.append(m.replace(m,revconv[m])) 100 | else: 101 | k.append(m) 102 | l.append(k) 103 | op[revconv[i]] = l 104 | 105 | return op 106 | 107 | result = rem(gram) 108 | 109 | for i in result: 110 | print(f'{i}->{result[i]}') --------------------------------------------------------------------------------
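Added note: leftRecursion.py (and its tempCodeRunnerFile.py copy) remove both direct and indirect left recursion. For reference, the direct case A -> A a | b rewrites to A -> b A', A' -> a A' | e. A minimal stand-alone sketch of just that step is shown below; the function and variable names are illustrative assumptions and do not appear in the scripts above.

def remove_direct_left_recursion(name, productions):
    # productions are lists of symbols, e.g. [["E", "+", "T"], ["T"]]
    recursive = [p[1:] + [name + "'"] for p in productions if p[0] == name]
    non_recursive = [p + [name + "'"] for p in productions if p[0] != name]
    if not recursive:                       # nothing to do for this non-terminal
        return {name: productions}
    return {name: non_recursive, name + "'": recursive + [["e"]]}

print(remove_direct_left_recursion("E", [["E", "+", "T"], ["T"]]))
# expected: {'E': [['T', "E'"]], "E'": [['+', 'T', "E'"], ['e']]}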