├── README.md ├── project1 ├── .gitignore ├── example │ ├── input.csv │ └── output.csv ├── requirements.txt ├── src │ ├── DNSQuery.py │ ├── Header.py │ ├── Question.py │ ├── ResourceRecord.py │ └── main.py └── test_server.py ├── project2 ├── .gitignore ├── cert │ ├── test.crt │ └── test.key ├── requirements.txt └── src │ ├── ClientHandler.py │ ├── CommandsLogger.py │ ├── MITMProxy.py │ ├── client.py │ └── server.py └── project3 ├── .gitignore └── src ├── DHCPPacket.py ├── Timeout.py ├── client.py └── server.py /README.md: -------------------------------------------------------------------------------- 1 | # Computer Networks course projects 2 | 3 | Here are some shitty projects for AUT's Computer Networks course. 4 | 5 | The projects are [DNS resolver](./project1), [Telnet](./project2), and [DHCP](./project3) protocol implementation, respectively. 6 | 7 | -------------------------------------------------------------------------------- /project1/.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Created by https://www.toptal.com/developers/gitignore/api/python,pycharm+all,virtualenv 3 | # Edit at https://www.toptal.com/developers/gitignore?templates=python,pycharm+all,virtualenv 4 | 5 | ### PyCharm+all ### 6 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider 7 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 8 | 9 | # User-specific stuff 10 | .idea/**/workspace.xml 11 | .idea/**/tasks.xml 12 | .idea/**/usage.statistics.xml 13 | .idea/**/dictionaries 14 | .idea/**/shelf 15 | 16 | # Generated files 17 | .idea/**/contentModel.xml 18 | 19 | # Sensitive or high-churn files 20 | .idea/**/dataSources/ 21 | .idea/**/dataSources.ids 22 | .idea/**/dataSources.local.xml 23 | .idea/**/sqlDataSources.xml 24 | .idea/**/dynamic.xml 25 | .idea/**/uiDesigner.xml 26 | .idea/**/dbnavigator.xml 27 | 28 | # Gradle 29 | .idea/**/gradle.xml 
30 | .idea/**/libraries 31 | 32 | # Gradle and Maven with auto-import 33 | # When using Gradle or Maven with auto-import, you should exclude module files, 34 | # since they will be recreated, and may cause churn. Uncomment if using 35 | # auto-import. 36 | # .idea/artifacts 37 | # .idea/compiler.xml 38 | # .idea/jarRepositories.xml 39 | # .idea/modules.xml 40 | # .idea/*.iml 41 | # .idea/modules 42 | # *.iml 43 | # *.ipr 44 | 45 | # CMake 46 | cmake-build-*/ 47 | 48 | # Mongo Explorer plugin 49 | .idea/**/mongoSettings.xml 50 | 51 | # File-based project format 52 | *.iws 53 | 54 | # IntelliJ 55 | out/ 56 | 57 | # mpeltonen/sbt-idea plugin 58 | .idea_modules/ 59 | 60 | # JIRA plugin 61 | atlassian-ide-plugin.xml 62 | 63 | # Cursive Clojure plugin 64 | .idea/replstate.xml 65 | 66 | # Crashlytics plugin (for Android Studio and IntelliJ) 67 | com_crashlytics_export_strings.xml 68 | crashlytics.properties 69 | crashlytics-build.properties 70 | fabric.properties 71 | 72 | # Editor-based Rest Client 73 | .idea/httpRequests 74 | 75 | # Android studio 3.1+ serialized cache file 76 | .idea/caches/build_file_checksums.ser 77 | 78 | ### PyCharm+all Patch ### 79 | # Ignores the whole .idea folder and all .iml files 80 | # See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 81 | 82 | .idea/ 83 | 84 | # Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 85 | 86 | *.iml 87 | modules.xml 88 | .idea/misc.xml 89 | *.ipr 90 | 91 | # Sonarlint plugin 92 | .idea/sonarlint 93 | 94 | ### Python ### 95 | # Byte-compiled / optimized / DLL files 96 | __pycache__/ 97 | *.py[cod] 98 | *$py.class 99 | 100 | # C extensions 101 | *.so 102 | 103 | # Distribution / packaging 104 | .Python 105 | build/ 106 | develop-eggs/ 107 | dist/ 108 | downloads/ 109 | eggs/ 110 | .eggs/ 111 | parts/ 112 | sdist/ 113 | var/ 114 | wheels/ 115 | pip-wheel-metadata/ 116 | share/python-wheels/ 117 | *.egg-info/ 118 | 
.installed.cfg 119 | *.egg 120 | MANIFEST 121 | 122 | # PyInstaller 123 | # Usually these files are written by a python script from a template 124 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 125 | *.manifest 126 | *.spec 127 | 128 | # Installer logs 129 | pip-log.txt 130 | pip-delete-this-directory.txt 131 | 132 | # Unit test / coverage reports 133 | htmlcov/ 134 | .tox/ 135 | .nox/ 136 | .coverage 137 | .coverage.* 138 | .cache 139 | nosetests.xml 140 | coverage.xml 141 | *.cover 142 | *.py,cover 143 | .hypothesis/ 144 | .pytest_cache/ 145 | pytestdebug.log 146 | 147 | # Translations 148 | *.mo 149 | *.pot 150 | 151 | # Django stuff: 152 | *.log 153 | local_settings.py 154 | db.sqlite3 155 | db.sqlite3-journal 156 | 157 | # Flask stuff: 158 | instance/ 159 | .webassets-cache 160 | 161 | # Scrapy stuff: 162 | .scrapy 163 | 164 | # Sphinx documentation 165 | docs/_build/ 166 | doc/_build/ 167 | 168 | # PyBuilder 169 | target/ 170 | 171 | # Jupyter Notebook 172 | .ipynb_checkpoints 173 | 174 | # IPython 175 | profile_default/ 176 | ipython_config.py 177 | 178 | # pyenv 179 | .python-version 180 | 181 | # pipenv 182 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 183 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 184 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 185 | # install all needed dependencies. 186 | #Pipfile.lock 187 | 188 | # poetry 189 | #poetry.lock 190 | 191 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 192 | __pypackages__/ 193 | 194 | # Celery stuff 195 | celerybeat-schedule 196 | celerybeat.pid 197 | 198 | # SageMath parsed files 199 | *.sage.py 200 | 201 | # Environments 202 | # .env 203 | .env/ 204 | .venv/ 205 | env/ 206 | venv/ 207 | ENV/ 208 | env.bak/ 209 | venv.bak/ 210 | pythonenv* 211 | 212 | # Spyder project settings 213 | .spyderproject 214 | .spyproject 215 | 216 | # Rope project settings 217 | .ropeproject 218 | 219 | # mkdocs documentation 220 | /site 221 | 222 | # mypy 223 | .mypy_cache/ 224 | .dmypy.json 225 | dmypy.json 226 | 227 | # Pyre type checker 228 | .pyre/ 229 | 230 | # pytype static type analyzer 231 | .pytype/ 232 | 233 | # operating system-related files 234 | *.DS_Store #file properties cache/storage on macOS 235 | Thumbs.db #thumbnail cache on Windows 236 | 237 | # profiling data 238 | .prof 239 | 240 | 241 | ### VirtualEnv ### 242 | # Virtualenv 243 | # http://iamzed.com/2009/05/07/a-primer-on-virtualenv/ 244 | [Bb]in 245 | [Ii]nclude 246 | [Ll]ib 247 | [Ll]ib64 248 | [Ll]ocal 249 | [Ss]cripts 250 | pyvenv.cfg 251 | .venv 252 | pip-selfcheck.json 253 | 254 | # End of https://www.toptal.com/developers/gitignore/api/python,pycharm+all,virtualenv 255 | -------------------------------------------------------------------------------- /project1/example/input.csv: -------------------------------------------------------------------------------- 1 | name,type 2 | radin-shayanfar.ir,A 3 | google.com,A 4 | rshayanfar.ir,MX 5 | aut.ac.ir,A 6 | github.com,A 7 | radin-shayanfar.ir,AAAA 8 | an-imaginary-domain.ir,A 9 | -------------------------------------------------------------------------------- /project1/example/output.csv: -------------------------------------------------------------------------------- 1 | name,type,value 2 | radin-shayanfar.ir.,A,104.21.91.45 3 | radin-shayanfar.ir.,A,172.67.210.51 4 | google.com.,A,216.58.208.78 5 | rshayanfar.ir.,MX,mx.zoho.com. 6 | rshayanfar.ir.,MX,mx2.zoho.com. 
class DNSQuery:
    """A DNS message: header plus question/answer/authority/additional sections."""

    @staticmethod
    def create_query(questions, recursion=False):
        """Build an outgoing query.

        questions -- iterable of dicts with 'qname', 'qtype' and 'qclass'
        keys, forwarded to Question.create_question.
        recursion -- sets the RD (recursion desired) bit in the header.
        """
        query = DNSQuery()
        query.header = Header.create_header(len(questions), recursion)

        query.questions = []
        for question in questions:
            query.questions.append(Question.create_question(**question))

        # Bug fix: a freshly created query previously had no answers/
        # authorities/additionals attributes, so __str__ raised
        # AttributeError when printing an outgoing query.
        query.answers = []
        query.authorities = []
        query.additionals = []

        return query

    @staticmethod
    def bytes_to_name(byte_data, position):
        """Decode a domain name starting at `position` in `byte_data`.

        Handles RFC 1035 message compression (length bytes with the two
        high bits set are 14-bit pointers back into the message).
        Returns (name, position_after_name); the name carries a trailing
        dot except for the bare root (empty string).
        """
        qname = ''
        while True:
            length = struct.unpack("!B", byte_data[position:position + 1])[0]
            position += 1

            if length == 0:  # root label terminates the name
                break

            if length >> 6 == 0b11:  # a compression pointer (two bytes total)
                offset = struct.unpack("!H", byte_data[position - 1: position + 1])[0] & 0x3FFF
                ptr_star, _ = DNSQuery.bytes_to_name(byte_data, offset)
                qname += ptr_star
                position += 1  # skip the pointer's second byte
                break

            qname += byte_data[position:position + length].decode('ascii') + '.'
            position += length

        return qname, position

    @staticmethod
    def from_bytes(byte_data):
        """Parse a complete DNS message (header plus all four sections)."""
        query = DNSQuery()
        query.header = Header.from_bytes(byte_data[:12])
        position = 12

        query.questions = []
        for i in range(query.header.QDCOUNT):
            question, position = Question.from_bytes(byte_data, position)
            query.questions.append(question)

        query.answers = []
        for i in range(query.header.ANCOUNT):
            rr, position = ResourceRecord.from_bytes(byte_data, position)
            query.answers.append(rr)

        query.authorities = []
        for i in range(query.header.NSCOUNT):
            rr, position = ResourceRecord.from_bytes(byte_data, position)
            query.authorities.append(rr)

        query.additionals = []
        for i in range(query.header.ARCOUNT):
            rr, position = ResourceRecord.from_bytes(byte_data, position)
            query.additionals.append(rr)

        return query

    def to_bytes(self):
        """Serialize for sending: header plus question section only.

        Outgoing queries carry no answer/authority/additional records,
        so those sections are deliberately not serialized here.
        """
        out = bytearray()
        out.extend(self.header.to_bytes())

        for question in self.questions:
            out.extend(question.to_bytes())

        return out

    def __str__(self):
        """Human-readable dump of every non-empty section."""
        out = ''

        out += 'Questions:\n' if len(self.questions) > 0 else ''
        for question in self.questions:
            out += f"\tName: {question.qname}\n"

        out += 'Answers:\n' if len(self.answers) > 0 else ''
        for answer in self.answers:
            out += f"\tName: {answer.NAME}, Answer: {answer.RDATA}\n"

        out += 'Authorities:\n' if len(self.authorities) > 0 else ''
        for auth in self.authorities:
            out += f"\tName: {auth.NAME}, Address: {auth.RDATA}\n"

        out += 'Additionals:\n' if len(self.additionals) > 0 else ''
        for additional in self.additionals:
            out += f"\tName: {additional.NAME}, Address: {additional.RDATA}\n"

        return out
class Header:
    """The 12-byte DNS message header (RFC 1035 section 4.1.1)."""

    # Monotonic counter used to give each outgoing query a distinct ID.
    last_id = 0

    @staticmethod
    def create_header(questions_count, recursion):
        """Build a header for an outgoing query.

        questions_count -- value for QDCOUNT.
        recursion -- sets the RD (recursion desired) bit when True.
        """
        header = Header()
        header.ID = Header.last_id

        header.QR = 0b0
        header.OPCODE = 0x0
        header.AA = 0b0
        header.TC = 0b0
        header.RD = 0b1 if recursion else 0b0
        header.RA = 0b0
        header.Z = 0b000
        header.RCODE = 0x0

        header.QDCOUNT = questions_count
        header.ANCOUNT = 0x00
        header.NSCOUNT = 0x00
        header.ARCOUNT = 0x00

        Header.last_id += 1

        return header

    @staticmethod
    def from_bytes(byte_data):
        """Parse a 12-byte header.

        Flags word layout, most to least significant bit:
        QR(1) OPCODE(4) AA(1) TC(1) RD(1) RA(1) Z(3) RCODE(4).
        """
        header = Header()
        header.ID, options, header.QDCOUNT, header.ANCOUNT, header.NSCOUNT, header.ARCOUNT = \
            struct.unpack("!HHHHHH", byte_data)

        header.QR = (options & 0x8000) >> 15
        # Bug fix: OPCODE is a 4-bit field occupying bits 11-14; the old
        # 0x3800 mask dropped its most significant bit, so opcodes >= 8
        # were misread and to_bytes() no longer round-tripped.
        header.OPCODE = (options & 0x7800) >> 11
        header.AA = (options & 0x0400) >> 10
        header.TC = (options & 0x0200) >> 9
        header.RD = (options & 0x0100) >> 8
        header.RA = (options & 0x0080) >> 7
        header.Z = (options & 0x0070) >> 4
        header.RCODE = options & 0x000F

        return header

    def to_bytes(self):
        """Serialize back to the 12-byte wire format (inverse of from_bytes)."""
        out = bytearray()
        options = struct.pack("!BB", ((self.QR & 0b1) << 7) | ((self.OPCODE & 0xF) << 3) | ((self.AA & 0b1) << 2) | (
            (self.TC & 0b1) << 1) | (self.RD & 0b1),
                              ((self.RA & 0b1) << 7) | ((self.Z & 0x7) << 4) | ((self.RCODE & 0xF)))

        out.extend(struct.pack("!H", self.ID))
        out.extend(options)
        out.extend(struct.pack("!H", self.QDCOUNT))
        out.extend(struct.pack("!H", self.ANCOUNT))
        out.extend(struct.pack("!H", self.NSCOUNT))
        out.extend(struct.pack("!H", self.ARCOUNT))

        return out
class Question:
    """One entry of a DNS message's question section (RFC 1035 section 4.1.2)."""

    CLASS_IN = 1

    QTYPE_A = 1
    QTYPE_NS = 2
    QTYPE_CNAME = 5
    QTYPE_SOA = 6
    QTYPE_MX = 15
    QTYPE_TXT = 16
    QTYPE_AAAA = 28

    @staticmethod
    def create_question(qname, qtype, qclass):
        """Return a Question populated with the given name, type and class."""
        q = Question()
        q.qname = qname
        q.qtype = qtype
        q.qclass = qclass
        return q

    @staticmethod
    def from_bytes(byte_data, position):
        """Parse one question starting at `position`.

        Returns (question, position_past_the_question).
        """
        q = Question()
        q.qname, position = DNSQuery.DNSQuery.bytes_to_name(byte_data, position)

        # QTYPE and QCLASS are two consecutive big-endian 16-bit fields.
        q.qtype, q.qclass = struct.unpack("!HH", byte_data[position: position + 4])
        return q, position + 4

    def name_to_bytes(self):
        """Encode self.qname as RFC 1035 length-prefixed labels.

        Raises Exception when the name is not a valid dotted domain name.
        The empty label produced after the trailing dot becomes the
        zero-length root label that terminates the encoding.
        """
        qname = self.qname

        valid_name = r"^([A-Za-z0-9\-]+\.)*[A-Za-z0-9\-]+\.?$"
        if re.match(valid_name, qname) is None:
            raise Exception("Entered QName is not valid name")

        if not qname.endswith('.'):
            qname += '.'  # normalize to fully-qualified form

        encoded = bytearray()
        for label in qname.split("."):
            encoded.extend(struct.pack("!B", len(label)))
            encoded.extend(label.encode("ascii"))
        return encoded

    def to_bytes(self):
        """Serialize the full question: QNAME then QTYPE then QCLASS."""
        wire = bytearray(self.name_to_bytes())
        wire.extend(struct.pack("!HH", self.qtype, self.qclass))
        return wire
class ResourceRecord:
    """One resource record (answer/authority/additional) of a DNS message."""

    @staticmethod
    def from_bytes(byte_data, position):
        """Parse one RR starting at `position`; return (record, new_position).

        RDATA is decoded into a printable form for the record types this
        resolver understands; for any other type the raw bytes are kept.
        """
        RR = ResourceRecord()

        RR.NAME, position = DNSQuery.DNSQuery.bytes_to_name(byte_data, position)

        RR.TYPE = struct.unpack("!H", byte_data[position: position + 2])[0]
        position += 2

        RR.CLASS = struct.unpack("!H", byte_data[position: position + 2])[0]
        position += 2

        # Bug fix: TTL is an unsigned 32-bit value (RFC 2181 section 8);
        # "!i" read TTLs above 2**31 - 1 as negative numbers.
        RR.TTL = struct.unpack("!I", byte_data[position: position + 4])[0]
        position += 4

        RR.RDLENGTH = struct.unpack("!H", byte_data[position: position + 2])[0]
        position += 2

        # Keep the raw bytes first -- the per-type parsers below read them.
        RR.RDATA = byte_data[position: position + RR.RDLENGTH]
        parsed = RR.__process_data(byte_data, position)
        if parsed is not None:
            # Bug fix: unknown record types previously ended up with
            # RDATA = None, discarding the raw bytes read above.
            RR.RDATA = parsed
        position += RR.RDLENGTH

        return RR, position

    def get_type_text(self):
        """Return the textual mnemonic for self.TYPE, or None if unknown."""
        if self.TYPE == Question.QTYPE_A:
            return 'A'
        elif self.TYPE == Question.QTYPE_AAAA:
            return 'AAAA'
        elif self.TYPE == Question.QTYPE_NS:
            return 'NS'
        elif self.TYPE == Question.QTYPE_CNAME:
            return 'CNAME'  # bug fix: this branch was `pass` and returned None
        elif self.TYPE == Question.QTYPE_MX:
            return 'MX'
        elif self.TYPE == Question.QTYPE_TXT:
            return 'TXT'

    def __process_data(self, byte_data, position):
        """Dispatch to the type-specific RDATA parser; None for unknown types."""
        if self.TYPE == Question.QTYPE_A:
            return self.__parse_a()
        elif self.TYPE == Question.QTYPE_AAAA:
            return self.__parse_aaaa()
        elif self.TYPE == Question.QTYPE_NS:
            return self.__parse_ns(byte_data, position)
        elif self.TYPE == Question.QTYPE_CNAME:
            return self.__parse_cname(byte_data, position)
        elif self.TYPE == Question.QTYPE_MX:
            return self.__parse_mx(byte_data, position)
        elif self.TYPE == Question.QTYPE_TXT:
            return self.__parse_txt()

    def __parse_a(self):
        """Dotted-quad IPv4 address from 4 raw bytes."""
        ip_address = struct.unpack("!BBBB", self.RDATA)
        return ".".join(map(str, ip_address))

    def __parse_ns(self, byte_data, position):
        """NS RDATA is a (possibly compressed) domain name."""
        return DNSQuery.DNSQuery.bytes_to_name(byte_data, position)[0]

    def __parse_aaaa(self):
        """Colon-separated hex groups from 16 raw bytes (no :: compression)."""
        ip_address = struct.unpack("!HHHHHHHH", self.RDATA)
        return ":".join([format(part, 'x') for part in ip_address])

    def __parse_txt(self):
        """TXT RDATA decoded as ASCII text."""
        return self.RDATA.decode('ascii')

    def __parse_mx(self, byte_data, position):
        """MX RDATA: skip the 2-byte preference, decode the exchange name."""
        return DNSQuery.DNSQuery.bytes_to_name(byte_data, position + 2)[0]

    def __parse_cname(self, byte_data, position):
        """CNAME RDATA is a (possibly compressed) domain name."""
        return DNSQuery.DNSQuery.bytes_to_name(byte_data, position)[0]
def resolve_dfs(query, recursion, server, print_output, resolved_server=None):
    """Resolve `query` depth-first, following authority referrals.

    query -- the DNSQuery to send.
    recursion -- forwarded unchanged into recursive calls (RD bit is
    already baked into the query's header).
    server -- (name_or_ip, port) tuple of the server to ask.
    print_output -- when True, print each server's full response.
    resolved_server -- IP of server[0] if already known; looked up from
    server[0] when None.

    Returns the first response carrying answers, or None when every
    branch is exhausted (or a lone SOA authority signals non-existence).
    """
    if resolved_server is None:
        resolved_server = server[0]

    data = None
    # Use the socket as a context manager so it is closed on every path;
    # it was previously leaked.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.settimeout(1)
        for attempt in range(3):  # retry up to 3 times on timeout/error
            try:
                sock.sendto(query.to_bytes(), (resolved_server, server[1]))
                data, _ = sock.recvfrom(512)
                break
            except OSError:
                # Was a bare `except:`, which also swallowed
                # KeyboardInterrupt; socket.timeout is an OSError subclass.
                print(f"Request to '{resolved_server}:{server[1]}' timed out")

    if data is None:
        return None

    response = DNSQuery.from_bytes(data)

    if print_output:
        print(f"Server: {server[0]} ({resolved_server})")
        print(response)

    if response.header.ANCOUNT > 0:
        return response
    # A single SOA authority means the name definitively does not exist.
    if len(response.authorities) == 1 and response.authorities[0].TYPE == Question.QTYPE_SOA:
        return None

    for auth in response.authorities:
        try:
            resolvent = resolve_dfs(query, recursion, (auth.RDATA, DNS_PORT), print_output,
                                    ns_name_to_ip(auth.RDATA, response.additionals))
            if resolvent is not None:
                return resolvent
        except Exception as e:
            # Best effort: a failing nameserver branch should not abort
            # the remaining referrals.
            print(str(e))

    return None
def resolve_from_file(filename, output_filename, recursion, server=(DNS_IP, DNS_PORT)):
    """Resolve every (name, type) row of a CSV and write the answers.

    filename -- input CSV with 'name' and 'type' columns.
    output_filename -- output CSV written with name,type,value rows, one
    row per answer record; unresolvable names are silently skipped.
    recursion -- forwarded to resolve_single (bug fix: this parameter was
    previously ignored and hard-coded to True).
    server -- (ip, port) of the DNS server to start from.
    """
    results = []
    with open(filename, 'r') as f:
        queries_reader = csv.DictReader(f)
        for row in queries_reader:
            resolved = resolve_single(row['name'], row['type'].upper(), recursion, server, False)
            if resolved is None:
                continue
            for answer in resolved.answers:
                results.append([resolved.questions[0].qname, answer.get_type_text(), answer.RDATA])

    # newline='' is required by the csv module; without it Windows output
    # gets doubled line endings.
    with open(output_filename, 'w', newline='') as f:
        results_writer = csv.writer(f)
        results_writer.writerow(['name', 'type', 'value'])
        results_writer.writerows(results)
def to_cache(query):
    """Count repeats of a resolved query; cache it in redis after the 3rd.

    Both the repeat counter and the cached entry expire with the first
    answer's TTL, so stale records age out naturally. No-op for None or
    answerless queries (robustness guard against IndexError below).
    """
    if query is None or not query.answers:
        return

    question = query.questions[0]
    key = f"{question.qname}:{question.qtype}"
    ttl = query.answers[0].TTL

    repeats = rdb.incr(f"{key}:count")
    rdb.expire(f"{key}:count", ttl)
    if repeats == 3:  # only cache names that are queried repeatedly
        # NOTE: pickled data is only ever read back from our own local
        # redis instance (see from_cache), so pickle is acceptable here.
        rdb.set(key, pickle.dumps(query), ex=ttl)
# Address the debug UDP echo-printer listens on.
UDP_IP = "127.0.0.1"
UDP_PORT = 3232


def main():
    """Bind a UDP socket and print every datagram received, forever.

    The socket is a context manager so it is closed if the loop is
    interrupted (it previously leaked, and the loop ran at import time).
    """
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.bind((UDP_IP, UDP_PORT))

        while True:
            data, addr = sock.recvfrom(512)
            print(f"Received message from {addr}: {data}")


if __name__ == "__main__":
    main()
36 | # .idea/artifacts 37 | # .idea/compiler.xml 38 | # .idea/jarRepositories.xml 39 | # .idea/modules.xml 40 | # .idea/*.iml 41 | # .idea/modules 42 | # *.iml 43 | # *.ipr 44 | 45 | # CMake 46 | cmake-build-*/ 47 | 48 | # Mongo Explorer plugin 49 | .idea/**/mongoSettings.xml 50 | 51 | # File-based project format 52 | *.iws 53 | 54 | # IntelliJ 55 | out/ 56 | 57 | # mpeltonen/sbt-idea plugin 58 | .idea_modules/ 59 | 60 | # JIRA plugin 61 | atlassian-ide-plugin.xml 62 | 63 | # Cursive Clojure plugin 64 | .idea/replstate.xml 65 | 66 | # Crashlytics plugin (for Android Studio and IntelliJ) 67 | com_crashlytics_export_strings.xml 68 | crashlytics.properties 69 | crashlytics-build.properties 70 | fabric.properties 71 | 72 | # Editor-based Rest Client 73 | .idea/httpRequests 74 | 75 | # Android studio 3.1+ serialized cache file 76 | .idea/caches/build_file_checksums.ser 77 | 78 | ### PyCharm+all Patch ### 79 | # Ignores the whole .idea folder and all .iml files 80 | # See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 81 | 82 | .idea/ 83 | 84 | # Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 85 | 86 | *.iml 87 | modules.xml 88 | .idea/misc.xml 89 | *.ipr 90 | 91 | # Sonarlint plugin 92 | .idea/sonarlint 93 | 94 | ### Python ### 95 | # Byte-compiled / optimized / DLL files 96 | __pycache__/ 97 | *.py[cod] 98 | *$py.class 99 | 100 | # C extensions 101 | *.so 102 | 103 | # Distribution / packaging 104 | .Python 105 | build/ 106 | develop-eggs/ 107 | dist/ 108 | downloads/ 109 | eggs/ 110 | .eggs/ 111 | parts/ 112 | sdist/ 113 | var/ 114 | wheels/ 115 | pip-wheel-metadata/ 116 | share/python-wheels/ 117 | *.egg-info/ 118 | .installed.cfg 119 | *.egg 120 | MANIFEST 121 | 122 | # PyInstaller 123 | # Usually these files are written by a python script from a template 124 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
125 | *.manifest 126 | *.spec 127 | 128 | # Installer logs 129 | pip-log.txt 130 | pip-delete-this-directory.txt 131 | 132 | # Unit test / coverage reports 133 | htmlcov/ 134 | .tox/ 135 | .nox/ 136 | .coverage 137 | .coverage.* 138 | .cache 139 | nosetests.xml 140 | coverage.xml 141 | *.cover 142 | *.py,cover 143 | .hypothesis/ 144 | .pytest_cache/ 145 | pytestdebug.log 146 | 147 | # Translations 148 | *.mo 149 | *.pot 150 | 151 | # Django stuff: 152 | *.log 153 | local_settings.py 154 | db.sqlite3 155 | db.sqlite3-journal 156 | 157 | # Flask stuff: 158 | instance/ 159 | .webassets-cache 160 | 161 | # Scrapy stuff: 162 | .scrapy 163 | 164 | # Sphinx documentation 165 | docs/_build/ 166 | doc/_build/ 167 | 168 | # PyBuilder 169 | target/ 170 | 171 | # Jupyter Notebook 172 | .ipynb_checkpoints 173 | 174 | # IPython 175 | profile_default/ 176 | ipython_config.py 177 | 178 | # pyenv 179 | .python-version 180 | 181 | # pipenv 182 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 183 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 184 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 185 | # install all needed dependencies. 186 | #Pipfile.lock 187 | 188 | # poetry 189 | #poetry.lock 190 | 191 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 192 | __pypackages__/ 193 | 194 | # Celery stuff 195 | celerybeat-schedule 196 | celerybeat.pid 197 | 198 | # SageMath parsed files 199 | *.sage.py 200 | 201 | # Environments 202 | # .env 203 | .env/ 204 | .venv/ 205 | env/ 206 | venv/ 207 | ENV/ 208 | env.bak/ 209 | venv.bak/ 210 | pythonenv* 211 | 212 | # Spyder project settings 213 | .spyderproject 214 | .spyproject 215 | 216 | # Rope project settings 217 | .ropeproject 218 | 219 | # mkdocs documentation 220 | /site 221 | 222 | # mypy 223 | .mypy_cache/ 224 | .dmypy.json 225 | dmypy.json 226 | 227 | # Pyre type checker 228 | .pyre/ 229 | 230 | # pytype static type analyzer 231 | .pytype/ 232 | 233 | # operating system-related files 234 | *.DS_Store #file properties cache/storage on macOS 235 | Thumbs.db #thumbnail cache on Windows 236 | 237 | # profiling data 238 | .prof 239 | 240 | 241 | ### VirtualEnv ### 242 | # Virtualenv 243 | # http://iamzed.com/2009/05/07/a-primer-on-virtualenv/ 244 | [Bb]in 245 | [Ii]nclude 246 | [Ll]ib 247 | [Ll]ib64 248 | [Ll]ocal 249 | [Ss]cripts 250 | pyvenv.cfg 251 | .venv 252 | pip-selfcheck.json 253 | 254 | # End of https://www.toptal.com/developers/gitignore/api/python,pycharm+all,virtualenv 255 | -------------------------------------------------------------------------------- /project2/cert/test.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIID3TCCAsWgAwIBAgIUeG8D4FJLxNCAcZ3l2RVtDKB3rjIwDQYJKoZIhvcNAQEL 3 | BQAwfjELMAkGA1UEBhMCSVIxDzANBgNVBAgMBlRlaHJhbjEPMA0GA1UEBwwGVGVo 4 | cmFuMRIwEAYDVQQKDAlTSEFZQU5GQVIxGDAWBgNVBAMMD1JhZGluIFNoYXlhbmZh 5 | cjEfMB0GCSqGSIb3DQEJARYQbWVAcnNoYXlhbmZhci5pcjAeFw0yMTA1MjEyMDU2 6 | MjlaFw0zMTA1MTkyMDU2MjlaMH4xCzAJBgNVBAYTAklSMQ8wDQYDVQQIDAZUZWhy 7 | YW4xDzANBgNVBAcMBlRlaHJhbjESMBAGA1UECgwJU0hBWUFORkFSMRgwFgYDVQQD 8 | DA9SYWRpbiBTaGF5YW5mYXIxHzAdBgkqhkiG9w0BCQEWEG1lQHJzaGF5YW5mYXIu 9 | 
aXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDE6vtxRosLEB/ffHgi 10 | GqCQjM7No+4eDZfgJ6ovzcKpjCBpWCqYGaPOQ7UhMCS7D67rKYU9kNKag3tWX9/h 11 | /qk84PpjGz23eaMQdhINAxosDltM4WyoZjPvbpstj/vnj3l+FxCrfGNjl94P0whN 12 | qnXUehcOFUCH0sDZ7+GqmWsdrLXImqZeUbtKM/+piGfrHn2fEwFXiuOtpktXNpIj 13 | JhLWeHq3htPLS4WFrZq0T50bfcaibMujG7QEJkKY87Ezf1OrlXXsycL25erWkr+3 14 | CzSXjjCc/5jMZFvsitqPL3+T1EOXFrrgzyGgFygtTn85TnJNymP4Ka8O4HEOenQX 15 | 5Lm/AgMBAAGjUzBRMB0GA1UdDgQWBBSAA6wGa0lp6m8SiBHia70Fgt1L5zAfBgNV 16 | HSMEGDAWgBSAA6wGa0lp6m8SiBHia70Fgt1L5zAPBgNVHRMBAf8EBTADAQH/MA0G 17 | CSqGSIb3DQEBCwUAA4IBAQAzcptKwamICjXGoGBeGuB29qsXMzr0dUrJM++iC4CW 18 | h6RZtetKLpi7hxDCeM3X3H1056yV0a5o5+AkkwWfwZs71yv7Nu2l/zPf2NR7wxgg 19 | YydAqFBhFUZSE36kfQraIEBIopMmuxNpUaC1uEip9kyNPqRJTR6M19/TBOWGd4ww 20 | xs/RsrK5g6Pp4JWKlh1/IBkfvWl7RJcFa7RR+OsC1xmlHberXIBhC8k0Nj0F/KpH 21 | PTtX4OFwfMjr+7q3VP42lLk4gmhuiLQcU7h1l26K+fuqZ1KxXcrlIr3X8q0psz5M 22 | BRI2UDQ9EqvCWDXJNWZaSG0Xw2WxvzsegkUdnffdb5ZE 23 | -----END CERTIFICATE----- 24 | -------------------------------------------------------------------------------- /project2/cert/test.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDE6vtxRosLEB/f 3 | fHgiGqCQjM7No+4eDZfgJ6ovzcKpjCBpWCqYGaPOQ7UhMCS7D67rKYU9kNKag3tW 4 | X9/h/qk84PpjGz23eaMQdhINAxosDltM4WyoZjPvbpstj/vnj3l+FxCrfGNjl94P 5 | 0whNqnXUehcOFUCH0sDZ7+GqmWsdrLXImqZeUbtKM/+piGfrHn2fEwFXiuOtpktX 6 | NpIjJhLWeHq3htPLS4WFrZq0T50bfcaibMujG7QEJkKY87Ezf1OrlXXsycL25erW 7 | kr+3CzSXjjCc/5jMZFvsitqPL3+T1EOXFrrgzyGgFygtTn85TnJNymP4Ka8O4HEO 8 | enQX5Lm/AgMBAAECggEBALAuvcFfqE/BoYLHToTU5gvur6j1nrps1xHK603R/vTL 9 | VsRqCdIEPrbxhUGoXXSwV4Q3peqrhIKSWOuy5BDDVJAPinbZjKJ3xhmYShYyTFxW 10 | jAGBP0J8DsOOZnChfnRzvfO+lz+XDyWwXxIxciBq/vmYMsB66z2apslXAsUsdtYY 11 | epWc29js4ZJj3Ru4o1abIFWGI89FEZWcyuPxzsT3Pza90Nd1f6W0vvBEod47qYF+ 12 | uDY0YqhGck6cWQd/PnvH2euzqmThAxH1tfMLgOfm3Bok6tsdLIamsBJKlbr25b2q 13 | 
IAP: int = 0xff          # "Interpret As Protocol" escape byte (telnet IAC analogue)
LENGTH_SIZE: int = 10    # fixed width of the ASCII, space-padded length prefix


class Commands(Enum):
    """Command opcodes the client sends immediately after an IAP byte."""
    UPLOAD = 254
    EXEC_ = 253
    SEND = 252
    S_SEND = 251


def recv_exact(sock: socket.socket, size: int) -> bytes:
    """Receive exactly ``size`` bytes from ``sock``.

    A bare ``recv(n)`` may legally return fewer than ``n`` bytes, which the
    previous code ignored (truncated names/lengths on slow links).  Loops
    until the full amount arrives; raises ConnectionError on early EOF.
    """
    buf = bytearray()
    while len(buf) < size:
        chunk = sock.recv(size - len(buf))
        if not chunk:
            raise ConnectionError("connection closed before full message was received")
        buf.extend(chunk)
    return bytes(buf)


class ClientHandler:
    """Per-connection server-side handler for the custom telnet protocol."""

    def __init__(self, sock: socket.socket):
        self.sock = sock
        self.iap_on = False  # True after an unescaped IAP byte was seen

    def download_file(self):
        """Receive a file upload framed as <name-len><name><file-len><bytes>."""
        file_name_len = int(recv_exact(self.sock, LENGTH_SIZE).decode())
        file_name = recv_exact(self.sock, file_name_len).decode()
        # basename() guards against path traversal in the client-supplied name.
        file_name = os.path.basename(file_name)

        file_len = int(recv_exact(self.sock, LENGTH_SIZE).decode())

        remaining = file_len
        with open(file_name, "wb") as f:
            while remaining > 0:
                # min() prevents reading past the file body into the next
                # command when the client pipelines requests.
                chunk = self.sock.recv(min(4096, remaining))
                if not chunk:
                    break
                remaining -= len(chunk)
                f.write(chunk)

    def retrieve_exec(self):
        """Run a client-supplied shell command and stream back its output.

        NOTE(review): remote command execution via os.popen is the feature
        here, but it is inherently unsafe on untrusted networks.
        """
        command_len = int(recv_exact(self.sock, LENGTH_SIZE).decode())
        command = recv_exact(self.sock, command_len).decode()

        output = os.popen(command).read().encode()
        output_len = str(len(output))
        self.sock.sendall(b' ' * (LENGTH_SIZE - len(output_len)) + output_len.encode())
        self.sock.sendall(output)

    def receive_message(self, sock: socket.socket = None):
        """Read one <len><payload> message from ``sock`` (default: own socket) and print it."""
        if sock is None:
            sock = self.sock

        message_len = int(recv_exact(sock, LENGTH_SIZE).decode())
        message = recv_exact(sock, message_len).decode()

        print(f"---> Message from {sock.getpeername()[0]}:{sock.getpeername()[1]}:\n"
              f"---> {message}")

    def receive_e_message(self):
        """Handle an encrypted SEND: wrap the socket in TLS, read, unwrap."""
        # ssl.wrap_socket() was deprecated in 3.7 and removed in 3.12; build
        # the context explicitly (this is what the old commented-out code
        # intended).  Protocol stays TLSv1.2 to match the client side.
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        context.load_cert_chain(certfile="../cert/test.crt", keyfile="../cert/test.key")
        ssock = context.wrap_socket(self.sock, server_side=True)

        self.receive_message(ssock)
        self.sock = ssock.unwrap()

    def process_data(self, data: int):
        """Consume one byte of the stream, dispatching commands after IAP."""
        if self.iap_on:
            if data == IAP:
                # A doubled 0xff is an escaped literal data byte.
                print(chr(data), end='')
            elif data == Commands.UPLOAD.value:
                self.download_file()
            elif data == Commands.EXEC_.value:
                self.retrieve_exec()
            elif data == Commands.SEND.value:
                self.receive_message()
            elif data == Commands.S_SEND.value:
                self.receive_e_message()

            self.iap_on = False
        elif data == IAP:
            self.iap_on = True
        else:
            print(chr(data), end='')

    def handler(self):
        """Main per-connection loop: read one byte at a time until EOF."""
        try:
            print(f"-> {self.sock.getpeername()[0]} connected on port {self.sock.getpeername()[1]}")

            while True:
                data = self.sock.recv(1)
                if not data:
                    print(f"-> {self.sock.getpeername()[0]}:{self.sock.getpeername()[1]} disconnected")
                    break

                self.process_data(data[0])
        finally:
            self.sock.close()
class CommandsLogger:
    """Small Redis-backed log of the most recent command-mode inputs."""

    __KEY: str = 'commands_history'

    def __init__(self):
        # db=1 keeps the history separate from the default Redis database.
        self._rdb = redis.Redis(host='localhost', port=6379, db=1)

    def log_command(self, command: str):
        """Append ``command`` and trim the list to its 20 newest entries."""
        self._rdb.rpush(self.__KEY, command)
        # Keep only the tail of the list: indices -20 .. -1.
        self._rdb.ltrim(self.__KEY, -20, -1)

    def get_logs(self) -> list:
        """Return every stored command, oldest first (as raw bytes from Redis)."""
        return self._rdb.lrange(self.__KEY, 0, -1)
class MITMProxy:
    """Transparent localhost TCP relay that records traffic in both directions.

    The client connects to (PROXY_HOST, PROXY_PORT); everything it sends is
    appended to ``sent_buffer`` and forwarded to the real server, and every
    server reply is appended to ``recv_buffer`` and forwarded back.
    """

    PROXY_PORT: int = None
    PROXY_HOST: str = '127.0.0.1'

    def __init__(self):
        # Thread-safe pipes between the two relay loops; None is the
        # shutdown sentinel in both directions.
        self._client_to_server_queue = queue.Queue()
        self._server_to_client_queue = queue.Queue()
        # Raw byte logs exposed to the user via the `log` command.
        self.sent_buffer = bytearray()
        self.recv_buffer = bytearray()

    def run_server(self, hostname: str, port: int, timeout: int):
        """Bind a local listening socket and launch both relay threads."""
        self._start_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._start_socket.bind((self.PROXY_HOST, 0))  # port 0 -> OS picks a free one
        MITMProxy.PROXY_PORT = self._start_socket.getsockname()[1]

        threading.Thread(target=self._server_handler, args=(hostname, port, timeout), daemon=False).start()
        threading.Thread(target=self._client_handler, daemon=False).start()

        return self.PROXY_HOST, self.PROXY_PORT

    def _server_handler(self, hostname: str, port: int, timeout: int):
        """Connect to the real server and shuttle bytes until either side quits."""
        upstream = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        upstream.settimeout(timeout)

        try:
            upstream.connect((hostname, port))
            print(f"Connected to {upstream.getpeername()[0]}:{upstream.getpeername()[1]}")
        except socket.timeout:
            print("Connection timed out")
            self._server_to_client_queue.put(None)
            return
        except socket.error as err:
            print(os.strerror(err.errno), file=sys.stderr)
            self._server_to_client_queue.put(None)
            return

        while True:
            try:
                readable, _writable, _errored = select.select([upstream], [], [], 0.001)

                if upstream in readable:
                    chunk = upstream.recv(4096)
                    if not chunk:
                        print("Connection closed by foreign host.")
                        self._server_to_client_queue.put(None)
                        break
                    self._server_to_client_queue.put(chunk)

                try:
                    outgoing = self._client_to_server_queue.get_nowait()
                except queue.Empty:
                    pass
                else:
                    if outgoing is None:  # shutdown sentinel from client loop
                        break
                    upstream.sendall(outgoing)

            except socket.error as err:
                print(os.strerror(err.errno), file=sys.stderr)
                self._server_to_client_queue.put(None)
                break

        upstream.close()

    def _client_handler(self):
        """Accept the single local client and shuttle bytes, logging both ways."""
        self._start_socket.listen(1)

        client_socket, _address = self._start_socket.accept()

        while True:
            try:
                readable, _writable, _errored = select.select([client_socket], [], [], 0.001)

                if client_socket in readable:
                    chunk = client_socket.recv(4096)
                    if not chunk:
                        break
                    self.sent_buffer.extend(chunk)
                    self._client_to_server_queue.put(chunk)

                try:
                    reply = self._server_to_client_queue.get_nowait()
                except queue.Empty:
                    pass
                else:
                    if reply is None:  # shutdown sentinel from server loop
                        break
                    self.recv_buffer.extend(reply)
                    client_socket.sendall(reply)
            except socket.error as err:
                print(os.strerror(err.errno), file=sys.stderr)
                break

        # Tell the server loop to stop, then tear down the local sockets.
        self._client_to_server_queue.put(None)
        self._start_socket.close()
        client_socket.close()

    def close_sockets(self):
        """Unblock both relay loops so their threads can exit."""
        self._client_to_server_queue.put(None)
        self._server_to_client_queue.put(None)
IAP: int = 0xff          # escape byte that introduces a command (telnet IAC analogue)
LENGTH_SIZE: int = 10    # width of the ASCII, space-padded length prefix


class Commands(Enum):
    """Opcodes sent right after an IAP byte (must mirror the server's enum)."""
    UPLOAD = 254
    EXEC_ = 253
    SEND = 252
    S_SEND = 251


def escape_iap(data: bytes) -> bytes:
    """Escape every IAP (0xff) byte by doubling it.

    Uses bytes.replace (one C-level pass); the previous insert-in-a-loop
    implementation was O(n^2) for IAP-heavy payloads.
    """
    return bytes(data).replace(b'\xff', b'\xff\xff')


def command_parser(in_str: list):
    """Parse a command-mode line into a namespace.

    Raises argparse errors (instead of exiting) on bad input because the
    parser is built with exit_on_error=False.
    """
    cmd_parser = argparse.ArgumentParser(prog="", allow_abbrev=False, add_help=False, exit_on_error=False)
    subparser = cmd_parser.add_subparsers(dest='commands')

    upload = subparser.add_parser('upload')
    _exec = subparser.add_parser('exec')
    send = subparser.add_parser('send')
    history = subparser.add_parser('history')
    log = subparser.add_parser('log')

    upload.add_argument('path', type=str, metavar='Path to the file')
    _exec.add_argument('command', type=str, metavar='Command to be executed on the host')
    send.add_argument('-e', '--encrypt', action='store_true', help='Encrypt the message using TLS')
    send.add_argument('message', type=str, metavar='Message to be sent')
    log_mutex = log.add_mutually_exclusive_group(required=True)
    log_mutex.add_argument('-s', '--sent', action='store_true', help='Prints sent bytes')
    log_mutex.add_argument('-r', '--received', action='store_true', help='Prints received bytes')

    cmd_args = cmd_parser.parse_args(in_str)
    return cmd_args


def upload_file(sock: socket.socket, path: str):
    """Send IAP+UPLOAD followed by <name-len><name><file-len><bytes>."""
    out = bytearray(b'\xff')
    out.append(Commands.UPLOAD.value)

    # Read the whole file in one call instead of the old byte-by-byte loop.
    with open(path, "rb") as f:
        file = f.read()

    file_name = os.path.basename(path)
    file_name_len = str(len(file_name))
    out.extend(b' ' * (LENGTH_SIZE - len(file_name_len)) + file_name_len.encode())
    out.extend(file_name.encode())

    file_len = str(len(file))
    out.extend(b' ' * (LENGTH_SIZE - len(file_len)) + file_len.encode())
    out.extend(file)

    sock.sendall(bytes(out))


def send_exec(sock: socket.socket, command: str):
    """Send IAP+EXEC_ with the command, then print the streamed output."""
    out = bytearray(b'\xff')
    out.append(Commands.EXEC_.value)

    command_len = str(len(command))
    out.extend(b' ' * (LENGTH_SIZE - len(command_len)) + command_len.encode())
    out.extend(command.encode())

    sock.sendall(bytes(out))

    output_len = int(sock.recv(LENGTH_SIZE).decode())
    while output_len > 0:
        received = sock.recv(4096)
        if not received:
            # Peer closed early; the old code spun forever here because
            # output_len never decreased on an empty recv().
            break
        output_len -= len(received)
        print(received.decode(), end='')


def send_message(sock: socket.socket, message: str, send_iac: bool = True):
    """Send IAP+SEND (unless suppressed) followed by <len><message>."""
    out = bytearray()
    if send_iac:
        out.append(0xff)
        out.append(Commands.SEND.value)

    message_len = str(len(message))
    out.extend(b' ' * (LENGTH_SIZE - len(message_len)) + message_len.encode())
    out.extend(message.encode())

    # sendall() retries partial writes; plain send() could silently truncate.
    sock.sendall(bytes(out))


def send_e_message(sock: socket.socket, message: str):
    """Upgrade the connection to TLS, send the message, then downgrade.

    Returns the plain socket recovered from ssock.unwrap(); the caller must
    use it in place of the old socket object.
    """
    sock.send(b'\xff')
    sock.send(Commands.S_SEND.value.to_bytes(1, 'big'))

    # ssl.wrap_socket() was removed in Python 3.12; use an explicit context.
    # Verification stays off, matching the old wrap_socket() defaults.
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    ssock = context.wrap_socket(sock, server_side=False)
    send_message(ssock, message, send_iac=False)

    return ssock.unwrap()
def sock_send_recv(sock: socket.socket):
    """Interactive loop: relay stdin to the socket and print whatever arrives.

    A 0x1d byte (Ctrl+]) at the start of a line toggles command mode, where
    lines are parsed as client commands (upload/exec/send/history/log)
    instead of being written to the socket verbatim.  Relies on the
    module-level ``cmd_logger`` and (in proxy mode) ``proxy`` objects.
    """
    command_mode = False
    while True:
        try:
            readable, _writable, _errored = select.select([sys.stdin, sock], [], [])

            if sock in readable:
                incoming = sock.recv(4096)
                if not incoming:
                    print("Connection closed by foreign host.")
                    break
                print(incoming.decode(), end='')

            if sys.stdin in readable:
                in_str = sys.stdin.readline()
                data = in_str.encode()
                if data[0] == 0x1d:  # Ctrl+] toggles command mode
                    command_mode = not command_mode
                else:
                    try:
                        if command_mode:
                            cmd_logger.log_command(in_str.strip())
                            cmd_args = command_parser(shlex.split(in_str.strip()))
                            chosen = cmd_args.commands
                            if chosen == 'upload':
                                upload_file(sock, cmd_args.path)
                            elif chosen == 'exec':
                                send_exec(sock, cmd_args.command)
                            elif chosen == 'send':
                                if cmd_args.encrypt:
                                    # TLS round-trip hands back a fresh socket.
                                    sock = send_e_message(sock, cmd_args.message)
                                else:
                                    send_message(sock, cmd_args.message)
                            elif chosen == 'history':
                                for cmd in cmd_logger.get_logs():
                                    print(cmd.decode())
                            elif chosen == 'log':
                                print(proxy.sent_buffer if cmd_args.sent else proxy.recv_buffer)
                        else:
                            sock.sendall(escape_iap(data))
                    except argparse.ArgumentError as parse_err:
                        print(str(parse_err))
                    except Exception:
                        traceback.print_exc()
                if command_mode:
                    print("telnet> ", end='')
                    sys.stdout.flush()
        except KeyboardInterrupt:
            break
        except socket.error as sock_err:
            print(os.strerror(sock_err.errno), file=sys.stderr)
            break
def connection_mode(args):
    """Connect to the target host (directly or via the logging proxy) and
    hand the socket to the interactive loop."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    try:
        if not args.proxy:
            sock.settimeout(args.timeout)
            sock.connect((args.hostname, args.port))
            print(f"Connected to {sock.getpeername()[0]}:{sock.getpeername()[1]}")
        else:
            proxy.run_server(args.hostname, args.port, args.timeout)
            sock.connect((MITMProxy.PROXY_HOST, MITMProxy.PROXY_PORT))
    except socket.timeout:
        print("Connection timed out")
        # `proxy` only exists in proxy mode; the old unconditional call
        # raised NameError when a *direct* connection failed.
        if args.proxy:
            proxy.close_sockets()
        return
    except socket.error as error:
        print(os.strerror(error.errno), file=sys.stderr)
        if args.proxy:
            proxy.close_sockets()
        return

    sock_send_recv(sock)

    sock.close()


def scan_mode(args):
    """Port-scan every host in the given IPv4 network, ports 0-1023."""
    for ip in ipaddress.IPv4Network(args.scan):
        print(f"-> Scanning {str(ip)} ports 0-1023")
        for port in range(1024):
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                sock.settimeout(args.timeout)
                try:
                    sock.connect((str(ip), port))
                    print(f"-> {str(ip)}:{port} is open")
                except socket.timeout:
                    pass
                except socket.error:
                    # Closed/refused ports are expected noise; stay quiet.
                    pass


if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog="MyTelnet", allow_abbrev=False)

    parser.add_argument('-s', '--scan', type=str, action='store')
    parser.add_argument('-t', '--timeout', type=float, action='store', metavar='Timeout in seconds', default=2)
    parser.add_argument('-p', '--proxy', action='store_true', help='Proxy mode for logging')

    parser.add_argument('hostname', type=str, nargs='?', metavar='Host name')
    parser.add_argument('port', type=int, nargs='?', metavar='Port number')

    args = parser.parse_args()

    cmd_logger = CommandsLogger()
    if args.proxy:
        proxy = MITMProxy()

    if args.scan is None and (args.hostname is None or args.port is None):
        parser.error('Hostname and port number must be given in non-scan mode.')

    if args.scan is not None:
        scan_mode(args)
    else:
        connection_mode(args)
import argparse
import socket
import threading

from ClientHandler import ClientHandler

if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog="MyTelnetServer", allow_abbrev=False)

    parser.add_argument('-s', '--server', type=str, action='store', default='0.0.0.0', metavar='Server address')
    parser.add_argument('-p', '--port', type=int, action='store', default=23, metavar='Port number')

    args = parser.parse_args()

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts: without SO_REUSEADDR, rebinding during TIME_WAIT
    # fails with "Address already in use".
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind((args.server, args.port))
    sock.listen(5)

    print(f"Listening on {args.server}:{args.port}")

    try:
        # One handler thread per accepted connection.
        while True:
            connection, address = sock.accept()
            new_client = ClientHandler(connection)
            threading.Thread(target=new_client.handler).start()
    except KeyboardInterrupt:
        pass
    finally:
        # The listening socket was previously leaked on exit.
        sock.close()
.idea/**/sqlDataSources.xml 24 | .idea/**/dynamic.xml 25 | .idea/**/uiDesigner.xml 26 | .idea/**/dbnavigator.xml 27 | 28 | # Gradle 29 | .idea/**/gradle.xml 30 | .idea/**/libraries 31 | 32 | # Gradle and Maven with auto-import 33 | # When using Gradle or Maven with auto-import, you should exclude module files, 34 | # since they will be recreated, and may cause churn. Uncomment if using 35 | # auto-import. 36 | # .idea/artifacts 37 | # .idea/compiler.xml 38 | # .idea/jarRepositories.xml 39 | # .idea/modules.xml 40 | # .idea/*.iml 41 | # .idea/modules 42 | # *.iml 43 | # *.ipr 44 | 45 | # CMake 46 | cmake-build-*/ 47 | 48 | # Mongo Explorer plugin 49 | .idea/**/mongoSettings.xml 50 | 51 | # File-based project format 52 | *.iws 53 | 54 | # IntelliJ 55 | out/ 56 | 57 | # mpeltonen/sbt-idea plugin 58 | .idea_modules/ 59 | 60 | # JIRA plugin 61 | atlassian-ide-plugin.xml 62 | 63 | # Cursive Clojure plugin 64 | .idea/replstate.xml 65 | 66 | # Crashlytics plugin (for Android Studio and IntelliJ) 67 | com_crashlytics_export_strings.xml 68 | crashlytics.properties 69 | crashlytics-build.properties 70 | fabric.properties 71 | 72 | # Editor-based Rest Client 73 | .idea/httpRequests 74 | 75 | # Android studio 3.1+ serialized cache file 76 | .idea/caches/build_file_checksums.ser 77 | 78 | ### PyCharm+all Patch ### 79 | # Ignores the whole .idea folder and all .iml files 80 | # See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 81 | 82 | .idea/ 83 | 84 | # Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 85 | 86 | *.iml 87 | modules.xml 88 | .idea/misc.xml 89 | *.ipr 90 | 91 | # Sonarlint plugin 92 | .idea/sonarlint 93 | 94 | ### Python ### 95 | # Byte-compiled / optimized / DLL files 96 | __pycache__/ 97 | *.py[cod] 98 | *$py.class 99 | 100 | # C extensions 101 | *.so 102 | 103 | # Distribution / packaging 104 | .Python 105 | build/ 106 | develop-eggs/ 107 | dist/ 108 | downloads/ 109 | 
eggs/ 110 | .eggs/ 111 | parts/ 112 | sdist/ 113 | var/ 114 | wheels/ 115 | pip-wheel-metadata/ 116 | share/python-wheels/ 117 | *.egg-info/ 118 | .installed.cfg 119 | *.egg 120 | MANIFEST 121 | 122 | # PyInstaller 123 | # Usually these files are written by a python script from a template 124 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 125 | *.manifest 126 | *.spec 127 | 128 | # Installer logs 129 | pip-log.txt 130 | pip-delete-this-directory.txt 131 | 132 | # Unit test / coverage reports 133 | htmlcov/ 134 | .tox/ 135 | .nox/ 136 | .coverage 137 | .coverage.* 138 | .cache 139 | nosetests.xml 140 | coverage.xml 141 | *.cover 142 | *.py,cover 143 | .hypothesis/ 144 | .pytest_cache/ 145 | pytestdebug.log 146 | 147 | # Translations 148 | *.mo 149 | *.pot 150 | 151 | # Django stuff: 152 | *.log 153 | local_settings.py 154 | db.sqlite3 155 | db.sqlite3-journal 156 | 157 | # Flask stuff: 158 | instance/ 159 | .webassets-cache 160 | 161 | # Scrapy stuff: 162 | .scrapy 163 | 164 | # Sphinx documentation 165 | docs/_build/ 166 | doc/_build/ 167 | 168 | # PyBuilder 169 | target/ 170 | 171 | # Jupyter Notebook 172 | .ipynb_checkpoints 173 | 174 | # IPython 175 | profile_default/ 176 | ipython_config.py 177 | 178 | # pyenv 179 | .python-version 180 | 181 | # pipenv 182 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 183 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 184 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 185 | # install all needed dependencies. 186 | #Pipfile.lock 187 | 188 | # poetry 189 | #poetry.lock 190 | 191 | # PEP 582; used by e.g. 
class DHCPPacket:
    """A DHCP message: factory constructors, parsing, and serialization.

    Build outgoing packets with the ``create_*`` static methods, serialize
    with ``to_bytes()`` (dispatches on ``self.type``), and parse incoming
    datagrams with ``create_from_bytes()``.
    """

    # RFC 2131 magic cookie that precedes the options field.
    MAGIC_COOKIE = 0x63825363

    # DHCP option codes (RFC 2132).
    OPTIONS = {"MSGType": 53, "ClientID": 61, "ParameterList": 55, "SubnetMask": 1, "DNS": 6, "Hostname": 12,
               "ServerId": 54, "AddressRequest": 50, "AddressTime": 51, "End": 255}

    MESSAGE_TYPES = {"DISCOVER": 1, "OFFER": 2, "REQUEST": 3, "ACK": 5, "NAK": 6}

    # Class-level defaults: offer_to_bytes()/ack_to_bytes() test these, which
    # previously raised AttributeError whenever the client did not request
    # the corresponding option.
    subnet = None
    dns = None

    @staticmethod
    def create_discover(interface: tuple) -> 'DHCPPacket':
        """Build a DISCOVER for ``interface``, a (name, "aa:bb:cc:dd:ee:ff") tuple."""
        packet = DHCPPacket()
        packet.type = DHCPPacket.MESSAGE_TYPES["DISCOVER"]
        packet.op = 1      # BOOTREQUEST
        packet.htype = 1   # Ethernet
        packet.hlen = 6
        packet.hops = 0
        packet.xid = randint(0, 1 << 31)  # transaction id; fits "!I"
        packet.secs = 0
        packet.broadcast = 0
        packet.ciaddr = 0
        packet.yiaddr = 0
        packet.siaddr = 0
        packet.giaddr = 0
        # MAC address as a 48-bit integer.
        packet.chaddr = int(interface[1].replace(":", ""), 16)

        packet.hostname = socket.gethostname()

        return packet

    @staticmethod
    def create_offer(discover_packet: 'DHCPPacket', offered_ip: int, subnet_mask: str, dns: str, lease: int,
                     server_id: str) -> 'DHCPPacket':
        """Build an OFFER answering ``discover_packet`` with ``offered_ip``."""
        packet = DHCPPacket()
        packet.type = DHCPPacket.MESSAGE_TYPES["OFFER"]
        packet.op = 2      # BOOTREPLY
        packet.htype = 1
        packet.hlen = 6
        packet.hops = 0
        packet.xid = discover_packet.xid
        packet.secs = 0
        packet.broadcast = 0
        packet.ciaddr = 0
        packet.yiaddr = offered_ip
        packet.siaddr = int(ipaddress.IPv4Address(server_id))
        packet.giaddr = 0
        packet.chaddr = discover_packet.chaddr

        # Only answer the options the client actually asked for.
        if DHCPPacket.OPTIONS["SubnetMask"] in discover_packet.options[DHCPPacket.OPTIONS["ParameterList"]]:
            packet.subnet = int(ipaddress.IPv4Address(subnet_mask))
        if DHCPPacket.OPTIONS["DNS"] in discover_packet.options[DHCPPacket.OPTIONS["ParameterList"]]:
            packet.dns = int(ipaddress.IPv4Address(dns))
        packet.lease = lease

        return packet

    @staticmethod
    def create_request(offer_packet: 'DHCPPacket') -> 'DHCPPacket':
        """Build a REQUEST accepting the address from ``offer_packet``."""
        packet = DHCPPacket()
        packet.type = DHCPPacket.MESSAGE_TYPES["REQUEST"]
        packet.op = 1
        packet.htype = 1
        packet.hlen = 6
        packet.hops = 0
        packet.xid = offer_packet.xid
        packet.secs = 0
        packet.broadcast = 0
        packet.ciaddr = 0
        packet.yiaddr = 0
        packet.siaddr = 0
        packet.giaddr = 0
        packet.chaddr = offer_packet.chaddr

        packet.hostname = socket.gethostname()
        packet.ip_address = offer_packet.yiaddr

        # Stored as a bytes-like object (raw option payload); it is emitted
        # verbatim by request_to_bytes() without further packing.
        packet.server_id = offer_packet.options[DHCPPacket.OPTIONS["ServerId"]]

        return packet

    @staticmethod
    def create_nak(request_packet: 'DHCPPacket', server_id: str = '0.0.0.0') -> 'DHCPPacket':
        """Build a NAK rejecting ``request_packet``.

        ``server_id`` was previously an undefined name (NameError at call
        time); it is now a parameter with a backward-compatible default.
        """
        packet = DHCPPacket()
        packet.type = DHCPPacket.MESSAGE_TYPES["NAK"]
        packet.op = 2
        packet.htype = 1
        packet.hlen = 6
        packet.hops = 0
        packet.xid = request_packet.xid
        packet.secs = 0
        packet.broadcast = 0
        packet.ciaddr = 0
        packet.yiaddr = 0
        packet.siaddr = int(ipaddress.IPv4Address(server_id))
        packet.giaddr = 0
        packet.chaddr = request_packet.chaddr

        return packet

    @staticmethod
    def create_ack(request_packet: 'DHCPPacket', requested_ip: int, subnet_mask: str, dns: str, lease: int,
                   server_id: str) -> 'DHCPPacket':
        """Build an ACK granting ``requested_ip`` to the requesting client."""
        packet = DHCPPacket()
        packet.type = DHCPPacket.MESSAGE_TYPES["ACK"]
        packet.op = 2
        packet.htype = 1
        packet.hlen = 6
        packet.hops = 0
        packet.xid = request_packet.xid
        packet.secs = 0
        packet.broadcast = 0
        packet.ciaddr = 0
        packet.yiaddr = requested_ip
        packet.siaddr = int(ipaddress.IPv4Address(server_id))
        packet.giaddr = 0
        packet.chaddr = request_packet.chaddr

        if DHCPPacket.OPTIONS["SubnetMask"] in request_packet.options[DHCPPacket.OPTIONS["ParameterList"]]:
            packet.subnet = int(ipaddress.IPv4Address(subnet_mask))
        if DHCPPacket.OPTIONS["DNS"] in request_packet.options[DHCPPacket.OPTIONS["ParameterList"]]:
            packet.dns = int(ipaddress.IPv4Address(dns))
        packet.lease = lease

        return packet

    @staticmethod
    def create_from_bytes(_in: bytes):
        """Parse a raw DHCP datagram into a DHCPPacket (header + options)."""
        packet = DHCPPacket()
        packet.op, packet.htype, packet.hlen, packet.hops, packet.xid, packet.secs, flags, packet.ciaddr, \
            packet.yiaddr, packet.siaddr, packet.giaddr = struct.unpack("!BBBBIHHIIII", _in[:7 * 4])

        # Broadcast flag is the top bit of the 16-bit flags field.
        packet.broadcast = flags >> 15

        chaddr = struct.unpack("!IH", _in[7 * 4:7 * 4 + 6])
        packet.chaddr = (chaddr[0] << 16) | chaddr[1]

        # Fixed header is 236 bytes; +4 skips the magic cookie.
        options_bytes = _in[59 * 4 + 4:]

        packet.options = {}
        i = 0
        while (option := options_bytes[i]) != DHCPPacket.OPTIONS["End"]:
            if option == 0:
                # Pad option (RFC 2132 §3.1): a lone byte with no length
                # field; the old parser misread it as a TLV and derailed.
                i += 1
                continue
            length = options_bytes[i + 1]
            i += 2
            packet.options[option] = options_bytes[i: i + length]
            i += length

        packet.type = struct.unpack("!B", packet.options[DHCPPacket.OPTIONS["MSGType"]])[0]

        return packet

    def fixed_fields_to_bytes(self) -> bytearray:
        """Serialize the 236-byte fixed BOOTP header plus the magic cookie."""
        out = bytearray()

        out.extend(struct.pack("!B", self.op))
        out.extend(struct.pack("!B", self.htype))
        out.extend(struct.pack("!B", self.hlen))
        out.extend(struct.pack("!B", self.hops))
        out.extend(struct.pack("!I", self.xid))
        out.extend(struct.pack("!H", self.secs))
        out.extend(struct.pack("!H", self.broadcast << 15))  # flags: broadcast bit only
        out.extend(struct.pack("!I", self.ciaddr))
        out.extend(struct.pack("!I", self.yiaddr))
        out.extend(struct.pack("!I", self.siaddr))
        out.extend(struct.pack("!I", self.giaddr))

        # chaddr: 6 significant MAC bytes + 10 zero bytes of padding (16 total).
        out.extend(struct.pack("!IH", self.chaddr >> 16, self.chaddr & 0xffff))
        out.extend(bytes(10))

        # BOOTP legacy sname (64) + file (128) fields, zeroed.
        out.extend(bytes(192))

        out.extend(struct.pack("!I", DHCPPacket.MAGIC_COOKIE))

        return out

    def discover_to_bytes(self) -> bytearray:
        """Serialize a DISCOVER: header + MSGType/ParameterList/Hostname options."""
        out = bytearray()

        out.extend(self.fixed_fields_to_bytes())

        out.extend(struct.pack("!BBB", DHCPPacket.OPTIONS["MSGType"], 1, self.type))
        out.extend(struct.pack("!BBBB", DHCPPacket.OPTIONS["ParameterList"], 2, DHCPPacket.OPTIONS["SubnetMask"],
                               DHCPPacket.OPTIONS["DNS"]))

        out.extend(struct.pack("!BB", DHCPPacket.OPTIONS["Hostname"], len(self.hostname)))
        out.extend(self.hostname.encode())

        out.extend(struct.pack("!B", DHCPPacket.OPTIONS["End"]))

        return out

    def offer_to_bytes(self):
        """Serialize an OFFER: header + ServerId/AddressTime (+Subnet/DNS) options."""
        out = bytearray()

        out.extend(self.fixed_fields_to_bytes())

        out.extend(struct.pack("!BBB", DHCPPacket.OPTIONS["MSGType"], 1, self.type))

        out.extend(struct.pack("!BBI", DHCPPacket.OPTIONS["ServerId"], 4, self.siaddr))
        out.extend(struct.pack("!BBI", DHCPPacket.OPTIONS["AddressTime"], 4, self.lease))
        if self.subnet is not None:
            out.extend(struct.pack("!BBI", DHCPPacket.OPTIONS["SubnetMask"], 4, self.subnet))
        if self.dns is not None:
            out.extend(struct.pack("!BBI", DHCPPacket.OPTIONS["DNS"], 4, self.dns))

        out.extend(struct.pack("!B", DHCPPacket.OPTIONS["End"]))

        return out

    def request_to_bytes(self) -> bytearray:
        """Serialize a REQUEST: header + parameter/hostname/address/server-id options."""
        out = bytearray()

        out.extend(self.fixed_fields_to_bytes())

        out.extend(struct.pack("!BBB", DHCPPacket.OPTIONS["MSGType"], 1, self.type))
        out.extend(struct.pack("!BBBB", DHCPPacket.OPTIONS["ParameterList"], 2, DHCPPacket.OPTIONS["SubnetMask"],
                               DHCPPacket.OPTIONS["DNS"]))

        out.extend(struct.pack("!BB", DHCPPacket.OPTIONS["Hostname"], len(self.hostname)))
        out.extend(self.hostname.encode())

        out.extend(struct.pack("!BBI", DHCPPacket.OPTIONS["AddressRequest"], 4, self.ip_address))
        # server_id is already the raw 4-byte option payload (see create_request).
        out.extend(struct.pack("!BB", DHCPPacket.OPTIONS["ServerId"], 4))
        out.extend(self.server_id)

        out.extend(struct.pack("!B", DHCPPacket.OPTIONS["End"]))

        return out

    def nak_to_bytes(self):
        """Serialize a NAK: header + MSGType/ServerId options."""
        out = bytearray()

        out.extend(self.fixed_fields_to_bytes())

        out.extend(struct.pack("!BBB", DHCPPacket.OPTIONS["MSGType"], 1, self.type))

        out.extend(struct.pack("!BBI", DHCPPacket.OPTIONS["ServerId"], 4, self.siaddr))

        out.extend(struct.pack("!B", DHCPPacket.OPTIONS["End"]))

        return out

    def ack_to_bytes(self):
        """Serialize an ACK: same option layout as an OFFER."""
        out = bytearray()

        out.extend(self.fixed_fields_to_bytes())

        out.extend(struct.pack("!BBB", DHCPPacket.OPTIONS["MSGType"], 1, self.type))

        out.extend(struct.pack("!BBI", DHCPPacket.OPTIONS["ServerId"], 4, self.siaddr))
        out.extend(struct.pack("!BBI", DHCPPacket.OPTIONS["AddressTime"], 4, self.lease))
        if self.subnet is not None:
            out.extend(struct.pack("!BBI", DHCPPacket.OPTIONS["SubnetMask"], 4, self.subnet))
        if self.dns is not None:
            out.extend(struct.pack("!BBI", DHCPPacket.OPTIONS["DNS"], 4, self.dns))

        out.extend(struct.pack("!B", DHCPPacket.OPTIONS["End"]))

        return out

    def to_bytes(self):
        """Serialize this packet according to its message type."""
        if self.type == DHCPPacket.MESSAGE_TYPES["DISCOVER"]:
            return self.discover_to_bytes()
        elif self.type == DHCPPacket.MESSAGE_TYPES["OFFER"]:
            return self.offer_to_bytes()
        elif self.type == DHCPPacket.MESSAGE_TYPES["REQUEST"]:
            return self.request_to_bytes()
        elif self.type == DHCPPacket.MESSAGE_TYPES["NAK"]:
            return self.nak_to_bytes()
        elif self.type == DHCPPacket.MESSAGE_TYPES["ACK"]:
            return self.ack_to_bytes()
class Timeout:
    """Retransmission timer with binary exponential backoff.

    The effective interval is ``timeout + random`` where ``random`` is a
    ±1-second fuzz; the interval doubles on every expiry up to MAX_TIMEOUT,
    and after MAX_COUNT expiries the caller is told to give up.
    """

    MAX_TIMEOUT = 64      # seconds; backoff ceiling
    INITIAL_TIMEOUT = 4   # seconds; first interval
    MAX_COUNT = 5         # expiries before giving up

    def __init__(self):
        self.timeout: int = Timeout.INITIAL_TIMEOUT
        self.random: int = 0          # ±1 s fuzz, set by __randomize()
        self.last_time: float = 0     # timestamp of the last (re)start
        self.count: int = 0           # number of expiries so far
        self.__randomize()

    def __randomize(self):
        # Fuzz the interval by -1, 0 or +1 second so many clients do not
        # retransmit in lockstep.
        self.random = random.randint(-1, 1)

    def start_time(self):
        """(Re)start the timer from now."""
        self.last_time = time.time()

    def get_timeout(self) -> tuple:
        """Return (seconds_until_expiry, retransmit_now).

        On expiry the interval doubles (capped at MAX_TIMEOUT) and the
        second element is True.  After MAX_COUNT expiries, returns
        (-1, False) to signal the caller to give up and start over.
        """
        current_time = time.time()
        retransmit = False
        if self.last_time + self.timeout + self.random < current_time:  # timed out
            if self.count >= Timeout.MAX_COUNT:
                # Previously returned a bare -1, which crashed callers that
                # unpack the result as a 2-tuple.
                return -1, False
            self.last_time = time.time()
            self.timeout = min(self.timeout << 1, Timeout.MAX_TIMEOUT)
            self.__randomize()
            self.count += 1
            print(f"Timed out! New timeout: {self.timeout + self.random}")
            retransmit = True

        return floor(self.last_time + self.timeout + self.random - time.time()), retransmit
-> int: 25 | current_time = time.time() 26 | retransmit = False 27 | if self.last_time + self.timeout + self.random < current_time: # timed out 28 | if self.count >= Timeout.MAX_COUNT: 29 | return -1 30 | self.last_time = time.time() 31 | self.timeout = min(self.timeout << 1, Timeout.MAX_TIMEOUT) 32 | self.__randomize() 33 | self.count += 1 34 | print(f"Timed out! New timeout: {self.timeout + self.random}") 35 | retransmit = True 36 | 37 | return floor(self.last_time + self.timeout + self.random - time.time()), retransmit 38 | -------------------------------------------------------------------------------- /project3/src/client.py: -------------------------------------------------------------------------------- 1 | import ipaddress 2 | import re 3 | import socket 4 | import struct 5 | import subprocess 6 | 7 | from DHCPPacket import DHCPPacket 8 | from Timeout import Timeout 9 | 10 | CLIENT_PORT = 68 11 | SERVER_PORT = 67 12 | 13 | 14 | def get_network_interfaces() -> list: 15 | output = subprocess.Popen(['ip', 'link', 'show'], stdout=subprocess.PIPE).communicate()[0].decode() 16 | return re.findall(r"^\d: (.+):.*\s+link/.+ ((?:\w{2}:){5}\w{2}) ", output, re.MULTILINE) 17 | 18 | 19 | def select_nic(nics: list) -> tuple: 20 | print("Select network interface:") 21 | for i, nic in enumerate(nics): 22 | print(f"[{i + 1}] {nic[0]} ({nic[1]})") 23 | selected_num = int(input("-> ")) - 1 24 | 25 | return nics[selected_num] 26 | 27 | 28 | if __name__ == '__main__': 29 | nic_list = get_network_interfaces() 30 | selected_nic = select_nic(nic_list) 31 | 32 | sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 33 | sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) 34 | sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) 35 | sock.setsockopt(socket.SOL_SOCKET, 25, (selected_nic[0]+'\0').encode()) 36 | 37 | sock.bind(("0.0.0.0", CLIENT_PORT)) 38 | 39 | discover = DHCPPacket.create_discover(selected_nic) 40 | 41 | sock.sendto(last_sent := discover.to_bytes(), 
('', SERVER_PORT)) 42 | 43 | timeout = Timeout() 44 | timeout.start_time() 45 | wait_state = DHCPPacket.MESSAGE_TYPES["OFFER"] 46 | while True: 47 | new_timeout, retransmit = timeout.get_timeout() 48 | if new_timeout == -1: 49 | print("Timeout occurred more than max limit. Starting over...") 50 | discover = DHCPPacket.create_discover(selected_nic) 51 | sock.sendto(last_sent := discover.to_bytes(), ('', SERVER_PORT)) 52 | timeout = Timeout() 53 | timeout.start_time() 54 | new_timeout, _ = timeout.get_timeout() 55 | wait_state = DHCPPacket.MESSAGE_TYPES["OFFER"] 56 | elif retransmit: 57 | sock.sendto(last_sent, ('', SERVER_PORT)) 58 | 59 | sock.settimeout(new_timeout) 60 | try: 61 | packet_bytes = sock.recv(1024) 62 | except (BlockingIOError, socket.timeout) as e: 63 | continue 64 | 65 | packet = DHCPPacket.create_from_bytes(packet_bytes) 66 | if packet.xid != discover.xid: 67 | continue 68 | if packet.type != wait_state: 69 | continue 70 | if packet.type == DHCPPacket.MESSAGE_TYPES["NAK"]: 71 | print("Unable to get IP!") 72 | break 73 | if struct.unpack("!I", packet.options[DHCPPacket.OPTIONS["ServerId"]])[0] == int( 74 | ipaddress.IPv4Address('192.168.1.1')): 75 | continue 76 | 77 | if wait_state == DHCPPacket.MESSAGE_TYPES["OFFER"]: 78 | request = DHCPPacket.create_request(packet) 79 | sock.sendto(last_sent := request.to_bytes(), ('', SERVER_PORT)) 80 | wait_state = DHCPPacket.MESSAGE_TYPES["ACK"] 81 | timeout = Timeout() 82 | timeout.start_time() 83 | elif wait_state == DHCPPacket.MESSAGE_TYPES["ACK"]: 84 | expire = struct.unpack("!I", packet.options[DHCPPacket.OPTIONS["AddressTime"]])[0] 85 | print("Got IP address:") 86 | print(f"-> IP: {str(ipaddress.IPv4Address(packet.yiaddr))}") 87 | print(f"-> Expire: {expire} second(s)") 88 | break 89 | 90 | sock.close() 91 | -------------------------------------------------------------------------------- /project3/src/server.py: -------------------------------------------------------------------------------- 1 | import 
def mac_num_to_str(mac_num: int) -> str:
    """Render a 48-bit integer MAC address as colon-separated lower-case hex."""
    octets = mac_num.to_bytes(6, byteorder='big')
    parts = [f"{octet:02x}" for octet in octets]
    return ':'.join(parts)
def request_handle(request: 'DHCPPacket') -> 'DHCPPacket':
    """Handle a DHCPREQUEST: commit the lease to redis and answer ACK or NAK.

    Returns None when the request is addressed to another DHCP server or the
    client's MAC is black-listed; returns a NAK when the requested address
    conflicts with an existing lease or with the reservation list.
    """
    # Ignore requests directed at a different DHCP server.
    chosen_server = struct.unpack(
        "!I", request.options.get(DHCPPacket.OPTIONS["ServerId"], bytes(4)))[0]
    if chosen_server != int(IPv4Address(SERVER_ID)):
        return None

    mac_str = mac_num_to_str(request.chaddr)
    if mac_str in CONFIG["black_list"]:
        return None

    # Reserved MACs get an effectively infinite lease; everyone else gets
    # the configured lease time.
    if mac_str in CONFIG["reservation_list"]:
        lease_time = (1 << 32) - 1
    else:
        lease_time = CONFIG["lease_time"]

    requested_ip = struct.unpack(
        "!I", request.options[DHCPPacket.OPTIONS["AddressRequest"]])[0]

    # NAK when the address is already leased to a different MAC.
    owner_record = rdb.lrange(f"dhcp:ip:{requested_ip}", 0, 0)
    if len(owner_record) == 1 and int(owner_record[0].decode()) != request.chaddr:
        return DHCPPacket.create_nak(request)

    # NAK when a reserved address is requested by a reserved MAC that is
    # reserved for a *different* address.
    requested_ip_str = str(IPv4Address(requested_ip))
    if (requested_ip_str in CONFIG["reservation_list"].values()
            and mac_str in CONFIG["reservation_list"]
            and CONFIG["reservation_list"][mac_str] != requested_ip_str):
        return DHCPPacket.create_nak(request)

    # Commit the lease atomically: hostname + MAC under the IP key, plus a
    # reverse MAC -> IP mapping, both expiring with the lease.
    with rdb.pipeline() as pipe:
        pipe.delete(f"dhcp:ip:{requested_ip}")
        pipe.lpush(f"dhcp:ip:{requested_ip}",
                   request.options[DHCPPacket.OPTIONS["Hostname"]].decode(),
                   request.chaddr)
        pipe.expire(f"dhcp:ip:{requested_ip}", lease_time)
        pipe.set(f"dhcp:mac:{request.chaddr}", f"dhcp:ip:{requested_ip}", ex=lease_time)
        pipe.execute()

    return DHCPPacket.create_ack(request, requested_ip, '255.255.255.0',
                                 CONFIG["dns"], lease_time, SERVER_ID)
def network_handler():
    """Serve DHCP on UDP port 67: dispatch DISCOVER/REQUEST, send replies.

    Runs forever; started on a daemon thread by the `__main__` block.
    """
    server_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)

    server_sock.bind(("0.0.0.0", SERVER_PORT))

    # Map message type -> handler; any other type yields no reply.
    handlers = {
        DHCPPacket.MESSAGE_TYPES["DISCOVER"]: discover_handle,
        DHCPPacket.MESSAGE_TYPES["REQUEST"]: request_handle,
    }

    while True:
        packet_bytes, addr = server_sock.recvfrom(1024)
        print(f"Received packet from {addr[0]}:{addr[1]}")

        packet = DHCPPacket.create_from_bytes(packet_bytes)

        handler = handlers.get(packet.type)
        response_packet = handler(packet) if handler is not None else None

        if response_packet is not None:
            server_sock.sendto(response_packet.to_bytes(), ('', CLIENT_PORT))

    server_sock.close()  # NOTE: unreachable — the loop above never exits