├── Dockerfile ├── KmsDataBase.xml ├── LICENSE ├── README.md ├── client.py ├── dcerpc.py ├── filetimes.py ├── kmsBase.py ├── kmsPidGenFromDB.py ├── kmsPidGenerator.py ├── kmsRequestUnknown.py ├── kmsRequestV4.py ├── kmsRequestV5.py ├── kmsRequestV6.py ├── pyaes ├── __init__.py ├── aes.py ├── blockfeeder.py └── util.py ├── requirements-micropython.txt ├── rpcBase.py ├── rpcBind.py ├── rpcRequest.py ├── server.py ├── structure.py ├── systemd-unit-file ├── upy ├── __init__.py ├── codecs.py ├── fakerandom.py ├── socketserver.py ├── test_socketserver.py ├── test_uuid.py └── uuid.py ├── uxml2dict.py └── xmltok.py /Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile for py-kms 2 | 3 | FROM python:2-alpine 4 | MAINTAINER Matsuz 5 | 6 | ADD . /kms 7 | 8 | EXPOSE 1688 9 | 10 | CMD ["python", "/kms/server.py"] 11 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | py-kms itself has no license so far, however, these modules have their own 2 | license: 3 | 4 | xmltok.py from micropython-lib is licensed under MIT license. 5 | https://github.com/micropython/micropython-lib 6 | 7 | uxml2dict.py from uxml2dict is licensed under Apache license version 2.0. 8 | https://github.com/ThunderEX/uxml2dict 9 | 10 | 11 | =============== MIT License =============== 12 | 13 | The MIT License (MIT) 14 | 15 | Copyright (c) 2013, 2014 micropython-lib contributors 16 | 17 | Permission is hereby granted, free of charge, to any person obtaining a copy 18 | of this software and associated documentation files (the "Software"), to deal 19 | in the Software without restriction, including without limitation the rights 20 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 21 | copies of the Software, and to permit persons to whom the Software is 22 | furnished to do so, subject to the following conditions: 23 | 24 | The above copyright notice and this permission notice shall be included in 25 | all copies or substantial portions of the Software. 26 | 27 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 28 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 29 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 30 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 31 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 32 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 33 | THE SOFTWARE. 34 | 35 | =============== Apache License =============== 36 | 37 | Copyright 2018 ThunderEX 38 | 39 | Licensed under the Apache License, Version 2.0 (the "License"); 40 | you may not use this file except in compliance with the License. 41 | You may obtain a copy of the License at 42 | 43 | http://www.apache.org/licenses/LICENSE-2.0 44 | 45 | Unless required by applicable law or agreed to in writing, software 46 | distributed under the License is distributed on an "AS IS" BASIS, 47 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 48 | See the License for the specific language governing permissions and 49 | limitations under the License. 
50 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # History 2 | py-kms is a port of node-kms by [markedsword](http://forums.mydigitallife.info/members/183074-markedsword), which is in turn a port of one of the C#, C++, or .NET implementations of KMSEmulator. The original KMSEmulator was written by [CODYQX4](http://forums.mydigitallife.info/members/89933-CODYQX4) and is derived from the reverse-engineered code of Microsoft's official KMS. 3 | 4 | # Features 5 | - Responds to V4, V5, and V6 KMS requests. 6 | - Supports activating Windows Vista / Server 2008 and later, as well as Office 2010 and later. 7 | - If you want to activate a newer product with a correct CSVLK (not necessary unless you want to impersonate an official KMS server), generate a new KmsDataBase.xml using License Manager by Hotbird64 and replace the one in this repo. 8 | - It's written in Python. 9 | 10 | # Dependencies 11 | - Python 2.6 or higher, or MicroPython v1.9 or higher. 12 | - Python 2.6 needs the "argparse" module installed. 13 | - MicroPython needs libffi and some modules from micropython-lib; see the section below. 14 | 15 | # Usage 16 | - To start the server, execute `python server.py [listen_address] [port]`. The default listening address is `0.0.0.0` (all interfaces) and the default port is `1688`. 17 | - To run the client, use `python client.py server_address [port]`. The default port is `1688`. 18 | - To listen on IPv6, pass a valid IPv6 address, for example `::` to listen on all interfaces. 19 | 20 | # MicroPython support (unix port only) 21 | - MicroPython v1.9 or later is required, since that is where large-integer support starts. 22 | - You need libffi. 23 | - You need some standard-library modules. Use `micropython -m upip install -r requirements-micropython.txt` to install them. 24 | - Only the unix port is supported; other ports are missing parts of the standard library, and performance may be a problem. 25 | -------------------------------------------------------------------------------- /client.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import binascii 3 | import time 4 | import random 5 | import socket 6 | import string 7 | import sys 8 | import uuid 9 | import errno 10 | try: 11 | import codecs 12 | except ImportError: 13 | import upy.codecs as codecs 14 | 15 | import filetimes, rpcBind, rpcRequest 16 | from dcerpc import MSRPCHeader, MSRPCBindNak, MSRPCRespHeader, MSRPC_BINDACK, MSRPC_BINDNAK 17 | from kmsBase import kmsRequestStruct, UUID 18 | from kmsRequestV4 import kmsRequestV4, generateHash 19 | from kmsRequestV5 import kmsRequestV5 20 | from kmsRequestV6 import kmsRequestV6 21 | 22 | config = {} 23 | 24 | def main(): 25 | parser = argparse.ArgumentParser() 26 | parser.add_argument("ip", action="store", help="The IP address or hostname of the KMS host.", type=str) 27 | parser.add_argument("port", nargs="?", action="store", default=1688, help="The port the KMS service is listening on. The default is \"1688\".", type=int) 28 | parser.add_argument("-m", "--mode", dest="mode", choices=["WindowsVista","Windows7","Windows8","Windows81","Windows10","Office2010","Office2013","Office2016"], default="Windows7") 29 | parser.add_argument("-c", "--cmid", dest="cmid", default=None, help="Use this flag to manually specify a CMID to use. 
If no CMID is specified, a random CMID will be generated.", type=str) 30 | parser.add_argument("-n", "--name", dest="machineName", default=None, help="Use this flag to manually specify an ASCII machineName to use. If no machineName is specified, a random machineName will be generated.", type=str) 31 | parser.add_argument("-v", "--verbose", dest="verbose", action="store_const", const=True, default=False, help="Use this flag to enable verbose output.") 32 | parser.add_argument("-d", "--debug", dest="debug", action="store_const", const=True, default=False, help="Use this flag to enable debug output. Implies \"-v\".") 33 | config.update(vars(parser.parse_args())) 34 | checkConfig() 35 | config['call_id'] = 1 36 | if config['debug']: 37 | config['verbose'] = True 38 | updateConfig() 39 | print("Connecting to %s on port %d..." % (config['ip'], config['port'])) 40 | s = socket.create_connection((config['ip'], config['port'])) 41 | if config['verbose']: 42 | print("Connection successful!") 43 | binder = rpcBind.handler(None, config) 44 | RPC_Bind = bytes(binder.generateRequest()) 45 | if config['verbose']: 46 | print("Sending RPC bind request...") 47 | s.send(RPC_Bind) 48 | try: 49 | bindResponse = s.recv(1024) 50 | except socket.error as e: 51 | if e.errno == errno.ECONNRESET: 52 | print("Error: Connection reset by peer. Exiting...") 53 | sys.exit() 54 | else: 55 | raise 56 | if bindResponse == '' or not bindResponse: 57 | print("No data received! Exiting...") 58 | sys.exit() 59 | packetType = MSRPCHeader(bindResponse)['type'] 60 | if packetType == MSRPC_BINDACK: 61 | if config['verbose']: 62 | print("RPC bind acknowledged.") 63 | kmsRequest = createKmsRequest() 64 | requester = rpcRequest.handler(kmsRequest, config) 65 | s.send(bytes(requester.generateRequest())) 66 | response = s.recv(1024) 67 | if config['debug']: 68 | print("Response:", binascii.b2a_hex(response)) 69 | parsed = MSRPCRespHeader(response) 70 | kmsData = readKmsResponse(parsed['pduData'], kmsRequest, config) 71 | kmsResp = kmsData['response'] 72 | try: 73 | hwid = kmsData['hwid'] 74 | print("KMS Host HWID:", binascii.b2a_hex(hwid).upper()) 75 | except KeyError: 76 | pass 77 | print("KMS Host ePID:", kmsResp['kmsEpid']) 78 | print("KMS Host Current Client Count:", kmsResp['currentClientCount']) 79 | print("KMS VL Activation Interval:", kmsResp['vLActivationInterval']) 80 | print("KMS VL Renewal Interval:", kmsResp['vLRenewalInterval']) 81 | elif packetType == MSRPC_BINDNAK: 82 | print(MSRPCBindNak(bindResponse).dump()) 83 | sys.exit() 84 | else: 85 | print("Something went wrong.") 86 | sys.exit() 87 | 88 | def checkConfig(): 89 | if config['cmid'] is not None: 90 | try: 91 | uuid.UUID(config['cmid']) 92 | except ValueError: 93 | print("Error: Bad CMID. 
Exiting...") 94 | raise 95 | if config['machineName'] is not None: 96 | if len(config['machineName']) < 2 or len(config['machineName']) > 63: 97 | print("Error: machineName must be between 2 and 63 characters in length.") 98 | sys.exit() 99 | 100 | def updateConfig(): 101 | if config['mode'] == 'WindowsVista': 102 | config['RequiredClientCount'] = 25 103 | config['KMSProtocolMajorVersion'] = 4 104 | config['KMSProtocolMinorVersion'] = 0 105 | config['KMSClientLicenseStatus'] = 2 106 | config['KMSClientAppID'] = "55c92734-d682-4d71-983e-d6ec3f16059f" 107 | config['KMSClientSkuID'] = "cfd8ff08-c0d7-452b-9f60-ef5c70c32094" 108 | config['KMSClientKMSCountedID'] = "212a64dc-43b1-4d3d-a30c-2fc69d2095c6" 109 | elif config['mode'] == 'Windows7': 110 | config['RequiredClientCount'] = 25 111 | config['KMSProtocolMajorVersion'] = 4 112 | config['KMSProtocolMinorVersion'] = 0 113 | config['KMSClientLicenseStatus'] = 2 114 | config['KMSClientAppID'] = "55c92734-d682-4d71-983e-d6ec3f16059f" 115 | config['KMSClientSkuID'] = "ae2ee509-1b34-41c0-acb7-6d4650168915" 116 | config['KMSClientKMSCountedID'] = "7fde5219-fbfa-484a-82c9-34d1ad53e856" 117 | elif config['mode'] == 'Windows8': 118 | config['RequiredClientCount'] = 25 119 | config['KMSProtocolMajorVersion'] = 5 120 | config['KMSProtocolMinorVersion'] = 0 121 | config['KMSClientLicenseStatus'] = 2 122 | config['KMSClientAppID'] = "55c92734-d682-4d71-983e-d6ec3f16059f" 123 | config['KMSClientSkuID'] = "458e1bec-837a-45f6-b9d5-925ed5d299de" 124 | config['KMSClientKMSCountedID'] = "3c40b358-5948-45af-923b-53d21fcc7e79" 125 | elif config['mode'] == 'Windows81': 126 | config['RequiredClientCount'] = 25 127 | config['KMSProtocolMajorVersion'] = 6 128 | config['KMSProtocolMinorVersion'] = 0 129 | config['KMSClientLicenseStatus'] = 2 130 | config['KMSClientAppID'] = "55c92734-d682-4d71-983e-d6ec3f16059f" 131 | config['KMSClientSkuID'] = "81671aaf-79d1-4eb1-b004-8cbbe173afea" 132 | config['KMSClientKMSCountedID'] = "cb8fc780-2c05-495a-9710-85afffc904d7" 133 | elif config['mode'] == 'Windows10': 134 | config['RequiredClientCount'] = 25 135 | config['KMSProtocolMajorVersion'] = 6 136 | config['KMSProtocolMinorVersion'] = 0 137 | config['KMSClientLicenseStatus'] = 2 138 | config['KMSClientAppID'] = "55c92734-d682-4d71-983e-d6ec3f16059f" 139 | config['KMSClientSkuID'] = "73111121-5638-40f6-bc11-f1d7b0d64300" 140 | config['KMSClientKMSCountedID'] = "58e2134f-8e11-4d17-9cb2-91069c151148" 141 | elif config['mode'] == 'Office2010': 142 | config['RequiredClientCount'] = 5 143 | config['KMSProtocolMajorVersion'] = 4 144 | config['KMSProtocolMinorVersion'] = 0 145 | config['KMSClientLicenseStatus'] = 2 146 | config['KMSClientAppID'] = "59a52881-a989-479d-af46-f275c6370663" 147 | config['KMSClientSkuID'] = "6f327760-8c5c-417c-9b61-836a98287e0c" 148 | config['KMSClientKMSCountedID'] = "e85af946-2e25-47b7-83e1-bebcebeac611" 149 | elif config['mode'] == 'Office2013': 150 | config['RequiredClientCount'] = 5 151 | config['KMSProtocolMajorVersion'] = 5 152 | config['KMSProtocolMinorVersion'] = 0 153 | config['KMSClientLicenseStatus'] = 2 154 | config['KMSClientAppID'] = "0ff1ce15-a989-479d-af46-f275c6370663" 155 | config['KMSClientSkuID'] = "b322da9c-a2e2-4058-9e4e-f59a6970bd69" 156 | config['KMSClientKMSCountedID'] = "e6a6f1bf-9d40-40c3-aa9f-c77ba21578c0" 157 | elif config['mode'] == 'Office2016': 158 | config['RequiredClientCount'] = 5 159 | config['KMSProtocolMajorVersion'] = 6 160 | config['KMSProtocolMinorVersion'] = 0 161 | config['KMSClientLicenseStatus'] = 2 162 | 
config['KMSClientAppID'] = "0ff1ce15-a989-479d-af46-f275c6370663" 163 | config['KMSClientSkuID'] = "d450596f-894d-49e0-966a-fd39ed4c4c64" 164 | config['KMSClientKMSCountedID'] = "85b5f61b-320b-4be3-814a-b76b2bfafc82" 165 | 166 | def createKmsRequestBase(): 167 | requestDict = kmsRequestStruct() 168 | requestDict['versionMinor'] = config['KMSProtocolMinorVersion'] 169 | requestDict['versionMajor'] = config['KMSProtocolMajorVersion'] 170 | requestDict['isClientVm'] = 0 171 | requestDict['licenseStatus'] = config['KMSClientLicenseStatus'] 172 | requestDict['graceTime'] = 43200 173 | requestDict['applicationId'] = UUID(uuid.UUID(config['KMSClientAppID']).bytes_le) 174 | requestDict['skuId'] = UUID(uuid.UUID(config['KMSClientSkuID']).bytes_le) 175 | requestDict['kmsCountedId'] = UUID(uuid.UUID(config['KMSClientKMSCountedID']).bytes_le) 176 | requestDict['clientMachineId'] = UUID(uuid.UUID(config['cmid']).bytes_le if (config['cmid'] is not None) else uuid.uuid4().bytes_le) 177 | requestDict['previousClientMachineId'] = b'\0' * 16 #requestDict['clientMachineId'] # I'm pretty sure this is supposed to be a null UUID. 178 | requestDict['requiredClientCount'] = config['RequiredClientCount'] 179 | requestDict['requestTime'] = filetimes.timestamp2filetime(time.time()) 180 | requestDict['machineName'] = codecs.encode(config['machineName'] or ''.join(random.choice(string.ascii_letters + string.digits) for i in range(random.randint(2,63))), 'utf_16_le') 181 | requestDict['mnPad'] = codecs.encode('\0', 'utf_16_le') * (63 - len(codecs.decode(requestDict['machineName'], 'utf_16_le'))) 182 | 183 | # Debug Stuff 184 | if config['debug']: 185 | print("Request Base Dictionary:", requestDict.dump()) 186 | 187 | return requestDict 188 | 189 | def createKmsRequest(): 190 | # Update the call ID 191 | config['call_id'] += 1 192 | 193 | # KMS Protocol Major Version 194 | if config['KMSProtocolMajorVersion'] == 4: 195 | handler = kmsRequestV4(None, config) 196 | elif config['KMSProtocolMajorVersion'] == 5: 197 | handler = kmsRequestV5(None, config) 198 | elif config['KMSProtocolMajorVersion'] == 6: 199 | handler = kmsRequestV6(None, config) 200 | else: 201 | return None 202 | 203 | requestBase = createKmsRequestBase() 204 | return handler.generateRequest(requestBase) 205 | 206 | def readKmsResponse(data, request, config): 207 | if config['KMSProtocolMajorVersion'] == 4: 208 | print("Received V4 response") 209 | response = readKmsResponseV4(data) 210 | elif config['KMSProtocolMajorVersion'] == 5: 211 | print("Received V5 response") 212 | response = readKmsResponseV5(data) 213 | elif config['KMSProtocolMajorVersion'] == 6: 214 | print("Received V6 response") 215 | response = readKmsResponseV6(data) 216 | else: 217 | print("Unhandled response version: %d.%d" % (config['KMSProtocolMajorVersion'], config['KMSProtocolMinorVersion'])) 218 | print("I'm not even sure how this happened...") 219 | return response 220 | 221 | def readKmsResponseV4(data): 222 | response = kmsRequestV4.ResponseV4(data) 223 | hashed = generateHash(bytearray(bytes(response['response']))) 224 | print("Response Hash has expected value:", hashed == response['hash']) 225 | return response 226 | 227 | def readKmsResponseV5(data): 228 | response = kmsRequestV5.ResponseV5(data) 229 | decrypted = kmsRequestV5(data, config).decryptResponse(response) 230 | return decrypted 231 | 232 | def readKmsResponseV6(data): 233 | response = kmsRequestV6.ResponseV5(data) 234 | decrypted = kmsRequestV6(data, config).decryptResponse(response) 235 | message = 
decrypted['message'] 236 | return message 237 | 238 | if __name__ == "__main__": 239 | main() 240 | -------------------------------------------------------------------------------- /filetimes.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2009, David Buxton 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are 6 | # met: 7 | # 8 | # * Redistributions of source code must retain the above copyright 9 | # notice, this list of conditions and the following disclaimer. 10 | # * Redistributions in binary form must reproduce the above copyright 11 | # notice, this list of conditions and the following disclaimer in the 12 | # documentation and/or other materials provided with the distribution. 13 | # 14 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 15 | # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 16 | # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 17 | # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 18 | # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 19 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 20 | # TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 21 | # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 22 | # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 23 | # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | """Tools to convert between Unix timestamps and Microsoft FILETIME values. 26 | """ 27 | 28 | 29 | # http://support.microsoft.com/kb/167296 30 | # How To Convert a UNIX time_t to a Win32 FILETIME or SYSTEMTIME 31 | EPOCH_AS_FILETIME = 116444736000000000 # January 1, 1970 as MS file time 32 | HUNDREDS_OF_NANOSECONDS = 10000000 33 | 34 | 35 | def timestamp2filetime(ts): 36 | """Converts a POSIX timestamp (seconds since the Unix epoch, as returned by 37 | time.time()) to a Microsoft FILETIME value, i.e. 100-nanosecond intervals since January 1, 1601 UTC. 38 | 39 | >>> import calendar 40 | >>> "%.0f" % timestamp2filetime(calendar.timegm((2009, 7, 25, 23, 0, 0, 0, 0, 0))) 41 | '128930364000000000' 42 | 43 | >>> "%.0f" % timestamp2filetime(calendar.timegm((1970, 1, 1, 0, 0, 0, 0, 0))) 44 | '116444736000000000' 45 | 46 | >>> timestamp2filetime(calendar.timegm((2009, 7, 25, 23, 0, 0, 0, 0, 0)) + 0.001) 47 | 128930364000010000 48 | """ 49 | return int(ts * HUNDREDS_OF_NANOSECONDS) + EPOCH_AS_FILETIME 50 | 51 | 52 | def filetime2timestamp(ft): 53 | """Converts a Microsoft FILETIME value to a POSIX timestamp. The result is 54 | a float giving seconds since the Unix epoch (UTC). 
55 | 56 | >>> filetime2timestamp(116444736000000000) 57 | 0.0 58 | 59 | >>> filetime2timestamp(128930364000000000) 60 | 1248562800.0 61 | 62 | >>> filetime2timestamp(128930364000001000) 63 | 1248562800.0001 64 | """ 65 | # Get seconds and remainder in terms of Unix epoch 66 | return (ft - EPOCH_AS_FILETIME) / float(HUNDREDS_OF_NANOSECONDS) 67 | 68 | 69 | if __name__ == "__main__": 70 | import doctest 71 | 72 | doctest.testmod() 73 | 74 | -------------------------------------------------------------------------------- /kmsBase.py: -------------------------------------------------------------------------------- 1 | import binascii 2 | import filetimes 3 | from kmsPidGenFromDB import epidGenerator, kmsdb 4 | import os 5 | import os.path 6 | import sys 7 | import time 8 | try: 9 | import uuid 10 | except ImportError: 11 | import upy.uuid as uuid 12 | try: 13 | import codecs 14 | except ImportError: 15 | import upy.codecs as codecs 16 | 17 | from structure import Structure 18 | 19 | # sqlite3 is optional 20 | try: 21 | import sqlite3 22 | except ImportError: 23 | pass 24 | 25 | from xmltok import tokenize 26 | from uxml2dict import parse 27 | 28 | licenseStates = { 29 | 0 : "Unlicensed", 30 | 1 : "Activated", 31 | 2 : "Grace Period", 32 | 3 : "Out-of-Tolerance Grace Period", 33 | 4 : "Non-Genuine Grace Period", 34 | 5 : "Notifications Mode", 35 | 6 : "Extended Grace Period", 36 | } 37 | 38 | licenseStatesEnum = { 39 | 'unlicensed': 0, 40 | 'licensed': 1, 41 | 'oobGrace': 2, 42 | 'ootGrace': 3, 43 | 'nonGenuineGrace': 4, 44 | 'notification': 5, 45 | 'extendedGrace': 6 46 | } 47 | 48 | errorCodes = { 49 | 'SL_E_VL_NOT_WINDOWS_SLP' : 0xC004F035, 50 | 'SL_E_VL_NOT_ENOUGH_COUNT' : 0xC004F038, 51 | 'SL_E_VL_BINDING_SERVICE_NOT_ENABLED' : 0xC004F039, 52 | 'SL_E_VL_INFO_PRODUCT_USER_RIGHT' : 0x4004F040, 53 | 'SL_I_VL_OOB_NO_BINDING_SERVER_REGISTRATION' : 0x4004F041, 54 | 'SL_E_VL_KEY_MANAGEMENT_SERVICE_ID_MISMATCH' : 0xC004F042, 55 | 'SL_E_VL_MACHINE_NOT_BOUND' : 0xC004F056 56 | } 57 | 58 | class UUID(Structure): 59 | commonHdr = () 60 | structure = ( 61 | ('raw', '16s'), 62 | ) 63 | 64 | def get(self): 65 | return uuid.UUID(bytes_le=self.__bytes__()) 66 | 67 | class kmsRequestStruct(Structure): 68 | commonHdr = () 69 | structure = ( 70 | ('versionMinor', '> 4 36 | 37 | # Remainding bytes 38 | k = messageSize & 0xf 39 | 40 | # Hash 41 | for i in range(0, j): 42 | xorBuffer(message, i << 4, hashBuffer, 16) 43 | hashBuffer = bytearray(aes.encrypt(hashBuffer)) 44 | 45 | # Bit Padding 46 | ii = 0 47 | for i in range(j << 4, k + (j << 4)): 48 | lastBlock[ii] = message[i] 49 | ii += 1 50 | lastBlock[k] = 0x80 51 | 52 | xorBuffer(lastBlock, 0, hashBuffer, 16) 53 | hashBuffer = bytearray(aes.encrypt(hashBuffer)) 54 | 55 | return bytes(hashBuffer) 56 | 57 | 58 | class kmsRequestV4(kmsBase): 59 | class RequestV4(Structure): 60 | commonHdr = () 61 | structure = ( 62 | ('bodyLength1', '= 16: return 16 56 | return 0 57 | 58 | # After padding, we may have more than one block 59 | def _block_final_encrypt(self, data, padding = PADDING_DEFAULT): 60 | if padding == PADDING_DEFAULT: 61 | data = append_PKCS7_padding(data) 62 | 63 | elif padding == PADDING_NONE: 64 | if len(data) != 16: 65 | raise Exception('invalid data length for final block') 66 | else: 67 | raise Exception('invalid padding option') 68 | 69 | if len(data) == 32: 70 | return self.encrypt(data[:16]) + self.encrypt(data[16:]) 71 | 72 | return self.encrypt(data) 73 | 74 | 75 | def _block_final_decrypt(self, data, padding = PADDING_DEFAULT): 76 | if padding 
== PADDING_DEFAULT: 77 | return strip_PKCS7_padding(self.decrypt(data)) 78 | 79 | if padding == PADDING_NONE: 80 | if len(data) != 16: 81 | raise Exception('invalid data length for final block') 82 | return self.decrypt(data) 83 | 84 | raise Exception('invalid padding option') 85 | 86 | AESBlockModeOfOperation._can_consume = _block_can_consume 87 | AESBlockModeOfOperation._final_encrypt = _block_final_encrypt 88 | AESBlockModeOfOperation._final_decrypt = _block_final_decrypt 89 | 90 | 91 | 92 | # CFB is a segment cipher 93 | 94 | def _segment_can_consume(self, size): 95 | return self.segment_bytes * int(size // self.segment_bytes) 96 | 97 | # CFB can handle a non-segment-sized block at the end using the remaining cipherblock 98 | def _segment_final_encrypt(self, data, padding = PADDING_DEFAULT): 99 | if padding != PADDING_DEFAULT: 100 | raise Exception('invalid padding option') 101 | 102 | faux_padding = (b'\x00' * (self.segment_bytes - (len(data) % self.segment_bytes))) 103 | padded = data + bytes(faux_padding) 104 | return self.encrypt(padded)[:len(data)] 105 | 106 | # CFB can handle a non-segment-sized block at the end using the remaining cipherblock 107 | def _segment_final_decrypt(self, data, padding = PADDING_DEFAULT): 108 | if padding != PADDING_DEFAULT: 109 | raise Exception('invalid padding option') 110 | 111 | faux_padding = (b'\x00' * (self.segment_bytes - (len(data) % self.segment_bytes))) 112 | padded = data + bytes(faux_padding) 113 | return self.decrypt(padded)[:len(data)] 114 | 115 | AESSegmentModeOfOperation._can_consume = _segment_can_consume 116 | AESSegmentModeOfOperation._final_encrypt = _segment_final_encrypt 117 | AESSegmentModeOfOperation._final_decrypt = _segment_final_decrypt 118 | 119 | 120 | 121 | # OFB and CTR are stream ciphers 122 | 123 | def _stream_can_consume(self, size): 124 | return size 125 | 126 | def _stream_final_encrypt(self, data, padding = PADDING_DEFAULT): 127 | if padding not in [PADDING_NONE, PADDING_DEFAULT]: 128 | raise Exception('invalid padding option') 129 | 130 | return self.encrypt(data) 131 | 132 | def _stream_final_decrypt(self, data, padding = PADDING_DEFAULT): 133 | if padding not in [PADDING_NONE, PADDING_DEFAULT]: 134 | raise Exception('invalid padding option') 135 | 136 | return self.decrypt(data) 137 | 138 | AESStreamModeOfOperation._can_consume = _stream_can_consume 139 | AESStreamModeOfOperation._final_encrypt = _stream_final_encrypt 140 | AESStreamModeOfOperation._final_decrypt = _stream_final_decrypt 141 | 142 | 143 | 144 | class BlockFeeder(object): 145 | '''The super-class for objects to handle chunking a stream of bytes 146 | into the appropriate block size for the underlying mode of operation 147 | and applying (or stripping) padding, as necessary.''' 148 | 149 | def __init__(self, mode, feed, final, padding = PADDING_DEFAULT): 150 | self._mode = mode 151 | self._feed = feed 152 | self._final = final 153 | self._buffer = b"" 154 | self._padding = padding 155 | 156 | def feed(self, data = None): 157 | '''Provide bytes to encrypt (or decrypt), returning any bytes 158 | possible from this or any previous calls to feed. 
159 | 160 | Call with None or an empty string to flush the mode of 161 | operation and return any final bytes; no further calls to 162 | feed may be made.''' 163 | 164 | if self._buffer is None: 165 | raise ValueError('already finished feeder') 166 | 167 | # Finalize; process the spare bytes we were keeping 168 | if not data: 169 | result = self._final(self._buffer, self._padding) 170 | self._buffer = None 171 | return result 172 | 173 | self._buffer += bytes(data) 174 | 175 | # We keep 16 bytes around so we can determine padding 176 | result = b'' 177 | while len(self._buffer) > 16: 178 | can_consume = self._mode._can_consume(len(self._buffer) - 16) 179 | if can_consume == 0: break 180 | result += self._feed(self._buffer[:can_consume]) 181 | self._buffer = self._buffer[can_consume:] 182 | 183 | return result 184 | 185 | 186 | class Encrypter(BlockFeeder): 187 | 'Accepts bytes of plaintext and returns encrypted ciphertext.' 188 | 189 | def __init__(self, mode, padding = PADDING_DEFAULT): 190 | BlockFeeder.__init__(self, mode, mode.encrypt, mode._final_encrypt, padding) 191 | 192 | 193 | class Decrypter(BlockFeeder): 194 | 'Accepts bytes of ciphertext and returns decrypted plaintext.' 195 | 196 | def __init__(self, mode, padding = PADDING_DEFAULT): 197 | BlockFeeder.__init__(self, mode, mode.decrypt, mode._final_decrypt, padding) 198 | 199 | 200 | # 8kb blocks 201 | BLOCK_SIZE = (1 << 13) 202 | 203 | def _feed_stream(feeder, in_stream, out_stream, block_size = BLOCK_SIZE): 204 | 'Uses feeder to read and convert from in_stream and write to out_stream.' 205 | 206 | while True: 207 | chunk = in_stream.read(block_size) 208 | if not chunk: 209 | break 210 | converted = feeder.feed(chunk) 211 | out_stream.write(converted) 212 | converted = feeder.feed() 213 | out_stream.write(converted) 214 | 215 | 216 | def encrypt_stream(mode, in_stream, out_stream, block_size = BLOCK_SIZE, padding = PADDING_DEFAULT): 217 | 'Encrypts a stream of bytes from in_stream to out_stream using mode.' 218 | 219 | encrypter = Encrypter(mode, padding = padding) 220 | _feed_stream(encrypter, in_stream, out_stream, block_size) 221 | 222 | 223 | def decrypt_stream(mode, in_stream, out_stream, block_size = BLOCK_SIZE, padding = PADDING_DEFAULT): 224 | 'Decrypts a stream of bytes from in_stream to out_stream using mode.' 225 | 226 | decrypter = Decrypter(mode, padding = padding) 227 | _feed_stream(decrypter, in_stream, out_stream, block_size) 228 | -------------------------------------------------------------------------------- /pyaes/util.py: -------------------------------------------------------------------------------- 1 | # The MIT License (MIT) 2 | # 3 | # Copyright (c) 2014 Richard Moore 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to deal 7 | # in the Software without restriction, including without limitation the rights 8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | # copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 
14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | 23 | # Why to_bufferable? 24 | # Python 3 is very different from Python 2.x when it comes to strings of text 25 | # and strings of bytes; in Python 3, strings of bytes do not exist, instead to 26 | # represent arbitrary binary data, we must use the "bytes" object. This method 27 | # ensures the object behaves as we need it to. 28 | 29 | 30 | def append_PKCS7_padding(data): 31 | pad = 16 - (len(data) % 16) 32 | return data + bytes(bytearray([pad])) * pad 33 | 34 | def strip_PKCS7_padding(data): 35 | if len(data) % 16 != 0: 36 | raise ValueError("invalid length") 37 | 38 | pad = bytearray(data)[-1] 39 | 40 | if pad > 16: 41 | return data 42 | # raise ValueError("invalid padding byte") 43 | 44 | return data[:-pad] 45 | -------------------------------------------------------------------------------- /requirements-micropython.txt: -------------------------------------------------------------------------------- 1 | micropython-argparse 2 | micropython-errno 3 | micropython-future 4 | micropython-hmac 5 | micropython-stat 6 | micropython-warnings 7 | micropython-binascii 8 | micropython-ffilib 9 | micropython-hashlib 10 | micropython-os 11 | micropython-os.path 12 | micropython-socket 13 | micropython-time -------------------------------------------------------------------------------- /rpcBase.py: -------------------------------------------------------------------------------- 1 | 2 | class rpcBase: 3 | 4 | packetFlags = { 5 | 'firstFrag' : 1, # 0x01 6 | 'lastFrag' : 2, # 0x02 7 | 'cancelPending' : 4, # 0x04 8 | 'reserved' : 8, # 0x08 9 | 'multiplex' : 16, # 0x10 10 | 'didNotExecute' : 32, # 0x20 11 | 'maybe' : 64, # 0x40 12 | 'objectUuid' : 128 # 0x80 13 | } 14 | 15 | def __init__(self, data, config): 16 | self.data = data 17 | self.config = config 18 | 19 | def populate(self): 20 | return self.generateResponse(self.parseRequest()) 21 | 22 | def parseRequest(self): 23 | return {} 24 | -------------------------------------------------------------------------------- /rpcBind.py: -------------------------------------------------------------------------------- 1 | import binascii 2 | import rpcBase 3 | try: 4 | import uuid 5 | except ImportError: 6 | import upy.uuid as uuid 7 | 8 | from dcerpc import MSRPCHeader, MSRPCBindAck, MSRPC_BINDACK, MSRPC_BIND, MSRPC_ALTERCTX, MSRPC_ALTERCTX_R 9 | from structure import Structure 10 | 11 | uuidNDR32 = uuid.UUID('8a885d04-1ceb-11c9-9fe8-08002b104860') 12 | uuidNDR64 = uuid.UUID('71710533-beba-4937-8319-b5dbef9ccc36') 13 | uuidTime = uuid.UUID('6cb71c2c-9812-4540-0300-000000000000') 14 | uuidEmpty = uuid.UUID('00000000-0000-0000-0000-000000000000') 15 | 16 | class CtxItem(Structure): 17 | structure = ( 18 | ('ContextID', ' 16: 64 | print("Error: HWID \"%s\" is invalid. Hex string is too long." % binascii.b2a_hex(config['hwid'])) 65 | return 66 | except TypeError: 67 | print("Error: HWID \"%s\" is invalid. Odd-length hex string." 
% binascii.b2a_hex(config['hwid'])) 68 | return 69 | if not config['lcid']: 70 | # http://stackoverflow.com/questions/3425294/how-to-detect-the-os-default-language-in-python 71 | if hasattr(sys, 'implementation') and sys.implementation.name == 'micropython': 72 | config['lcid'] = 1033 73 | elif os.name == 'nt': 74 | import ctypes 75 | 76 | config['lcid'] = ctypes.windll.kernel32.GetUserDefaultUILanguage() # TODO: or GetSystemDefaultUILanguage? 77 | else: 78 | import locale 79 | 80 | try: 81 | config['lcid'] = next(k for k, v in locale.windows_locale.items() if v == locale.getdefaultlocale()[0]) 82 | except StopIteration: 83 | config['lcid'] = 1033 84 | if config['debug']: 85 | config['verbose'] = True 86 | try: 87 | import sqlite3 88 | except ImportError: 89 | print("Warning: Module \"sqlite3\" is not installed--database support disabled.") 90 | config['dbSupport'] = False 91 | else: 92 | config['dbSupport'] = True 93 | TCPServer.address_family = socket.getaddrinfo(config['ip'], config['port'], 0, socket.SOCK_DGRAM)[0][0] 94 | try: 95 | server = TCPServer((config['ip'], config['port']), kmsServer) 96 | except OSError: # micropython can't recognize 2-tuple server_address 97 | server = TCPServer((config['ip'], config['port'], socket.AF_INET6), kmsServer) 98 | server.timeout = 5 99 | print("TCP server listening at %s on port %d." % (config['ip'],config['port'])) 100 | server.serve_forever() 101 | 102 | class kmsServer(socketserver.BaseRequestHandler): 103 | def setup(self): 104 | print("Connection accepted: %s:%d" % (self.client_address[0],self.client_address[1])) 105 | 106 | def handle(self): 107 | while True: 108 | # self.request is the TCP socket connected to the client 109 | try: 110 | data = self.request.recv(1024) 111 | except socket.error as e: 112 | if e.errno == errno.ECONNRESET: 113 | print("Error: Connection reset by peer.") 114 | break 115 | else: 116 | raise 117 | if not data: 118 | print("No data received!") 119 | break 120 | # data = bytearray(data.strip()) 121 | # print binascii.b2a_hex(str(data)) 122 | packetType = MSRPCHeader(data)['type'] 123 | if packetType in (MSRPC_BIND, MSRPC_ALTERCTX): 124 | if config['verbose']: 125 | print("RPC bind request received.") 126 | handler = rpcBind.handler(data, config) 127 | elif packetType == MSRPC_REQUEST: 128 | if config['verbose']: 129 | print("Received activation request.") 130 | handler = rpcRequest.handler(data, config) 131 | else: 132 | print("Error: Invalid RPC request type", packetType) 133 | break 134 | 135 | res = handler.populate().__bytes__() 136 | self.request.send(res) 137 | 138 | if packetType == MSRPC_BIND: 139 | if config['verbose']: 140 | print("RPC bind acknowledged.") 141 | elif packetType == MSRPC_REQUEST: 142 | if config['verbose']: 143 | print("Responded to activation request.") 144 | break 145 | 146 | def finish(self): 147 | self.request.close() 148 | print("Connection closed: %s:%d" % (self.client_address[0],self.client_address[1])) 149 | 150 | if __name__ == "__main__": 151 | main() 152 | -------------------------------------------------------------------------------- /structure.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2003-2012 CORE Security Technologies 2 | # 3 | # This software is provided under under a slightly modified version 4 | # of the Apache Software License. See the accompanying LICENSE file 5 | # for more information. 
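# Quick illustrative sketch of how Structure subclasses are used throughout
# py-kms (the class and field names below are made up; see the _Test_* classes
# at the bottom of this file and kmsBase.py for real definitions):
#
#   class Sample(Structure):
#       structure = (
#           ('length', '<H'),   # struct-style specifier: little-endian unsigned short
#           ('payload', ':'),   # ':' copies raw bytes as-is (on unpack, takes what's left)
#       )
#
#   s = Sample()
#   s['length'] = 4
#   s['payload'] = b'\x01\x02\x03\x04'
#   raw = bytes(s)              # pack via __bytes__ / getData()
#   parsed = Sample(raw)        # unpack by passing raw bytes to the constructor
#   assert parsed['length'] == 4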
6 | # 7 | # $Id$ 8 | # 9 | from __future__ import print_function 10 | try: 11 | from struct import pack, unpack, calcsize 12 | except ImportError: 13 | from ustruct import pack, unpack, calcsize 14 | 15 | debug = 0 16 | 17 | class Structure: 18 | """ sublcasses can define commonHdr and/or structure. 19 | each of them is an tuple of either two: (fieldName, format) or three: (fieldName, ':', class) fields. 20 | [it can't be a dictionary, because order is important] 21 | 22 | where format specifies how the data in the field will be converted to/from bytes (string) 23 | class is the class to use when unpacking ':' fields. 24 | 25 | each field can only contain one value (or an array of values for *) 26 | i.e. struct.pack('Hl',1,2) is valid, but format specifier 'Hl' is not (you must use 2 dfferent fields) 27 | 28 | format specifiers: 29 | specifiers from module pack can be used with the same format 30 | see struct.__doc__ (pack/unpack is finally called) 31 | x [padding byte] 32 | c [character] 33 | b [signed byte] 34 | B [unsigned byte] 35 | h [signed short] 36 | H [unsigned short] 37 | l [signed long] 38 | L [unsigned long] 39 | i [signed integer] 40 | I [unsigned integer] 41 | q [signed long long (quad)] 42 | Q [unsigned long long (quad)] 43 | s [string (array of chars), must be preceded with length in format specifier, padded with zeros] 44 | p [pascal string (includes byte count), must be preceded with length in format specifier, padded with zeros] 45 | f [float] 46 | d [double] 47 | = [native byte ordering, size and alignment] 48 | @ [native byte ordering, standard size and alignment] 49 | ! [network byte ordering] 50 | < [little endian] 51 | > [big endian] 52 | 53 | usual printf like specifiers can be used (if started with %) 54 | [not recommeneded, there is no why to unpack this] 55 | 56 | %08x will output an 8 bytes hex 57 | %s will output a string 58 | %s\\x00 will output a NUL terminated string 59 | %d%d will output 2 decimal digits (against the very same specification of Structure) 60 | ... 61 | 62 | some additional format specifiers: 63 | : just copy the bytes from the field into the output string (input may be string, other structure, or anything responding to __str__()) (for unpacking, all what's left is returned) 64 | z same as :, but adds a NUL byte at the end (asciiz) (for unpacking the first NUL byte is used as terminator) [asciiz string] 65 | u same as z, but adds two NUL bytes at the end (after padding to an even size with NULs). 
(same for unpacking) [unicode string] 66 | w DCE-RPC/NDR string (it's a macro for [ ' 2: 139 | dataClassOrCode = field[2] 140 | try: 141 | self[field[0]] = self.unpack(field[1], data[:size], dataClassOrCode = dataClassOrCode, field = field[0]) 142 | except Exception as e: 143 | e.args += ("When unpacking field '%s | %s | %r[:%d]'" % (field[0], field[1], data, size),) 144 | raise 145 | 146 | size = self.calcPackSize(field[1], self[field[0]], field[0]) 147 | if self.alignment and size % self.alignment: 148 | size += self.alignment - (size % self.alignment) 149 | data = data[size:] 150 | 151 | if debug: 152 | self.dump(indent=4) 153 | return self 154 | 155 | def __setitem__(self, key, value): 156 | self.fields[key] = value 157 | self.data = None # force recompute 158 | 159 | def __getitem__(self, key): 160 | return self.fields[key] 161 | 162 | def __delitem__(self, key): 163 | del self.fields[key] 164 | 165 | def __bytes__(self): 166 | if debug: 167 | self.dump(indent=4) 168 | return self.getData() 169 | 170 | def __str__(self): 171 | """ 172 | In python 2, func `bytes` is alias of `str` and redirect to `__str__`, 173 | workaround here is to redirect back to `__bytes__` 174 | """ 175 | if str is bytes: 176 | return self.__bytes__() 177 | else: 178 | return super(Structure, self).__str__() 179 | 180 | def __len__(self): 181 | # XXX: improve 182 | return len(self.getData()) 183 | 184 | def pack(self, format, data, field = None): 185 | if debug: 186 | print(" pack( %s | %r | %s)" % (format, data, field)) 187 | 188 | if field: 189 | addressField = self.findAddressFieldFor(field) 190 | if (addressField is not None) and (data is None): 191 | return b'' 192 | 193 | # void specifier 194 | if format[:1] == '_': 195 | return b'' 196 | 197 | # quote specifier 198 | if format[:1] == "'" or format[:1] == '"': 199 | return format[1:].encode() 200 | 201 | # code specifier 202 | two = format.split('=') 203 | if len(two) >= 2: 204 | if data: 205 | return self.pack(two[0], data) 206 | else: 207 | fields = {'self':self} 208 | fields.update(self.fields) 209 | self[field] = eval(two[1], {}, fields) 210 | return self.pack(two[0], self[field]) 211 | 212 | # address specifier 213 | two = format.split('&') 214 | if len(two) == 2: 215 | if data: 216 | return self.pack(two[0], data) 217 | else: 218 | if (two[1] in self.fields) and (self[two[1]] is not None): 219 | self[field] = id(self[two[1]]) & ((1<<(calcsize(two[0])*8))-1) 220 | else: 221 | self[field] = 0 222 | return self.pack(two[0], self[field]) 223 | 224 | # length specifier 225 | two = format.split('-') 226 | if len(two) == 2: 227 | if data: 228 | return self.pack(two[0],data) 229 | else: 230 | self[field] = self.calcPackFieldSize(two[1]) 231 | return self.pack(two[0], self[field]) 232 | 233 | # array specifier 234 | two = format.split('*') 235 | if len(two) == 2: 236 | answer = b'' 237 | for each in data: 238 | answer += self.pack(two[1], each) 239 | if two[0]: 240 | if two[0].isdigit(): 241 | if int(two[0]) != len(data): 242 | raise Exception("Array field has a constant size, and it doesn't match the actual value") 243 | else: 244 | return self.pack(two[0], len(data))+answer 245 | return answer 246 | 247 | # "printf" string specifier 248 | if format[:1] == '%': 249 | # format string like specifier 250 | return format % data 251 | 252 | # asciiz specifier 253 | if format[:1] == 'z': 254 | return data+b'\0' 255 | 256 | # unicode specifier 257 | if format[:1] == 'u': 258 | return data+b'\0\0' + (len(data) & 1 and b'\0' or b'') 259 | 260 | # DCE-RPC/NDR string 
specifier 261 | if format[:1] == 'w': 262 | if len(data) == 0: 263 | data = b'\0\0' 264 | elif len(data) % 2: 265 | data = data.encode() + b'\0' 266 | l = pack('= 2: 316 | return self.unpack(two[0],data) 317 | 318 | # length specifier 319 | two = format.split('-') 320 | if len(two) == 2: 321 | return self.unpack(two[0],data) 322 | 323 | # array specifier 324 | two = format.split('*') 325 | if len(two) == 2: 326 | answer = [] 327 | sofar = 0 328 | if two[0].isdigit(): 329 | number = int(two[0]) 330 | elif two[0]: 331 | sofar += self.calcUnpackSize(two[0], data) 332 | number = self.unpack(two[0], data[:sofar]) 333 | else: 334 | number = -1 335 | 336 | while number and sofar < len(data): 337 | nsofar = sofar + self.calcUnpackSize(two[1],data[sofar:]) 338 | answer.append(self.unpack(two[1], data[sofar:nsofar], dataClassOrCode)) 339 | number -= 1 340 | sofar = nsofar 341 | return answer 342 | 343 | # "printf" string specifier 344 | if format[:1] == '%': 345 | # format string like specifier 346 | return format % data 347 | 348 | # asciiz specifier 349 | if format == 'z': 350 | if data[-1:] != b'\x00': 351 | raise Exception("%s 'z' field is not NUL terminated: %r" % (field, data)) 352 | return data[:-1] # remove trailing NUL 353 | 354 | # unicode specifier 355 | if format == 'u': 356 | if data[-2:] != b'\x00\x00': 357 | raise Exception("%s 'u' field is not NUL-NUL terminated: %r" % (field, data)) 358 | return data[:-2] # remove trailing NUL 359 | 360 | # DCE-RPC/NDR string specifier 361 | if format == 'w': 362 | l = unpack('= 2: 396 | return self.calcPackSize(two[0], data) 397 | 398 | # length specifier 399 | two = format.split('-') 400 | if len(two) == 2: 401 | return self.calcPackSize(two[0], data) 402 | 403 | # array specifier 404 | two = format.split('*') 405 | if len(two) == 2: 406 | answer = 0 407 | if two[0].isdigit(): 408 | if int(two[0]) != len(data): 409 | raise Exception("Array field has a constant size, and it doesn't match the actual value") 410 | elif two[0]: 411 | answer += self.calcPackSize(two[0], len(data)) 412 | 413 | for each in data: 414 | answer += self.calcPackSize(two[1], each) 415 | return answer 416 | 417 | # "printf" string specifier 418 | if format[:1] == '%': 419 | # format string like specifier 420 | return len(format % data) 421 | 422 | # asciiz specifier 423 | if format[:1] == 'z': 424 | return len(data)+1 425 | 426 | # asciiz specifier 427 | if format[:1] == 'u': 428 | l = len(data) 429 | return l + (l & 1 and 3 or 2) 430 | 431 | # DCE-RPC/NDR string specifier 432 | if format[:1] == 'w': 433 | l = len(data) 434 | return 12+l+l % 2 435 | 436 | # literal specifier 437 | if format[:1] == ':': 438 | return len(data) 439 | 440 | # struct like specifier 441 | return calcsize(format) 442 | 443 | def calcUnpackSize(self, format, data, field = None): 444 | if debug: 445 | print(" calcUnpackSize( %s | %s | %r)" % (field, format, data)) 446 | 447 | # void specifier 448 | if format[:1] == '_': 449 | return 0 450 | 451 | addressField = self.findAddressFieldFor(field) 452 | if addressField is not None: 453 | if not self[addressField]: 454 | return 0 455 | 456 | try: 457 | lengthField = self.findLengthFieldFor(field) 458 | return self[lengthField] 459 | except: 460 | pass 461 | 462 | # XXX: Try to match to actual values, raise if no match 463 | 464 | # quote specifier 465 | if format[:1] == "'" or format[:1] == '"': 466 | return len(format)-1 467 | 468 | # address specifier 469 | two = format.split('&') 470 | if len(two) == 2: 471 | return self.calcUnpackSize(two[0], data) 472 | 
473 | # code specifier 474 | two = format.split('=') 475 | if len(two) >= 2: 476 | return self.calcUnpackSize(two[0], data) 477 | 478 | # length specifier 479 | two = format.split('-') 480 | if len(two) == 2: 481 | return self.calcUnpackSize(two[0], data) 482 | 483 | # array specifier 484 | two = format.split('*') 485 | if len(two) == 2: 486 | answer = 0 487 | if two[0]: 488 | if two[0].isdigit(): 489 | number = int(two[0]) 490 | else: 491 | answer += self.calcUnpackSize(two[0], data) 492 | number = self.unpack(two[0], data[:answer]) 493 | 494 | while number: 495 | number -= 1 496 | answer += self.calcUnpackSize(two[1], data[answer:]) 497 | else: 498 | while answer < len(data): 499 | answer += self.calcUnpackSize(two[1], data[answer:]) 500 | return answer 501 | 502 | # "printf" string specifier 503 | if format[:1] == '%': 504 | raise Exception("Can't guess the size of a printf like specifier for unpacking") 505 | 506 | # asciiz specifier 507 | if format[:1] == 'z': 508 | return data.index(b'\x00')+1 509 | 510 | # asciiz specifier 511 | if format[:1] == 'u': 512 | l = data.index(b'\x00\x00') 513 | return l + (l & 1 and 3 or 2) 514 | 515 | # DCE-RPC/NDR string specifier 516 | if format[:1] == 'w': 517 | l = unpack('L'), 621 | ('code1','>L=len(arr1)*2+0x1000'), 622 | ) 623 | 624 | def populate(self, a): 625 | a['default'] = 'hola' 626 | a['int1'] = 0x3131 627 | a['int3'] = 0x45444342 628 | a['z1'] = b'hola' 629 | a['u1'] = 'hola'.encode('utf_16_le') 630 | a[':1'] = b':1234:' 631 | a['arr1'] = (0x12341234,0x88990077,0x41414141) 632 | # a['len1'] = 0x42424242 633 | 634 | class _Test_fixedLength(_Test_simple): 635 | def populate(self, a): 636 | _Test_simple.populate(self, a) 637 | a['len1'] = 0x42424242 638 | 639 | class _Test_simple_aligned4(_Test_simple): 640 | alignment = 4 641 | 642 | class _Test_nested(_StructureTest): 643 | class theClass(Structure): 644 | class _Inner(Structure): 645 | structure = (('data', 'z'),) 646 | 647 | structure = ( 648 | ('nest1', ':', _Inner), 649 | ('nest2', ':', _Inner), 650 | ('int', '> 8)'), 710 | ('pad', '_','((iv >>2) & 0x3F)'), 711 | ('keyid', '_','( iv & 0x03 )'), 712 | ('dataLen', '_-data', 'len(inputDataLeft)-4'), 713 | ('data',':'), 714 | ('icv','>L'), 715 | ) 716 | 717 | def populate(self, a): 718 | a['init_vector']=0x01020304 719 | #a['pad']=int('01010101',2) 720 | a['pad']=int('010101',2) 721 | a['keyid']=0x07 722 | a['data']=b"\xA0\xA1\xA2\xA3\xA4\xA5\xA6\xA7\xA8\xA9" 723 | a['icv'] = 0x05060708 724 | #a['iv'] = 0x01020304 725 | 726 | if __name__ == '__main__': 727 | _Test_simple().run() 728 | 729 | try: 730 | _Test_fixedLength().run() 731 | except: 732 | print("cannot repack because length is bogus") 733 | 734 | _Test_simple_aligned4().run() 735 | _Test_nested().run() 736 | _Test_Optional().run() 737 | _Test_Optional_sparse().run() 738 | _Test_AsciiZArray().run() 739 | _Test_UnpackCode().run() 740 | _Test_AAA().run() 741 | -------------------------------------------------------------------------------- /systemd-unit-file: -------------------------------------------------------------------------------- 1 | # systemd unit file, copy to /etc/systemd/system/pykms.service 2 | # adjust WorkingDirectory 3 | # "systemctl start pykms" to start 4 | # "systemctl enable pykms" to auto-start 5 | 6 | [Unit] 7 | Description=py-kms Key Management Server for Microsoft products 8 | After=network.target 9 | 10 | [Service] 11 | WorkingDirectory=/path/to/py-kms/ 12 | ExecStart=/usr/bin/python server.py 13 | User=nobody 14 | Group=nogroup 15 | 16 | [Install] 17 | 
WantedBy=multi-user.target 18 | -------------------------------------------------------------------------------- /upy/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ThunderEX/py-kms/628242373cea4e10cb2b0c625cc2b41cc9312ced/upy/__init__.py -------------------------------------------------------------------------------- /upy/codecs.py: -------------------------------------------------------------------------------- 1 | __all__ = ['encode'] 2 | # from source of pypy: 3 | # pypy / pypy / module / _codecs / interp_codecs.py 4 | # pypy / rpython / rlib / runicode.py 5 | import sys 6 | 7 | BYTEORDER = sys.byteorder 8 | BYTEORDER2 = BYTEORDER[0] + 'e' # either "le" or "be" 9 | assert BYTEORDER2 in ('le', 'be') 10 | 11 | 12 | def _storechar(result, CH, byteorder): 13 | hi = ((CH) >> 8) & 0xff 14 | lo = (CH) & 0xff 15 | if byteorder == 'little': 16 | result.append(lo) 17 | result.append(hi) 18 | else: 19 | result.append(hi) 20 | result.append(lo) 21 | 22 | def encode_utf_16(s, errors, 23 | errorhandler=None, 24 | allow_surrogates=True, 25 | byteorder='little', 26 | public_encoding_name='utf16'): 27 | if errorhandler is None: 28 | pass 29 | # errorhandler = default_unicode_error_encode 30 | 31 | result = bytearray() 32 | if byteorder == 'native': 33 | _storechar(result, 0xFEFF, BYTEORDER) 34 | byteorder = BYTEORDER 35 | 36 | for pos, c in enumerate(s): 37 | ch = ord(c) 38 | 39 | if ch < 0xD800: 40 | _storechar(result, ch, byteorder) 41 | elif ch >= 0x10000: 42 | _storechar(result, 0xD800 | ((ch-0x10000) >> 10), byteorder) 43 | _storechar(result, 0xDC00 | ((ch-0x10000) & 0x3FF), byteorder) 44 | elif ch >= 0xE000 or allow_surrogates: 45 | _storechar(result, ch, byteorder) 46 | else: 47 | ru, rs, pos = errorhandler(errors, public_encoding_name, 48 | 'surrogates not allowed', 49 | s, pos-1, pos) 50 | if rs is not None: 51 | # py3k only 52 | if len(rs) % 2 != 0: 53 | errorhandler('strict', public_encoding_name, 54 | 'surrogates not allowed', 55 | s, pos-1, pos) 56 | result.append(rs) 57 | continue 58 | for ch in ru: 59 | if ord(ch) < 0xD800: 60 | _storechar(result, ord(ch), byteorder) 61 | else: 62 | errorhandler('strict', public_encoding_name, 63 | 'surrogates not allowed', 64 | s, pos-1, pos) 65 | continue 66 | 67 | return bytes(result) 68 | 69 | def encode(obj, encoding='utf_8', errors='strict'): 70 | if encoding == 'utf_8': 71 | return obj.encode(encoding, errors) 72 | elif encoding == 'utf_16': 73 | return encode_utf_16(obj, [], None, 74 | True, 'native', 75 | 'utf-16-' + BYTEORDER2) 76 | elif encoding == 'utf_16_be': 77 | return encode_utf_16(obj, [], None, 78 | True, 'big', 79 | 'utf-16-be') 80 | elif encoding == 'utf_16_le': 81 | return encode_utf_16(obj, [], None, 82 | True, 'little', 83 | 'utf-16-le') 84 | else: 85 | raise NotImplementedError('Encoding of {} not implemented'.format(encoding)) 86 | -------------------------------------------------------------------------------- /upy/fakerandom.py: -------------------------------------------------------------------------------- 1 | from urandom import * 2 | import math 3 | 4 | def randint(a, b): 5 | while True: 6 | r = getrandbits(math.ceil(math.log2(b - a))) 7 | if r < b - a: 8 | break 9 | return a + r 10 | 11 | def choice(seq): 12 | """Choose a random element from a non-empty sequence.""" 13 | try: 14 | i = randint(0, len(seq)) 15 | except ValueError: 16 | raise IndexError('Cannot choose from an empty sequence') 17 | return seq[i] 18 | 
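# Note on the helpers above: unlike CPython's random.randint(a, b), this
# fallback's randint() rejects any r >= b - a and returns a + r, so its result
# lies in the half-open range [a, b) -- randrange-like semantics. choice()
# relies on that convention by indexing seq[randint(0, len(seq))].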
-------------------------------------------------------------------------------- /upy/socketserver.py: -------------------------------------------------------------------------------- 1 | """Generic socket server classes. 2 | 3 | This module tries to capture the various aspects of defining a server: 4 | 5 | For socket-based servers: 6 | 7 | - address family: 8 | - AF_INET{,6}: IP (Internet Protocol) sockets (default) 9 | - AF_UNIX: Unix domain sockets 10 | - others, e.g. AF_DECNET are conceivable (see 11 | - socket type: 12 | - SOCK_STREAM (reliable stream, e.g. TCP) 13 | - SOCK_DGRAM (datagrams, e.g. UDP) 14 | 15 | For request-based servers (including socket-based): 16 | 17 | - client address verification before further looking at the request 18 | (This is actually a hook for any processing that needs to look 19 | at the request before anything else, e.g. logging) 20 | - how to handle multiple requests: 21 | - synchronous (one request is handled at a time) 22 | - forking (each request is handled by a new process) 23 | - threading (each request is handled by a new thread) 24 | 25 | The classes in this module favor the server type that is simplest to 26 | write: a synchronous TCP/IP server. This is bad class design, but 27 | save some typing. (There's also the issue that a deep class hierarchy 28 | slows down method lookups.) 29 | 30 | There are five classes in an inheritance diagram, four of which represent 31 | synchronous servers of four types: 32 | 33 | +------------+ 34 | | BaseServer | 35 | +------------+ 36 | | 37 | v 38 | +-----------+ +------------------+ 39 | | TCPServer |------->| UnixStreamServer | 40 | +-----------+ +------------------+ 41 | | 42 | v 43 | +-----------+ +--------------------+ 44 | | UDPServer |------->| UnixDatagramServer | 45 | +-----------+ +--------------------+ 46 | 47 | Note that UnixDatagramServer derives from UDPServer, not from 48 | UnixStreamServer -- the only difference between an IP and a Unix 49 | stream server is the address family, which is simply repeated in both 50 | unix server classes. 51 | 52 | Forking and threading versions of each type of server can be created 53 | using the ForkingMixIn and ThreadingMixIn mix-in classes. For 54 | instance, a threading UDP server class is created as follows: 55 | 56 | class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass 57 | 58 | The Mix-in class must come first, since it overrides a method defined 59 | in UDPServer! Setting the various member variables also changes 60 | the behavior of the underlying server mechanism. 61 | 62 | To implement a service, you must derive a class from 63 | BaseRequestHandler and redefine its handle() method. You can then run 64 | various versions of the service by combining one of the server classes 65 | with your request handler class. 66 | 67 | The request handler class must be different for datagram or stream 68 | services. This can be hidden by using the request handler 69 | subclasses StreamRequestHandler or DatagramRequestHandler. 70 | 71 | Of course, you still have to use your head! 72 | 73 | For instance, it makes no sense to use a forking server if the service 74 | contains state in memory that can be modified by requests (since the 75 | modifications in the child process would never reach the initial state 76 | kept in the parent process and passed to each child). 
In this case, 77 | you can use a threading server, but you will probably have to use 78 | locks to avoid two requests that come in nearly simultaneous to apply 79 | conflicting changes to the server state. 80 | 81 | On the other hand, if you are building e.g. an HTTP server, where all 82 | data is stored externally (e.g. in the file system), a synchronous 83 | class will essentially render the service "deaf" while one request is 84 | being handled -- which may be for a very long time if a client is slow 85 | to read all the data it has requested. Here a threading or forking 86 | server is appropriate. 87 | 88 | In some cases, it may be appropriate to process part of a request 89 | synchronously, but to finish processing in a forked child depending on 90 | the request data. This can be implemented by using a synchronous 91 | server and doing an explicit fork in the request handler class 92 | handle() method. 93 | 94 | Another approach to handling multiple simultaneous requests in an 95 | environment that supports neither threads nor fork (or where these are 96 | too expensive or inappropriate for the service) is to maintain an 97 | explicit table of partially finished requests and to use select() to 98 | decide which request to work on next (or whether to handle a new 99 | incoming request). This is particularly important for stream services 100 | where each client can potentially be connected for a long time (if 101 | threads or subprocesses cannot be used). 102 | 103 | Future work: 104 | - Standard classes for Sun RPC (which uses either UDP or TCP) 105 | - Standard mix-in classes to implement various authentication 106 | and encryption schemes 107 | - Standard framework for select-based multiplexing 108 | 109 | XXX Open problems: 110 | - What to do with out-of-band data? 111 | 112 | BaseServer: 113 | - split generic "request" functionality out into BaseServer class. 114 | Copyright (C) 2000 Luke Kenneth Casson Leighton 115 | 116 | example: read entries from a SQL database (requires overriding 117 | get_request() to return a table entry from the database). 118 | entry is processed by a RequestHandlerClass. 119 | 120 | """ 121 | 122 | # Author of the BaseServer patch: Luke Kenneth Casson Leighton 123 | 124 | # XXX Warning! 125 | # There is a test suite for this module, but it cannot be run by the 126 | # standard regression test. 127 | # To run it manually, run Lib/test/test_socketserver.py. 
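# A minimal usage sketch for this module (illustrative only -- the handler
# name, address, and port below are made up; compare server.py in this repo,
# which follows the same pattern with its kmsServer handler):
#
#   class EchoHandler(BaseRequestHandler):
#       def handle(self):
#           data = self.request.recv(1024)   # self.request is the client socket
#           if data:
#               self.request.send(data)      # echo the bytes back
#
#   srv = TCPServer(("0.0.0.0", 9999), EchoHandler)
#   srv.serve_forever()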
128 | 129 | __version__ = "0.4" 130 | 131 | try: 132 | import usocket as socket 133 | except ImportError: 134 | import socket 135 | else: 136 | import socket as micropython_socket 137 | try: 138 | import uselect as select 139 | except ImportError: 140 | import select 141 | import os 142 | import errno 143 | try: 144 | import _thread 145 | except ImportError: 146 | import _dummy_thread as _thread 147 | 148 | __all__ = ["BaseServer", "TCPServer", "UDPServer", "ForkingUDPServer", 149 | "ForkingTCPServer", "ThreadingUDPServer", "ThreadingTCPServer", 150 | "BaseRequestHandler", "StreamRequestHandler", 151 | "DatagramRequestHandler", "ThreadingMixIn", "ForkingMixIn"] 152 | if hasattr(socket, "AF_UNIX"): 153 | __all__.extend(["UnixStreamServer","UnixDatagramServer", 154 | "ThreadingUnixStreamServer", 155 | "ThreadingUnixDatagramServer"]) 156 | 157 | def _eintr_retry(func, *args): 158 | """restart a system call interrupted by EINTR""" 159 | while True: 160 | try: 161 | return func(*args) 162 | except OSError as e: 163 | if e.errno != errno.EINTR: 164 | raise 165 | 166 | class BaseServer: 167 | 168 | """Base class for server classes. 169 | 170 | Methods for the caller: 171 | 172 | - __init__(server_address, RequestHandlerClass) 173 | - serve_forever(poll_interval=0.5) 174 | - shutdown() 175 | - handle_request() # if you do not use serve_forever() 176 | - fileno() -> int # for select() 177 | 178 | Methods that may be overridden: 179 | 180 | - server_bind() 181 | - server_activate() 182 | - get_request() -> request, client_address 183 | - handle_timeout() 184 | - verify_request(request, client_address) 185 | - server_close() 186 | - process_request(request, client_address) 187 | - shutdown_request(request) 188 | - close_request(request) 189 | - service_actions() 190 | - handle_error() 191 | 192 | Methods for derived classes: 193 | 194 | - finish_request(request, client_address) 195 | 196 | Class variables that may be overridden by derived classes or 197 | instances: 198 | 199 | - timeout 200 | - address_family 201 | - socket_type 202 | - allow_reuse_address 203 | 204 | Instance variables: 205 | 206 | - RequestHandlerClass 207 | - socket 208 | 209 | """ 210 | 211 | timeout = None 212 | 213 | def __init__(self, server_address, RequestHandlerClass): 214 | """Constructor. May be extended, do not override.""" 215 | self.server_address = server_address 216 | self.RequestHandlerClass = RequestHandlerClass 217 | self.__is_shut_down = _thread.allocate_lock() 218 | self.__shutdown_request = False 219 | 220 | def server_activate(self): 221 | """Called by constructor to activate the server. 222 | 223 | May be overridden. 224 | 225 | """ 226 | pass 227 | 228 | def serve_forever(self, poll_interval=0.5): 229 | """Handle one request at a time until shutdown. 230 | 231 | Polls for shutdown every poll_interval seconds. Ignores 232 | self.timeout. If you need to do periodic tasks, do them in 233 | another thread. 
234 | """ 235 | self.__is_shut_down.acquire() 236 | 237 | # Commonly used flag setes 238 | READ_ONLY = select.POLLIN | getattr(select, 'POLLPRI', 0) | select.POLLHUP | select.POLLERR 239 | 240 | # Set up the poller 241 | poller = select.poll() 242 | poller.register(self.socket, READ_ONLY) 243 | 244 | # Map file descriptors to socket objects 245 | fd_to_socket = {self.socket.fileno(): self.socket, 246 | } 247 | 248 | try: 249 | while not self.__shutdown_request: 250 | events = poller.poll(int(poll_interval * 1000)) 251 | if any(self.socket == fd for fd, flag in events): 252 | # if any(self.socket == fd_to_socket[fd] for fd, flag in events): 253 | self._handle_request_noblock() 254 | 255 | self.service_actions() 256 | finally: 257 | self.__shutdown_request = False 258 | self.__is_shut_down.release() 259 | 260 | def shutdown(self): 261 | """Stops the serve_forever loop. 262 | 263 | Blocks until the loop has finished. This must be called while 264 | serve_forever() is running in another thread, or it will 265 | deadlock. 266 | """ 267 | self.__shutdown_request = True 268 | self.__is_shut_down.acquire() 269 | self.__is_shut_down.release() 270 | 271 | def service_actions(self): 272 | """Called by the serve_forever() loop. 273 | 274 | May be overridden by a subclass / Mixin to implement any code that 275 | needs to be run during the loop. 276 | """ 277 | pass 278 | 279 | # The distinction between handling, getting, processing and 280 | # finishing a request is fairly arbitrary. Remember: 281 | # 282 | # - handle_request() is the top-level call. It calls 283 | # select, get_request(), verify_request() and process_request() 284 | # - get_request() is different for stream or datagram sockets 285 | # - process_request() is the place that may fork a new process 286 | # or create a new thread to finish the request 287 | # - finish_request() instantiates the request handler class; 288 | # this constructor will handle the request all by itself 289 | 290 | def handle_request(self): 291 | """Handle one request, possibly blocking. 292 | 293 | Respects self.timeout. 294 | """ 295 | # Support people who used socket.settimeout() to escape 296 | # handle_request before self.timeout was available. 297 | timeout = self.socket.gettimeout() 298 | if timeout is None: 299 | timeout = self.timeout 300 | elif self.timeout is not None: 301 | timeout = min(timeout, self.timeout) 302 | fd_sets = _eintr_retry(select.select, [self], [], [], timeout) 303 | if not fd_sets[0]: 304 | self.handle_timeout() 305 | return 306 | self._handle_request_noblock() 307 | 308 | def _handle_request_noblock(self): 309 | """Handle one request, without blocking. 310 | 311 | I assume that select.select has returned that the socket is 312 | readable before this function was called, so there should be 313 | no risk of blocking in get_request(). 314 | """ 315 | try: 316 | request, client_address = self.get_request() 317 | except OSError: 318 | return 319 | if self.verify_request(request, client_address): 320 | try: 321 | self.process_request(request, client_address) 322 | except: 323 | self.handle_error(request, client_address) 324 | self.shutdown_request(request) 325 | 326 | def handle_timeout(self): 327 | """Called if no new request arrives within self.timeout. 328 | 329 | Overridden by ForkingMixIn. 330 | """ 331 | pass 332 | 333 | def verify_request(self, request, client_address): 334 | """Verify the request. May be overridden. 335 | 336 | Return True if we should proceed with this request. 
337 | 338 | """ 339 | return True 340 | 341 | def process_request(self, request, client_address): 342 | """Call finish_request. 343 | 344 | Overridden by ForkingMixIn and ThreadingMixIn. 345 | 346 | """ 347 | self.finish_request(request, client_address) 348 | self.shutdown_request(request) 349 | 350 | def server_close(self): 351 | """Called to clean-up the server. 352 | 353 | May be overridden. 354 | 355 | """ 356 | pass 357 | 358 | def finish_request(self, request, client_address): 359 | """Finish one request by instantiating RequestHandlerClass.""" 360 | self.RequestHandlerClass(request, client_address, self) 361 | 362 | def shutdown_request(self, request): 363 | """Called to shutdown and close an individual request.""" 364 | self.close_request(request) 365 | 366 | def close_request(self, request): 367 | """Called to clean up an individual request.""" 368 | pass 369 | 370 | def handle_error(self, request, client_address): 371 | """Handle an error gracefully. May be overridden. 372 | 373 | The default is to print a traceback and continue. 374 | 375 | """ 376 | print('-'*40) 377 | print('Exception happened during processing of request from', end=' ') 378 | print(client_address) 379 | import traceback 380 | traceback.print_exc() # XXX But this goes to stderr! 381 | print('-'*40) 382 | 383 | 384 | class TCPServer(BaseServer): 385 | 386 | """Base class for various socket-based server classes. 387 | 388 | Defaults to synchronous IP stream (i.e., TCP). 389 | 390 | Methods for the caller: 391 | 392 | - __init__(server_address, RequestHandlerClass, bind_and_activate=True) 393 | - serve_forever(poll_interval=0.5) 394 | - shutdown() 395 | - handle_request() # if you don't use serve_forever() 396 | - fileno() -> int # for select() 397 | 398 | Methods that may be overridden: 399 | 400 | - server_bind() 401 | - server_activate() 402 | - get_request() -> request, client_address 403 | - handle_timeout() 404 | - verify_request(request, client_address) 405 | - process_request(request, client_address) 406 | - shutdown_request(request) 407 | - close_request(request) 408 | - handle_error() 409 | 410 | Methods for derived classes: 411 | 412 | - finish_request(request, client_address) 413 | 414 | Class variables that may be overridden by derived classes or 415 | instances: 416 | 417 | - timeout 418 | - address_family 419 | - socket_type 420 | - request_queue_size (only for stream sockets) 421 | - allow_reuse_address 422 | 423 | Instance variables: 424 | 425 | - server_address 426 | - RequestHandlerClass 427 | - socket 428 | 429 | """ 430 | 431 | address_family = socket.AF_INET 432 | 433 | socket_type = socket.SOCK_STREAM 434 | 435 | request_queue_size = 5 436 | 437 | allow_reuse_address = False 438 | 439 | def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True): 440 | """Constructor. May be extended, do not override.""" 441 | BaseServer.__init__(self, server_address, RequestHandlerClass) 442 | self.socket = socket.socket(self.address_family, 443 | self.socket_type) 444 | if bind_and_activate: 445 | try: 446 | self.server_bind() 447 | self.server_activate() 448 | except: 449 | self.server_close() 450 | raise 451 | 452 | def server_bind(self): 453 | """Called by constructor to bind the socket. 454 | 455 | May be overridden. 
456 | 457 | """ 458 | if self.allow_reuse_address: 459 | self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 460 | try: 461 | micropython_socket 462 | except NameError: 463 | self.socket.bind(self.server_address) 464 | else: 465 | self.socket.bind(micropython_socket._resolve_addr(self.server_address)) 466 | # self.server_address = self.socket.getsockname() 467 | 468 | def server_activate(self): 469 | """Called by constructor to activate the server. 470 | 471 | May be overridden. 472 | 473 | """ 474 | self.socket.listen(self.request_queue_size) 475 | 476 | def server_close(self): 477 | """Called to clean-up the server. 478 | 479 | May be overridden. 480 | 481 | """ 482 | self.socket.close() 483 | 484 | def fileno(self): 485 | """Return socket file number. 486 | 487 | Interface required by select(). 488 | 489 | """ 490 | return self.socket.fileno() 491 | 492 | def get_request(self): 493 | """Get the request and client address from the socket. 494 | 495 | May be overridden. 496 | 497 | """ 498 | try: 499 | micropython_socket 500 | except NameError: 501 | return self.socket.accept() 502 | else: 503 | s, addr = self.socket.accept() 504 | addr = socket.sockaddr(addr) 505 | return (s, (socket.inet_ntop(addr[0], addr[1]), addr[2])) 506 | 507 | def shutdown_request(self, request): 508 | """Called to shutdown and close an individual request.""" 509 | try: 510 | #explicitly shutdown. socket.close() merely releases 511 | #the socket and waits for GC to perform the actual close. 512 | request.shutdown(socket.SHUT_WR) 513 | except (OSError, AttributeError): 514 | pass #some platforms may raise ENOTCONN here 515 | self.close_request(request) 516 | 517 | def close_request(self, request): 518 | """Called to clean up an individual request.""" 519 | request.close() 520 | 521 | 522 | class UDPServer(TCPServer): 523 | 524 | """UDP server class.""" 525 | 526 | allow_reuse_address = False 527 | 528 | socket_type = socket.SOCK_DGRAM 529 | 530 | max_packet_size = 8192 531 | 532 | def get_request(self): 533 | data, client_addr = self.socket.recvfrom(self.max_packet_size) 534 | return (data, self.socket), client_addr 535 | 536 | def server_activate(self): 537 | # No need to call listen() for UDP. 538 | pass 539 | 540 | def shutdown_request(self, request): 541 | # No need to shutdown anything. 542 | self.close_request(request) 543 | 544 | def close_request(self, request): 545 | # No need to close anything. 546 | pass 547 | 548 | class ForkingMixIn: 549 | 550 | """Mix-in class to handle each request in a new process.""" 551 | 552 | timeout = 300 553 | active_children = None 554 | max_children = 40 555 | 556 | def collect_children(self): 557 | """Internal routine to wait for children that have exited.""" 558 | if self.active_children is None: 559 | return 560 | 561 | # If we're above the max number of children, wait and reap them until 562 | # we go back below threshold. Note that we use waitpid(-1) below to be 563 | # able to collect children in size() syscalls instead 564 | # of size(): the downside is that this might reap children 565 | # which we didn't spawn, which is why we only resort to this when we're 566 | # above max_children. 
567 | while len(self.active_children) >= self.max_children: 568 | try: 569 | pid, _ = os.waitpid(-1, 0) 570 | self.active_children.discard(pid) 571 | except InterruptedError: 572 | pass 573 | except ChildProcessError: 574 | # we don't have any children, we're done 575 | self.active_children.clear() 576 | except OSError: 577 | break 578 | 579 | # Now reap all defunct children. 580 | for pid in self.active_children.copy(): 581 | try: 582 | pid, _ = os.waitpid(pid, os.WNOHANG) 583 | # if the child hasn't exited yet, pid will be 0 and ignored by 584 | # discard() below 585 | self.active_children.discard(pid) 586 | except ChildProcessError: 587 | # someone else reaped it 588 | self.active_children.discard(pid) 589 | except OSError: 590 | pass 591 | 592 | def handle_timeout(self): 593 | """Wait for zombies after self.timeout seconds of inactivity. 594 | 595 | May be extended, do not override. 596 | """ 597 | self.collect_children() 598 | 599 | def service_actions(self): 600 | """Collect the zombie child processes regularly in the ForkingMixIn. 601 | 602 | service_actions is called in the BaseServer's serve_forver loop. 603 | """ 604 | self.collect_children() 605 | 606 | def process_request(self, request, client_address): 607 | """Fork a new subprocess to process the request.""" 608 | pid = os.fork() 609 | if pid: 610 | # Parent process 611 | if self.active_children is None: 612 | self.active_children = set() 613 | self.active_children.add(pid) 614 | self.close_request(request) 615 | return 616 | else: 617 | # Child process. 618 | # This must never return, hence os._exit()! 619 | try: 620 | self.finish_request(request, client_address) 621 | self.shutdown_request(request) 622 | os._exit(0) 623 | except: 624 | try: 625 | self.handle_error(request, client_address) 626 | self.shutdown_request(request) 627 | finally: 628 | os._exit(1) 629 | 630 | 631 | class ThreadingMixIn: 632 | """Mix-in class to handle each request in a new thread.""" 633 | 634 | # Decides how threads will act upon termination of the 635 | # main process 636 | daemon_threads = False 637 | 638 | def process_request_thread(self, request, client_address): 639 | """Same as in BaseServer but as a thread. 640 | 641 | In addition, exception handling is done here. 
642 | 643 | """ 644 | try: 645 | self.finish_request(request, client_address) 646 | self.shutdown_request(request) 647 | except: 648 | self.handle_error(request, client_address) 649 | self.shutdown_request(request) 650 | 651 | def process_request(self, request, client_address): 652 | """Start a new thread to process the request.""" 653 | try: 654 | import threading 655 | except ImportError: 656 | import dummy_threading as threading 657 | 658 | t = threading.Thread(target = self.process_request_thread, 659 | args = (request, client_address)) 660 | t.daemon = self.daemon_threads 661 | t.start() 662 | 663 | 664 | class ForkingUDPServer(ForkingMixIn, UDPServer): pass 665 | class ForkingTCPServer(ForkingMixIn, TCPServer): pass 666 | 667 | class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass 668 | class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass 669 | 670 | if hasattr(socket, 'AF_UNIX'): 671 | 672 | class UnixStreamServer(TCPServer): 673 | address_family = socket.AF_UNIX 674 | 675 | class UnixDatagramServer(UDPServer): 676 | address_family = socket.AF_UNIX 677 | 678 | class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass 679 | 680 | class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass 681 | 682 | class BaseRequestHandler: 683 | 684 | """Base class for request handler classes. 685 | 686 | This class is instantiated for each request to be handled. The 687 | constructor sets the instance variables request, client_address 688 | and server, and then calls the handle() method. To implement a 689 | specific service, all you need to do is to derive a class which 690 | defines a handle() method. 691 | 692 | The handle() method can find the request as self.request, the 693 | client address as self.client_address, and the server (in case it 694 | needs access to per-server information) as self.server. Since a 695 | separate instance is created for each request, the handle() method 696 | can define arbitrary other instance variariables. 697 | 698 | """ 699 | 700 | def __init__(self, request, client_address, server): 701 | self.request = request 702 | self.client_address = client_address 703 | self.server = server 704 | self.setup() 705 | try: 706 | self.handle() 707 | finally: 708 | self.finish() 709 | 710 | def setup(self): 711 | pass 712 | 713 | def handle(self): 714 | pass 715 | 716 | def finish(self): 717 | pass 718 | 719 | 720 | # The following two classes make it possible to use the same service 721 | # class for stream or datagram servers. 722 | # Each class sets up these instance variables: 723 | # - rfile: a file object from which receives the request is read 724 | # - wfile: a file object to which the reply is written 725 | # When the handle() method returns, wfile is flushed properly 726 | 727 | 728 | class StreamRequestHandler(BaseRequestHandler): 729 | 730 | """Define self.rfile and self.wfile for stream sockets.""" 731 | 732 | # Default buffer sizes for rfile, wfile. 733 | # We default rfile to buffered because otherwise it could be 734 | # really slow for large data (a getc() call per byte); we make 735 | # wfile unbuffered because (a) often after a write() we want to 736 | # read and we need to flush the line; (b) big writes to unbuffered 737 | # files are typically optimized by stdio even when big reads 738 | # aren't. 739 | rbufsize = -1 740 | wbufsize = 0 741 | 742 | # A timeout to apply to the request socket, if not None. 743 | timeout = None 744 | 745 | # Disable nagle algorithm for this socket, if True. 
746 | # Use only when wbufsize != 0, to avoid small packets. 747 | disable_nagle_algorithm = False 748 | 749 | def setup(self): 750 | self.connection = self.request 751 | if self.timeout is not None: 752 | self.connection.settimeout(self.timeout) 753 | if self.disable_nagle_algorithm: 754 | self.connection.setsockopt(socket.IPPROTO_TCP, 755 | socket.TCP_NODELAY, True) 756 | self.rfile = self.connection.makefile('rb', self.rbufsize) 757 | self.wfile = self.connection.makefile('wb', self.wbufsize) 758 | 759 | def finish(self): 760 | if not self.wfile.closed: 761 | try: 762 | self.wfile.flush() 763 | except socket.error: 764 | # An final socket error may have occurred here, such as 765 | # the local error ECONNABORTED. 766 | pass 767 | self.wfile.close() 768 | self.rfile.close() 769 | 770 | 771 | class DatagramRequestHandler(BaseRequestHandler): 772 | 773 | # XXX Regrettably, I cannot get this working on Linux; 774 | # s.recvfrom() doesn't return a meaningful client address. 775 | 776 | """Define self.rfile and self.wfile for datagram sockets.""" 777 | 778 | def setup(self): 779 | from io import BytesIO 780 | self.packet, self.socket = self.request 781 | self.rfile = BytesIO(self.packet) 782 | self.wfile = BytesIO() 783 | 784 | def finish(self): 785 | try: 786 | micropython_socket 787 | except NameError: 788 | self.socket.sendto(self.wfile.getvalue(), self.client_address) 789 | else: 790 | self.socket.sendto(self.wfile.getvalue(), micropython_socket._resolve_addr(self.client_address)) 791 | -------------------------------------------------------------------------------- /upy/test_socketserver.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test suite for socketserver. 3 | """ 4 | 5 | import contextlib 6 | import os 7 | import select 8 | import signal 9 | import socket 10 | import select 11 | import errno 12 | import tempfile 13 | import unittest 14 | import socketserver 15 | 16 | import test.support 17 | from test.support import reap_children, reap_threads, verbose 18 | try: 19 | import threading 20 | except ImportError: 21 | threading = None 22 | 23 | test.support.requires("network") 24 | 25 | TEST_STR = b"hello world\n" 26 | HOST = test.support.HOST 27 | 28 | HAVE_UNIX_SOCKETS = hasattr(socket, "AF_UNIX") 29 | requires_unix_sockets = unittest.skipUnless(HAVE_UNIX_SOCKETS, 30 | 'requires Unix sockets') 31 | HAVE_FORKING = hasattr(os, "fork") 32 | requires_forking = unittest.skipUnless(HAVE_FORKING, 'requires forking') 33 | 34 | def signal_alarm(n): 35 | """Call signal.alarm when it exists (i.e. not on Windows).""" 36 | if hasattr(signal, 'alarm'): 37 | signal.alarm(n) 38 | 39 | # Remember real select() to avoid interferences with mocking 40 | _real_select = select.select 41 | 42 | def receive(sock, n, timeout=20): 43 | r, w, x = _real_select([sock], [], [], timeout) 44 | if sock in r: 45 | return sock.recv(n) 46 | else: 47 | raise RuntimeError("timed out on %r" % (sock,)) 48 | 49 | if HAVE_UNIX_SOCKETS: 50 | class ForkingUnixStreamServer(socketserver.ForkingMixIn, 51 | socketserver.UnixStreamServer): 52 | pass 53 | 54 | class ForkingUnixDatagramServer(socketserver.ForkingMixIn, 55 | socketserver.UnixDatagramServer): 56 | pass 57 | 58 | 59 | @contextlib.contextmanager 60 | def simple_subprocess(testcase): 61 | pid = os.fork() 62 | if pid == 0: 63 | # Don't raise an exception; it would be caught by the test harness. 
64 | os._exit(72) 65 | yield None 66 | pid2, status = os.waitpid(pid, 0) 67 | testcase.assertEqual(pid2, pid) 68 | testcase.assertEqual(72 << 8, status) 69 | 70 | 71 | @unittest.skipUnless(threading, 'Threading required for this test.') 72 | class SocketServerTest(unittest.TestCase): 73 | """Test all socket servers.""" 74 | 75 | def setUp(self): 76 | signal_alarm(60) # Kill deadlocks after 60 seconds. 77 | self.port_seed = 0 78 | self.test_files = [] 79 | 80 | def tearDown(self): 81 | signal_alarm(0) # Didn't deadlock. 82 | reap_children() 83 | 84 | for fn in self.test_files: 85 | try: 86 | os.remove(fn) 87 | except OSError: 88 | pass 89 | self.test_files[:] = [] 90 | 91 | def pickaddr(self, proto): 92 | if proto == socket.AF_INET: 93 | return (HOST, 0) 94 | else: 95 | # XXX: We need a way to tell AF_UNIX to pick its own name 96 | # like AF_INET provides port==0. 97 | dir = None 98 | fn = tempfile.mktemp(prefix='unix_socket.', dir=dir) 99 | self.test_files.append(fn) 100 | return fn 101 | 102 | def make_server(self, addr, svrcls, hdlrbase): 103 | class MyServer(svrcls): 104 | def handle_error(self, request, client_address): 105 | self.close_request(request) 106 | self.server_close() 107 | raise 108 | 109 | class MyHandler(hdlrbase): 110 | def handle(self): 111 | line = self.rfile.readline() 112 | self.wfile.write(line) 113 | 114 | if verbose: print("creating server") 115 | server = MyServer(addr, MyHandler) 116 | self.assertEqual(server.server_address, server.socket.getsockname()) 117 | return server 118 | 119 | @reap_threads 120 | def run_server(self, svrcls, hdlrbase, testfunc): 121 | server = self.make_server(self.pickaddr(svrcls.address_family), 122 | svrcls, hdlrbase) 123 | # We had the OS pick a port, so pull the real address out of 124 | # the server. 125 | addr = server.server_address 126 | if verbose: 127 | print("ADDR =", addr) 128 | print("CLASS =", svrcls) 129 | 130 | t = threading.Thread( 131 | name='%s serving' % svrcls, 132 | target=server.serve_forever, 133 | # Short poll interval to make the test finish quickly. 134 | # Time between requests is short enough that we won't wake 135 | # up spuriously too many times. 136 | kwargs={'poll_interval':0.01}) 137 | t.daemon = True # In case this function raises. 
138 | t.start() 139 | if verbose: print("server running") 140 | for i in range(3): 141 | if verbose: print("test client", i) 142 | testfunc(svrcls.address_family, addr) 143 | if verbose: print("waiting for server") 144 | server.shutdown() 145 | t.join() 146 | server.server_close() 147 | self.assertEqual(-1, server.socket.fileno()) 148 | if verbose: print("done") 149 | 150 | def stream_examine(self, proto, addr): 151 | s = socket.socket(proto, socket.SOCK_STREAM) 152 | s.connect(addr) 153 | s.sendall(TEST_STR) 154 | buf = data = receive(s, 100) 155 | while data and b'\n' not in buf: 156 | data = receive(s, 100) 157 | buf += data 158 | self.assertEqual(buf, TEST_STR) 159 | s.close() 160 | 161 | def dgram_examine(self, proto, addr): 162 | s = socket.socket(proto, socket.SOCK_DGRAM) 163 | s.sendto(TEST_STR, addr) 164 | buf = data = receive(s, 100) 165 | while data and b'\n' not in buf: 166 | data = receive(s, 100) 167 | buf += data 168 | self.assertEqual(buf, TEST_STR) 169 | s.close() 170 | 171 | def test_TCPServer(self): 172 | self.run_server(socketserver.TCPServer, 173 | socketserver.StreamRequestHandler, 174 | self.stream_examine) 175 | 176 | def test_ThreadingTCPServer(self): 177 | self.run_server(socketserver.ThreadingTCPServer, 178 | socketserver.StreamRequestHandler, 179 | self.stream_examine) 180 | 181 | @requires_forking 182 | def test_ForkingTCPServer(self): 183 | with simple_subprocess(self): 184 | self.run_server(socketserver.ForkingTCPServer, 185 | socketserver.StreamRequestHandler, 186 | self.stream_examine) 187 | 188 | @requires_unix_sockets 189 | def test_UnixStreamServer(self): 190 | self.run_server(socketserver.UnixStreamServer, 191 | socketserver.StreamRequestHandler, 192 | self.stream_examine) 193 | 194 | @requires_unix_sockets 195 | def test_ThreadingUnixStreamServer(self): 196 | self.run_server(socketserver.ThreadingUnixStreamServer, 197 | socketserver.StreamRequestHandler, 198 | self.stream_examine) 199 | 200 | @requires_unix_sockets 201 | @requires_forking 202 | def test_ForkingUnixStreamServer(self): 203 | with simple_subprocess(self): 204 | self.run_server(ForkingUnixStreamServer, 205 | socketserver.StreamRequestHandler, 206 | self.stream_examine) 207 | 208 | def test_UDPServer(self): 209 | self.run_server(socketserver.UDPServer, 210 | socketserver.DatagramRequestHandler, 211 | self.dgram_examine) 212 | 213 | def test_ThreadingUDPServer(self): 214 | self.run_server(socketserver.ThreadingUDPServer, 215 | socketserver.DatagramRequestHandler, 216 | self.dgram_examine) 217 | 218 | @requires_forking 219 | def test_ForkingUDPServer(self): 220 | with simple_subprocess(self): 221 | self.run_server(socketserver.ForkingUDPServer, 222 | socketserver.DatagramRequestHandler, 223 | self.dgram_examine) 224 | 225 | @contextlib.contextmanager 226 | def mocked_select_module(self): 227 | """Mocks the select.select() call to raise EINTR for first call""" 228 | old_select = select.select 229 | 230 | class MockSelect: 231 | def __init__(self): 232 | self.called = 0 233 | 234 | def __call__(self, *args): 235 | self.called += 1 236 | if self.called == 1: 237 | # raise the exception on first call 238 | raise OSError(errno.EINTR, os.strerror(errno.EINTR)) 239 | else: 240 | # Return real select value for consecutive calls 241 | return old_select(*args) 242 | 243 | select.select = MockSelect() 244 | try: 245 | yield select.select 246 | finally: 247 | select.select = old_select 248 | 249 | def test_InterruptServerSelectCall(self): 250 | with self.mocked_select_module() as mock_select: 251 | pid = 
self.run_server(socketserver.TCPServer, 252 | socketserver.StreamRequestHandler, 253 | self.stream_examine) 254 | # Make sure select was called again: 255 | self.assertGreater(mock_select.called, 1) 256 | 257 | # Alas, on Linux (at least) recvfrom() doesn't return a meaningful 258 | # client address so this cannot work: 259 | 260 | # @requires_unix_sockets 261 | # def test_UnixDatagramServer(self): 262 | # self.run_server(socketserver.UnixDatagramServer, 263 | # socketserver.DatagramRequestHandler, 264 | # self.dgram_examine) 265 | # 266 | # @requires_unix_sockets 267 | # def test_ThreadingUnixDatagramServer(self): 268 | # self.run_server(socketserver.ThreadingUnixDatagramServer, 269 | # socketserver.DatagramRequestHandler, 270 | # self.dgram_examine) 271 | # 272 | # @requires_unix_sockets 273 | # @requires_forking 274 | # def test_ForkingUnixDatagramServer(self): 275 | # self.run_server(socketserver.ForkingUnixDatagramServer, 276 | # socketserver.DatagramRequestHandler, 277 | # self.dgram_examine) 278 | 279 | @reap_threads 280 | def test_shutdown(self): 281 | # Issue #2302: shutdown() should always succeed in making an 282 | # other thread leave serve_forever(). 283 | class MyServer(socketserver.TCPServer): 284 | pass 285 | 286 | class MyHandler(socketserver.StreamRequestHandler): 287 | pass 288 | 289 | threads = [] 290 | for i in range(20): 291 | s = MyServer((HOST, 0), MyHandler) 292 | t = threading.Thread( 293 | name='MyServer serving', 294 | target=s.serve_forever, 295 | kwargs={'poll_interval':0.01}) 296 | t.daemon = True # In case this function raises. 297 | threads.append((t, s)) 298 | for t, s in threads: 299 | t.start() 300 | s.shutdown() 301 | for t, s in threads: 302 | t.join() 303 | s.server_close() 304 | 305 | def test_tcpserver_bind_leak(self): 306 | # Issue #22435: the server socket wouldn't be closed if bind()/listen() 307 | # failed. 308 | # Create many servers for which bind() will fail, to see if this result 309 | # in FD exhaustion. 
310 | for i in range(1024): 311 | with self.assertRaises(OverflowError): 312 | socketserver.TCPServer((HOST, -1), 313 | socketserver.StreamRequestHandler) 314 | 315 | 316 | class MiscTestCase(unittest.TestCase): 317 | 318 | def test_all(self): 319 | # objects defined in the module should be in __all__ 320 | expected = [] 321 | for name in dir(socketserver): 322 | if not name.startswith('_'): 323 | mod_object = getattr(socketserver, name) 324 | if getattr(mod_object, '__module__', None) == 'socketserver': 325 | expected.append(name) 326 | self.assertCountEqual(socketserver.__all__, expected) 327 | 328 | 329 | if __name__ == "__main__": 330 | unittest.main() 331 | -------------------------------------------------------------------------------- /upy/test_uuid.py: -------------------------------------------------------------------------------- 1 | import unittest.mock 2 | from test import support 3 | import builtins 4 | import io 5 | import os 6 | import shutil 7 | import subprocess 8 | import uuid 9 | 10 | def importable(name): 11 | try: 12 | __import__(name) 13 | return True 14 | except: 15 | return False 16 | 17 | class TestUUID(unittest.TestCase): 18 | def test_UUID(self): 19 | equal = self.assertEqual 20 | ascending = [] 21 | for (string, curly, hex, bytes, bytes_le, fields, integer, urn, 22 | time, clock_seq, variant, version) in [ 23 | ('00000000-0000-0000-0000-000000000000', 24 | '{00000000-0000-0000-0000-000000000000}', 25 | '00000000000000000000000000000000', 26 | b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0', 27 | b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0', 28 | (0, 0, 0, 0, 0, 0), 29 | 0, 30 | 'urn:uuid:00000000-0000-0000-0000-000000000000', 31 | 0, 0, uuid.RESERVED_NCS, None), 32 | ('00010203-0405-0607-0809-0a0b0c0d0e0f', 33 | '{00010203-0405-0607-0809-0a0b0c0d0e0f}', 34 | '000102030405060708090a0b0c0d0e0f', 35 | b'\0\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\x0d\x0e\x0f', 36 | b'\x03\x02\x01\0\x05\x04\x07\x06\x08\t\n\x0b\x0c\x0d\x0e\x0f', 37 | (0x00010203, 0x0405, 0x0607, 8, 9, 0x0a0b0c0d0e0f), 38 | 0x000102030405060708090a0b0c0d0e0f, 39 | 'urn:uuid:00010203-0405-0607-0809-0a0b0c0d0e0f', 40 | 0x607040500010203, 0x809, uuid.RESERVED_NCS, None), 41 | ('02d9e6d5-9467-382e-8f9b-9300a64ac3cd', 42 | '{02d9e6d5-9467-382e-8f9b-9300a64ac3cd}', 43 | '02d9e6d59467382e8f9b9300a64ac3cd', 44 | b'\x02\xd9\xe6\xd5\x94\x67\x38\x2e\x8f\x9b\x93\x00\xa6\x4a\xc3\xcd', 45 | b'\xd5\xe6\xd9\x02\x67\x94\x2e\x38\x8f\x9b\x93\x00\xa6\x4a\xc3\xcd', 46 | (0x02d9e6d5, 0x9467, 0x382e, 0x8f, 0x9b, 0x9300a64ac3cd), 47 | 0x02d9e6d59467382e8f9b9300a64ac3cd, 48 | 'urn:uuid:02d9e6d5-9467-382e-8f9b-9300a64ac3cd', 49 | 0x82e946702d9e6d5, 0xf9b, uuid.RFC_4122, 3), 50 | ('12345678-1234-5678-1234-567812345678', 51 | '{12345678-1234-5678-1234-567812345678}', 52 | '12345678123456781234567812345678', 53 | b'\x12\x34\x56\x78'*4, 54 | b'\x78\x56\x34\x12\x34\x12\x78\x56\x12\x34\x56\x78\x12\x34\x56\x78', 55 | (0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678), 56 | 0x12345678123456781234567812345678, 57 | 'urn:uuid:12345678-1234-5678-1234-567812345678', 58 | 0x678123412345678, 0x1234, uuid.RESERVED_NCS, None), 59 | ('6ba7b810-9dad-11d1-80b4-00c04fd430c8', 60 | '{6ba7b810-9dad-11d1-80b4-00c04fd430c8}', 61 | '6ba7b8109dad11d180b400c04fd430c8', 62 | b'\x6b\xa7\xb8\x10\x9d\xad\x11\xd1\x80\xb4\x00\xc0\x4f\xd4\x30\xc8', 63 | b'\x10\xb8\xa7\x6b\xad\x9d\xd1\x11\x80\xb4\x00\xc0\x4f\xd4\x30\xc8', 64 | (0x6ba7b810, 0x9dad, 0x11d1, 0x80, 0xb4, 0x00c04fd430c8), 65 | 0x6ba7b8109dad11d180b400c04fd430c8, 66 | 
'urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8', 67 | 0x1d19dad6ba7b810, 0xb4, uuid.RFC_4122, 1), 68 | ('6ba7b811-9dad-11d1-80b4-00c04fd430c8', 69 | '{6ba7b811-9dad-11d1-80b4-00c04fd430c8}', 70 | '6ba7b8119dad11d180b400c04fd430c8', 71 | b'\x6b\xa7\xb8\x11\x9d\xad\x11\xd1\x80\xb4\x00\xc0\x4f\xd4\x30\xc8', 72 | b'\x11\xb8\xa7\x6b\xad\x9d\xd1\x11\x80\xb4\x00\xc0\x4f\xd4\x30\xc8', 73 | (0x6ba7b811, 0x9dad, 0x11d1, 0x80, 0xb4, 0x00c04fd430c8), 74 | 0x6ba7b8119dad11d180b400c04fd430c8, 75 | 'urn:uuid:6ba7b811-9dad-11d1-80b4-00c04fd430c8', 76 | 0x1d19dad6ba7b811, 0xb4, uuid.RFC_4122, 1), 77 | ('6ba7b812-9dad-11d1-80b4-00c04fd430c8', 78 | '{6ba7b812-9dad-11d1-80b4-00c04fd430c8}', 79 | '6ba7b8129dad11d180b400c04fd430c8', 80 | b'\x6b\xa7\xb8\x12\x9d\xad\x11\xd1\x80\xb4\x00\xc0\x4f\xd4\x30\xc8', 81 | b'\x12\xb8\xa7\x6b\xad\x9d\xd1\x11\x80\xb4\x00\xc0\x4f\xd4\x30\xc8', 82 | (0x6ba7b812, 0x9dad, 0x11d1, 0x80, 0xb4, 0x00c04fd430c8), 83 | 0x6ba7b8129dad11d180b400c04fd430c8, 84 | 'urn:uuid:6ba7b812-9dad-11d1-80b4-00c04fd430c8', 85 | 0x1d19dad6ba7b812, 0xb4, uuid.RFC_4122, 1), 86 | ('6ba7b814-9dad-11d1-80b4-00c04fd430c8', 87 | '{6ba7b814-9dad-11d1-80b4-00c04fd430c8}', 88 | '6ba7b8149dad11d180b400c04fd430c8', 89 | b'\x6b\xa7\xb8\x14\x9d\xad\x11\xd1\x80\xb4\x00\xc0\x4f\xd4\x30\xc8', 90 | b'\x14\xb8\xa7\x6b\xad\x9d\xd1\x11\x80\xb4\x00\xc0\x4f\xd4\x30\xc8', 91 | (0x6ba7b814, 0x9dad, 0x11d1, 0x80, 0xb4, 0x00c04fd430c8), 92 | 0x6ba7b8149dad11d180b400c04fd430c8, 93 | 'urn:uuid:6ba7b814-9dad-11d1-80b4-00c04fd430c8', 94 | 0x1d19dad6ba7b814, 0xb4, uuid.RFC_4122, 1), 95 | ('7d444840-9dc0-11d1-b245-5ffdce74fad2', 96 | '{7d444840-9dc0-11d1-b245-5ffdce74fad2}', 97 | '7d4448409dc011d1b2455ffdce74fad2', 98 | b'\x7d\x44\x48\x40\x9d\xc0\x11\xd1\xb2\x45\x5f\xfd\xce\x74\xfa\xd2', 99 | b'\x40\x48\x44\x7d\xc0\x9d\xd1\x11\xb2\x45\x5f\xfd\xce\x74\xfa\xd2', 100 | (0x7d444840, 0x9dc0, 0x11d1, 0xb2, 0x45, 0x5ffdce74fad2), 101 | 0x7d4448409dc011d1b2455ffdce74fad2, 102 | 'urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2', 103 | 0x1d19dc07d444840, 0x3245, uuid.RFC_4122, 1), 104 | ('e902893a-9d22-3c7e-a7b8-d6e313b71d9f', 105 | '{e902893a-9d22-3c7e-a7b8-d6e313b71d9f}', 106 | 'e902893a9d223c7ea7b8d6e313b71d9f', 107 | b'\xe9\x02\x89\x3a\x9d\x22\x3c\x7e\xa7\xb8\xd6\xe3\x13\xb7\x1d\x9f', 108 | b'\x3a\x89\x02\xe9\x22\x9d\x7e\x3c\xa7\xb8\xd6\xe3\x13\xb7\x1d\x9f', 109 | (0xe902893a, 0x9d22, 0x3c7e, 0xa7, 0xb8, 0xd6e313b71d9f), 110 | 0xe902893a9d223c7ea7b8d6e313b71d9f, 111 | 'urn:uuid:e902893a-9d22-3c7e-a7b8-d6e313b71d9f', 112 | 0xc7e9d22e902893a, 0x27b8, uuid.RFC_4122, 3), 113 | ('eb424026-6f54-4ef8-a4d0-bb658a1fc6cf', 114 | '{eb424026-6f54-4ef8-a4d0-bb658a1fc6cf}', 115 | 'eb4240266f544ef8a4d0bb658a1fc6cf', 116 | b'\xeb\x42\x40\x26\x6f\x54\x4e\xf8\xa4\xd0\xbb\x65\x8a\x1f\xc6\xcf', 117 | b'\x26\x40\x42\xeb\x54\x6f\xf8\x4e\xa4\xd0\xbb\x65\x8a\x1f\xc6\xcf', 118 | (0xeb424026, 0x6f54, 0x4ef8, 0xa4, 0xd0, 0xbb658a1fc6cf), 119 | 0xeb4240266f544ef8a4d0bb658a1fc6cf, 120 | 'urn:uuid:eb424026-6f54-4ef8-a4d0-bb658a1fc6cf', 121 | 0xef86f54eb424026, 0x24d0, uuid.RFC_4122, 4), 122 | ('f81d4fae-7dec-11d0-a765-00a0c91e6bf6', 123 | '{f81d4fae-7dec-11d0-a765-00a0c91e6bf6}', 124 | 'f81d4fae7dec11d0a76500a0c91e6bf6', 125 | b'\xf8\x1d\x4f\xae\x7d\xec\x11\xd0\xa7\x65\x00\xa0\xc9\x1e\x6b\xf6', 126 | b'\xae\x4f\x1d\xf8\xec\x7d\xd0\x11\xa7\x65\x00\xa0\xc9\x1e\x6b\xf6', 127 | (0xf81d4fae, 0x7dec, 0x11d0, 0xa7, 0x65, 0x00a0c91e6bf6), 128 | 0xf81d4fae7dec11d0a76500a0c91e6bf6, 129 | 'urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6', 130 | 0x1d07decf81d4fae, 0x2765, 
uuid.RFC_4122, 1), 131 | ('fffefdfc-fffe-fffe-fffe-fffefdfcfbfa', 132 | '{fffefdfc-fffe-fffe-fffe-fffefdfcfbfa}', 133 | 'fffefdfcfffefffefffefffefdfcfbfa', 134 | b'\xff\xfe\xfd\xfc\xff\xfe\xff\xfe\xff\xfe\xff\xfe\xfd\xfc\xfb\xfa', 135 | b'\xfc\xfd\xfe\xff\xfe\xff\xfe\xff\xff\xfe\xff\xfe\xfd\xfc\xfb\xfa', 136 | (0xfffefdfc, 0xfffe, 0xfffe, 0xff, 0xfe, 0xfffefdfcfbfa), 137 | 0xfffefdfcfffefffefffefffefdfcfbfa, 138 | 'urn:uuid:fffefdfc-fffe-fffe-fffe-fffefdfcfbfa', 139 | 0xffefffefffefdfc, 0x3ffe, uuid.RESERVED_FUTURE, None), 140 | ('ffffffff-ffff-ffff-ffff-ffffffffffff', 141 | '{ffffffff-ffff-ffff-ffff-ffffffffffff}', 142 | 'ffffffffffffffffffffffffffffffff', 143 | b'\xff'*16, 144 | b'\xff'*16, 145 | (0xffffffff, 0xffff, 0xffff, 0xff, 0xff, 0xffffffffffff), 146 | 0xffffffffffffffffffffffffffffffff, 147 | 'urn:uuid:ffffffff-ffff-ffff-ffff-ffffffffffff', 148 | 0xfffffffffffffff, 0x3fff, uuid.RESERVED_FUTURE, None), 149 | ]: 150 | equivalents = [] 151 | # Construct each UUID in several different ways. 152 | for u in [uuid.UUID(string), uuid.UUID(curly), uuid.UUID(hex), 153 | uuid.UUID(bytes=bytes), uuid.UUID(bytes_le=bytes_le), 154 | uuid.UUID(fields=fields), uuid.UUID(int=integer), 155 | uuid.UUID(urn)]: 156 | # Test all conversions and properties of the UUID object. 157 | equal(str(u), string) 158 | equal(int(u), integer) 159 | equal(u.bytes, bytes) 160 | equal(u.bytes_le, bytes_le) 161 | equal(u.fields, fields) 162 | equal(u.time_low, fields[0]) 163 | equal(u.time_mid, fields[1]) 164 | equal(u.time_hi_version, fields[2]) 165 | equal(u.clock_seq_hi_variant, fields[3]) 166 | equal(u.clock_seq_low, fields[4]) 167 | equal(u.node, fields[5]) 168 | equal(u.hex, hex) 169 | equal(u.int, integer) 170 | equal(u.urn, urn) 171 | equal(u.time, time) 172 | equal(u.clock_seq, clock_seq) 173 | equal(u.variant, variant) 174 | equal(u.version, version) 175 | equivalents.append(u) 176 | 177 | # Different construction methods should give the same UUID. 178 | for u in equivalents: 179 | for v in equivalents: 180 | equal(u, v) 181 | 182 | # Bug 7380: "bytes" and "bytes_le" should give the same type. 183 | equal(type(u.bytes), builtins.bytes) 184 | equal(type(u.bytes_le), builtins.bytes) 185 | 186 | ascending.append(u) 187 | 188 | # Test comparison of UUIDs. 189 | for i in range(len(ascending)): 190 | for j in range(len(ascending)): 191 | equal(i < j, ascending[i] < ascending[j]) 192 | equal(i <= j, ascending[i] <= ascending[j]) 193 | equal(i == j, ascending[i] == ascending[j]) 194 | equal(i > j, ascending[i] > ascending[j]) 195 | equal(i >= j, ascending[i] >= ascending[j]) 196 | equal(i != j, ascending[i] != ascending[j]) 197 | 198 | # Test sorting of UUIDs (above list is in ascending order). 199 | resorted = ascending[:] 200 | resorted.reverse() 201 | resorted.sort() 202 | equal(ascending, resorted) 203 | 204 | def test_exceptions(self): 205 | badvalue = lambda f: self.assertRaises(ValueError, f) 206 | badtype = lambda f: self.assertRaises(TypeError, f) 207 | 208 | # Badly formed hex strings. 209 | badvalue(lambda: uuid.UUID('')) 210 | badvalue(lambda: uuid.UUID('abc')) 211 | badvalue(lambda: uuid.UUID('1234567812345678123456781234567')) 212 | badvalue(lambda: uuid.UUID('123456781234567812345678123456789')) 213 | badvalue(lambda: uuid.UUID('123456781234567812345678z2345678')) 214 | 215 | # Badly formed bytes. 216 | badvalue(lambda: uuid.UUID(bytes='abc')) 217 | badvalue(lambda: uuid.UUID(bytes='\0'*15)) 218 | badvalue(lambda: uuid.UUID(bytes='\0'*17)) 219 | 220 | # Badly formed bytes_le. 
221 | badvalue(lambda: uuid.UUID(bytes_le='abc')) 222 | badvalue(lambda: uuid.UUID(bytes_le='\0'*15)) 223 | badvalue(lambda: uuid.UUID(bytes_le='\0'*17)) 224 | 225 | # Badly formed fields. 226 | badvalue(lambda: uuid.UUID(fields=(1,))) 227 | badvalue(lambda: uuid.UUID(fields=(1, 2, 3, 4, 5))) 228 | badvalue(lambda: uuid.UUID(fields=(1, 2, 3, 4, 5, 6, 7))) 229 | 230 | # Field values out of range. 231 | badvalue(lambda: uuid.UUID(fields=(-1, 0, 0, 0, 0, 0))) 232 | badvalue(lambda: uuid.UUID(fields=(0x100000000, 0, 0, 0, 0, 0))) 233 | badvalue(lambda: uuid.UUID(fields=(0, -1, 0, 0, 0, 0))) 234 | badvalue(lambda: uuid.UUID(fields=(0, 0x10000, 0, 0, 0, 0))) 235 | badvalue(lambda: uuid.UUID(fields=(0, 0, -1, 0, 0, 0))) 236 | badvalue(lambda: uuid.UUID(fields=(0, 0, 0x10000, 0, 0, 0))) 237 | badvalue(lambda: uuid.UUID(fields=(0, 0, 0, -1, 0, 0))) 238 | badvalue(lambda: uuid.UUID(fields=(0, 0, 0, 0x100, 0, 0))) 239 | badvalue(lambda: uuid.UUID(fields=(0, 0, 0, 0, -1, 0))) 240 | badvalue(lambda: uuid.UUID(fields=(0, 0, 0, 0, 0x100, 0))) 241 | badvalue(lambda: uuid.UUID(fields=(0, 0, 0, 0, 0, -1))) 242 | badvalue(lambda: uuid.UUID(fields=(0, 0, 0, 0, 0, 0x1000000000000))) 243 | 244 | # Version number out of range. 245 | badvalue(lambda: uuid.UUID('00'*16, version=0)) 246 | badvalue(lambda: uuid.UUID('00'*16, version=6)) 247 | 248 | # Integer value out of range. 249 | badvalue(lambda: uuid.UUID(int=-1)) 250 | badvalue(lambda: uuid.UUID(int=1<<128)) 251 | 252 | # Must supply exactly one of hex, bytes, fields, int. 253 | h, b, f, i = '00'*16, b'\0'*16, (0, 0, 0, 0, 0, 0), 0 254 | uuid.UUID(h) 255 | uuid.UUID(hex=h) 256 | uuid.UUID(bytes=b) 257 | uuid.UUID(bytes_le=b) 258 | uuid.UUID(fields=f) 259 | uuid.UUID(int=i) 260 | 261 | # Wrong number of arguments (positional). 262 | badtype(lambda: uuid.UUID()) 263 | badtype(lambda: uuid.UUID(h, b)) 264 | badtype(lambda: uuid.UUID(h, b, b)) 265 | badtype(lambda: uuid.UUID(h, b, b, f)) 266 | badtype(lambda: uuid.UUID(h, b, b, f, i)) 267 | 268 | # Duplicate arguments. 269 | for hh in [[], [('hex', h)]]: 270 | for bb in [[], [('bytes', b)]]: 271 | for bble in [[], [('bytes_le', b)]]: 272 | for ii in [[], [('int', i)]]: 273 | for ff in [[], [('fields', f)]]: 274 | args = dict(hh + bb + bble + ii + ff) 275 | if len(args) != 0: 276 | badtype(lambda: uuid.UUID(h, **args)) 277 | if len(args) != 1: 278 | badtype(lambda: uuid.UUID(**args)) 279 | 280 | # Immutability. 281 | u = uuid.UUID(h) 282 | badtype(lambda: setattr(u, 'hex', h)) 283 | badtype(lambda: setattr(u, 'bytes', b)) 284 | badtype(lambda: setattr(u, 'bytes_le', b)) 285 | badtype(lambda: setattr(u, 'fields', f)) 286 | badtype(lambda: setattr(u, 'int', i)) 287 | badtype(lambda: setattr(u, 'time_low', 0)) 288 | badtype(lambda: setattr(u, 'time_mid', 0)) 289 | badtype(lambda: setattr(u, 'time_hi_version', 0)) 290 | badtype(lambda: setattr(u, 'time_hi_version', 0)) 291 | badtype(lambda: setattr(u, 'clock_seq_hi_variant', 0)) 292 | badtype(lambda: setattr(u, 'clock_seq_low', 0)) 293 | badtype(lambda: setattr(u, 'node', 0)) 294 | 295 | # Comparison with a non-UUID object 296 | badtype(lambda: u < object()) 297 | badtype(lambda: u > object()) 298 | 299 | def test_getnode(self): 300 | node1 = uuid.getnode() 301 | self.assertTrue(0 < node1 < (1 << 48), '%012x' % node1) 302 | 303 | # Test it again to ensure consistency. 
304 | node2 = uuid.getnode() 305 | self.assertEqual(node1, node2, '%012x != %012x' % (node1, node2)) 306 | 307 | @unittest.skipUnless(importable('ctypes'), 'requires ctypes') 308 | def test_uuid1(self): 309 | equal = self.assertEqual 310 | 311 | # Make sure uuid1() generates UUIDs that are actually version 1. 312 | for u in [uuid.uuid1() for i in range(10)]: 313 | equal(u.variant, uuid.RFC_4122) 314 | equal(u.version, 1) 315 | 316 | # Make sure the generated UUIDs are actually unique. 317 | uuids = {} 318 | for u in [uuid.uuid1() for i in range(1000)]: 319 | uuids[u] = 1 320 | equal(len(uuids.keys()), 1000) 321 | 322 | # Make sure the supplied node ID appears in the UUID. 323 | u = uuid.uuid1(0) 324 | equal(u.node, 0) 325 | u = uuid.uuid1(0x123456789abc) 326 | equal(u.node, 0x123456789abc) 327 | u = uuid.uuid1(0xffffffffffff) 328 | equal(u.node, 0xffffffffffff) 329 | 330 | # Make sure the supplied clock sequence appears in the UUID. 331 | u = uuid.uuid1(0x123456789abc, 0) 332 | equal(u.node, 0x123456789abc) 333 | equal(((u.clock_seq_hi_variant & 0x3f) << 8) | u.clock_seq_low, 0) 334 | u = uuid.uuid1(0x123456789abc, 0x1234) 335 | equal(u.node, 0x123456789abc) 336 | equal(((u.clock_seq_hi_variant & 0x3f) << 8) | 337 | u.clock_seq_low, 0x1234) 338 | u = uuid.uuid1(0x123456789abc, 0x3fff) 339 | equal(u.node, 0x123456789abc) 340 | equal(((u.clock_seq_hi_variant & 0x3f) << 8) | 341 | u.clock_seq_low, 0x3fff) 342 | 343 | def test_uuid3(self): 344 | equal = self.assertEqual 345 | 346 | # Test some known version-3 UUIDs. 347 | for u, v in [(uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org'), 348 | '6fa459ea-ee8a-3ca4-894e-db77e160355e'), 349 | (uuid.uuid3(uuid.NAMESPACE_URL, 'http://python.org/'), 350 | '9fe8e8c4-aaa8-32a9-a55c-4535a88b748d'), 351 | (uuid.uuid3(uuid.NAMESPACE_OID, '1.3.6.1'), 352 | 'dd1a1cef-13d5-368a-ad82-eca71acd4cd1'), 353 | (uuid.uuid3(uuid.NAMESPACE_X500, 'c=ca'), 354 | '658d3002-db6b-3040-a1d1-8ddd7d189a4d'), 355 | ]: 356 | equal(u.variant, uuid.RFC_4122) 357 | equal(u.version, 3) 358 | equal(u, uuid.UUID(v)) 359 | equal(str(u), v) 360 | 361 | def test_uuid4(self): 362 | equal = self.assertEqual 363 | 364 | # Make sure uuid4() generates UUIDs that are actually version 4. 365 | for u in [uuid.uuid4() for i in range(10)]: 366 | equal(u.variant, uuid.RFC_4122) 367 | equal(u.version, 4) 368 | 369 | # Make sure the generated UUIDs are actually unique. 370 | uuids = {} 371 | for u in [uuid.uuid4() for i in range(1000)]: 372 | uuids[u] = 1 373 | equal(len(uuids.keys()), 1000) 374 | 375 | def test_uuid5(self): 376 | equal = self.assertEqual 377 | 378 | # Test some known version-5 UUIDs. 379 | for u, v in [(uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org'), 380 | '886313e1-3b8a-5372-9b90-0c9aee199e5d'), 381 | (uuid.uuid5(uuid.NAMESPACE_URL, 'http://python.org/'), 382 | '4c565f0d-3f5a-5890-b41b-20cf47701c5e'), 383 | (uuid.uuid5(uuid.NAMESPACE_OID, '1.3.6.1'), 384 | '1447fa61-5277-5fef-a9b3-fbc6e44f4af3'), 385 | (uuid.uuid5(uuid.NAMESPACE_X500, 'c=ca'), 386 | 'cc957dd1-a972-5349-98cd-874190002798'), 387 | ]: 388 | equal(u.variant, uuid.RFC_4122) 389 | equal(u.version, 5) 390 | equal(u, uuid.UUID(v)) 391 | equal(str(u), v) 392 | 393 | @unittest.skipUnless(os.name == 'posix', 'requires Posix') 394 | def testIssue8621(self): 395 | # On at least some versions of OSX uuid.uuid4 generates 396 | # the same sequence of UUIDs in the parent and any 397 | # children started using fork. 
398 | fds = os.pipe() 399 | pid = os.fork() 400 | if pid == 0: 401 | os.close(fds[0]) 402 | value = uuid.uuid4() 403 | os.write(fds[1], value.hex.encode('latin-1')) 404 | os._exit(0) 405 | 406 | else: 407 | os.close(fds[1]) 408 | self.addCleanup(os.close, fds[0]) 409 | parent_value = uuid.uuid4().hex 410 | os.waitpid(pid, 0) 411 | child_value = os.read(fds[0], 100).decode('latin-1') 412 | 413 | self.assertNotEqual(parent_value, child_value) 414 | 415 | 416 | class TestInternals(unittest.TestCase): 417 | @unittest.skipUnless(os.name == 'posix', 'requires Posix') 418 | def test_find_mac(self): 419 | data = ''' 420 | fake hwaddr 421 | cscotun0 Link encap:UNSPEC HWaddr 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00 422 | eth0 Link encap:Ethernet HWaddr 12:34:56:78:90:ab 423 | ''' 424 | 425 | popen = unittest.mock.MagicMock() 426 | popen.stdout = io.BytesIO(data.encode()) 427 | 428 | with unittest.mock.patch.object(shutil, 'which', 429 | return_value='/sbin/ifconfig'): 430 | with unittest.mock.patch.object(subprocess, 'Popen', 431 | return_value=popen): 432 | mac = uuid._find_mac( 433 | command='ifconfig', 434 | args='', 435 | hw_identifiers=[b'hwaddr'], 436 | get_index=lambda x: x + 1, 437 | ) 438 | 439 | self.assertEqual(mac, 0x1234567890ab) 440 | 441 | def check_node(self, node, requires=None, network=False): 442 | if requires and node is None: 443 | self.skipTest('requires ' + requires) 444 | hex = '%012x' % node 445 | if support.verbose >= 2: 446 | print(hex, end=' ') 447 | if network: 448 | # 47 bit will never be set in IEEE 802 addresses obtained 449 | # from network cards. 450 | self.assertFalse(node & 0x010000000000, hex) 451 | self.assertTrue(0 < node < (1 << 48), 452 | "%s is not an RFC 4122 node ID" % hex) 453 | 454 | @unittest.skipUnless(os.name == 'posix', 'requires Posix') 455 | def test_ifconfig_getnode(self): 456 | node = uuid._ifconfig_getnode() 457 | self.check_node(node, 'ifconfig', True) 458 | 459 | @unittest.skipUnless(os.name == 'posix', 'requires Posix') 460 | def test_ip_getnode(self): 461 | node = uuid._ip_getnode() 462 | self.check_node(node, 'ip', True) 463 | 464 | @unittest.skipUnless(os.name == 'posix', 'requires Posix') 465 | def test_arp_getnode(self): 466 | node = uuid._arp_getnode() 467 | self.check_node(node, 'arp', True) 468 | 469 | @unittest.skipUnless(os.name == 'posix', 'requires Posix') 470 | def test_lanscan_getnode(self): 471 | node = uuid._lanscan_getnode() 472 | self.check_node(node, 'lanscan', True) 473 | 474 | @unittest.skipUnless(os.name == 'posix', 'requires Posix') 475 | def test_netstat_getnode(self): 476 | node = uuid._netstat_getnode() 477 | self.check_node(node, 'netstat', True) 478 | 479 | @unittest.skipUnless(os.name == 'nt', 'requires Windows') 480 | def test_ipconfig_getnode(self): 481 | node = uuid._ipconfig_getnode() 482 | self.check_node(node, 'ipconfig', True) 483 | 484 | @unittest.skipUnless(importable('win32wnet'), 'requires win32wnet') 485 | @unittest.skipUnless(importable('netbios'), 'requires netbios') 486 | def test_netbios_getnode(self): 487 | node = uuid._netbios_getnode() 488 | self.check_node(node, network=True) 489 | 490 | def test_random_getnode(self): 491 | node = uuid._random_getnode() 492 | # Least significant bit of first octet must be set. 
493 | self.assertTrue(node & 0x010000000000, '%012x' % node) 494 | self.check_node(node) 495 | 496 | @unittest.skipUnless(os.name == 'posix', 'requires Posix') 497 | @unittest.skipUnless(importable('ctypes'), 'requires ctypes') 498 | def test_unixdll_getnode(self): 499 | try: # Issues 1481, 3581: _uuid_generate_time() might be None. 500 | node = uuid._unixdll_getnode() 501 | except TypeError: 502 | self.skipTest('requires uuid_generate_time') 503 | self.check_node(node) 504 | 505 | @unittest.skipUnless(os.name == 'nt', 'requires Windows') 506 | @unittest.skipUnless(importable('ctypes'), 'requires ctypes') 507 | def test_windll_getnode(self): 508 | node = uuid._windll_getnode() 509 | self.check_node(node) 510 | 511 | 512 | if __name__ == '__main__': 513 | unittest.main() 514 | -------------------------------------------------------------------------------- /upy/uuid.py: -------------------------------------------------------------------------------- 1 | r"""UUID objects (universally unique identifiers) according to RFC 4122. 2 | 3 | This module provides immutable UUID objects (class UUID) and the functions 4 | uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5 5 | UUIDs as specified in RFC 4122. 6 | 7 | If all you want is a unique ID, you should probably call uuid1() or uuid4(). 8 | Note that uuid1() may compromise privacy since it creates a UUID containing 9 | the computer's network address. uuid4() creates a random UUID. 10 | 11 | Typical usage: 12 | 13 | >>> import uuid 14 | 15 | # make a UUID based on the host ID and current time 16 | >>> uuid.uuid1() # doctest: +SKIP 17 | UUID('a8098c1a-f86e-11da-bd1a-00112444be1e') 18 | 19 | # make a UUID using an MD5 hash of a namespace UUID and a name 20 | >>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org') 21 | UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e') 22 | 23 | # make a random UUID 24 | >>> uuid.uuid4() # doctest: +SKIP 25 | UUID('16fd2706-8baf-433b-82eb-8c7fada847da') 26 | 27 | # make a UUID using a SHA-1 hash of a namespace UUID and a name 28 | >>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org') 29 | UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d') 30 | 31 | # make a UUID from a string of hex digits (braces and hyphens ignored) 32 | >>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}') 33 | 34 | # convert a UUID to a string of hex digits in standard form 35 | >>> str(x) 36 | '00010203-0405-0607-0809-0a0b0c0d0e0f' 37 | 38 | # get the raw 16 bytes of the UUID 39 | >>> x.bytes 40 | b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f' 41 | 42 | # make a UUID from a 16-byte string 43 | >>> uuid.UUID(bytes=x.bytes) 44 | UUID('00010203-0405-0607-0809-0a0b0c0d0e0f') 45 | """ 46 | 47 | __author__ = 'Ka-Ping Yee ' 48 | 49 | RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [ 50 | 'reserved for NCS compatibility', 'specified in RFC 4122', 51 | 'reserved for Microsoft compatibility', 'reserved for future definition'] 52 | 53 | int_ = int # The built-in int type 54 | bytes_ = bytes # The built-in bytes type 55 | 56 | class UUID(object): 57 | """Instances of the UUID class represent UUIDs as specified in RFC 4122. 58 | UUID objects are immutable, hashable, and usable as dictionary keys. 59 | Converting a UUID to a string with str() yields something in the form 60 | '12345678-1234-1234-1234-123456789abc'. 
The UUID constructor accepts 61 | five possible forms: a similar string of hexadecimal digits, or a tuple 62 | of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and 63 | 48-bit values respectively) as an argument named 'fields', or a string 64 | of 16 bytes (with all the integer fields in big-endian order) as an 65 | argument named 'bytes', or a string of 16 bytes (with the first three 66 | fields in little-endian order) as an argument named 'bytes_le', or a 67 | single 128-bit integer as an argument named 'int'. 68 | 69 | UUIDs have these read-only attributes: 70 | 71 | bytes the UUID as a 16-byte string (containing the six 72 | integer fields in big-endian byte order) 73 | 74 | bytes_le the UUID as a 16-byte string (with time_low, time_mid, 75 | and time_hi_version in little-endian byte order) 76 | 77 | fields a tuple of the six integer fields of the UUID, 78 | which are also available as six individual attributes 79 | and two derived attributes: 80 | 81 | time_low the first 32 bits of the UUID 82 | time_mid the next 16 bits of the UUID 83 | time_hi_version the next 16 bits of the UUID 84 | clock_seq_hi_variant the next 8 bits of the UUID 85 | clock_seq_low the next 8 bits of the UUID 86 | node the last 48 bits of the UUID 87 | 88 | time the 60-bit timestamp 89 | clock_seq the 14-bit sequence number 90 | 91 | hex the UUID as a 32-character hexadecimal string 92 | 93 | int the UUID as a 128-bit integer 94 | 95 | urn the UUID as a URN as specified in RFC 4122 96 | 97 | variant the UUID variant (one of the constants RESERVED_NCS, 98 | RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE) 99 | 100 | version the UUID version number (1 through 5, meaningful only 101 | when the variant is RFC_4122) 102 | """ 103 | 104 | def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None, 105 | int=None, version=None): 106 | r"""Create a UUID from either a string of 32 hexadecimal digits, 107 | a string of 16 bytes as the 'bytes' argument, a string of 16 bytes 108 | in little-endian order as the 'bytes_le' argument, a tuple of six 109 | integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version, 110 | 8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as 111 | the 'fields' argument, or a single 128-bit integer as the 'int' 112 | argument. When a string of hex digits is given, curly braces, 113 | hyphens, and a URN prefix are all optional. For example, these 114 | expressions all yield the same UUID: 115 | 116 | UUID('{12345678-1234-5678-1234-567812345678}') 117 | UUID('12345678123456781234567812345678') 118 | UUID('urn:uuid:12345678-1234-5678-1234-567812345678') 119 | UUID(bytes='\x12\x34\x56\x78'*4) 120 | UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' + 121 | '\x12\x34\x56\x78\x12\x34\x56\x78') 122 | UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678)) 123 | UUID(int=0x12345678123456781234567812345678) 124 | 125 | Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must 126 | be given. The 'version' argument is optional; if given, the resulting 127 | UUID will have its variant and version set according to RFC 4122, 128 | overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'. 
129 | """ 130 | 131 | if [hex, bytes, bytes_le, fields, int].count(None) != 4: 132 | raise TypeError('one of the hex, bytes, bytes_le, fields, ' 133 | 'or int arguments must be given') 134 | if hex is not None: 135 | hex = hex.replace('urn:', '').replace('uuid:', '') 136 | hex = hex.strip('{}').replace('-', '') 137 | if len(hex) != 32: 138 | raise ValueError('badly formed hexadecimal UUID string') 139 | int = int_(hex, 16) 140 | if bytes_le is not None: 141 | if len(bytes_le) != 16: 142 | raise ValueError('bytes_le is not a 16-char string') 143 | bytes = (bytes_(reversed(bytes_le[:4])) + 144 | bytes_(reversed(bytes_le[4:6])) + 145 | bytes_(reversed(bytes_le[6:8])) + 146 | bytes_le[8:]) 147 | if bytes is not None: 148 | if len(bytes) != 16: 149 | raise ValueError('bytes is not a 16-char string') 150 | assert isinstance(bytes, bytes_), repr(bytes) 151 | int = int_.from_bytes(bytes, 'big') 152 | if fields is not None: 153 | if len(fields) != 6: 154 | raise ValueError('fields is not a 6-tuple') 155 | (time_low, time_mid, time_hi_version, 156 | clock_seq_hi_variant, clock_seq_low, node) = fields 157 | if not 0 <= time_low < 1<<32: 158 | raise ValueError('field 1 out of range (need a 32-bit value)') 159 | if not 0 <= time_mid < 1<<16: 160 | raise ValueError('field 2 out of range (need a 16-bit value)') 161 | if not 0 <= time_hi_version < 1<<16: 162 | raise ValueError('field 3 out of range (need a 16-bit value)') 163 | if not 0 <= clock_seq_hi_variant < 1<<8: 164 | raise ValueError('field 4 out of range (need an 8-bit value)') 165 | if not 0 <= clock_seq_low < 1<<8: 166 | raise ValueError('field 5 out of range (need an 8-bit value)') 167 | if not 0 <= node < 1<<48: 168 | raise ValueError('field 6 out of range (need a 48-bit value)') 169 | clock_seq = (clock_seq_hi_variant << 8) | clock_seq_low 170 | int = ((time_low << 96) | (time_mid << 80) | 171 | (time_hi_version << 64) | (clock_seq << 48) | node) 172 | if int is not None: 173 | if not 0 <= int < 1<<128: 174 | raise ValueError('int is out of range (need a 128-bit value)') 175 | if version is not None: 176 | if not 1 <= version <= 5: 177 | raise ValueError('illegal version number') 178 | # Set the variant to RFC 4122. 179 | int &= ~(0xc000 << 48) 180 | int |= 0x8000 << 48 181 | # Set the version number. 182 | int &= ~(0xf000 << 64) 183 | int |= version << 76 184 | self.__dict__['int'] = int 185 | # __dict__ and __setattr__ don't work for micropython 186 | if not self.__dict__: 187 | self.int = int 188 | 189 | def __eq__(self, other): 190 | if isinstance(other, UUID): 191 | return self.int == other.int 192 | return NotImplemented 193 | 194 | # Q. What's the value of being able to sort UUIDs? 195 | # A. Use them as keys in a B-Tree or similar mapping. 
196 | 197 | def __lt__(self, other): 198 | if isinstance(other, UUID): 199 | return self.int < other.int 200 | return NotImplemented 201 | 202 | def __gt__(self, other): 203 | if isinstance(other, UUID): 204 | return self.int > other.int 205 | return NotImplemented 206 | 207 | def __le__(self, other): 208 | if isinstance(other, UUID): 209 | return self.int <= other.int 210 | return NotImplemented 211 | 212 | def __ge__(self, other): 213 | if isinstance(other, UUID): 214 | return self.int >= other.int 215 | return NotImplemented 216 | 217 | def __hash__(self): 218 | return hash(self.int) 219 | 220 | def __int__(self): 221 | return self.int 222 | 223 | def __repr__(self): 224 | return '%s(%r)' % (self.__class__.__name__, str(self)) 225 | 226 | def __setattr__(self, name, value): 227 | raise TypeError('UUID objects are immutable') 228 | 229 | def __str__(self): 230 | hex = '%032x' % self.int 231 | return '%s-%s-%s-%s-%s' % ( 232 | hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:]) 233 | 234 | @property 235 | def bytes(self): 236 | return self.int.to_bytes(16, 'big') 237 | 238 | @property 239 | def bytes_le(self): 240 | bytes = self.bytes 241 | return (bytes_(reversed(bytes[:4])) + 242 | bytes_(reversed(bytes[4:6])) + 243 | bytes_(reversed(bytes[6:8])) + 244 | bytes[8:]) 245 | 246 | @property 247 | def fields(self): 248 | return (self.time_low, self.time_mid, self.time_hi_version, 249 | self.clock_seq_hi_variant, self.clock_seq_low, self.node) 250 | 251 | @property 252 | def time_low(self): 253 | return self.int >> 96 254 | 255 | @property 256 | def time_mid(self): 257 | return (self.int >> 80) & 0xffff 258 | 259 | @property 260 | def time_hi_version(self): 261 | return (self.int >> 64) & 0xffff 262 | 263 | @property 264 | def clock_seq_hi_variant(self): 265 | return (self.int >> 56) & 0xff 266 | 267 | @property 268 | def clock_seq_low(self): 269 | return (self.int >> 48) & 0xff 270 | 271 | @property 272 | def time(self): 273 | return (((self.time_hi_version & 0x0fff) << 48) | 274 | (self.time_mid << 32) | self.time_low) 275 | 276 | @property 277 | def clock_seq(self): 278 | return (((self.clock_seq_hi_variant & 0x3f) << 8) | 279 | self.clock_seq_low) 280 | 281 | @property 282 | def node(self): 283 | return self.int & 0xffffffffffff 284 | 285 | @property 286 | def hex(self): 287 | return '%032x' % self.int 288 | 289 | @property 290 | def urn(self): 291 | return 'urn:uuid:' + str(self) 292 | 293 | @property 294 | def variant(self): 295 | if not self.int & (0x8000 << 48): 296 | return RESERVED_NCS 297 | elif not self.int & (0x4000 << 48): 298 | return RFC_4122 299 | elif not self.int & (0x2000 << 48): 300 | return RESERVED_MICROSOFT 301 | else: 302 | return RESERVED_FUTURE 303 | 304 | @property 305 | def version(self): 306 | # The version bits are only meaningful for RFC 4122 UUIDs. 307 | if self.variant == RFC_4122: 308 | return int((self.int >> 76) & 0xf) 309 | 310 | def _popen(command, *args): 311 | import os, shutil, subprocess 312 | executable = shutil.which(command) 313 | if executable is None: 314 | path = os.pathsep.join(('/sbin', '/usr/sbin')) 315 | executable = shutil.which(command, path=path) 316 | if executable is None: 317 | return None 318 | # LC_ALL=C to ensure English output, stderr=DEVNULL to prevent output 319 | # on stderr (Note: we don't have an example where the words we search 320 | # for are actually localized, but in theory some system could do so.) 
321 | env = dict(os.environ) 322 | env['LC_ALL'] = 'C' 323 | proc = subprocess.Popen((executable,) + args, 324 | stdout=subprocess.PIPE, 325 | stderr=subprocess.DEVNULL, 326 | env=env) 327 | return proc 328 | 329 | def _find_mac(command, args, hw_identifiers, get_index): 330 | try: 331 | proc = _popen(command, *args.split()) 332 | if not proc: 333 | return 334 | with proc: 335 | for line in proc.stdout: 336 | words = line.lower().rstrip().split() 337 | for i in range(len(words)): 338 | if words[i] in hw_identifiers: 339 | try: 340 | word = words[get_index(i)] 341 | mac = int(word.replace(b':', b''), 16) 342 | if mac: 343 | return mac 344 | except (ValueError, IndexError): 345 | # Virtual interfaces, such as those provided by 346 | # VPNs, do not have a colon-delimited MAC address 347 | # as expected, but a 16-byte HWAddr separated by 348 | # dashes. These should be ignored in favor of a 349 | # real MAC address 350 | pass 351 | except OSError: 352 | pass 353 | 354 | def _ifconfig_getnode(): 355 | """Get the hardware address on Unix by running ifconfig.""" 356 | # This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes. 357 | for args in ('', '-a', '-av'): 358 | mac = _find_mac('ifconfig', args, [b'hwaddr', b'ether'], lambda i: i+1) 359 | if mac: 360 | return mac 361 | 362 | def _ip_getnode(): 363 | """Get the hardware address on Unix by running ip.""" 364 | # This works on Linux with iproute2. 365 | mac = _find_mac('ip', 'link list', [b'link/ether'], lambda i: i+1) 366 | if mac: 367 | return mac 368 | 369 | def _arp_getnode(): 370 | """Get the hardware address on Unix by running arp.""" 371 | import os, socket 372 | try: 373 | ip_addr = socket.gethostbyname(socket.gethostname()) 374 | except OSError: 375 | return None 376 | 377 | # Try getting the MAC addr from arp based on our IP address (Solaris). 378 | return _find_mac('arp', '-an', [os.fsencode(ip_addr)], lambda i: -1) 379 | 380 | def _lanscan_getnode(): 381 | """Get the hardware address on Unix by running lanscan.""" 382 | # This might work on HP-UX. 383 | return _find_mac('lanscan', '-ai', [b'lan0'], lambda i: 0) 384 | 385 | def _netstat_getnode(): 386 | """Get the hardware address on Unix by running netstat.""" 387 | # This might work on AIX, Tru64 UNIX and presumably on IRIX. 
388 | try: 389 | proc = _popen('netstat', '-ia') 390 | if not proc: 391 | return 392 | with proc: 393 | words = proc.stdout.readline().rstrip().split() 394 | try: 395 | i = words.index(b'Address') 396 | except ValueError: 397 | return 398 | for line in proc.stdout: 399 | try: 400 | words = line.rstrip().split() 401 | word = words[i] 402 | if len(word) == 17 and word.count(b':') == 5: 403 | mac = int(word.replace(b':', b''), 16) 404 | if mac: 405 | return mac 406 | except (ValueError, IndexError): 407 | pass 408 | except OSError: 409 | pass 410 | 411 | def _ipconfig_getnode(): 412 | """Get the hardware address on Windows by running ipconfig.exe.""" 413 | import os, re 414 | dirs = ['', r'c:\windows\system32', r'c:\winnt\system32'] 415 | try: 416 | import ctypes 417 | buffer = ctypes.create_string_buffer(300) 418 | ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300) 419 | dirs.insert(0, buffer.value.decode('mbcs')) 420 | except: 421 | pass 422 | for dir in dirs: 423 | try: 424 | pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all') 425 | except OSError: 426 | continue 427 | with pipe: 428 | for line in pipe: 429 | value = line.split(':')[-1].strip().lower() 430 | if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value): 431 | return int(value.replace('-', ''), 16) 432 | 433 | def _netbios_getnode(): 434 | """Get the hardware address on Windows using NetBIOS calls. 435 | See http://support.microsoft.com/kb/118623 for details.""" 436 | import win32wnet, netbios 437 | ncb = netbios.NCB() 438 | ncb.Command = netbios.NCBENUM 439 | ncb.Buffer = adapters = netbios.LANA_ENUM() 440 | adapters._pack() 441 | if win32wnet.Netbios(ncb) != 0: 442 | return 443 | adapters._unpack() 444 | for i in range(adapters.length): 445 | ncb.Reset() 446 | ncb.Command = netbios.NCBRESET 447 | ncb.Lana_num = ord(adapters.lana[i]) 448 | if win32wnet.Netbios(ncb) != 0: 449 | continue 450 | ncb.Reset() 451 | ncb.Command = netbios.NCBASTAT 452 | ncb.Lana_num = ord(adapters.lana[i]) 453 | ncb.Callname = '*'.ljust(16) 454 | ncb.Buffer = status = netbios.ADAPTER_STATUS() 455 | if win32wnet.Netbios(ncb) != 0: 456 | continue 457 | status._unpack() 458 | bytes = status.adapter_address[:6] 459 | if len(bytes) != 6: 460 | continue 461 | return int.from_bytes(bytes, 'big') 462 | 463 | # Thanks to Thomas Heller for ctypes and for his help with its use here. 464 | 465 | # If ctypes is available, use it to find system routines for UUID generation. 466 | # XXX This makes the module non-thread-safe! 467 | _uuid_generate_time = _UuidCreate = None 468 | try: 469 | import ctypes, ctypes.util 470 | import sys 471 | 472 | # The uuid_generate_* routines are provided by libuuid on at least 473 | # Linux and FreeBSD, and provided by libc on Mac OS X. 474 | _libnames = ['uuid'] 475 | if not sys.platform.startswith('win'): 476 | _libnames.append('c') 477 | for libname in _libnames: 478 | try: 479 | lib = ctypes.CDLL(ctypes.util.find_library(libname)) 480 | except Exception: 481 | continue 482 | if hasattr(lib, 'uuid_generate_time'): 483 | _uuid_generate_time = lib.uuid_generate_time 484 | break 485 | del _libnames 486 | 487 | # The uuid_generate_* functions are broken on MacOS X 10.5, as noted 488 | # in issue #8621 the function generates the same sequence of values 489 | # in the parent process and all children created using fork (unless 490 | # those children use exec as well). 491 | # 492 | # Assume that the uuid_generate functions are broken from 10.5 onward, 493 | # the test can be adjusted when a later version is fixed. 
494 | if sys.platform == 'darwin': 495 | import os 496 | if int(os.uname().release.split('.')[0]) >= 9: 497 | _uuid_generate_time = None 498 | 499 | # On Windows prior to 2000, UuidCreate gives a UUID containing the 500 | # hardware address. On Windows 2000 and later, UuidCreate makes a 501 | # random UUID and UuidCreateSequential gives a UUID containing the 502 | # hardware address. These routines are provided by the RPC runtime. 503 | # NOTE: at least on Tim's WinXP Pro SP2 desktop box, while the last 504 | # 6 bytes returned by UuidCreateSequential are fixed, they don't appear 505 | # to bear any relationship to the MAC address of any network device 506 | # on the box. 507 | try: 508 | lib = ctypes.windll.rpcrt4 509 | except: 510 | lib = None 511 | _UuidCreate = getattr(lib, 'UuidCreateSequential', 512 | getattr(lib, 'UuidCreate', None)) 513 | except: 514 | pass 515 | 516 | def _unixdll_getnode(): 517 | """Get the hardware address on Unix using ctypes.""" 518 | _buffer = ctypes.create_string_buffer(16) 519 | _uuid_generate_time(_buffer) 520 | return UUID(bytes=bytes_(_buffer.raw)).node 521 | 522 | def _windll_getnode(): 523 | """Get the hardware address on Windows using ctypes.""" 524 | _buffer = ctypes.create_string_buffer(16) 525 | if _UuidCreate(_buffer) == 0: 526 | return UUID(bytes=bytes_(_buffer.raw)).node 527 | 528 | def _random_getnode(): 529 | """Get a random node ID, with eighth bit set as suggested by RFC 4122.""" 530 | import random 531 | return random.getrandbits(48) | 0x010000000000 532 | 533 | _node = None 534 | 535 | def getnode(): 536 | """Get the hardware address as a 48-bit positive integer. 537 | 538 | The first time this runs, it may launch a separate program, which could 539 | be quite slow. If all attempts to obtain the hardware address fail, we 540 | choose a random 48-bit number with its eighth bit set to 1 as recommended 541 | in RFC 4122. 542 | """ 543 | 544 | global _node 545 | if _node is not None: 546 | return _node 547 | 548 | import sys 549 | if sys.platform == 'win32': 550 | getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode] 551 | else: 552 | getters = [_unixdll_getnode, _ifconfig_getnode, _ip_getnode, 553 | _arp_getnode, _lanscan_getnode, _netstat_getnode] 554 | 555 | for getter in getters + [_random_getnode]: 556 | try: 557 | _node = getter() 558 | except: 559 | continue 560 | if _node is not None: 561 | return _node 562 | 563 | _last_timestamp = None 564 | 565 | def uuid1(node=None, clock_seq=None): 566 | """Generate a UUID from a host ID, sequence number, and the current time. 567 | If 'node' is not given, getnode() is used to obtain the hardware 568 | address. If 'clock_seq' is given, it is used as the sequence number; 569 | otherwise a random 14-bit sequence number is chosen.""" 570 | 571 | # When the system provides a version-1 UUID generator, use it (but don't 572 | # use UuidCreate here because its UUIDs don't conform to RFC 4122). 573 | if _uuid_generate_time and node is clock_seq is None: 574 | _buffer = ctypes.create_string_buffer(16) 575 | _uuid_generate_time(_buffer) 576 | return UUID(bytes=bytes_(_buffer.raw)) 577 | 578 | global _last_timestamp 579 | import time 580 | nanoseconds = int(time.time() * 1e9) 581 | # 0x01b21dd213814000 is the number of 100-ns intervals between the 582 | # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00. 
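# (Editor's check, not in the original source: the offset used on the next
#  line is the 141,427 days between 1582-10-15 and 1970-01-01 expressed as
#  100-nanosecond ticks: 141427 * 86400 * 10**7 == 122192928000000000
#  == 0x01b21dd213814000.)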
583 | timestamp = int(nanoseconds/100) + 0x01b21dd213814000 584 | if _last_timestamp is not None and timestamp <= _last_timestamp: 585 | timestamp = _last_timestamp + 1 586 | _last_timestamp = timestamp 587 | if clock_seq is None: 588 | import random 589 | clock_seq = random.getrandbits(14) # instead of stable storage 590 | time_low = timestamp & 0xffffffff 591 | time_mid = (timestamp >> 32) & 0xffff 592 | time_hi_version = (timestamp >> 48) & 0x0fff 593 | clock_seq_low = clock_seq & 0xff 594 | clock_seq_hi_variant = (clock_seq >> 8) & 0x3f 595 | if node is None: 596 | node = getnode() 597 | return UUID(fields=(time_low, time_mid, time_hi_version, 598 | clock_seq_hi_variant, clock_seq_low, node), version=1) 599 | 600 | def uuid3(namespace, name): 601 | """Generate a UUID from the MD5 hash of a namespace UUID and a name.""" 602 | from hashlib import md5 603 | hash = md5(namespace.bytes + bytes(name, "utf-8")).digest() 604 | return UUID(bytes=hash[:16], version=3) 605 | 606 | def uuid4(): 607 | """Generate a random UUID.""" 608 | import os 609 | return UUID(bytes=os.urandom(16), version=4) 610 | 611 | def uuid5(namespace, name): 612 | """Generate a UUID from the SHA-1 hash of a namespace UUID and a name.""" 613 | from hashlib import sha1 614 | hash = sha1(namespace.bytes + bytes(name, "utf-8")).digest() 615 | return UUID(bytes=hash[:16], version=5) 616 | 617 | # The following standard UUIDs are for use with uuid3() or uuid5(). 618 | 619 | NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8') 620 | NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8') 621 | NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8') 622 | NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8') 623 | -------------------------------------------------------------------------------- /uxml2dict.py: -------------------------------------------------------------------------------- 1 | try: # pragma no cover 2 | from collections import OrderedDict 3 | except ImportError: # pragma no cover 4 | try: 5 | from ordereddict import OrderedDict 6 | except ImportError: 7 | try: 8 | from ucollections import OrderedDict # micropython 9 | except ImportError: 10 | OrderedDict = dict 11 | 12 | TEXT = "TEXT" 13 | START_TAG = "START_TAG" 14 | #START_TAG_DONE = "START_TAG_DONE" 15 | END_TAG = "END_TAG" 16 | PI = "PI" 17 | #PI_DONE = "PI_DONE" 18 | ATTR = "ATTR" 19 | #ATTR_VAL = "ATTR_VAL" 20 | 21 | 22 | def parseitem(iter_tok, parsed, lesslist): 23 | while True: 24 | try: 25 | tok = next(iter_tok) 26 | except (StopIteration, RuntimeError): # RuntimeError in micropython 27 | return iter_tok 28 | if tok[0] == PI: 29 | pass 30 | elif tok[0] == ATTR: 31 | _, (namespace, attr), value = tok 32 | if namespace: 33 | attr = namespace + ':' + attr 34 | parsed['@' + attr] = value 35 | elif tok[0] == TEXT: 36 | _, text = tok 37 | parsed['#text'] = text 38 | elif tok[0] == START_TAG: 39 | _, (namespace, tag) = tok 40 | if namespace: 41 | tag = namespace + ':' + tag 42 | d = OrderedDict() 43 | iter_tok = parseitem(iter_tok, d, lesslist) 44 | if not d: 45 | d = None 46 | elif len(d) == 1 and '#text' in d: 47 | d = d['#text'] 48 | parsed.setdefault(tag, []) 49 | if lesslist and len(parsed[tag]) == 1: 50 | parsed[tag] = [parsed[tag]] 51 | parsed[tag].append(d) 52 | if lesslist and len(parsed[tag]) == 1: 53 | parsed[tag] = parsed[tag][0] 54 | elif tok[0] == END_TAG: 55 | return iter_tok 56 | else: 57 | raise NotImplementedError('Token %s not support' % tok[0]) 58 | 59 | 60 | def parse(iter_tok, lesslist=True): 61 | parsed = 
OrderedDict() 62 | parseitem(iter_tok, parsed, lesslist) 63 | return parsed 64 | 65 | 66 | if __name__ == '__main__': 67 | import json 68 | import xmltok 69 | iter_tok = xmltok.tokenize(open('vector-text.svg')) 70 | parsed = parse(iter_tok) 71 | print(json.dumps(parsed, indent=4)) 72 | -------------------------------------------------------------------------------- /xmltok.py: -------------------------------------------------------------------------------- 1 | TEXT = "TEXT" 2 | START_TAG = "START_TAG" 3 | #START_TAG_DONE = "START_TAG_DONE" 4 | END_TAG = "END_TAG" 5 | PI = "PI" 6 | #PI_DONE = "PI_DONE" 7 | ATTR = "ATTR" 8 | #ATTR_VAL = "ATTR_VAL" 9 | 10 | class XMLSyntaxError(Exception): 11 | pass 12 | 13 | class XMLTokenizer: 14 | 15 | def __init__(self, f): 16 | self.f = f 17 | self.nextch() 18 | 19 | def curch(self): 20 | return self.c 21 | 22 | def getch(self): 23 | c = self.c 24 | self.nextch() 25 | return c 26 | 27 | def eof(self): 28 | return self.c == "" 29 | 30 | def nextch(self): 31 | self.c = self.f.read(1) 32 | if not self.c: 33 | raise StopIteration 34 | return self.c 35 | 36 | def skip_ws(self): 37 | while self.curch().isspace(): 38 | self.nextch() 39 | 40 | def isident(self): 41 | self.skip_ws() 42 | return self.curch().isalpha() 43 | 44 | def getident(self): 45 | self.skip_ws() 46 | ident = "" 47 | while True: 48 | c = self.curch() 49 | if not(c.isalpha() or c.isdigit() or c in "_-."): 50 | break 51 | ident += self.getch() 52 | return ident 53 | 54 | def getnsident(self): 55 | ns = "" 56 | ident = self.getident() 57 | if self.curch() == ":": 58 | self.nextch() 59 | ns = ident 60 | ident = self.getident() 61 | return (ns, ident) 62 | 63 | def match(self, c): 64 | self.skip_ws() 65 | if self.curch() == c: 66 | self.nextch() 67 | return True 68 | return False 69 | 70 | def expect(self, c): 71 | if not self.match(c): 72 | raise XMLSyntaxError 73 | 74 | def lex_attrs_till(self): 75 | while self.isident(): 76 | attr = self.getnsident() 77 | #yield (ATTR, attr) 78 | self.expect("=") 79 | self.expect('"') 80 | val = "" 81 | while self.curch() != '"': 82 | val += self.getch() 83 | #yield (ATTR_VAL, val) 84 | self.expect('"') 85 | yield (ATTR, attr, val) 86 | 87 | def tokenize(self): 88 | while not self.eof(): 89 | if self.match("<"): 90 | if self.match("/"): 91 | yield (END_TAG, self.getnsident()) 92 | self.expect(">") 93 | elif self.match("?"): 94 | yield (PI, self.getident()) 95 | for tok in self.lex_attrs_till(): 96 | yield tok 97 | self.expect("?") 98 | self.expect(">") 99 | elif self.match("!"): 100 | self.expect("-") 101 | self.expect("-") 102 | last3 = '' 103 | while True: 104 | last3 = last3[-2:] + self.getch() 105 | if last3 == "-->": 106 | break 107 | else: 108 | tag = self.getnsident() 109 | yield (START_TAG, tag) 110 | for tok in self.lex_attrs_till(): 111 | yield tok 112 | if self.match("/"): 113 | yield (END_TAG, tag) 114 | self.expect(">") 115 | else: 116 | text = "" 117 | while self.curch() != "<": 118 | text += self.getch() 119 | if text: 120 | yield (TEXT, text) 121 | 122 | 123 | def gfind(gen, pred): 124 | for i in gen: 125 | if pred(i): 126 | return i 127 | 128 | def text_of(gen, tag): 129 | # Return text content of a leaf tag 130 | def match_tag(t): 131 | if t[0] != START_TAG: 132 | return False 133 | if isinstance(tag, ()): 134 | return t[1] == tag 135 | return t[1][1] == tag 136 | 137 | gfind(gen, match_tag) 138 | # Assumes no attributes 139 | t, val = next(gen) 140 | assert t == TEXT 141 | return val 142 | 143 | def tokenize(file): 144 | return 
XMLTokenizer(file).tokenize() 145 | --------------------------------------------------------------------------------
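Editor's note — a minimal usage sketch, not part of the repository sources: xmltok.tokenize() turns an open file into a stream of (TYPE, ...) token tuples, and uxml2dict.parse() folds that stream into nested (ordered) dicts, mirroring the __main__ demo inside uxml2dict.py. Assuming CPython and a well-formed XML file in the working directory (the file name below is only an example), the round trip looks like this:

    import json
    import xmltok
    import uxml2dict

    with open('KmsDataBase.xml') as f:      # any well-formed XML file will do
        tokens = xmltok.tokenize(f)         # generator of (TYPE, ...) tuples
        data = uxml2dict.parse(tokens)      # nested OrderedDicts / lists / strings
    print(json.dumps(data, indent=4))       # pretty-print, as the uxml2dict demo does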