├── .gitignore
├── ASKN.png
├── README.md
├── StrawHat.png
├── Tool
├── FunctionsAV.py
├── __init__.py
├── adbClass.py
├── androidSwissKnife.py
├── axmlparserpy
│ ├── __init__.py
│ ├── __init__.pyc
│ ├── __pycache__
│ │ ├── __init__.cpython-35.pyc
│ │ └── axmlprinter.cpython-35.pyc
│ ├── axmlparser.py
│ ├── axmlparser.pyc
│ ├── axmlprinter.py
│ ├── axmlprinter.pyc
│ ├── bytecode.py
│ ├── bytecode.pyc
│ ├── stringblock.py
│ ├── stringblock.pyc
│ ├── typeconstants.py
│ └── typeconstants.pyc
├── manifestDecoder.py
├── monkeyFaren.py
├── parsetab.py
├── ply
│ ├── __init__.py
│ ├── cpp.py
│ ├── ctokens.py
│ ├── lex.py
│ ├── yacc.py
│ └── ygen.py
└── supportClasses
│ ├── __init__.py
│ ├── filters.py
│ ├── koodous.py
│ ├── permissions.py
│ └── utilities.py
├── anemf.png
└── smalidea-0.03.zip
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | *.py[cod]
3 | *$py.class
4 | Tool/axmlparserpy/__pycache__/
5 | Tool/images/
6 | Tool/Tools/
--------------------------------------------------------------------------------
/ASKN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Fare9/AndroidSwissKnife/96f2fd682acfe048b324db7937e43636d5747703/ASKN.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # AndroidSwissKnife
2 |
3 |
4 | FrameWork to make static and dynamic analysis for Android APKs in Python
5 |
6 |
7 | If you want to ask me for changes you want to add (for example in AndroidManifest analysis),
8 | write to my email address: farenain9@gmail.com
9 |
10 |
11 | Please install this version from github: AndroidSwissKnife.
12 |
13 |
14 |
15 |
New argument parser !!!
16 |
17 | ```Python
18 |
19 | parser = argparse.ArgumentParser(description="AndroidSwissKnife application to help in apk analysis")
20 | parser.add_argument("--install",action="store_true",help="To install some necessary tools")
21 | parser.add_argument("-a","--apk",type=str,help="apk file in your directory or absolute path")
22 | parser.add_argument("-o","--output",type=str,help="Name for output directories")
23 | parser.add_argument("--apktool",action="store_true",help="use apktool in Analysis")
24 | parser.add_argument("--unzip",action="store_true",help="use unzip in Analysis")
25 | parser.add_argument("--regEx",type=str,help='with unzip function we use a strings searching, you can add a regular Expression (by default URLs and Java Classes)')
26 | parser.add_argument("--exiftool",action="store_true",help="use exiftool with some file formats (you need first --apktool)")
27 | parser.add_argument("--jadx",action="store_true",help="use jadx to try to get source code")
28 | parser.add_argument("--opcodes",action="store_true",help="Get information from opcodes")
29 | parser.add_argument("--get-jar",action="store_true",help="Get jar from apk and finally the .class in a folder")
30 | parser.add_argument("--connect",type=str,help="Connect to android device with adb ")
31 | parser.add_argument("--all",action="store_true",help="use all Analysis")
32 | parser.add_argument("--create-apk",action="store_true",help="generate an apk, from apktool folder")
33 | parser.add_argument("--man",action="store_true",help="Get all the help from the program as star wars film")
34 | parser.add_argument("--DroidBox",action="store_true",help="New feature to do a dynamic analysis of the apk (It's a \"wrapper\" of droidbox with Burpsuite)")
35 | parser.add_argument("--Koodous",action="store_true",help="Try to search your apk in Koodous, it will take some time")
36 | parser.add_argument("--upload",action="store_true",help="If APK is not in koodous upload to analysis")
37 | parser.add_argument("-f","--folder",type=str,help='folder from apktool (needs --create-apk)')
38 | parser.add_argument("--apk-output",type=str,help='Output apk (needs --create-apk)')
39 | args = parser.parse_args()
40 |
41 | ```
42 |
43 | ```Python
44 | totalHelp = '''
45 | All help is here...
46 |
47 | First use: --apktool
48 |
49 | We will use apktool to extract data compressed in your apk, please install
50 | the last version of apktool.
51 | When the decompression with apktool has finished, we will read the
52 | AndroidManifest.xml and show some strange data (or not).
53 | After that we will read the libraries in the apk to find functions that
54 | are stored in .so files and whose names start with Java_ . Those functions can be
55 | called from app code. New feature that uses objdump to get assembly code.
56 | If you added --exiftool flag with --apktool we will extract some meta-data
57 | from some files with special extension.
58 | New feature to find databases (sqlite) show tables and schemas from tables.
59 |
60 | Second use: --unzip
61 |
62 | If you haven't had enough, let's start with the unzip function.
63 | We will use unzip to extract data compressed in apk, because you know
64 | an apk is like zip file.
65 | Then we will show the certificate from the apk (not good quality but
66 | you will have it in terminal)
67 | Then list assets directory, maybe some cool things can be found here.
68 | Now let's show some files that can contain interesting code.
69 | Finally show some strings in files (for now URLs), you can add some
70 | Regular Expression
71 |
72 | Third use: --jadx
73 |
74 | If you want to try to get java code, we will use jadx to get it.
75 | It's not better than smali, but If it works you will have source code
76 |
77 | Fourth use: --opcodes
78 |
79 | Get all instructions, operands used in classes and methods in the bytecode in opcode-name.txt
80 | Get headers of classes and methods in summary-name.txt
81 | Get additional information about headers like classes' id and superclasses... in sumaryDetails-name.txt
82 | Get the receivers from code that are in AndroidManifest and not. (ORIGINAL IDEA: https://github.com/AndroidWordMalware/malware-samples/blob/master/tools/receiversFinder.py)
83 |
84 | Fifth use: --get-jar
85 |
86 | Get jar from apk with dex2jar, then get a folder with the jar file
87 | unzipped, you can create java file to call or test classes from this
88 | jar file.
89 |
90 |
91 | Final Use: --all
92 |
93 | Everything put together, live of color and music.
94 |
95 | ###### NEW FEATURES ########
96 | --create-apk
97 | Once you have used apktool to get smali code from an apk, you can modify it, and finally
98 | create another apk with your changes, you can use this feature to do it.
99 |
100 | ### FINALLY DYNAMIC ANALYSIS (DroidBox Wrapper)
101 | --DroidBox (Original Idea https://github.com/pjlantz/droidbox/tree/master/droidbox4.1.1)
102 | I modified the DroidBox code for this framework; I rewrote some functions to work in Python 3,
103 | but nothing else changed in this program. You need to have an Android emulator; in the Readme.md
104 | you can see the features of my emulator.
105 |
106 |
107 | ### Koodous extension ###
108 | --Koodous
109 | Try to quickly analyze your apk with Koodous, the antivirus from the community.
110 | https://koodous.com
111 | If exists the apk, you will get quick analysis
112 |
113 | --upload
114 | If you want to upload your APK to Koodous to analyze.
115 | '''
116 | ```
117 |
118 | Features for android Emulator
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 | Next Release
130 | For the next release, I will upload an Androidx86 6.0, virtual machine for Dynamic Analysis with Xposed and Droidmon
131 |
132 | Follow the straw hat Pirates
133 |
--------------------------------------------------------------------------------
/StrawHat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Fare9/AndroidSwissKnife/96f2fd682acfe048b324db7937e43636d5747703/StrawHat.png
--------------------------------------------------------------------------------
/Tool/FunctionsAV.py:
--------------------------------------------------------------------------------
1 | '''
2 | Modules to get functions from smali files,
3 | we will look for example calls or sms sends...
4 |
5 | I will use lex class for the lexical Analyzer
6 | ( as I learnt in "Procesadores del Lenguaje" in UAH)
7 |
8 | If you have to resolve a problem with regular expression,
9 | well, you have two problems
10 | '''
11 |
# ANSI terminal escape sequences used to colour the findings printed
# by the token rules below (ENDC resets the colour).
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
18 |
19 |
20 | import ply.lex as lex
21 |
22 | ############################# TOKENS LEX #####################################
23 |
# tokens we want to find
# ply requires this module-level tuple: it must list every token name
# for which a t_<NAME> rule function is defined below.
tokens = (
    "CLASS",
    "METHOD",
    "INTENTS",
    # For Calls methods
    "ACTIONCALL",
    "INIT_INTENT",
    "SET_DATA",
    "START_ACTIVITY",
    # FOR SMS API MANAGER
    "SMSMANAGER_GETDEFAULT",
    "SMSMANAGER_SENDTEXTMESSAGE",
    # FOR HARDCODED EMAILS
    "EMAILS"

)
41 |
42 |
43 | ############################# TOKENS DEFINITION ###############################
44 |
45 | # regular expressions for tokens
46 |
# things to ignore
# Characters ply skips silently between tokens.  NOTE(review): spaces are
# deliberately NOT ignored — the rule regexes below match literal runs of
# spaces with "[ ]+".
t_ignore = "\t\r"
49 |
50 |
51 |
52 | # A method definition setting the regular expression for a class
53 | # and running an action
54 |
# find classes
def t_CLASS(t):
    # ply convention: the docstring below IS the token's regex — do not edit.
    r"(\.class)[ ]+((public)|(private)|(protected))[ ]+L[^;]+?;"
    # Matches a smali ".class public|private|protected Lsome/Class;" line.
    # The token is only reported on stdout, not returned (so it is discarded).
    print (OKBLUE+"[+] Class: "+t.value+" in line: "+str(t.lineno)+ENDC)
59 |
# find methods
def t_METHOD(t):
    # ply convention: the docstring below IS the token's regex — do not edit.
    r"(\.method)[ ]+((public)|(private)|(protected))[ ]+[a-zA-Z].*\(.*\)[ZCBSIJFDV].*"
    # Matches a smali ".method <visibility> name(args)ret" header line.
    print(OKGREEN+"\t[+] Method: "+t.value+" in line: "+str(t.lineno)+ENDC)
64 |
# Look for Intent object creation
def t_INTENTS(t):
    # ply convention: the docstring below IS the token's regex — do not edit.
    r"new-instance[ ]+v[0-9]+,[ ]*Landroid/content/Intent;"
    # Reports a "new-instance vN, Landroid/content/Intent;" allocation.
    print(WARNING+"\t\t[+] New Intent: "+t.value+" in line: "+str(t.lineno)+ENDC)
69 |
# Look for action.CALL Strings
def t_ACTIONCALL(t):
    # ply convention: the docstring below IS the token's regex — do not edit.
    r"const-string[ ]+v[0-9]+,[ ]*(\"android.intent.action.CALL\")"
    # Reports the hard-coded "android.intent.action.CALL" string constant.
    print(FAIL+"\t\t[+] Action Call Strings: "+t.value+" in line: "+str(t.lineno)+ENDC)
74 |
# Catch Init Intent
# invoke-direct {v0, v1}, Landroid/content/Intent;->(Ljava/lang/String;)V
# invoke-direct {v0, v1, v2}, Landroid/content/Intent;->(Ljava/lang/String;Landroid/net/Uri;)V
def t_INIT_INTENT(t):
    # ply convention: the docstring below IS the token's regex — do not edit.
    r"invoke-direct[ ]+{(p[0-9])?(v[0-9]?)(,[ ]*(p[0-9])?(v[0-9]?))*},[ ]*Landroid/content/Intent;->[ ]*\(.*\)[ZCBSIJFDVL].*"
    # Reports a call to an Intent constructor.  NOTE(review): the example
    # lines above (and possibly the regex) appear to have lost an "<init>"
    # token, presumably to HTML escaping — verify against the original repo.
    print(WARNING+"\t\t[+] Init Intent: "+t.value+" in line: "+str(t.lineno)+ENDC)
81 |
# Catch SET DATA FOR INTENT
# invoke-virtual {v0, v1}, Landroid/content/Intent;->setData(Landroid/net/Uri;)Landroid/content/Intent;
def t_SET_DATA(t):
    # ply convention: the docstring below IS the token's regex — do not edit.
    r"invoke-virtual[ ]+{(p[0-9])?(v[0-9]?)(,[ ]*(p[0-9])?(v[0-9]?))*},[ ]*Landroid/content/Intent;->setData\(.*\)[ZCBSIJFDVL].*"
    # Reports an Intent.setData(...) call (e.g. attaching a tel: URI).
    print (FAIL+"\t\t[+] Set Data: "+t.value+" in line: "+str(t.lineno)+ENDC)
87 |
# Catch START ACTIVITY
# invoke-virtual {p0, v0}, Lcom/example/root/proofsmalware/MainActivity;->startActivity(Landroid/content/Intent;)V
# invoke-virtual {p1, v0}, Landroid/content/Context;->startActivity(Landroid/content/Intent;)V
def t_START_ACTIVITY(t):
    # ply convention: the docstring below IS the token's regex — do not edit.
    r"invoke-virtual[ ]*{(p[0-9])?(v[0-9])?(,[ ]*(p[0-9])?(v[0-9])?)?},[ ]*L[^;]+?;->startActivity\(.*\)[ZCBSIJFDVL].*"

    # Reports any startActivity(Intent) invocation, on any receiver class.
    print(WARNING+"\t\t[+] Start Activity: "+t.value+" in line: "+str(t.lineno)+ENDC)
95 |
# Catch GetDefaults from sms api
# invoke-static {}, Landroid/telephony/SmsManager;->getDefault()Landroid/telephony/SmsManager;
def t_SMSMANAGER_GETDEFAULT(t):
    # ply convention: the docstring below IS the token's regex — do not edit.
    r"invoke-static[ ]*{(p[0-9])?(v[0-9])?(,[ ]*(p[0-9])?(v[0-9])?)?},[ ]*Landroid/telephony/SmsManager;->getDefault\(.*\)[ZCBSIJFDVL].*"
    # Reports SmsManager.getDefault() — the entry point of the SMS API.
    print(FAIL+"\t\t[+] Get Default SMS API: "+t.value+" in line: "+str(t.lineno)+ENDC)
101 |
# Catch SendSMS
# invoke-virtual/range {v0 .. v5}, Landroid/telephony/SmsManager;->sendTextMessage(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Landroid/app/PendingIntent;Landroid/app/PendingIntent;)V
def t_SMSMANAGER_SENDTEXTMESSAGE(t):
    # ply convention: the docstring below IS the token's regex — do not edit.
    r"invoke-virtual/range[ ]*{(p[0-9])?(v[0-9])?[ ]*\.\.[ ]*(p[0-9])?(v[0-9])?},[ ]*Landroid/telephony/SmsManager;->sendTextMessage\(.*\)[ZCBSIJFDVL].*"

    # Reports SmsManager.sendTextMessage(...) — an actual SMS being sent.
    print(FAIL+"\t\t[+] Send Text Message API: "+t.value+" in line: "+str(t.lineno)+ENDC)
108 |
# Catch Emails
# example@example.com
def t_EMAILS(t):
    # ply convention: the docstring below IS the token's regex — do not edit.
    r"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)"

    # Reports email-address-looking literals hard-coded in the smali.
    print(FAIL+"\t\t[+] Email in code: "+t.value+" in line: "+str(t.lineno)+ENDC)
# method to catch errors
def t_error(t):
    # Called by ply for any character no rule matches; we silently skip it
    # one character at a time instead of aborting the scan.
    #print ("[-] Error: "+t.value[0])
    t.lexer.skip(1)
120 |
# Set new line
def t_newline(t):
    r'\n+'
    # Keep ply's line counter in sync so reports show the real line numbers.
    t.lexer.lineno += t.value.count("\n")
125 |
# End of file
def t_eof(t):
    # ply calls this when input is exhausted; returning None ends tokenizing.
    print ("[!] Finished program")
129 |
# --- standalone driver ---------------------------------------------------
# NOTE(review): this smali path is hard-coded to the author's machine;
# point it at your own apktool output to run this module directly.
# Bug fix: the file was opened in binary mode ("rb"), handing a bytes
# object to lex.input(), which expects str on Python 3 — open as text.
filed = open("/root/Documentos/ProgramasSMALI/apktool-proofsmalware/smali/com/example/root/proofsmalware/MainActivity.smali", "r")

# Build the lexical analyzer from the t_* rules above and feed it the file.
lex.lex()
lex.input(filed.read())

filed.close()
# Pull tokens until the lexer is exhausted (each rule already prints its
# own coloured report; discarded tokens produce no output here).
while 1:
    tok = lex.token()
    if not tok:
        break
    print (tok)
143 |
144 |
145 |
146 |
147 |
148 |
149 |
150 |
151 |
--------------------------------------------------------------------------------
/Tool/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Fare9/AndroidSwissKnife/96f2fd682acfe048b324db7937e43636d5747703/Tool/__init__.py
--------------------------------------------------------------------------------
/Tool/adbClass.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | '''
4 | Classes that will help with android dynamic analysis
5 | we will use adb to start an emulator, install apk, run
6 | and also use proxy
7 |
8 | Why another class file with classes? If you create a
9 | complex structure nobody can understand it.
10 |
11 | A sentence which can explain about this class:
12 |
13 | "Ja hem traçat un llarg camí fins aquí,
14 | I tots nosaltres portem dins dels nostres cors,
15 | promeses que no podem deixar enrere"
16 | Miree
17 |
18 | Original DroidBox Idea:
19 | Patrik Lantz patrik@pjlantz.com and Laurent Delosieres ldelosieres@hispasec.com
20 | The Honeynet Project
21 |
22 | I modified some parts from code and I use another methods for example to parse XML
23 | and I have adapted to my code. My ideas added to Program:
24 |
25 | - Start Burpsuite (I'm not owner from Burpsuite, https://portswigger.net/burp/)
26 | - No need startmenu.sh, gnome-terminal will open emulator in another window
27 | - Use of apktool to unzip apk
28 | - Use of BeautifulSoup to parse XML
29 |
30 | Any suggestion please contact me in farenain9@gmail.com
31 | '''
32 |
33 | import subprocess # for call
34 | import time
35 | import sys
36 | import os
37 | import hashlib
38 |
39 | from threading import Thread
40 | from bs4 import BeautifulSoup
41 |
class threadAnalyzer(Thread):
    """
    Background thread that keeps a spinner on screen together with the
    number of sandbox log entries collected so far.
    """

    def __init__(self):
        """Set up the thread with its stop flag and log counter."""
        Thread.__init__(self)
        self.stop = False   # flipped to True by stopThread() to end run()
        self.logs = 0       # sandbox log entries counted so far

    def stopThread(self):
        """Ask the display loop in run() to finish."""
        self.stop = True

    def increaseLogs(self):
        """Bump the collected-log counter by one."""
        self.logs += 1

    def run(self):
        """
        Display loop: redraw the spinner/counter line every half second
        until stopThread() is called, then write a final summary line.
        """
        frames = ['|', '/', '-', '\\']
        tick = 0
        while 1:
            frame = frames[tick % len(frames)]
            sys.stdout.write("[AndroidSwissKnife] \033[132m[%s] Collected %s sandbox logs\033[1m (Ctrl-C to view logs)\r" % (frame, str(self.logs)))
            sys.stdout.flush()
            time.sleep(0.5)
            tick += 1
            if self.stop:
                # Final line: '*' marker plus trailing spaces to wipe leftovers.
                sys.stdout.write("[AndroidSwissKnife] \033[132m[%s] Collected %s sandbox logs\033[1m%s\r" % ('*', str(self.logs), ' '*25))
                sys.stdout.flush()
                break
87 |
class Proxy():
    """
    Small wrapper that launches Burpsuite as the HTTP proxy the emulator
    will be pointed at.

    Attributes:
        proxy:   proxy program name ("burpsuite").
        ip:      listen address handed to the emulator (default 127.0.0.1).
        port:    listen port handed to the emulator (default "8080").
        correct: True once the proxy process was launched successfully.
    """

    def __init__(self, ip=None, port=None):
        """Store proxy connection data, falling back to local defaults."""
        self.proxy = "burpsuite"
        # Fix: compare against None with "is", not "==".
        self.ip = "127.0.0.1" if ip is None else ip
        self.port = "8080" if port is None else port
        self.correct = False

    def startBurp(self):
        """
        Launch burpsuite and abort the whole program if that failed.

        NOTE(review): Thread.run() is invoked directly (not start()), so
        execProgram actually runs synchronously in this thread — which is
        what guarantees self.correct is set before it is checked below.
        """
        thread = Thread(target=self.execProgram)
        thread.run()
        if not self.correct:
            print("[-] Error with burp thread")
            sys.exit(-1)

    def execProgram(self):
        """Spawn burpsuite in the background and record the outcome."""
        sentence = "burpsuite &"
        try:
            subprocess.call(sentence, shell=True)
            self.correct = True
        # Fix: was a bare "except:", which also swallows SystemExit and
        # KeyboardInterrupt; narrow it to Exception.
        except Exception:
            print("[-] Error with burpsuite, if you don't have it, install it to continue")
            self.correct = False
            sys.exit(-1)
127 |
## Tags from android
# DroidBox taint-tag bitmask -> human-readable name.  A sandbox log entry
# carries an integer whose set bits identify the tainted data sources;
# getTags() below decodes such a mask with this table.
tags = { 0x1 : "TAINT_LOCATION", 0x2: "TAINT_CONTACTS", 0x4: "TAINT_MIC", 0x8: "TAINT_PHONE_NUMBER",
         0x10: "TAINT_LOCATION_GPS", 0x20: "TAINT_LOCATION_NET", 0x40: "TAINT_LOCATION_LAST", 0x80: "TAINT_CAMERA",
         0x100: "TAINT_ACCELEROMETER", 0x200: "TAINT_SMS", 0x400: "TAINT_IMEI", 0x800: "TAINT_IMSI",
         0x1000: "TAINT_ICCID", 0x2000: "TAINT_DEVICE_SN", 0x4000: "TAINT_ACCOUNT", 0x8000: "TAINT_BROWSER",
         0x10000: "TAINT_OTHERDB", 0x20000: "TAINT_FILECONTENT", 0x40000: "TAINT_PACKAGE", 0x80000: "TAINT_CALL_LOG",
         0x100000: "TAINT_EMAIL", 0x200000: "TAINT_CALENDAR", 0x400000: "TAINT_SETTINGS" }
135 |
class Adb():
    """
    Helper around the adb/emulator command line tools: starts the avd
    (optionally behind a proxy) and drives logcat for the analysis.
    """

    def __init__(self, emulator=None, proxy=None):
        """
        Args:
            emulator: name of the avd to launch (mandatory).
            proxy:    optional Proxy instance used by startEmulatorProxy.
        """
        if emulator is None:
            print('[-] Specify emulator name')
            sys.exit(-1)
        else:
            self.emulator = emulator
        self.proxy = proxy

    def find(self, name):
        """
        Resolve *name* relative to the AndroidSwissKnife entry in $PATH.

        Bug fix: the original stored the matching PATH entry in a local
        but then returned the *last* PATH entry instead of the match.
        """
        ask = None
        for p in os.environ['PATH'].split(":"):
            if 'AndroidSwissKnife' in p:
                ask = p
                break
        if ask is None:
            print('[-] AndroidSwissKnife directory not found in PATH')
            sys.exit(-1)
        return ask + '/' + name

    def _launch(self, sentence):
        """Run the gnome-terminal/emulator command and record the outcome."""
        try:
            print("[+] Exec Emulator")
            os.system(sentence)
            self.correct = True
        except Exception as e:
            self.correct = False
            print("[-] Error with emulator: " + str(e))
            sys.exit(-1)

    def startEmulatorProxy(self):
        """
        Start the android emulator with its HTTP traffic routed through
        the configured Burpsuite proxy.
        """
        system = self.find("images/system.img")
        ramdisk = self.find("images/ramdisk.img")
        sentence = ('gnome-terminal --command "emulator -avd ' + self.emulator
                    + ' -http-proxy ' + self.proxy.ip + ':' + self.proxy.port
                    + ' -system ' + system + ' -ramdisk ' + ramdisk
                    + ' -wipe-data -prop dalvik.vm.execution-mode=int:portable"')
        self._launch(sentence)

    def startEmulator(self):
        """Start the android emulator without a proxy."""
        system = self.find("images/system.img")
        ramdisk = self.find("images/ramdisk.img")
        sentence = ('gnome-terminal --command "emulator -avd ' + self.emulator
                    + ' -system ' + system + ' -ramdisk ' + ramdisk
                    + ' -wipe-data -prop dalvik.vm.execution-mode=int:portable"')
        self._launch(sentence)

    def cleanAdbLogcat(self):
        """Clear the device logcat buffer before starting the analysis."""
        subprocess.call(["adb", "logcat", "-c"])

    def execAdbLogcat(self):
        """
        Start logcat filtered to the DroidBox-relevant tags and return
        the Popen handle so the caller can stream its stdout.
        """
        adb = subprocess.Popen(
            ["adb", "logcat", "DroidBox:W", "dalvikvm:W", "ActivityManager:I"],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        return adb
208 |
209 |
210 |
class DynamicAnalyzer():
    """
    Static pre-analysis for the dynamic run (adapted from DroidBox):
    unpacks the apk with apktool, parses AndroidManifest.xml with
    BeautifulSoup and records packages, permissions, receivers and
    activities, plus the apk hashes.
    """

    def __init__(self, apk):
        """
        Args:
            apk: path to the apk file under analysis.
        """
        self.apk = apk
        self.packages = []
        self.permissions = []
        self.outPermissions = []  # permissions the apk declares for other apps
        self.receivers = []
        self.recvsactions = {}    # receiver name -> list of its action names
        self.activities = {}      # activity name -> {"actions": [...], "categories": [...]}
        self.mainActivity = None  # activity carrying android.intent.action.MAIN

    def extractingApk(self):
        """
        Unzip the apk with apktool (unless already done) and populate the
        instance fields from AndroidManifest.xml.
        """
        try:
            apkName = os.path.basename(self.apk).replace(".apk", "")
            unzipFolder = "apktool-" + apkName

            # Only run apktool when its output folder is not already here.
            if unzipFolder not in os.listdir('.'):
                outputFile = os.getcwd() + "/" + "apktool-" + apkName
                sentence = 'apktool d ' + self.apk + ' -o ' + outputFile
                print("[+] Unzipping apk")
                subprocess.call(sentence, shell=True)

            # Fix: the manifest handle was never closed; use a context manager.
            with open(unzipFolder + "/AndroidManifest.xml", "rb") as manifest:
                bsObj = BeautifulSoup(manifest.read(), 'html.parser')

            # <manifest package="..."> -> package names
            for package in bsObj.findAll("manifest"):
                self.packages.append(str(package['package']))

            # <uses-permission> -> permissions the apk requests
            for usespermission in bsObj.findAll("uses-permission"):
                self.permissions.append(str(usespermission['android:name']))

            # <permission> -> permissions declared for other apps
            for permission in bsObj.findAll("permission"):
                self.outPermissions.append(str(permission['android:name']))

            # <receiver> elements; each may carry zero or more <action>s
            for receiver in bsObj.findAll("receiver"):
                self.receivers.append(receiver['android:name'])
                self.recvsactions[receiver['android:name']] = list()
                for action in receiver.findAll("action"):
                    self.recvsactions[receiver['android:name']].append(action['android:name'])

            # <activity> elements with their <action>s and <category>s
            for activity in bsObj.findAll("activity"):
                self.activities[activity['android:name']] = {}
                self.activities[activity['android:name']]["actions"] = list()
                for action in activity.findAll("action"):
                    self.activities[activity['android:name']]["actions"].append(action['android:name'])

                self.activities[activity['android:name']]["categories"] = list()
                for category in activity.findAll('category'):
                    self.activities[activity['android:name']]["categories"].append(category['android:name'])

            # The activity holding the MAIN action is the entry point.
            for activity in self.activities:
                if "android.intent.action.MAIN" in self.activities[activity]["actions"]:
                    self.mainActivity = activity
                    break

        except Exception as e:
            # Fix: this error was printed with a misleading "[+]" marker.
            print("[-] Error parsing AndroidManifest: " + str(e))

    def getHash(self):
        """
        Return [md5, sha1, sha256] hex digests of the apk file.

        Fix: the file handle was never closed; use a context manager.
        """
        md5 = hashlib.md5()
        sha1 = hashlib.sha1()
        sha256 = hashlib.sha256()
        with open(self.apk, 'rb') as f:
            # Read in chunks so large apks are never loaded whole.
            while True:
                data = f.read(512)
                if not data:
                    break
                md5.update(data)
                sha1.update(data)
                sha256.update(data)
        return [md5.hexdigest(), sha1.hexdigest(), sha256.hexdigest()]
332 |
def getTags(tagUser):
    """
    Decode a DroidBox taint bitmask into the list of tag names.

    Args:
        tagUser: integer bitmask as found in a sandbox log entry.
    Returns:
        Names from the module-level ``tags`` table whose bit is set.
    """
    # A bit is set exactly when ANDing the mask with it is non-zero.
    return [name for bit, name in tags.items() if tagUser & bit != 0]
def progressBar():
    """
    Draw a purely cosmetic text progress bar while Burpsuite starts:
    26 frames, one every 0.2 seconds (about 5 seconds total).
    """
    print("[+] Loading Burpsuite")
    for i in range(0, 26):
        x = i*100/25
        # Carriage return keeps redrawing the same line; the final frame
        # ends with a newline instead.
        terminator = "%\n" if i == 25 else "%\r"
        sys.stdout.write("["+"="*i+">]"+str(x)+terminator)
        sys.stdout.flush()
        time.sleep(0.2)
355 |
if __name__ == '__main__':
    # Manual smoke test: launch Burpsuite, then the emulator, then parse a
    # sample apk.  The input() calls pause until the user presses Enter so
    # each stage can be checked by hand.
    burp = Proxy()

    print("Burp ip: "+burp.ip)
    print("Burp port: "+burp.port)
    burp.startBurp()
    input()
    adb = Adb(emulator="Sandbox",proxy=burp)
    adb.startEmulator()
    input()
    time.sleep(2)
    da = DynamicAnalyzer(apk="prueba.apk")
    da.extractingApk()
--------------------------------------------------------------------------------
/Tool/axmlparserpy/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Fare9/AndroidSwissKnife/96f2fd682acfe048b324db7937e43636d5747703/Tool/axmlparserpy/__init__.py
--------------------------------------------------------------------------------
/Tool/axmlparserpy/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Fare9/AndroidSwissKnife/96f2fd682acfe048b324db7937e43636d5747703/Tool/axmlparserpy/__init__.pyc
--------------------------------------------------------------------------------
/Tool/axmlparserpy/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Fare9/AndroidSwissKnife/96f2fd682acfe048b324db7937e43636d5747703/Tool/axmlparserpy/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/Tool/axmlparserpy/__pycache__/axmlprinter.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Fare9/AndroidSwissKnife/96f2fd682acfe048b324db7937e43636d5747703/Tool/axmlparserpy/__pycache__/axmlprinter.cpython-35.pyc
--------------------------------------------------------------------------------
/Tool/axmlparserpy/axmlparser.py:
--------------------------------------------------------------------------------
1 | # This file is part of Androguard.
2 | #
3 | # Copyright (C) 2010, Anthony Desnos
4 | # All rights reserved.
5 | #
6 | # Androguard is free software: you can redistribute it and/or modify
7 | # it under the terms of the GNU Lesser General Public License as published by
8 | # the Free Software Foundation, either version 3 of the License, or
9 | # (at your option) any later version.
10 | #
11 | # Androguard is distributed in the hope that it will be useful,
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 | # GNU Lesser General Public License for more details.
15 | #
16 | # You should have received a copy of the GNU Lesser General Public License
17 | # along with Androguard. If not, see .
18 |
19 | import bytecode
20 |
21 | import stringblock
22 | import typeconstants as tc
23 | from stringblock import StringBlock
24 | from bytecode import SV
25 |
26 | import StringIO
27 | from struct import pack, unpack
28 | from xml.dom import minidom
29 |
30 |
31 | class AXMLParser:
32 |
    def __init__(self, raw_buff):
        """
        Wrap *raw_buff* (a binary AXML document) in a BuffHandle, skip the
        8-byte file header and load the string block used to resolve every
        name/value index later on.
        """
        self.reset()

        self.buff = bytecode.BuffHandle(raw_buff)

        # Skip the two 4-byte words of the file header (chunk type + size).
        self.buff.read(4)
        self.buff.read(4)

        self.sb = StringBlock(self.buff)

        # Namespace / resource-id bookkeeping, presumably filled while
        # parsing chunks in doNext() — TODO confirm (that code is not
        # visible in this view).
        self.m_resourceIDs = []
        self.m_prefixuri = {}
        self.m_uriprefix = {}
        self.m_prefixuriL = []
47 |
    def reset(self):
        """Clear the per-event parser state before reading the next event."""
        self.m_event = -1
        self.m_lineNumber = -1
        self.m_name = -1
        self.m_namespaceUri = -1
        self.m_attributes = []
        self.m_idAttribute = -1
        self.m_classAttribute = -1
        self.m_styleAttribute = -1
57 |
    def next(self):
        """Advance to the next XML event and return its type code."""
        self.doNext()
        return self.m_event
61 |
62 | def doNext(self):
63 | if self.m_event == tc.END_DOCUMENT:
64 | return
65 |
66 | event = self.m_event
67 |
68 | self.reset()
69 |
70 | while 1:
71 | chunkType = -1
72 |
73 | # Fake END_DOCUMENT event.
74 | if event == tc.END_TAG:
75 | pass
76 |
77 | # START_DOCUMENT
78 | if event == tc.START_DOCUMENT:
79 | chunkType = tc.CHUNK_XML_START_TAG
80 | else:
81 | if self.buff.end() == True:
82 | self.m_event = tc.END_DOCUMENT
83 | break
84 | chunkType = SV(' tc.CHUNK_XML_LAST:
99 | raise("ooo")
100 |
101 | # Fake START_DOCUMENT event.
102 | if chunkType == tc.CHUNK_XML_START_TAG and event == -1:
103 | self.m_event = tc.START_DOCUMENT
104 | break
105 |
106 | self.buff.read(4) #/*chunkSize*/
107 | lineNumber = SV('>16) - 1
138 | attributeCount = attributeCount & 0xFFFF
139 | self.m_classAttribute = SV('>16) - 1
141 |
142 | self.m_classAttribute = (self.m_classAttribute & 0xFFFF) - 1
143 |
144 | for i in range(0, attributeCount * tc.ATTRIBUTE_LENGTH):
145 | self.m_attributes.append(SV('>24)
149 |
150 | self.m_event = tc.START_TAG
151 | break
152 |
153 | if chunkType == tc.CHUNK_XML_END_TAG:
154 | self.m_namespaceUri = SV('= len(self.m_attributes):
212 | raise("Invalid attribute index")
213 |
214 | return offset
215 |
    def getAttributeCount(self):
        """
        Return the number of attributes on the current START_TAG, or -1
        when the parser is not positioned on a start tag.
        """
        if self.m_event != tc.START_TAG:
            return -1

        # NOTE(review): true division — on Python 3 this yields a float.
        # The module's imports (StringIO) suggest it targets Python 2,
        # where int / int is integer division.
        return len(self.m_attributes) / tc.ATTRIBUTE_LENGTH
221 |
    def getAttributePrefix(self, index):
        """
        Return the namespace prefix of the attribute at *index*, or ""
        when its namespace URI has no known prefix (getPrefixByUri
        returned -1).
        """
        offset = self.getAttributeOffset(index)
        uri = self.m_attributes[offset + tc.ATTRIBUTE_IX_NAMESPACE_URI]

        prefix = self.getPrefixByUri(uri)
        if prefix == -1:
            return ""

        # The prefix is an index into the string block.
        return self.sb.getRaw(prefix)
231 |
    def getAttributeName(self, index):
        """
        Return the name of the attribute at *index* from the string
        block, or "" when no name index is stored (-1).
        """
        offset = self.getAttributeOffset(index)
        name = self.m_attributes[offset + tc.ATTRIBUTE_IX_NAME]

        if name == -1:
            return ""

        return self.sb.getRaw(name)
240 |
    def getAttributeValueType(self, index):
        """Return the raw value-type word of the attribute at *index*."""
        offset = self.getAttributeOffset(index)
        return self.m_attributes[offset + tc.ATTRIBUTE_IX_VALUE_TYPE]
244 |
    def getAttributeValueData(self, index):
        """Return the raw value-data word of the attribute at *index*."""
        offset = self.getAttributeOffset(index)
        return self.m_attributes[offset + tc.ATTRIBUTE_IX_VALUE_DATA]
248 |
    def getAttributeValue(self, index):
        """
        Return the attribute's value as a string.

        Only TYPE_STRING values are resolved (via the string block); every
        other value type is still a work in progress and yields "".
        """
        offset = self.getAttributeOffset(index)
        valueType = self.m_attributes[offset + tc.ATTRIBUTE_IX_VALUE_TYPE]
        if valueType == tc.TYPE_STRING:
            valueString = self.m_attributes[offset + tc.ATTRIBUTE_IX_VALUE_STRING]
            return self.sb.getRaw(valueString)
        # WIP
        return ""
        #int valueData=m_attributes[offset+ATTRIBUTE_IX_VALUE_DATA];
        #return TypedValue.coerceToString(valueType,valueData);
259 |
260 |
--------------------------------------------------------------------------------
/Tool/axmlparserpy/axmlparser.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Fare9/AndroidSwissKnife/96f2fd682acfe048b324db7937e43636d5747703/Tool/axmlparserpy/axmlparser.pyc
--------------------------------------------------------------------------------
/Tool/axmlparserpy/axmlprinter.py:
--------------------------------------------------------------------------------
1 | # This file is part of Androguard.
2 | #
3 | # Copyright (C) 2010, Anthony Desnos
4 | # All rights reserved.
5 | #
6 | # Androguard is free software: you can redistribute it and/or modify
7 | # it under the terms of the GNU Lesser General Public License as published by
8 | # the Free Software Foundation, either version 3 of the License, or
9 | # (at your option) any later version.
10 | #
11 | # Androguard is distributed in the hope that it will be useful,
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 | # GNU Lesser General Public License for more details.
15 | #
16 | # You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
18 |
19 | import bytecode
20 |
21 | import typeconstants as tc
22 | from axmlparser import AXMLParser
23 | from bytecode import SV
24 |
25 | import StringIO
26 | from struct import pack, unpack
27 | from xml.dom import minidom
28 | from xml.sax.saxutils import escape
29 |
30 |
class AXMLPrinter:
    """Walk an AXMLParser event stream and rebuild the textual XML document."""

    def __init__(self, raw_buff):
        self.axml = AXMLParser(raw_buff)
        self.xmlns = False

        self.buff = ""

        while 1:
            _type = self.axml.next()

            if _type == tc.START_DOCUMENT:
                # Emit the XML declaration so the output is well-formed for
                # minidom.parseString (see manifestDecoder.extractManifest).
                self.buff += "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"
            elif _type == tc.START_TAG:
                self.buff += "<%s%s\n" % (self.getPrefix(self.axml.getPrefix()), self.axml.getName())

                # FIXME: use namespace
                if self.xmlns == False:
                    self.buff += "xmlns:%s=\"%s\"\n" % (self.axml.getNamespacePrefix(0), self.axml.getNamespaceUri(0))
                    self.xmlns = True

                for i in range(0, self.axml.getAttributeCount()):
                    self.buff += "%s%s=\"%s\"\n" % (self.getPrefix(self.axml.getAttributePrefix(i)), self.axml.getAttributeName(i), self.getAttributeValue(i))

                self.buff += ">\n"

            elif _type == tc.END_TAG:
                # The closing tag needs the "</" marker or the document is
                # not parseable XML.
                self.buff += "</%s%s>\n" % (self.getPrefix(self.axml.getPrefix()), self.axml.getName())

            elif _type == tc.TEXT:
                self.buff += "%s\n" % self.axml.getText()

            elif _type == tc.END_DOCUMENT:
                break

    def getBuff(self):
        """Return the reconstructed document as UTF-8 encoded bytes."""
        return self.buff.encode("utf-8")

    def getPrefix(self, prefix):
        """Return 'prefix:' for a non-empty prefix, else ''."""
        if prefix == None or len(prefix) == 0:
            return ""

        return prefix + ":"

    def getAttributeValue(self, index):
        """Format the value of attribute *index* according to its typed-value type."""
        _type = self.axml.getAttributeValueType(index)
        _data = self.axml.getAttributeValueData(index)

        if _type == tc.TYPE_STRING:
            # Values are emitted inside double quotes, so the quote itself
            # must become the &quot; entity ('"' -> '"' would be a no-op).
            return escape(self.axml.getAttributeValue(index), entities={'"': '&quot;'})

        elif _type == tc.TYPE_ATTRIBUTE:
            return "?%s%08X" % (self.getPackage(_data), _data)

        elif _type == tc.TYPE_REFERENCE:
            return "@%s%08X" % (self.getPackage(_data), _data)

        # WIP
        elif _type == tc.TYPE_FLOAT:
            # Reinterpret the raw 32-bit word as an IEEE float.
            return "%f" % unpack("=f", pack("=L", _data))[0]

        elif _type == tc.TYPE_INT_HEX:
            return "0x%08X" % _data

        elif _type == tc.TYPE_INT_BOOLEAN:
            if _data == 0:
                return "false"
            return "true"

        elif _type == tc.TYPE_DIMENSION:
            return "%f%s" % (self.complexToFloat(_data), tc.DIMENSION_UNITS[_data & tc.COMPLEX_UNIT_MASK])

        elif _type == tc.TYPE_FRACTION:
            return "%f%s" % (self.complexToFloat(_data), tc.FRACTION_UNITS[_data & tc.COMPLEX_UNIT_MASK])

        elif _type >= tc.TYPE_FIRST_COLOR_INT and _type <= tc.TYPE_LAST_COLOR_INT:
            return "#%08X" % _data

        elif _type >= tc.TYPE_FIRST_INT and _type <= tc.TYPE_LAST_INT:
            # NOTE: this range already covers TYPE_INT_DEC (16), so no
            # separate branch for it is needed.
            if _data > 0x7fffffff:
                # Reinterpret as a signed 32-bit integer.
                _data = (0x7fffffff & _data) - 0x80000000
            return "%d" % _data

        # raise exception here?
        return "<0x%X, type 0x%02X>" % (_data, _type)

    def complexToFloat(self, xcomplex):
        """Decode an Android 'complex' encoded value (dimension/fraction mantissa)."""
        return (float)(xcomplex & 0xFFFFFF00) * tc.RADIX_MULTS[(xcomplex >> 4) & 3]

    def getPackage(self, id):
        """Resource ids whose package byte is 0x01 belong to the android framework."""
        if id >> 24 == 1:
            return "android:"
        return ""
127 |
128 |
--------------------------------------------------------------------------------
/Tool/axmlparserpy/axmlprinter.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Fare9/AndroidSwissKnife/96f2fd682acfe048b324db7937e43636d5747703/Tool/axmlparserpy/axmlprinter.pyc
--------------------------------------------------------------------------------
/Tool/axmlparserpy/bytecode.py:
--------------------------------------------------------------------------------
1 | # This file is part of Androguard.
2 | #
3 | # Copyright (C) 2010, Anthony Desnos
4 | # All rights reserved.
5 | #
6 | # Androguard is free software: you can redistribute it and/or modify
7 | # it under the terms of the GNU Lesser General Public License as published by
8 | # the Free Software Foundation, either version 3 of the License, or
9 | # (at your option) any later version.
10 | #
11 | # Androguard is distributed in the hope that it will be useful,
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 | # GNU Lesser General Public License for more details.
15 | #
16 | # You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
18 |
19 | from struct import unpack, pack
20 |
# NOTE: `global` at module level is a no-op; kept as-is for upstream parity.
global PRETTY_SHOW
PRETTY_SHOW = 0
23 |
24 | # Print arg into a correct format
25 | def _Print(name, arg):
26 | buff = name + " "
27 |
28 | if type(arg).__name__ == 'int':
29 | buff += "0x%x" % arg
30 | elif type(arg).__name__ == 'long':
31 | buff += "0x%x" % arg
32 | elif type(arg).__name__ == 'str':
33 | buff += "%s" % arg
34 | elif isinstance(arg, SV):
35 | buff += "0x%x" % arg.get_value()
36 | elif isinstance(arg, SVs):
37 | buff += arg.get_value().__str__()
38 |
39 | print buff
40 |
class SV:
    """Wrap a single value unpacked from a binary buffer.

    The struct format string is kept so the (possibly modified) value can be
    re-serialized with get_value_buff().
    """

    def __init__(self, size, buff):
        self.__fmt = size
        self.__val = unpack(size, buff)[0]

    def _get(self):
        # Re-pack the current value using the original format.
        return pack(self.__fmt, self.__val)

    def __str__(self):
        return "0x%x" % self.__val

    def __int__(self):
        return self.__val

    def get_value_buff(self):
        return self._get()

    def get_value(self):
        return self.__val

    def set_value(self, attr):
        self.__val = attr
64 |
class SVs:
    """Wrap a named-tuple structure unpacked from a binary buffer.

    *ntuple* is a namedtuple class describing the fields; the struct format
    string is kept so the structure can be re-serialized after updates.
    """

    def __init__(self, size, ntuple, buff):
        self.__fmt = size
        self.__val = ntuple._make(unpack(size, buff))

    def _get(self):
        # Collect the field values in declaration order and re-pack them.
        values = [getattr(self.__val, field) for field in self.__val._fields]
        return pack(self.__fmt, *values)

    def _export(self):
        return list(self.__val._fields)

    def get_value_buff(self):
        return self._get()

    def get_value(self):
        return self.__val

    def set_value(self, attr):
        # attr maps field names to new values; unnamed fields keep theirs.
        self.__val = self.__val._replace(**attr)

    def __str__(self):
        return self.__val.__str__()
92 |
93 | def object_to_str(obj):
94 | if isinstance(obj, str):
95 | return obj
96 | elif isinstance(obj, int):
97 | return pack("", "")
209 | i = i.replace("$", "_")
210 |
211 | return i
212 |
def FormatDescriptorToPython(input):
    """Turn a JVM/Dalvik descriptor into a valid python identifier fragment.

    '/' becomes '_'; the descriptor punctuation ';[() ' and '$' is dropped.
    """
    result = input.replace("/", "_")
    for ch in (";", "[", "(", ")", " ", "$"):
        result = result.replace(ch, "")

    return result
223 |
# class/method/field export
def ExportVMToPython(vm):
    """
    Export classes/methods/fields' names in the python namespace

    @param vm: a VM object (DalvikVMFormat, JVMFormat)
    """
    for _class in vm.get_classes():
        # Expose the class itself as CLASS_<name> on the vm object.
        setattr(vm, "CLASS_" + FormatClassToPython(_class.get_name()), _class)

        # Group methods by name; overloads get the descriptor appended so
        # each attribute name stays unique.
        methods_by_name = {}
        for method in _class.get_methods():
            methods_by_name.setdefault(method.get_name(), []).append(method)

        for overloads in methods_by_name.values():
            if len(overloads) == 1:
                method = overloads[0]
                setattr(_class, "METHOD_" + FormatNameToPython(method.get_name()), method)
            else:
                for method in overloads:
                    attr = "METHOD_" + FormatNameToPython(method.get_name()) + "_" + FormatDescriptorToPython(method.get_descriptor())
                    setattr(_class, attr, method)

        # Same scheme for fields.
        fields_by_name = {}
        for field in _class.get_fields():
            fields_by_name.setdefault(field.get_name(), []).append(field)

        for overloads in fields_by_name.values():
            if len(overloads) == 1:
                field = overloads[0]
                setattr(_class, "FIELD_" + FormatNameToPython(field.get_name()), field)
            else:
                for field in overloads:
                    attr = "FIELD_" + FormatNameToPython(field.get_name()) + "_" + FormatDescriptorToPython(field.get_descriptor())
                    setattr(_class, attr, field)
269 |
class XREF:
    """Empty attribute container used to hang cross-reference links on methods/fields."""
    pass
272 |
def ExportXREFToPython(vm, gvm):
    """
    Attach method cross-references (XREFfrom / XREFto) in the python namespace.

    @param vm: a VM object (DalvikVMFormat, JVMFormat)
    @param gvm: call-graph object exposing `nodes`, `nodes_id` and a graph `G`
    """
    for _class in vm.get_classes():
        for method in _class.get_methods():
            method.XREFfrom = XREF()
            method.XREFto = XREF()

            # Graph nodes are keyed by "class name descriptor".
            key = "%s %s %s" % (method.get_class_name(), method.get_name(), method.get_descriptor())

            if key in gvm.nodes:
                # Incoming edges: methods that call this one.
                for i in gvm.G.predecessors(gvm.nodes[ key ].id):
                    xref = gvm.nodes_id[ i ]
                    xref_meth = vm.get_method_descriptor(xref.class_name, xref.method_name, xref.descriptor)
                    if xref_meth != None:
                        name = FormatClassToPython(xref_meth.get_class_name()) + "__" + FormatNameToPython(xref_meth.get_name()) + "__" + FormatDescriptorToPython(xref_meth.get_descriptor())
                        setattr(method.XREFfrom, name, xref_meth)

                # Outgoing edges: methods this one calls.
                for i in gvm.G.successors(gvm.nodes[ key ].id):
                    xref = gvm.nodes_id[ i ]
                    xref_meth = vm.get_method_descriptor(xref.class_name, xref.method_name, xref.descriptor)
                    if xref_meth != None:
                        name = FormatClassToPython(xref_meth.get_class_name()) + "__" + FormatNameToPython(xref_meth.get_name()) + "__" + FormatDescriptorToPython(xref_meth.get_descriptor())
                        setattr(method.XREFto, name, xref_meth)
295 |
def ExportDREFToPython(vm, vmx):
    """
    Attach data (field read/write) cross-references in the python namespace.

    Every field gets DREFr/DREFw XREF containers whose attributes point at
    the methods that read ('R' access flag) or write the field.

    @param vm: a VM object (DalvikVMFormat, JVMFormat)
    @param vmx: analysis object exposing `tainted_variables`
    """
    for _class in vm.get_classes():
        for field in _class.get_fields():
            field.DREFr = XREF()
            field.DREFw = XREF()

            paths = vmx.tainted_variables.get_field(field.get_class_name(), field.get_name(), field.get_descriptor())
            if paths != None:
                for path in paths.get_paths():
                    # The read and write branches were verbatim duplicates;
                    # only the target container differs.
                    target = field.DREFr if path.get_access_flag() == 'R' else field.DREFw

                    accessor = path.get_method()
                    dref_meth = vm.get_method_descriptor(accessor.get_class_name(), accessor.get_name(), accessor.get_descriptor())
                    name = FormatClassToPython(dref_meth.get_class_name()) + "__" + FormatNameToPython(dref_meth.get_name()) + "__" + FormatDescriptorToPython(dref_meth.get_descriptor())
                    setattr(target, name, dref_meth)
322 |
--------------------------------------------------------------------------------
/Tool/axmlparserpy/bytecode.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Fare9/AndroidSwissKnife/96f2fd682acfe048b324db7937e43636d5747703/Tool/axmlparserpy/bytecode.pyc
--------------------------------------------------------------------------------
/Tool/axmlparserpy/stringblock.py:
--------------------------------------------------------------------------------
1 | # This file is part of Androguard.
2 | #
3 | # Copyright (C) 2010, Anthony Desnos
4 | # All rights reserved.
5 | #
6 | # Androguard is free software: you can redistribute it and/or modify
7 | # it under the terms of the GNU Lesser General Public License as published by
8 | # the Free Software Foundation, either version 3 of the License, or
9 | # (at your option) any later version.
10 | #
11 | # Androguard is distributed in the hope that it will be useful,
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 | # GNU Lesser General Public License for more details.
15 | #
16 | # You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
18 |
19 | import bytecode
20 |
21 | from bytecode import SV
22 |
23 | import StringIO
24 | from struct import pack, unpack
25 | from xml.dom import minidom
26 |
27 | class StringBlock:
28 | """
29 | axml format translated from:
30 | http://code.google.com/p/android4me/source/browse/src/android/content/res/AXmlResourceParser.java
31 | """
32 | def __init__(self, buff):
33 | buff.read(4)
34 |
35 | self.chunkSize = SV('= len(self.m_stringOffsets):
79 | return None
80 |
81 | offset = self.m_stringOffsets[ idx ].get_value()
82 | length = self.getShort(self.m_strings, offset)
83 |
84 | data = ""
85 |
86 | while length > 0:
87 | offset += 2
88 | # Unicode character
89 | data += unichr(self.getShort(self.m_strings, offset))
90 |
91 | # FIXME
92 | if data[-1] == "&":
93 | data = data[:-1]
94 |
95 | length -= 1
96 |
97 | return data
98 |
99 | def getShort(self, array, offset):
100 | value = array[offset / 4].get_value()
101 | if ((offset % 4) / 2) == 0:
102 | return value & 0xFFFF
103 | else:
104 | return value >> 16
105 |
106 |
--------------------------------------------------------------------------------
/Tool/axmlparserpy/stringblock.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Fare9/AndroidSwissKnife/96f2fd682acfe048b324db7937e43636d5747703/Tool/axmlparserpy/stringblock.pyc
--------------------------------------------------------------------------------
/Tool/axmlparserpy/typeconstants.py:
--------------------------------------------------------------------------------
1 | # This file is part of Androguard.
2 | #
3 | # Copyright (C) 2010, Anthony Desnos
4 | # All rights reserved.
5 | #
6 | # Androguard is free software: you can redistribute it and/or modify
7 | # it under the terms of the GNU Lesser General Public License as published by
8 | # the Free Software Foundation, either version 3 of the License, or
9 | # (at your option) any later version.
10 | #
11 | # Androguard is distributed in the hope that it will be useful,
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 | # GNU Lesser General Public License for more details.
15 | #
16 | # You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
18 |
# Resource value type codes (mirrors android.util.TypedValue).
TYPE_NULL = 0
TYPE_REFERENCE = 1
TYPE_ATTRIBUTE = 2
TYPE_STRING = 3
TYPE_FLOAT = 4
TYPE_DIMENSION = 5
TYPE_FRACTION = 6
TYPE_FIRST_INT = 16
TYPE_INT_DEC = 16  # was defined twice upstream; single definition kept
TYPE_INT_HEX = 17
TYPE_INT_BOOLEAN = 18
TYPE_FIRST_COLOR_INT = 28
TYPE_INT_COLOR_ARGB8 = 28
TYPE_INT_COLOR_RGB8 = 29
TYPE_INT_COLOR_ARGB4 = 30
TYPE_INT_COLOR_RGB4 = 31
TYPE_LAST_COLOR_INT = 31
TYPE_LAST_INT = 31

# Tables used when decoding 'complex' (dimension/fraction) values.
RADIX_MULTS = [ 0.00390625, 3.051758E-005, 1.192093E-007, 4.656613E-010 ]
DIMENSION_UNITS = [ "px","dip","sp","pt","in","mm","","" ]
FRACTION_UNITS = [ "%","%p","","","","","","" ]

COMPLEX_UNIT_MASK = 15

# Layout of one attribute record inside the parser's attribute array.
ATTRIBUTE_IX_NAMESPACE_URI = 0
ATTRIBUTE_IX_NAME = 1
ATTRIBUTE_IX_VALUE_STRING = 2
ATTRIBUTE_IX_VALUE_TYPE = 3
ATTRIBUTE_IX_VALUE_DATA = 4
ATTRIBUTE_LENGTH = 5

# Chunk identifiers of the binary AXML format.
CHUNK_AXML_FILE = 0x00080003
CHUNK_RESOURCEIDS = 0x00080180
CHUNK_XML_FIRST = 0x00100100
CHUNK_XML_START_NAMESPACE = 0x00100100
CHUNK_XML_END_NAMESPACE = 0x00100101
CHUNK_XML_START_TAG = 0x00100102
CHUNK_XML_END_TAG = 0x00100103
CHUNK_XML_TEXT = 0x00100104
CHUNK_XML_LAST = 0x00100104

# Parser event codes returned by AXMLParser.next().
START_DOCUMENT = 0
END_DOCUMENT = 1
START_TAG = 2
END_TAG = 3
TEXT = 4
67 |
68 |
--------------------------------------------------------------------------------
/Tool/axmlparserpy/typeconstants.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Fare9/AndroidSwissKnife/96f2fd682acfe048b324db7937e43636d5747703/Tool/axmlparserpy/typeconstants.pyc
--------------------------------------------------------------------------------
/Tool/manifestDecoder.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 | from axmlparserpy.axmlprinter import AXMLPrinter
4 | from xml.dom import minidom
5 | from zipfile import ZipFile
6 | import sys
7 |
8 |
def extractManifest(apkFile):
    """Decode the binary AndroidManifest.xml inside *apkFile* and return it as XML text."""
    raw_manifest = ZipFile(apkFile).read("AndroidManifest.xml")
    printer = AXMLPrinter(raw_manifest)
    return minidom.parseString(printer.getBuff()).toxml()
12 |
13 |
14 | if __name__ == "__main__":
15 |
16 | if len(sys.argv) != 2 or not sys.argv[1].lower().endswith(".apk"):
17 | print "Expected APK file, usage:"
18 | print "python " + sys.argv[0] + " /path/to/file.apk"
19 | quit()
20 | outputManifestFile = "/tmp/AndroidManifest.xml.tmp"
21 | manifestFile = open(outputManifestFile, "wb")
22 | manifestFile.write(extractManifest(sys.argv[1]))#.encode('utf-8'))
23 | manifestFile.close()
24 |
25 | #print extractManifest(sys.argv[1])
26 |
27 |
--------------------------------------------------------------------------------
/Tool/monkeyFaren.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
2 |
3 | """
4 | My version of the file monkeyrunner
5 | from DroidBox Dynamic Analyzer
6 |
7 | Original DroidBox Idea:
8 | Patrik Lantz patrik@pjlantz.com and Laurent Delosieres ldelosieres@hispasec.com
9 | The Honeynet Project
10 |
11 | I modified some parts from code and I use another methods for example to parse XML
12 | and I have adapted to my code. My ideas added to Program:
13 |
14 | - Start Burpsuite (I'm not owner from Burpsuite, https://portswigger.net/burp/)
15 | - No need startmenu.sh, gnome-terminal will open emulator in another window
16 | - Use of apktool to unzip apk
17 | - Use of BeautifulSoup to parse XML
18 |
19 | Any suggestion please contact me in farenain9@gmail.com
20 | """
21 |
22 |
23 | import sys
24 |
25 | # This library will give us every function we need for Android
26 | from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice
27 |
28 | import subprocess
29 | import logging
30 | import time
31 |
# This program has been called from androidSwissKnife , we pass some arguments
apkName = sys.argv[1]        # path of the APK to install
package = sys.argv[2]        # application package name
Mainactivity = sys.argv[3]   # main activity class name (possibly package-relative)

# before call this program we started an emulator
androidDevice = None

# we will wait for the emulator and then install apk
i = 1
for r in range(9):
    # Animated "Waiting for emulator..." message: 1-3 dots redrawn in place.
    dots = '.' * i
    sys.stdout.write(" \r")
    sys.stdout.write("Waiting for emulator"+str(dots)+"\r")
    sys.stdout.flush()
    time.sleep(0.5)
    i = i + 1
    if i == 4:
        i = 1
while androidDevice == None:
    try:
        print("Waiting for emulator...")
        # now we wait for emulator 3 seconds of timeout
        androidDevice = MonkeyRunner.waitForConnection(3)
    except:
        # NOTE(review): bare except keeps retrying on any failure until a
        # device answers; there is no upper bound on this loop.
        pass

# since this moment, we will use functions from monkeyrunner
print("[+] Installing the application %s..." % apkName)
androidDevice.installPackage(apkName)

# Now create the name for MainActivity to start for example:
#
# package: .cnt MainActivity: Class Path = .cnt./.cnt.Class
# package: com.cnt MainActivity: Class Path = com.cnt/Class
# package: cnt MainActivity: Class Path = cnt/cnt.Class

if "." in Mainactivity:
    if Mainactivity.startswith('.'):
        runComponent = "%s/%s%s" % (package, package, Mainactivity)
    else:
        runComponent = "%s/%s" % (package, Mainactivity)
else:
    runComponent = "%s/%s.%s" % (package, package, Mainactivity)


print("[+] Running the component %s..." % (runComponent))

# Now start MainActivity, we execute it in child process with Popen, with STDOUT send to a PIPE
p = subprocess.Popen(["adb", "shell", "am", "start", "-n", runComponent], stdout=subprocess.PIPE)
# we use the pipe to get output
output, error = p.communicate()

#Activity not started?
# NOTE(review): under monkeyrunner (Jython 2) communicate() yields str; on
# CPython 3 it would be bytes and this membership test would raise — confirm.
if "Error type" in output:
    print("[-] ERROR starting Main")
    sys.exit(1)
else:
    print("[+] Succesfully exit MonkeyFaren")
    sys.exit(0)
--------------------------------------------------------------------------------
/Tool/parsetab.py:
--------------------------------------------------------------------------------
1 |
2 | # parsetab.py
3 | # This file is automatically generated. Do not edit.
4 | _tabversion = '3.8'
5 |
6 | _lr_method = 'LALR'
7 |
8 | _lr_signature = 'B95A62A4372D160AB2E84968CA8116F4'
9 |
10 | _lr_action_items = {'ACTIONCALL':([1,],[3,]),'INTENTS':([0,],[1,]),'SET_DATA':([4,],[5,]),'INIT_INTENT':([3,],[4,]),'$end':([2,5,],[0,-1,]),}
11 |
12 | _lr_action = {}
13 | for _k, _v in _lr_action_items.items():
14 | for _x,_y in zip(_v[0],_v[1]):
15 | if not _x in _lr_action: _lr_action[_x] = {}
16 | _lr_action[_x][_k] = _y
17 | del _lr_action_items
18 |
19 | _lr_goto_items = {'create_call':([0,],[2,]),}
20 |
21 | _lr_goto = {}
22 | for _k, _v in _lr_goto_items.items():
23 | for _x, _y in zip(_v[0], _v[1]):
24 | if not _x in _lr_goto: _lr_goto[_x] = {}
25 | _lr_goto[_x][_k] = _y
26 | del _lr_goto_items
27 | _lr_productions = [
28 | ("S' -> create_call","S'",1,None,None,None),
29 | ('create_call -> INTENTS ACTIONCALL INIT_INTENT SET_DATA','create_call',4,'p_Create_CALL','Analyzer.py',13),
30 | ]
31 |
--------------------------------------------------------------------------------
/Tool/ply/__init__.py:
--------------------------------------------------------------------------------
# PLY package
# Author: David Beazley (dave@dabeaz.com)

# Package version string for this vendored copy of PLY.
__version__ = '3.9'
# Public submodules re-exported by `from ply import *`.
__all__ = ['lex','yacc']
6 |
--------------------------------------------------------------------------------
/Tool/ply/cpp.py:
--------------------------------------------------------------------------------
1 | # -----------------------------------------------------------------------------
2 | # cpp.py
3 | #
4 | # Author: David Beazley (http://www.dabeaz.com)
5 | # Copyright (C) 2007
6 | # All rights reserved
7 | #
8 | # This module implements an ANSI-C style lexical preprocessor for PLY.
9 | # -----------------------------------------------------------------------------
10 | from __future__ import generators
11 |
12 | import sys
13 |
# Some Python 3 compatibility shims
if sys.version_info.major < 3:
    STRING_TYPES = (str, unicode)
else:
    STRING_TYPES = str
    # Python 3 dropped xrange; alias it so the module body can keep using it.
    xrange = range
20 |
21 | # -----------------------------------------------------------------------------
22 | # Default preprocessor lexer definitions. These tokens are enough to get
23 | # a basic preprocessor working. Other modules may import these if they want
24 | # -----------------------------------------------------------------------------
25 |
# Token names exported to the PLY lexer.
tokens = (
   'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND'
)

# Single-character punctuation passed through as literal tokens.
literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""

# Whitespace
def t_CPP_WS(t):
    r'\s+'
    # Track newlines so lexer line numbers stay accurate across whitespace.
    t.lexer.lineno += t.value.count("\n")
    return t

t_CPP_POUND = r'\#'
t_CPP_DPOUND = r'\#\#'

# Identifier
t_CPP_ID = r'[A-Za-z_][\w_]*'

# Integer literal
def CPP_INTEGER(t):
    r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)'
    return t

# Alias so the lexer also finds the rule under the canonical t_ name.
t_CPP_INTEGER = CPP_INTEGER

# Floating literal
t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'

# String literal
def t_CPP_STRING(t):
    r'\"([^\\\n]|(\\(.|\n)))*?\"'
    t.lexer.lineno += t.value.count("\n")
    return t

# Character constant 'c' or L'c'
def t_CPP_CHAR(t):
    r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
    t.lexer.lineno += t.value.count("\n")
    return t

# Comment
def t_CPP_COMMENT1(t):
    r'(/\*(.|\n)*?\*/)'
    ncr = t.value.count("\n")
    t.lexer.lineno += ncr
    # replace with one space or a number of '\n'
    t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' '
    return t
74 |
# Line comment
def t_CPP_COMMENT2(t):
    r'(//.*?(\n|$))'
    # replace with '\n': the comment is whitespace, but its terminating
    # newline must survive for downstream line-based grouping.
    t.type = 'CPP_WS'; t.value = '\n'
    # Bug fix: without this return the token was silently dropped,
    # swallowing the newline at the end of every // comment line.
    return t
80 |
def t_error(t):
    # Pass unknown characters through as single-character tokens instead of
    # aborting: the preprocessor treats them as opaque literals.
    t.type = t.value[0]
    t.value = t.value[0]
    t.lexer.skip(1)
    return t
86 |
87 | import re
88 | import copy
89 | import time
90 | import os.path
91 |
92 | # -----------------------------------------------------------------------------
93 | # trigraph()
94 | #
95 | # Given an input string, this function replaces all trigraph sequences.
96 | # The following mapping is used:
97 | #
98 | # ??= #
99 | # ??/ \
100 | # ??' ^
101 | # ??( [
102 | # ??) ]
103 | # ??! |
104 | # ??< {
105 | # ??> }
106 | # ??- ~
107 | # -----------------------------------------------------------------------------
108 |
_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
_trigraph_rep = {
    '=':'#',
    '/':'\\',
    "'":'^',
    '(':'[',
    ')':']',
    '!':'|',
    '<':'{',
    '>':'}',
    '-':'~'
}

def trigraph(input):
    """Replace every ISO C trigraph sequence (??x) in *input* with its character."""
    def _expand(match):
        # The last character of the match selects the replacement.
        return _trigraph_rep[match.group()[-1]]
    return _trigraph_pat.sub(_expand, input)
124 |
125 | # ------------------------------------------------------------------
126 | # Macro object
127 | #
128 | # This object holds information about preprocessor macros
129 | #
130 | # .name - Macro name (string)
131 | # .value - Macro value (a list of tokens)
132 | # .arglist - List of argument names
133 | # .variadic - Boolean indicating whether or not variadic macro
134 | # .vararg - Name of the variadic parameter
135 | #
136 | # When a macro is created, the macro replacement token sequence is
137 | # pre-scanned and used to create patch lists that are later used
138 | # during macro expansion
139 | # ------------------------------------------------------------------
140 |
class Macro(object):
    """Container describing one preprocessor macro definition.

    name: macro name; value: replacement token list; arglist: parameter
    names (None for object-like macros); variadic: accepts extra arguments.
    """

    def __init__(self, name, value, arglist=None, variadic=False):
        self.name = name
        self.value = value
        self.arglist = arglist
        self.variadic = variadic
        if variadic:
            # The last declared parameter collects the variable arguments.
            self.vararg = arglist[-1]
        # Filled in later with the source the macro was defined in.
        self.source = None
150 |
151 | # ------------------------------------------------------------------
152 | # Preprocessor object
153 | #
154 | # Object representing a preprocessor. Contains macro definitions,
155 | # include directories, and other information
156 | # ------------------------------------------------------------------
157 |
158 | class Preprocessor(object):
    def __init__(self,lexer=None):
        """Create a preprocessor bound to *lexer* (defaults to the module lexer)."""
        if lexer is None:
            # NOTE(review): relies on a module-level `lex` name being
            # available in the surrounding module — confirm import.
            lexer = lex.lexer
        self.lexer = lexer
        self.macros = { }
        self.path = []        # include search path
        self.temp_path = []   # transient search path entries

        # Probe the lexer for selected tokens
        self.lexprobe()

        tm = time.localtime()
        self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
        self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
        self.parser = None
174 |
175 | # -----------------------------------------------------------------------------
176 | # tokenize()
177 | #
178 | # Utility function. Given a string of text, tokenize into a list of tokens
179 | # -----------------------------------------------------------------------------
180 |
181 | def tokenize(self,text):
182 | tokens = []
183 | self.lexer.input(text)
184 | while True:
185 | tok = self.lexer.token()
186 | if not tok: break
187 | tokens.append(tok)
188 | return tokens
189 |
190 | # ---------------------------------------------------------------------
191 | # error()
192 | #
193 | # Report a preprocessor error/warning of some kind
194 | # ----------------------------------------------------------------------
195 |
196 | def error(self,file,line,msg):
197 | print("%s:%d %s" % (file,line,msg))
198 |
199 | # ----------------------------------------------------------------------
200 | # lexprobe()
201 | #
202 | # This method probes the preprocessor lexer object to discover
203 | # the token types of symbols that are important to the preprocessor.
204 | # If this works right, the preprocessor will simply "work"
205 | # with any suitable lexer regardless of how tokens have been named.
206 | # ----------------------------------------------------------------------
207 |
    def lexprobe(self):
        """Probe the lexer to learn the token type names it assigns to
        identifiers, integers, strings, spaces and newlines, so the
        preprocessor works with any suitably configured lexer."""

        # Determine the token type for identifiers
        self.lexer.input("identifier")
        tok = self.lexer.token()
        if not tok or tok.value != "identifier":
            print("Couldn't determine identifier type")
        else:
            self.t_ID = tok.type

        # Determine the token type for integers
        self.lexer.input("12345")
        tok = self.lexer.token()
        if not tok or int(tok.value) != 12345:
            print("Couldn't determine integer type")
        else:
            self.t_INTEGER = tok.type
            # Remember the concrete value type so synthesized integers match.
            self.t_INTEGER_TYPE = type(tok.value)

        # Determine the token type for strings enclosed in double quotes
        self.lexer.input("\"filename\"")
        tok = self.lexer.token()
        if not tok or tok.value != "\"filename\"":
            print("Couldn't determine string type")
        else:
            self.t_STRING = tok.type

        # Determine the token type for whitespace--if any
        self.lexer.input(" ")
        tok = self.lexer.token()
        if not tok or tok.value != " ":
            self.t_SPACE = None
        else:
            self.t_SPACE = tok.type

        # Determine the token type for newlines
        self.lexer.input("\n")
        tok = self.lexer.token()
        if not tok or tok.value != "\n":
            self.t_NEWLINE = None
            print("Couldn't determine token for newlines")
        else:
            self.t_NEWLINE = tok.type

        # Combined whitespace set used by tokenstrip()/group_lines().
        self.t_WS = (self.t_SPACE, self.t_NEWLINE)

        # Check for other characters used by the preprocessor
        chars = [ '<','>','#','##','\\','(',')',',','.']
        for c in chars:
            self.lexer.input(c)
            tok = self.lexer.token()
            if not tok or tok.value != c:
                print("Unable to lex '%s' required for preprocessor" % c)
261 |
262 | # ----------------------------------------------------------------------
263 | # add_path()
264 | #
265 | # Adds a search path to the preprocessor.
266 | # ----------------------------------------------------------------------
267 |
268 | def add_path(self,path):
269 | self.path.append(path)
270 |
271 | # ----------------------------------------------------------------------
272 | # group_lines()
273 | #
274 | # Given an input string, this function splits it into lines. Trailing whitespace
275 | # is removed. Any line ending with \ is grouped with the next line. This
276 | # function forms the lowest level of the preprocessor---grouping into text into
277 | # a line-by-line format.
278 | # ----------------------------------------------------------------------
279 |
    def group_lines(self,input):
        """Split *input* into logical lines and yield each as a list of tokens.

        Trailing whitespace is removed, and any line ending with a backslash
        is merged with the following line(s) before tokenizing. Generator:
        yields one token list per (possibly merged) line.
        """
        lex = self.lexer.clone()
        lines = [x.rstrip() for x in input.splitlines()]
        for i in xrange(len(lines)):
            j = i+1
            # Fold continuation lines: drop the trailing backslash and pull
            # the next line's text up; the emptied slots keep line counts.
            while lines[i].endswith('\\') and (j < len(lines)):
                lines[i] = lines[i][:-1]+lines[j]
                lines[j] = ""
                j += 1

        input = "\n".join(lines)
        lex.input(input)
        lex.lineno = 1

        current_line = []
        while True:
            tok = lex.token()
            if not tok:
                break
            current_line.append(tok)
            # A whitespace token containing a newline terminates the line.
            if tok.type in self.t_WS and '\n' in tok.value:
                yield current_line
                current_line = []

        if current_line:
            yield current_line
306 |
307 | # ----------------------------------------------------------------------
308 | # tokenstrip()
309 | #
310 | # Remove leading/trailing whitespace tokens from a token list
311 | # ----------------------------------------------------------------------
312 |
313 | def tokenstrip(self,tokens):
314 | i = 0
315 | while i < len(tokens) and tokens[i].type in self.t_WS:
316 | i += 1
317 | del tokens[:i]
318 | i = len(tokens)-1
319 | while i >= 0 and tokens[i].type in self.t_WS:
320 | i -= 1
321 | del tokens[i+1:]
322 | return tokens
323 |
324 |
325 | # ----------------------------------------------------------------------
326 | # collect_args()
327 | #
328 | # Collects comma separated arguments from a list of tokens. The arguments
329 | # must be enclosed in parenthesis. Returns a tuple (tokencount,args,positions)
330 | # where tokencount is the number of tokens consumed, args is a list of arguments,
331 | # and positions is a list of integers containing the starting index of each
332 | # argument. Each argument is represented by a list of tokens.
333 | #
334 | # When collecting arguments, leading and trailing whitespace is removed
335 | # from each argument.
336 | #
337 | # This function properly handles nested parenthesis and commas---these do not
338 | # define new arguments.
339 | # ----------------------------------------------------------------------
340 |
341 | def collect_args(self,tokenlist):
342 | args = []
343 | positions = []
344 | current_arg = []
345 | nesting = 1
346 | tokenlen = len(tokenlist)
347 |
348 | # Search for the opening '('.
349 | i = 0
350 | while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
351 | i += 1
352 |
353 | if (i < tokenlen) and (tokenlist[i].value == '('):
354 | positions.append(i+1)
355 | else:
356 | self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
357 | return 0, [], []
358 |
359 | i += 1
360 |
361 | while i < tokenlen:
362 | t = tokenlist[i]
363 | if t.value == '(':
364 | current_arg.append(t)
365 | nesting += 1
366 | elif t.value == ')':
367 | nesting -= 1
368 | if nesting == 0:
369 | if current_arg:
370 | args.append(self.tokenstrip(current_arg))
371 | positions.append(i)
372 | return i+1,args,positions
373 | current_arg.append(t)
374 | elif t.value == ',' and nesting == 1:
375 | args.append(self.tokenstrip(current_arg))
376 | positions.append(i+1)
377 | current_arg = []
378 | else:
379 | current_arg.append(t)
380 | i += 1
381 |
382 | # Missing end argument
383 | self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
384 | return 0, [],[]
385 |
386 | # ----------------------------------------------------------------------
387 | # macro_prescan()
388 | #
389 | # Examine the macro value (token sequence) and identify patch points
390 | # This is used to speed up macro expansion later on---we'll know
391 | # right away where to apply patches to the value to form the expansion
392 | # ----------------------------------------------------------------------
393 |
    def macro_prescan(self,macro):
        """Pre-scan a macro's replacement tokens and record patch points.

        Fills three lists on *macro* so that macro_expand_args() can later
        substitute arguments without re-scanning:
          macro.patch           -- ('e'|'c', argnum, i): expand ('e') or
                                   concatenate ('c') argument at position i
          macro.str_patch       -- (argnum, i): '#arg' stringification at i
          macro.var_comma_patch -- indices of a ',' preceding the variadic
                                   argument (dropped when that arg is empty)
        """
        macro.patch = [] # Standard macro arguments
        macro.str_patch = [] # String conversion expansion
        macro.var_comma_patch = [] # Variadic macro comma patch
        i = 0
        while i < len(macro.value):
            if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
                argnum = macro.arglist.index(macro.value[i].value)
                # Conversion of argument to a string ('#arg')
                if i > 0 and macro.value[i-1].value == '#':
                    macro.value[i] = copy.copy(macro.value[i])
                    macro.value[i].type = self.t_STRING
                    # Deleting the '#' shifts everything left one slot, so the
                    # recorded position is i-1; i is NOT advanced (continue),
                    # the same slot is examined again on the next pass.
                    del macro.value[i-1]
                    macro.str_patch.append((argnum,i-1))
                    continue
                # Concatenation ('##arg' -- the '##' is removed here)
                elif (i > 0 and macro.value[i-1].value == '##'):
                    macro.patch.append(('c',argnum,i-1))
                    del macro.value[i-1]
                    continue
                # Concatenation ('arg##' -- the '##' is handled on a later pass)
                elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
                    macro.patch.append(('c',argnum,i))
                    i += 1
                    continue
                # Standard expansion
                else:
                    macro.patch.append(('e',argnum,i))
            elif macro.value[i].value == '##':
                # ', ## __VA_ARGS__': remember the comma position so it can be
                # dropped when the variadic argument turns out to be empty
                if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
                        ((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
                        (macro.value[i+1].value == macro.vararg):
                    macro.var_comma_patch.append(i-1)
            i += 1
        # Patches are applied back-to-front so earlier indices stay valid while
        # replacements change the sequence length.
        macro.patch.sort(key=lambda x: x[2],reverse=True)
428 |
429 | # ----------------------------------------------------------------------
430 | # macro_expand_args()
431 | #
432 | # Given a Macro and list of arguments (each a token list), this method
433 | # returns an expanded version of a macro. The return value is a token sequence
434 | # representing the replacement macro tokens
435 | # ----------------------------------------------------------------------
436 |
    def macro_expand_args(self,macro,args):
        """Return the replacement token sequence for *macro* called with *args*.

        Applies, in order, the patch lists that macro_prescan() computed:
        stringification ('#arg'), variadic comma removal, and ordinary
        expand/concatenate substitutions.  Each argument in *args* is a
        token list.
        """
        # Make a copy of the macro token sequence
        rep = [copy.copy(_x) for _x in macro.value]

        # Make string expansion patches. These do not alter the length of the replacement sequence

        str_expansion = {}
        for argnum, i in macro.str_patch:
            if argnum not in str_expansion:
                # Quote the raw argument text; escape backslashes so the result
                # survives as a string literal
                str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
            rep[i] = copy.copy(rep[i])
            rep[i].value = str_expansion[argnum]

        # Make the variadic macro comma patch. If the variadic macro argument is empty, we get rid
        comma_patch = False
        if macro.variadic and not args[-1]:
            for i in macro.var_comma_patch:
                rep[i] = None
            comma_patch = True

        # Make all other patches. The order of these matters. It is assumed that the patch list
        # has been sorted in reverse order of patch location since replacements will cause the
        # size of the replacement sequence to expand from the patch point.

        expanded = { }
        for ptype, argnum, i in macro.patch:
            # Concatenation. Argument is left unexpanded
            if ptype == 'c':
                rep[i:i+1] = args[argnum]
            # Normal expansion. Argument is macro expanded first
            elif ptype == 'e':
                if argnum not in expanded:
                    expanded[argnum] = self.expand_macros(args[argnum])
                rep[i:i+1] = expanded[argnum]

        # Get rid of removed comma if necessary (the None placeholders above)
        if comma_patch:
            rep = [_i for _i in rep if _i]

        return rep
477 |
478 |
479 | # ----------------------------------------------------------------------
480 | # expand_macros()
481 | #
482 | # Given a list of tokens, this function performs macro expansion.
483 | # The expanded argument is a dictionary that contains macros already
484 | # expanded. This is used to prevent infinite recursion.
485 | # ----------------------------------------------------------------------
486 |
487 | def expand_macros(self,tokens,expanded=None):
488 | if expanded is None:
489 | expanded = {}
490 | i = 0
491 | while i < len(tokens):
492 | t = tokens[i]
493 | if t.type == self.t_ID:
494 | if t.value in self.macros and t.value not in expanded:
495 | # Yes, we found a macro match
496 | expanded[t.value] = True
497 |
498 | m = self.macros[t.value]
499 | if not m.arglist:
500 | # A simple macro
501 | ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
502 | for e in ex:
503 | e.lineno = t.lineno
504 | tokens[i:i+1] = ex
505 | i += len(ex)
506 | else:
507 | # A macro with arguments
508 | j = i + 1
509 | while j < len(tokens) and tokens[j].type in self.t_WS:
510 | j += 1
511 | if tokens[j].value == '(':
512 | tokcount,args,positions = self.collect_args(tokens[j:])
513 | if not m.variadic and len(args) != len(m.arglist):
514 | self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
515 | i = j + tokcount
516 | elif m.variadic and len(args) < len(m.arglist)-1:
517 | if len(m.arglist) > 2:
518 | self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
519 | else:
520 | self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
521 | i = j + tokcount
522 | else:
523 | if m.variadic:
524 | if len(args) == len(m.arglist)-1:
525 | args.append([])
526 | else:
527 | args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
528 | del args[len(m.arglist):]
529 |
530 | # Get macro replacement text
531 | rep = self.macro_expand_args(m,args)
532 | rep = self.expand_macros(rep,expanded)
533 | for r in rep:
534 | r.lineno = t.lineno
535 | tokens[i:j+tokcount] = rep
536 | i += len(rep)
537 | del expanded[t.value]
538 | continue
539 | elif t.value == '__LINE__':
540 | t.type = self.t_INTEGER
541 | t.value = self.t_INTEGER_TYPE(t.lineno)
542 |
543 | i += 1
544 | return tokens
545 |
546 | # ----------------------------------------------------------------------
547 | # evalexpr()
548 | #
549 | # Evaluate an expression token sequence for the purposes of evaluating
550 | # integral expressions.
551 | # ----------------------------------------------------------------------
552 |
553 | def evalexpr(self,tokens):
554 | # tokens = tokenize(line)
555 | # Search for defined macros
556 | i = 0
557 | while i < len(tokens):
558 | if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
559 | j = i + 1
560 | needparen = False
561 | result = "0L"
562 | while j < len(tokens):
563 | if tokens[j].type in self.t_WS:
564 | j += 1
565 | continue
566 | elif tokens[j].type == self.t_ID:
567 | if tokens[j].value in self.macros:
568 | result = "1L"
569 | else:
570 | result = "0L"
571 | if not needparen: break
572 | elif tokens[j].value == '(':
573 | needparen = True
574 | elif tokens[j].value == ')':
575 | break
576 | else:
577 | self.error(self.source,tokens[i].lineno,"Malformed defined()")
578 | j += 1
579 | tokens[i].type = self.t_INTEGER
580 | tokens[i].value = self.t_INTEGER_TYPE(result)
581 | del tokens[i+1:j+1]
582 | i += 1
583 | tokens = self.expand_macros(tokens)
584 | for i,t in enumerate(tokens):
585 | if t.type == self.t_ID:
586 | tokens[i] = copy.copy(t)
587 | tokens[i].type = self.t_INTEGER
588 | tokens[i].value = self.t_INTEGER_TYPE("0L")
589 | elif t.type == self.t_INTEGER:
590 | tokens[i] = copy.copy(t)
591 | # Strip off any trailing suffixes
592 | tokens[i].value = str(tokens[i].value)
593 | while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
594 | tokens[i].value = tokens[i].value[:-1]
595 |
596 | expr = "".join([str(x.value) for x in tokens])
597 | expr = expr.replace("&&"," and ")
598 | expr = expr.replace("||"," or ")
599 | expr = expr.replace("!"," not ")
600 | try:
601 | result = eval(expr)
602 | except Exception:
603 | self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
604 | result = 0
605 | return result
606 |
607 | # ----------------------------------------------------------------------
608 | # parsegen()
609 | #
610 | # Parse an input string/
611 | # ----------------------------------------------------------------------
    def parsegen(self,input,source=None):
        """Preprocess *input* and yield the resulting tokens (a generator).

        This is the main loop: input is split into logical lines, directive
        lines (#define, #include, #if/#ifdef/#ifndef/#elif/#else/#endif,
        #undef) are executed, everything else is macro-expanded and yielded.
        *source* names the file for __FILE__ and error messages.
        """

        # Replace trigraph sequences
        t = trigraph(input)
        lines = self.group_lines(t)

        if not source:
            source = ""

        self.define("__FILE__ \"%s\"" % source)

        self.source = source
        chunk = []           # non-directive tokens waiting to be expanded/yielded
        enable = True        # False while inside a false conditional branch
        iftrigger = False    # True once a branch of the current #if chain has run
        ifstack = []         # saved (enable, iftrigger) of enclosing conditionals

        for x in lines:
            # Find the first non-whitespace token to detect a directive line
            for i,tok in enumerate(x):
                if tok.type not in self.t_WS: break
            if tok.value == '#':
                # Preprocessor directive

                # insert necessary whitespace instead of eaten tokens
                for tok in x:
                    if tok.type in self.t_WS and '\n' in tok.value:
                        chunk.append(tok)

                dirtokens = self.tokenstrip(x[i+1:])
                if dirtokens:
                    name = dirtokens[0].value
                    args = self.tokenstrip(dirtokens[1:])
                else:
                    name = ""
                    args = []

                if name == 'define':
                    if enable:
                        for tok in self.expand_macros(chunk):
                            yield tok
                        chunk = []
                        self.define(args)
                elif name == 'include':
                    if enable:
                        for tok in self.expand_macros(chunk):
                            yield tok
                        chunk = []
                        # __FILE__ is saved/restored around the included file
                        oldfile = self.macros['__FILE__']
                        for tok in self.include(args):
                            yield tok
                        self.macros['__FILE__'] = oldfile
                        self.source = source
                elif name == 'undef':
                    if enable:
                        for tok in self.expand_macros(chunk):
                            yield tok
                        chunk = []
                        self.undef(args)
                elif name == 'ifdef':
                    ifstack.append((enable,iftrigger))
                    if enable:
                        if not args[0].value in self.macros:
                            enable = False
                            iftrigger = False
                        else:
                            iftrigger = True
                elif name == 'ifndef':
                    ifstack.append((enable,iftrigger))
                    if enable:
                        if args[0].value in self.macros:
                            enable = False
                            iftrigger = False
                        else:
                            iftrigger = True
                elif name == 'if':
                    ifstack.append((enable,iftrigger))
                    if enable:
                        result = self.evalexpr(args)
                        if not result:
                            enable = False
                            iftrigger = False
                        else:
                            iftrigger = True
                elif name == 'elif':
                    if ifstack:
                        if ifstack[-1][0]: # We only pay attention if outer "if" allows this
                            if enable: # If already true, we flip enable False
                                enable = False
                            elif not iftrigger: # If False, but not triggered yet, we'll check expression
                                result = self.evalexpr(args)
                                if result:
                                    enable = True
                                    iftrigger = True
                    else:
                        self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")

                elif name == 'else':
                    if ifstack:
                        if ifstack[-1][0]:
                            if enable:
                                enable = False
                            elif not iftrigger:
                                enable = True
                                iftrigger = True
                    else:
                        self.error(self.source,dirtokens[0].lineno,"Misplaced #else")

                elif name == 'endif':
                    if ifstack:
                        enable,iftrigger = ifstack.pop()
                    else:
                        self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
                else:
                    # Unknown preprocessor directive
                    pass

            else:
                # Normal text: accumulate when the current branch is enabled
                if enable:
                    chunk.extend(x)

        # Flush whatever is left after the last line
        for tok in self.expand_macros(chunk):
            yield tok
        chunk = []
736 |
737 | # ----------------------------------------------------------------------
738 | # include()
739 | #
740 | # Implementation of file-inclusion
741 | # ----------------------------------------------------------------------
742 |
743 | def include(self,tokens):
744 | # Try to extract the filename and then process an include file
745 | if not tokens:
746 | return
747 | if tokens:
748 | if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
749 | tokens = self.expand_macros(tokens)
750 |
751 | if tokens[0].value == '<':
752 | # Include <...>
753 | i = 1
754 | while i < len(tokens):
755 | if tokens[i].value == '>':
756 | break
757 | i += 1
758 | else:
759 | print("Malformed #include <...>")
760 | return
761 | filename = "".join([x.value for x in tokens[1:i]])
762 | path = self.path + [""] + self.temp_path
763 | elif tokens[0].type == self.t_STRING:
764 | filename = tokens[0].value[1:-1]
765 | path = self.temp_path + [""] + self.path
766 | else:
767 | print("Malformed #include statement")
768 | return
769 | for p in path:
770 | iname = os.path.join(p,filename)
771 | try:
772 | data = open(iname,"r").read()
773 | dname = os.path.dirname(iname)
774 | if dname:
775 | self.temp_path.insert(0,dname)
776 | for tok in self.parsegen(data,filename):
777 | yield tok
778 | if dname:
779 | del self.temp_path[0]
780 | break
781 | except IOError:
782 | pass
783 | else:
784 | print("Couldn't find '%s'" % filename)
785 |
786 | # ----------------------------------------------------------------------
787 | # define()
788 | #
789 | # Define a new macro
790 | # ----------------------------------------------------------------------
791 |
    def define(self,tokens):
        """Define a new macro (#define directive).

        *tokens* may be a raw definition string (tokenized first) or an
        already-tokenized directive body.  Handles object-like macros,
        function-like macros, and variadic ('...' or 'name...') argument
        lists.  Malformed definitions print a diagnostic and are ignored.
        """
        if isinstance(tokens,STRING_TYPES):
            tokens = self.tokenize(tokens)

        linetok = tokens
        try:
            name = linetok[0]
            if len(linetok) > 1:
                mtype = linetok[1]
            else:
                mtype = None
            if not mtype:
                # Bare name: macro with an empty value
                m = Macro(name.value,[])
                self.macros[name.value] = m
            elif mtype.type in self.t_WS:
                # A normal macro
                m = Macro(name.value,self.tokenstrip(linetok[2:]))
                self.macros[name.value] = m
            elif mtype.value == '(':
                # A macro with arguments
                tokcount, args, positions = self.collect_args(linetok[1:])
                variadic = False
                for a in args:
                    if variadic:
                        print("No more arguments may follow a variadic argument")
                        break
                    astr = "".join([str(_i.value) for _i in a])
                    if astr == "...":
                        # Anonymous variadic argument becomes __VA_ARGS__
                        variadic = True
                        a[0].type = self.t_ID
                        a[0].value = '__VA_ARGS__'
                        # NOTE(review): variadic is assigned True twice in this
                        # branch; redundant but harmless
                        variadic = True
                        del a[1:]
                        continue
                    elif astr[-3:] == "..." and a[0].type == self.t_ID:
                        # Named variadic argument: 'name...'
                        variadic = True
                        del a[1:]
                        # If, for some reason, "." is part of the identifier, strip off the name for the purposes
                        # of macro expansion
                        if a[0].value[-3:] == '...':
                            a[0].value = a[0].value[:-3]
                        continue
                    if len(a) > 1 or a[0].type != self.t_ID:
                        print("Invalid macro argument")
                        break
                else:
                    # for/else: all arguments were valid.  Remove whitespace
                    # adjacent to '##' in the replacement text, then register.
                    mvalue = self.tokenstrip(linetok[1+tokcount:])
                    i = 0
                    while i < len(mvalue):
                        if i+1 < len(mvalue):
                            if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
                                del mvalue[i]
                                continue
                            elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
                                del mvalue[i+1]
                        i += 1
                    m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
                    self.macro_prescan(m)
                    self.macros[name.value] = m
            else:
                print("Bad macro definition")
        except LookupError:
            print("Bad macro definition")
855 |
856 | # ----------------------------------------------------------------------
857 | # undef()
858 | #
859 | # Undefine a macro
860 | # ----------------------------------------------------------------------
861 |
862 | def undef(self,tokens):
863 | id = tokens[0].value
864 | try:
865 | del self.macros[id]
866 | except LookupError:
867 | pass
868 |
869 | # ----------------------------------------------------------------------
870 | # parse()
871 | #
872 | # Parse input text.
873 | # ----------------------------------------------------------------------
874 | def parse(self,input,source=None,ignore={}):
875 | self.ignore = ignore
876 | self.parser = self.parsegen(input,source)
877 |
878 | # ----------------------------------------------------------------------
879 | # token()
880 | #
881 | # Method to return individual tokens
882 | # ----------------------------------------------------------------------
883 | def token(self):
884 | try:
885 | while True:
886 | tok = next(self.parser)
887 | if tok.type not in self.ignore: return tok
888 | except StopIteration:
889 | self.parser = None
890 | return None
891 |
if __name__ == '__main__':
    import ply.lex as lex
    lexer = lex.lex()

    # Demo driver: run the preprocessor over the file named on the command
    # line and print every token it produces.
    import sys
    # BUGFIX: close the input file (was a bare open() with no close)
    with open(sys.argv[1]) as f:
        input = f.read()

    p = Preprocessor(lexer)
    p.parse(input,sys.argv[1])
    while True:
        tok = p.token()
        if not tok: break
        print(p.source, tok)
907 |
908 |
909 |
910 |
911 |
912 |
913 |
914 |
915 |
916 |
917 |
918 |
--------------------------------------------------------------------------------
/Tool/ply/ctokens.py:
--------------------------------------------------------------------------------
1 | # ----------------------------------------------------------------------
2 | # ctokens.py
3 | #
4 | # Token specifications for symbols in ANSI C and C++. This file is
5 | # meant to be used as a library in other tokenizers.
6 | # ----------------------------------------------------------------------
7 |
8 | # Reserved words
9 |
tokens = [
    # Literals (identifier, integer constant, float constant, string constant, char const)
    'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER',

    # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
    'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO',
    'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
    'LOR', 'LAND', 'LNOT',
    'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',

    # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
    'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
    'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',

    # Increment/decrement (++,--)
    'INCREMENT', 'DECREMENT',

    # Structure dereference (->)
    'ARROW',

    # Ternary operator (?)
    'TERNARY',

    # Delimeters ( ) [ ] { } , . ; :
    'LPAREN', 'RPAREN',
    'LBRACKET', 'RBRACKET',
    'LBRACE', 'RBRACE',
    'COMMA', 'PERIOD', 'SEMI', 'COLON',

    # Ellipsis (...)
    'ELLIPSIS',
]

# Operators
t_PLUS             = r'\+'
t_MINUS            = r'-'
t_TIMES            = r'\*'
t_DIVIDE           = r'/'
t_MODULO           = r'%'
t_OR               = r'\|'
t_AND              = r'&'
t_NOT              = r'~'
t_XOR              = r'\^'
t_LSHIFT           = r'<<'
t_RSHIFT           = r'>>'
t_LOR              = r'\|\|'
t_LAND             = r'&&'
t_LNOT             = r'!'
t_LT               = r'<'
t_GT               = r'>'
t_LE               = r'<='
t_GE               = r'>='
t_EQ               = r'=='
t_NE               = r'!='

# Assignment operators

t_EQUALS           = r'='
t_TIMESEQUAL       = r'\*='
t_DIVEQUAL         = r'/='
t_MODEQUAL         = r'%='
t_PLUSEQUAL        = r'\+='
t_MINUSEQUAL       = r'-='
t_LSHIFTEQUAL      = r'<<='
t_RSHIFTEQUAL      = r'>>='
t_ANDEQUAL         = r'&='
t_OREQUAL          = r'\|='
t_XOREQUAL         = r'\^='

# Increment/decrement
t_INCREMENT        = r'\+\+'
t_DECREMENT        = r'--'

# ->
t_ARROW            = r'->'

# ?
t_TERNARY          = r'\?'

# Delimeters
t_LPAREN           = r'\('
t_RPAREN           = r'\)'
t_LBRACKET         = r'\['
t_RBRACKET         = r'\]'
t_LBRACE           = r'\{'
t_RBRACE           = r'\}'
t_COMMA            = r','
t_PERIOD           = r'\.'
t_SEMI             = r';'
t_COLON            = r':'
t_ELLIPSIS         = r'\.\.\.'

# Identifiers
t_ID = r'[A-Za-z_][A-Za-z0-9_]*'

# Integer literal (optional u/l suffixes in either order)
t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'

# Floating literal
# NOTE: the embedded spaces around '|' rely on the pattern being compiled
# with re.VERBOSE (which ply's lex.py does when building the master regex).
t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'

# String literal (non-greedy; backslash escapes allowed, no raw newlines)
t_STRING = r'\"([^\\\n]|(\\.))*?\"'

# Character constant 'c' or L'c'
t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''
116 |
117 | # Comment (C-Style)
def t_COMMENT(t):
    r'/\*(.|\n)*?\*/'
    # The docstring above is the token's regex (PLY convention).  Keep the
    # line counter accurate across multi-line comments before returning the
    # comment token to the caller.
    t.lexer.lineno += t.value.count('\n')
    return t
122 |
123 | # Comment (C++-Style)
def t_CPPCOMMENT(t):
    r'//.*\n'
    # The docstring above is the token's regex (PLY convention).  It requires
    # a trailing newline, so a // comment on the final line of a file that
    # lacks a newline will not match this rule.
    t.lexer.lineno += 1
    return t
128 |
129 |
130 |
131 |
132 |
133 |
134 |
--------------------------------------------------------------------------------
/Tool/ply/lex.py:
--------------------------------------------------------------------------------
1 | # -----------------------------------------------------------------------------
2 | # ply: lex.py
3 | #
4 | # Copyright (C) 2001-2016
5 | # David M. Beazley (Dabeaz LLC)
6 | # All rights reserved.
7 | #
8 | # Redistribution and use in source and binary forms, with or without
9 | # modification, are permitted provided that the following conditions are
10 | # met:
11 | #
12 | # * Redistributions of source code must retain the above copyright notice,
13 | # this list of conditions and the following disclaimer.
14 | # * Redistributions in binary form must reproduce the above copyright notice,
15 | # this list of conditions and the following disclaimer in the documentation
16 | # and/or other materials provided with the distribution.
17 | # * Neither the name of the David Beazley or Dabeaz LLC may be used to
18 | # endorse or promote products derived from this software without
19 | # specific prior written permission.
20 | #
21 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 | # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 | # -----------------------------------------------------------------------------
33 |
__version__ = '3.9'
# Table-file format version: readtab() rejects lextab modules whose
# _tabversion does not match this value.
__tabversion__ = '3.8'

import re
import sys
import types
import copy
import os
import inspect

# This tuple contains known string types (used for isinstance checks on input)
try:
    # Python 2.6
    StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
    # Python 3.0
    StringTypes = (str, bytes)

# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
54 |
55 | # Exception thrown when invalid token encountered and no default error
56 | # handler is defined.
class LexError(Exception):
    """Raised when an invalid token is encountered and no error rule exists.

    The unconsumed remainder of the input is available as ``text``.
    """
    def __init__(self, message, s):
        super(LexError, self).__init__(message)   # sets self.args = (message,)
        self.text = s
61 |
62 |
63 | # Token class. This class is used to represent the tokens produced.
class LexToken(object):
    """A single lexed token.

    The lexer fills in four attributes: type, value, lineno and lexpos.
    """

    def __str__(self):
        return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos)

    # repr mirrors str for readable debugging output
    __repr__ = __str__
70 |
71 |
72 | # This object is a stand-in for a logging object created by the
73 | # logging module.
74 |
class PlyLogger(object):
    """Minimal stand-in for a logging.Logger that writes to a file object."""

    def __init__(self, f):
        self.f = f

    def _emit(self, text):
        # Every level writes exactly one newline-terminated line
        self.f.write(text + '\n')

    def critical(self, msg, *args, **kwargs):
        self._emit(msg % args)

    def warning(self, msg, *args, **kwargs):
        self._emit('WARNING: ' + (msg % args))

    def error(self, msg, *args, **kwargs):
        self._emit('ERROR: ' + (msg % args))

    # info/debug share critical's unprefixed output
    info = critical
    debug = critical
90 |
91 |
92 | # Null logger is used when no output is generated. Does nothing.
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    def __getattribute__(self, name):
        # Any attribute access (critical, warning, error, ...) returns the
        # logger itself, so arbitrary chained access never fails...
        return self

    def __call__(self, *args, **kwargs):
        # ...and calling the result of any such access is a no-op that stays
        # chainable by returning the logger again.
        return self
99 |
100 |
101 | # -----------------------------------------------------------------------------
102 | # === Lexing Engine ===
103 | #
104 | # The following Lexer class implements the lexer runtime. There are only
105 | # a few public methods and attributes:
106 | #
107 | # input() - Store a new string in the lexer
108 | # token() - Get the next token
109 | # clone() - Clone the lexer
110 | #
111 | # lineno - Current line number
112 | # lexpos - Current position in the input string
113 | # -----------------------------------------------------------------------------
114 |
115 | class Lexer:
    def __init__(self):
        """Create an empty lexer; state is populated by lex() or readtab()."""
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re, findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
        self.lexstate = 'INITIAL'     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexstateeoff = {}        # Dictionary of eof functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lexeoff = None           # EOF rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ''           # Ignored characters
        self.lexliterals = ''         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexoptimize = False      # Optimized mode
143 |
    def clone(self, object=None):
        """Return a shallow copy of the lexer.

        If *object* is given, every bound rule function and error handler in
        the copied tables is re-bound (by name) to that object, so the clone
        can drive a different instance of the module/class that defined the
        rules.
        """
        c = copy.copy(self)

        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object. In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.

        if object:
            newtab = {}
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        # Entries without a function (plain string rules) are
                        # carried over unchanged
                        if not f or not f[0]:
                            newfindex.append(f)
                            continue
                        newfindex.append((getattr(object, f[0].__name__), f[1]))
                    newre.append((cre, newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = {}
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object, ef.__name__)
            c.lexmodule = object
        return c
170 |
171 | # ------------------------------------------------------------
172 | # writetab() - Write lexer information to a table file
173 | # ------------------------------------------------------------
    def writetab(self, lextab, outputdir=''):
        """Write the lexer tables to <lextab>.py in *outputdir*.

        The generated module can later be loaded with readtab() so rule
        compilation can be skipped.  Raises IOError when *lextab* is already
        an imported module (to avoid clobbering it).
        """
        if isinstance(lextab, types.ModuleType):
            raise IOError("Won't overwrite existing lextab module")
        basetabmodule = lextab.split('.')[-1]
        filename = os.path.join(outputdir, basetabmodule) + '.py'
        with open(filename, 'w') as tf:
            tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
            tf.write('_tabversion   = %s\n' % repr(__tabversion__))
            tf.write('_lextokens    = set(%s)\n' % repr(tuple(self.lextokens)))
            tf.write('_lexreflags   = %s\n' % repr(self.lexreflags))
            tf.write('_lexliterals  = %s\n' % repr(self.lexliterals))
            tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))

            # Rewrite the lexstatere table, replacing function objects with function names
            tabre = {}
            for statename, lre in self.lexstatere.items():
                titem = []
                for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
                    titem.append((retext, _funcs_to_names(func, renames)))
                tabre[statename] = titem

            tf.write('_lexstatere   = %s\n' % repr(tabre))
            tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))

            # Error handlers are stored by function name (or None)
            taberr = {}
            for statename, ef in self.lexstateerrorf.items():
                taberr[statename] = ef.__name__ if ef else None
            tf.write('_lexstateerrorf = %s\n' % repr(taberr))

            # EOF handlers are stored by function name (or None)
            tabeof = {}
            for statename, ef in self.lexstateeoff.items():
                tabeof[statename] = ef.__name__ if ef else None
            tf.write('_lexstateeoff = %s\n' % repr(tabeof))
207 |
208 | # ------------------------------------------------------------
209 | # readtab() - Read lexer information from a tab file
210 | # ------------------------------------------------------------
211 | def readtab(self, tabfile, fdict):
212 | if isinstance(tabfile, types.ModuleType):
213 | lextab = tabfile
214 | else:
215 | exec('import %s' % tabfile)
216 | lextab = sys.modules[tabfile]
217 |
218 | if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
219 | raise ImportError('Inconsistent PLY version')
220 |
221 | self.lextokens = lextab._lextokens
222 | self.lexreflags = lextab._lexreflags
223 | self.lexliterals = lextab._lexliterals
224 | self.lextokens_all = self.lextokens | set(self.lexliterals)
225 | self.lexstateinfo = lextab._lexstateinfo
226 | self.lexstateignore = lextab._lexstateignore
227 | self.lexstatere = {}
228 | self.lexstateretext = {}
229 | for statename, lre in lextab._lexstatere.items():
230 | titem = []
231 | txtitem = []
232 | for pat, func_name in lre:
233 | titem.append((re.compile(pat, lextab._lexreflags | re.VERBOSE), _names_to_funcs(func_name, fdict)))
234 |
235 | self.lexstatere[statename] = titem
236 | self.lexstateretext[statename] = txtitem
237 |
238 | self.lexstateerrorf = {}
239 | for statename, ef in lextab._lexstateerrorf.items():
240 | self.lexstateerrorf[statename] = fdict[ef]
241 |
242 | self.lexstateeoff = {}
243 | for statename, ef in lextab._lexstateeoff.items():
244 | self.lexstateeoff[statename] = fdict[ef]
245 |
246 | self.begin('INITIAL')
247 |
248 | # ------------------------------------------------------------
249 | # input() - Push a new string into the lexer
250 | # ------------------------------------------------------------
251 | def input(self, s):
252 | # Pull off the first character to see if s looks like a string
253 | c = s[:1]
254 | if not isinstance(c, StringTypes):
255 | raise ValueError('Expected a string')
256 | self.lexdata = s
257 | self.lexpos = 0
258 | self.lexlen = len(s)
259 |
260 | # ------------------------------------------------------------
261 | # begin() - Changes the lexing state
262 | # ------------------------------------------------------------
263 | def begin(self, state):
264 | if state not in self.lexstatere:
265 | raise ValueError('Undefined state')
266 | self.lexre = self.lexstatere[state]
267 | self.lexretext = self.lexstateretext[state]
268 | self.lexignore = self.lexstateignore.get(state, '')
269 | self.lexerrorf = self.lexstateerrorf.get(state, None)
270 | self.lexeoff = self.lexstateeoff.get(state, None)
271 | self.lexstate = state
272 |
273 | # ------------------------------------------------------------
274 | # push_state() - Changes the lexing state and saves old on stack
275 | # ------------------------------------------------------------
276 | def push_state(self, state):
277 | self.lexstatestack.append(self.lexstate)
278 | self.begin(state)
279 |
280 | # ------------------------------------------------------------
281 | # pop_state() - Restores the previous state
282 | # ------------------------------------------------------------
283 | def pop_state(self):
284 | self.begin(self.lexstatestack.pop())
285 |
286 | # ------------------------------------------------------------
287 | # current_state() - Returns the current lexing state
288 | # ------------------------------------------------------------
289 | def current_state(self):
290 | return self.lexstate
291 |
292 | # ------------------------------------------------------------
293 | # skip() - Skip ahead n characters
294 | # ------------------------------------------------------------
295 | def skip(self, n):
296 | self.lexpos += n
297 |
298 | # ------------------------------------------------------------
299 | # opttoken() - Return the next token from the Lexer
300 | #
301 | # Note: This function has been carefully implemented to be as fast
302 | # as possible. Don't make changes unless you really know what
303 | # you are doing
304 | # ------------------------------------------------------------
    def token(self):
        """Return the next LexToken from the input, or None at end of input.

        Scanning order at each position: ignored characters are skipped,
        then each master regex for the current state is tried, then single
        literal characters, then the t_error handler (if any).  At end of
        input the t_eof handler (if any) gets one chance to produce a token.
        Raises LexError on an unmatchable character with no error handler,
        or when an error handler fails to advance the position.
        """
        # Make local copies of frequently referenced attributes
        # (performance: attribute lookups dominate this hot loop)
        lexpos = self.lexpos
        lexlen = self.lexlen
        lexignore = self.lexignore
        lexdata = self.lexdata

        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre, lexindexfunc in self.lexre:
                m = lexre.match(lexdata, lexpos)
                if not m:
                    continue

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos

                # m.lastindex identifies which named group (i.e. which rule)
                # matched; lexindexfunc maps it to (handler, token type)
                i = m.lastindex
                func, tok.type = lexindexfunc[i]

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break

                lexpos = m.end()

                # If token is processed by a function, call it

                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos

                newtok = func(tok)

                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos = self.lexpos          # This is here in case user has updated lexpos.
                    lexignore = self.lexignore    # This is here in case there was a state change
                    break

                # Verify type of the token.  If not in the token map, raise an error
                if not self.lexoptimize:
                    if newtok.type not in self.lextokens_all:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func.__code__.co_filename, func.__code__.co_firstlineno,
                            func.__name__, newtok.type), lexdata[lexpos:])

                return newtok
            # for/else: reached only when NO master regex matched at lexpos
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value    # literal tokens use the character itself as their type
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = 'error'
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok:
                        continue
                    return newtok

                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])

        # End of input: give the t_eof handler (if any) a chance to act
        # (e.g. to switch to more input and return another token).
        if self.lexeoff:
            tok = LexToken()
            tok.type = 'eof'
            tok.value = ''
            tok.lineno = self.lineno
            tok.lexpos = lexpos
            tok.lexer = self
            self.lexpos = lexpos
            newtok = self.lexeoff(tok)
            return newtok

        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError('No input string given with input()')
        return None
413 |
414 | # Iterator interface
    def __iter__(self):
        """Lexer objects iterate over their own token stream."""
        return self
417 |
418 | def next(self):
419 | t = self.token()
420 | if t is None:
421 | raise StopIteration
422 | return t
423 |
424 | __next__ = next
425 |
426 | # -----------------------------------------------------------------------------
427 | # ==== Lex Builder ===
428 | #
429 | # The functions and classes below are used to collect lexing information
430 | # and build a Lexer object from it.
431 | # -----------------------------------------------------------------------------
432 |
433 | # -----------------------------------------------------------------------------
434 | # _get_regex(func)
435 | #
436 | # Returns the regular expression assigned to a function either as a doc string
437 | # or as a .regex attribute attached by the @TOKEN decorator.
438 | # -----------------------------------------------------------------------------
439 | def _get_regex(func):
440 | return getattr(func, 'regex', func.__doc__)
441 |
442 | # -----------------------------------------------------------------------------
443 | # get_caller_module_dict()
444 | #
445 | # This function returns a dictionary containing all of the symbols defined within
446 | # a caller further down the call stack. This is used to get the environment
447 | # associated with the yacc() call if none was provided.
448 | # -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return the symbol table of the frame *levels* up the call stack.

    The result is that frame's globals, overlaid with its locals when the
    two differ (i.e. when the caller is not module-level code).
    """
    frame = sys._getframe(levels)
    symbols = dict(frame.f_globals)
    if frame.f_globals != frame.f_locals:
        symbols.update(frame.f_locals)
    return symbols
455 |
456 | # -----------------------------------------------------------------------------
457 | # _funcs_to_names()
458 | #
459 | # Given a list of regular expression functions, this converts it to a list
460 | # suitable for output to a table file
461 | # -----------------------------------------------------------------------------
462 | def _funcs_to_names(funclist, namelist):
463 | result = []
464 | for f, name in zip(funclist, namelist):
465 | if f and f[0]:
466 | result.append((name, f[1]))
467 | else:
468 | result.append(f)
469 | return result
470 |
471 | # -----------------------------------------------------------------------------
472 | # _names_to_funcs()
473 | #
474 | # Given a list of regular expression function names, this converts it back to
475 | # functions.
476 | # -----------------------------------------------------------------------------
477 | def _names_to_funcs(namelist, fdict):
478 | result = []
479 | for n in namelist:
480 | if n and n[0]:
481 | result.append((fdict[n[0]], n[1]))
482 | else:
483 | result.append(n)
484 | return result
485 |
486 | # -----------------------------------------------------------------------------
487 | # _form_master_re()
488 | #
489 | # This function takes a list of all of the regex components and attempts to
490 | # form the master regular expression. Given limitations in the Python re
491 | # module, it may be necessary to break the master regex into separate expressions.
492 | # -----------------------------------------------------------------------------
493 | def _form_master_re(relist, reflags, ldict, toknames):
494 | if not relist:
495 | return []
496 | regex = '|'.join(relist)
497 | try:
498 | lexre = re.compile(regex, re.VERBOSE | reflags)
499 |
500 | # Build the index to function map for the matching engine
501 | lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
502 | lexindexnames = lexindexfunc[:]
503 |
504 | for f, i in lexre.groupindex.items():
505 | handle = ldict.get(f, None)
506 | if type(handle) in (types.FunctionType, types.MethodType):
507 | lexindexfunc[i] = (handle, toknames[f])
508 | lexindexnames[i] = f
509 | elif handle is not None:
510 | lexindexnames[i] = f
511 | if f.find('ignore_') > 0:
512 | lexindexfunc[i] = (None, None)
513 | else:
514 | lexindexfunc[i] = (None, toknames[f])
515 |
516 | return [(lexre, lexindexfunc)], [regex], [lexindexnames]
517 | except Exception:
518 | m = int(len(relist)/2)
519 | if m == 0:
520 | m = 1
521 | llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
522 | rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
523 | return (llist+rlist), (lre+rre), (lnames+rnames)
524 |
525 | # -----------------------------------------------------------------------------
526 | # def _statetoken(s,names)
527 | #
528 | # Given a declaration name s of the form "t_" and a dictionary whose keys are
529 | # state names, this function returns a tuple (states,tokenname) where states
530 | # is a tuple of state names and tokenname is the name of the token. For example,
531 | # calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
532 | # -----------------------------------------------------------------------------
533 | def _statetoken(s, names):
534 | nonstate = 1
535 | parts = s.split('_')
536 | for i, part in enumerate(parts[1:], 1):
537 | if part not in names and part != 'ANY':
538 | break
539 |
540 | if i > 1:
541 | states = tuple(parts[1:i])
542 | else:
543 | states = ('INITIAL',)
544 |
545 | if 'ANY' in states:
546 | states = tuple(names)
547 |
548 | tokenname = '_'.join(parts[i:])
549 | return (states, tokenname)
550 |
551 |
552 | # -----------------------------------------------------------------------------
553 | # LexerReflect()
554 | #
555 | # This class represents information needed to build a lexer as extracted from a
556 | # user's input file.
557 | # -----------------------------------------------------------------------------
class LexerReflect(object):
    """Collects and validates the lexing specification (tokens, literals,
    states and t_* rules) extracted from the dictionary handed to lex().

    Usage: construct, then get_all() to harvest the specification and
    validate_all() to check it.  Problems are reported through *log* and
    recorded in the boolean ``error`` attribute.
    """
    def __init__(self, ldict, log=None, reflags=0):
        self.ldict = ldict
        self.error_func = None
        self.tokens = []
        self.reflags = reflags
        self.stateinfo = {'INITIAL': 'inclusive'}
        self.modules = set()
        self.error = False
        self.log = PlyLogger(sys.stderr) if log is None else log

    # Get all of the basic information
    def get_all(self):
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()

    # Validate all of the information
    def validate_all(self):
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error

    # Get the tokens map
    def get_tokens(self):
        """Harvest the 'tokens' list from ldict into self.tokens."""
        tokens = self.ldict.get('tokens', None)
        if not tokens:
            self.log.error('No token list is defined')
            self.error = True
            return

        if not isinstance(tokens, (list, tuple)):
            self.log.error('tokens must be a list or tuple')
            self.error = True
            return

        if not tokens:
            self.log.error('tokens is empty')
            self.error = True
            return

        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        """Check token names are valid identifiers; warn on duplicates."""
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error("Bad token name '%s'", n)
                self.error = True
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the literals specifier
    def get_literals(self):
        self.literals = self.ldict.get('literals', '')
        if not self.literals:
            self.literals = ''

    # Validate literals
    def validate_literals(self):
        """Each literal must be a single-character string."""
        try:
            for c in self.literals:
                if not isinstance(c, StringTypes) or len(c) > 1:
                    self.log.error('Invalid literal %s. Must be a single character', repr(c))
                    self.error = True

        except TypeError:
            self.log.error('Invalid literals specification. literals must be a sequence of characters')
            self.error = True

    def get_states(self):
        """Harvest the 'states' declaration into self.stateinfo
        (statename -> 'inclusive'|'exclusive')."""
        self.states = self.ldict.get('states', None)
        # Build statemap
        if self.states:
            if not isinstance(self.states, (tuple, list)):
                self.log.error('states must be defined as a tuple or list')
                self.error = True
            else:
                for s in self.states:
                    if not isinstance(s, tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
                        self.error = True
                        continue
                    name, statetype = s
                    if not isinstance(name, StringTypes):
                        self.log.error('State name %s must be a string', repr(name))
                        self.error = True
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
                        self.error = True
                        continue
                    if name in self.stateinfo:
                        self.log.error("State '%s' already defined", name)
                        self.error = True
                        continue
                    self.stateinfo[name] = statetype

    # Get all of the symbols with a t_ prefix and sort them into various
    # categories (functions, strings, error functions, and ignore characters)

    def get_rules(self):
        """Partition all t_* symbols by state into function rules, string
        rules, ignore strings, error handlers and EOF handlers."""
        tsymbols = [f for f in self.ldict if f[:2] == 't_']

        # Now build up a list of functions and a list of strings
        self.toknames = {}        # Mapping of symbols to token names
        self.funcsym = {}         # Symbols defined as functions
        self.strsym = {}          # Symbols defined as strings
        self.ignore = {}          # Ignore strings by state
        self.errorf = {}          # Error functions by state
        self.eoff = {}            # EOF functions by state

        for s in self.stateinfo:
            self.funcsym[s] = []
            self.strsym[s] = []

        if len(tsymbols) == 0:
            self.log.error('No rules of the form t_rulename are defined')
            self.error = True
            return

        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f, self.stateinfo)
            self.toknames[f] = tokname

            if hasattr(t, '__call__'):
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'eof':
                    for s in states:
                        self.eoff[s] = t
                elif tokname == 'ignore':
                    line = t.__code__.co_firstlineno
                    file = t.__code__.co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
                    self.error = True
                else:
                    for s in states:
                        self.funcsym[s].append((f, t))
            elif isinstance(t, StringTypes):
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                        if '\\' in t:
                            self.log.warning("%s contains a literal backslash '\\'", f)

                elif tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = True
                else:
                    for s in states:
                        self.strsym[s].append((f, t))
            else:
                self.log.error('%s not defined as a function or string', f)
                self.error = True

        # Sort the functions by line number
        for f in self.funcsym.values():
            f.sort(key=lambda x: x[1].__code__.co_firstlineno)

        # Sort the strings by regular expression length
        # (longest first, so e.g. '==' is tried before '=')
        for s in self.strsym.values():
            s.sort(key=lambda x: len(x[1]), reverse=True)

    # Validate all of the t_rules collected
    def validate_rules(self):
        """Check every collected rule: argument counts for function rules,
        compilable non-empty-matching regexes for all rules, and presence
        of at least one rule per state."""
        for state in self.stateinfo:
            # Validate all rules defined by functions

            for fname, f in self.funcsym[state]:
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                tokname = self.toknames[fname]
                # Bound methods take (self, t); plain functions take (t).
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True
                    continue

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True
                    continue

                if not _get_regex(f):
                    self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
                    self.error = True
                    continue

                try:
                    c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), re.VERBOSE | self.reflags)
                    if c.match(''):
                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
                        self.error = True
                except re.error as e:
                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
                    if '#' in _get_regex(f):
                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
                    self.error = True

            # Validate all rules defined by strings
            for name, r in self.strsym[state]:
                tokname = self.toknames[name]
                if tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", name)
                    self.error = True
                    continue

                if tokname not in self.tokens and tokname.find('ignore_') < 0:
                    self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
                    self.error = True
                    continue

                try:
                    c = re.compile('(?P<%s>%s)' % (name, r), re.VERBOSE | self.reflags)
                    if (c.match('')):
                        self.log.error("Regular expression for rule '%s' matches empty string", name)
                        self.error = True
                except re.error as e:
                    self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
                    if '#' in r:
                        self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
                    self.error = True

            if not self.funcsym[state] and not self.strsym[state]:
                self.log.error("No rules defined for state '%s'", state)
                self.error = True

            # Validate the error function
            efunc = self.errorf.get(state, None)
            if efunc:
                f = efunc
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True

        for module in self.modules:
            self.validate_module(module)

    # -----------------------------------------------------------------------------
    # validate_module()
    #
    # This checks to see if there are duplicated t_rulename() functions or strings
    # in the parser input file. This is done using a simple regular expression
    # match on each line in the source code of the given module.
    # -----------------------------------------------------------------------------

    def validate_module(self, module):
        """Report duplicated t_* definitions in *module*'s source text."""
        try:
            lines, linen = inspect.getsourcelines(module)
        except IOError:
            return

        # Match 'def t_xxx(' and 't_xxx =' definition lines respectively.
        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')

        counthash = {}
        linen += 1
        for line in lines:
            m = fre.match(line)
            if not m:
                m = sre.match(line)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    filename = inspect.getsourcefile(module)
                    self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
                    self.error = True
            linen += 1
857 |
858 | # -----------------------------------------------------------------------------
859 | # lex(module)
860 | #
861 | # Build all of the regular expression rules from definitions in the supplied module
862 | # -----------------------------------------------------------------------------
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
        reflags=0, nowarn=False, outputdir=None, debuglog=None, errorlog=None):
    """Build a Lexer from rule definitions and return it.

    The specification (tokens, literals, states, t_* rules) is taken from
    *object*, else *module*, else the caller's own namespace.  With
    optimize=True the tables are loaded from (and, after a rebuild, written
    back to) the *lextab* module instead of being validated each run.

    Side effects: rebinds the module-level ``lexer``, ``token`` and
    ``input`` globals to the newly built lexer.
    """
    # NOTE(review): `nowarn` is accepted but never used in this body.

    if lextab is None:
        lextab = 'lextab'

    global lexer

    ldict = None
    stateinfo = {'INITIAL': 'inclusive'}
    lexobj = Lexer()
    lexobj.lexoptimize = optimize
    global token, input

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the lexer
    if object:
        module = object

    # Get the module dictionary used for the parser
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        ldict = dict(_items)
        # If no __file__ attribute is available, try to obtain it from the __module__ instead
        if '__file__' not in ldict:
            ldict['__file__'] = sys.modules[ldict['__module__']].__file__
    else:
        ldict = get_caller_module_dict(2)

    # Determine if the module is package of a package or not.
    # If so, fix the tabmodule setting so that tables load correctly
    pkg = ldict.get('__package__')
    if pkg and isinstance(lextab, str):
        if '.' not in lextab:
            lextab = pkg + '.' + lextab

    # Collect parser information from the dictionary
    linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
    linfo.get_all()
    if not optimize:
        if linfo.validate_all():
            raise SyntaxError("Can't build lexer")

    # Optimized mode: try to load previously generated tables; fall through
    # to a full rebuild if the lextab module can't be imported.
    if optimize and lextab:
        try:
            lexobj.readtab(lextab, ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj

        except ImportError:
            pass

    # Dump some basic debugging information
    if debug:
        debuglog.info('lex: tokens = %r', linfo.tokens)
        debuglog.info('lex: literals = %r', linfo.literals)
        debuglog.info('lex: states = %r', linfo.stateinfo)

    # Build a dictionary of valid token names
    lexobj.lextokens = set()
    for n in linfo.tokens:
        lexobj.lextokens.add(n)

    # Get literals specification
    # (a list/tuple of literals is joined into one str-or-bytes string of
    # the same type as its first element)
    if isinstance(linfo.literals, (list, tuple)):
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals

    lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)

    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo

    regexs = {}
    # Build the master regular expressions
    for state in stateinfo:
        regex_list = []

        # Add rules defined by functions first
        for fname, f in linfo.funcsym[state]:
            line = f.__code__.co_firstlineno
            file = f.__code__.co_filename
            regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)

        # Now add all of the simple rules
        for name, r in linfo.strsym[state]:
            regex_list.append('(?P<%s>%s)' % (name, r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)

        regexs[state] = regex_list

    # Build the master regular expressions

    if debug:
        debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')

    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i, text in enumerate(re_text):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)

    # For inclusive states, we need to add the regular expressions from the INITIAL state
    for state, stype in stateinfo.items():
        if state != 'INITIAL' and stype == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
            lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])

    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere['INITIAL']
    lexobj.lexretext = lexobj.lexstateretext['INITIAL']
    lexobj.lexreflags = reflags

    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')

    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
    if not lexobj.lexerrorf:
        errorlog.warning('No t_error rule is defined')

    # Set up eof functions
    lexobj.lexstateeoff = linfo.eoff
    lexobj.lexeoff = linfo.eoff.get('INITIAL', None)

    # Check state information for ignore and error rules
    for s, stype in stateinfo.items():
        if stype == 'exclusive':
            if s not in linfo.errorf:
                errorlog.warning("No error rule is defined for exclusive state '%s'", s)
            if s not in linfo.ignore and lexobj.lexignore:
                errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
        elif stype == 'inclusive':
            # Inclusive states inherit INITIAL's error and ignore rules.
            if s not in linfo.errorf:
                linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
            if s not in linfo.ignore:
                linfo.ignore[s] = linfo.ignore.get('INITIAL', '')

    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj

    # If in optimize mode, we write the lextab
    if lextab and optimize:
        if outputdir is None:
            # If no output directory is set, the location of the output files
            # is determined according to the following rules:
            #     - If lextab specifies a package, files go into that package directory
            #     - Otherwise, files go in the same directory as the specifying module
            if isinstance(lextab, types.ModuleType):
                srcfile = lextab.__file__
            else:
                if '.' not in lextab:
                    srcfile = ldict['__file__']
                else:
                    parts = lextab.split('.')
                    pkgname = '.'.join(parts[:-1])
                    exec('import %s' % pkgname)
                    srcfile = getattr(sys.modules[pkgname], '__file__', '')
            outputdir = os.path.dirname(srcfile)
        try:
            lexobj.writetab(lextab, outputdir)
        except IOError as e:
            errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))

    return lexobj
1048 |
1049 | # -----------------------------------------------------------------------------
1050 | # runmain()
1051 | #
1052 | # This runs the lexer as a main program
1053 | # -----------------------------------------------------------------------------
1054 |
def runmain(lexer=None, data=None):
    """Run a lexer as a standalone program for quick testing.

    Tokenizes *data* — or, when data is falsy, the file named by
    sys.argv[1], or standard input — and prints one
    ``(type, value, lineno, lexpos)`` tuple per token.  Uses *lexer* when
    given, otherwise the module-level token()/input() set up by lex().
    """
    if not data:
        try:
            filename = sys.argv[1]
            # Fix: use a context manager so the file handle is closed even
            # if read() raises (the original leaked it in that case).
            with open(filename) as f:
                data = f.read()
        except IndexError:
            sys.stdout.write('Reading from standard input (type EOF to end):\n')
            data = sys.stdin.read()

    if lexer:
        _input = lexer.input
    else:
        _input = input
    _input(data)
    if lexer:
        _token = lexer.token
    else:
        _token = token

    while True:
        tok = _token()
        if not tok:
            break
        sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))
1081 |
1082 | # -----------------------------------------------------------------------------
1083 | # @TOKEN(regex)
1084 | #
1085 | # This decorator function can be used to set the regex expression on a function
1086 | # when its docstring might need to be set in an alternative way
1087 | # -----------------------------------------------------------------------------
1088 |
def TOKEN(r):
    """Decorator that attaches regular expression *r* to a token function.

    *r* may be a regex string, or another rule function whose regex (or
    docstring) is reused via _get_regex().  Used when the rule's docstring
    cannot hold the pattern (e.g. a computed regex).
    """
    def set_regex(f):
        # A callable argument means "borrow that rule's regex".
        f.regex = _get_regex(r) if hasattr(r, '__call__') else r
        return f
    return set_regex

# Alternative spelling of the TOKEN decorator
Token = TOKEN
1100 |
1101 |
--------------------------------------------------------------------------------
/Tool/ply/ygen.py:
--------------------------------------------------------------------------------
1 | # ply: ygen.py
2 | #
3 | # This is a support program that auto-generates different versions of the YACC parsing
4 | # function with different features removed for the purposes of performance.
5 | #
6 | # Users should edit the method LParser.parsedebug() in yacc.py. The source code
7 | # for that method is then used to create the other methods. See the comments in
8 | # yacc.py for further details.
9 |
10 | import os.path
11 | import shutil
12 |
def get_source_range(lines, tag):
    '''
    Return (start, end) indices of the region delimited by the
    '#--! <tag>-start' and '#--! <tag>-end' marker lines, such that
    lines[start:end] is the text strictly between the two markers.
    '''
    numbered = enumerate(lines)
    open_marker = '#--! %s-start' % tag
    close_marker = '#--! %s-end' % tag

    # Advance to the opening marker; the shared iterator means the second
    # scan resumes where the first one stopped.
    for first, text in numbered:
        if text.strip().startswith(open_marker):
            break

    for last, text in numbered:
        if text.strip().endswith(close_marker):
            break

    return (first + 1, last)
27 |
def filter_section(lines, tag):
    '''
    Drop every region delimited by '#--! <tag>' marker lines (markers
    included) and return the remaining lines as a new list.
    '''
    kept = []
    emitting = True
    marker = '#--! %s' % tag
    for text in lines:
        if text.strip().startswith(marker):
            # Marker line: flip in/out of the excluded region, emit nothing.
            emitting = not emitting
            continue
        if emitting:
            kept.append(text)
    return kept
38 |
def main():
    '''
    Regenerate the optimized parser methods inside yacc.py.

    Takes the hand-maintained parsedebug() implementation, strips its
    DEBUG sections to produce parseopt(), strips the TRACKING sections
    from that to produce parseopt_notrack(), and writes both back into
    yacc.py in place (a yacc.py.bak backup is saved first).
    '''
    dirname = os.path.dirname(__file__)
    # Back up yacc.py first -- it is rewritten in place below
    shutil.copy2(os.path.join(dirname, 'yacc.py'), os.path.join(dirname, 'yacc.py.bak'))
    with open(os.path.join(dirname, 'yacc.py'), 'r') as f:
        lines = f.readlines()

    # Locate the three tagged regions as (start, end) line indices
    parse_start, parse_end = get_source_range(lines, 'parsedebug')
    parseopt_start, parseopt_end = get_source_range(lines, 'parseopt')
    parseopt_notrack_start, parseopt_notrack_end = get_source_range(lines, 'parseopt-notrack')

    # Get the original source
    orig_lines = lines[parse_start:parse_end]

    # Filter the DEBUG sections out
    parseopt_lines = filter_section(orig_lines, 'DEBUG')

    # Filter the TRACKING sections out
    parseopt_notrack_lines = filter_section(parseopt_lines, 'TRACKING')

    # Replace the parser source sections with updated versions.
    # NOTE(review): the notrack region is replaced first -- this assumes it
    # sits after the parseopt region in yacc.py so the earlier region's
    # pre-computed indices stay valid; confirm against yacc.py's layout.
    lines[parseopt_notrack_start:parseopt_notrack_end] = parseopt_notrack_lines
    lines[parseopt_start:parseopt_end] = parseopt_lines

    # Normalize trailing whitespace on every line before writing back
    lines = [line.rstrip()+'\n' for line in lines]
    with open(os.path.join(dirname, 'yacc.py'), 'w') as f:
        f.writelines(lines)

    print('Updated yacc.py')

if __name__ == '__main__':
    main()
70 |
71 |
72 |
73 |
74 |
75 |
--------------------------------------------------------------------------------
/Tool/supportClasses/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Fare9/AndroidSwissKnife/96f2fd682acfe048b324db7937e43636d5747703/Tool/supportClasses/__init__.py
--------------------------------------------------------------------------------
/Tool/supportClasses/filters.py:
--------------------------------------------------------------------------------
1 | '''
2 | Variable for filters that can cause some problems
3 | '''
4 |
# Intent-filter actions worth flagging during AndroidManifest analysis,
# mapped to the message printed when the filter shows up in an APK.
#
# Fixes against the original table:
#   * PACKAGE_CHANGED message wrongly said "PACKAGE_RESTARTED" (copy-paste).
#   * 'android.net.wifi.SCAN_RESULTS' was assigned twice (the second copy
#     had a "CAN_RESULTS" typo that silently overwrote the correct text);
#     the correct message is kept once.
#   * 'android.hardware.usb.action.USB_DEVICE_ATTACHED' was assigned twice
#     with the same value; deduplicated.
filterString = {
    # android.provider.Telephony
    'android.provider.Telephony.SMS_RECEIVED': "\t[+] Want to get if you have a new SMS_RECEIVED",
    'android.provider.Telephony.SMS_DELIVER': "\t[+] Have you SMS_DELIVER? The app knows the app knows it too",
    'android.provider.Telephony.SIM_FULL': "\t[+] Trump and this app know that your SIM_FULL",

    # android.intent.action
    'android.intent.action.ANSWER': "\t[+] This APP can handle your incoming calls with ANSWER",
    'android.intent.action.ALL_APPS': "\t[+] Can list ALL_APPS, for example AV engines =)",
    'android.intent.action.BOOT_COMPLETED': "\t[+] When BOOT_COMPLETED this app can start",
    'android.intent.action.PHONE_STATE': "\t[+] This APP check PHONE_STATE, maybe when receive Calls...?",
    'android.intent.action.SCREEN_OFF': "\t[+] This APP knows when SCREEN_OFF",
    'android.intent.action.SCREEN_ON': "\t[+] This APP knows when SCREEN_ON",
    'android.intent.action.USER_PRESENT': "\t[+] USER_PRESENT... Interesting to knows when you are there",
    'android.intent.action.TIME_TICK': "\t[+] Frenetic TIME_TICK,tock,Frenetic tick,tick,tock...",
    'android.intent.action.TIMEZONE_CHANGED': "\t[+] When TIMEZONE_CHANGED you know¿?",
    'android.intent.action.PACKAGE_INSTALL': "\t[+] This will remember app when you PACKAGE_INSTALL",
    'android.intent.action.PACKAGE_ADDED': "\t[+] When PACKAGE_ADDED app can knows it",
    'android.intent.action.PACKAGE_REPLACED': "\t[+] Look if it's antivirus or malware,it knows PACKAGE_REPLACED",
    'android.intent.action.MY_PACKAGE_REPLACED': "\t[+] okay MY_PACKAGE_REPLACED... I don't know what it is",
    'android.intent.action.PACKAGE_REMOVED': "\t[+] PACKAGE_REMOVED why the h... needs to know this?",
    'android.intent.action.PACKAGE_FULLY_REMOVED': "\t[+] PACKAGE_FULLY_REMOVED intent-filter is present",
    # message fixed: used to say "PACKAGE_RESTARTED"
    'android.intent.action.PACKAGE_CHANGED': "\t[+] PACKAGE_CHANGED intent-filter is present",
    'android.intent.action.PACKAGE_RESTARTED': "\t[+] PACKAGE_RESTARTED intent-filter is present",
    'android.intent.action.PACKAGE_DATA_CLEARED': "\t[+] PACKAGE_DATA_CLEARED intent-filter is present",
    'android.intent.action.PACKAGE_FIRST_LAUNCH': "\t[+] It looks that It will know when PACKAGE_FIRST_LAUNCH",
    'android.intent.action.PACKAGE_NEEDS_VERIFICATION': "\t[+] PACKAGE_NEEDS_VERIFICATION intent-filter is present",
    'android.intent.action.PACKAGE_VERIFIED': "\t[+] PACKAGE_VERIFIED intent-filter is present",
    'android.intent.action.UID_REMOVED': "\t[+] UID_REMOVED intent-filter is present",
    'android.intent.action.QUERY_PACKAGE_RESTART': "\t[+] QUERY_PACKAGE_RESTART intent-filter is present",
    'android.intent.action.CONFIGURATION_CHANGED': "\t[+] CONFIGURATION_CHANGED intent-filter is present",
    'android.intent.action.LOCALE_CHANGED': "\t[+] LOCALE_CHANGED intent-filter is present",
    'android.intent.action.BATTERY_CHANGED': "\t[+] This APP knows when BATTERY_CHANGED",
    'android.intent.action.BATTERY_LOW': "\t[+] This APP could acts different when BATTERY_LOW",
    'android.intent.action.BATTERY_OKAY': "\t[+] Interesing, this APP knows when BATTERY_OKAY",
    'android.intent.action.ACTION_POWER_CONNECTED': "\t[+] This APP will know when ACTION_POWER_CONNECTED",
    'android.intent.action.ACTION_POWER_DISCONNECTED': "\t[+] This APP knows when ACTION_POWER_DISCONNECTED",
    'android.intent.action.ACTION_SHUTDOWN': "\t[+] ACTION_SHUTDOWN intent-filter is present",
    'android.intent.action.DEVICE_STORAGE_LOW': "\t[+] APP knows if DEVICE_STORAGE_LOW",
    'android.intent.action.DEVICE_STORAGE_OK': "\t[+] APP knows if DEVICE_STORAGE_OK",
    'android.intent.action.DEVICE_STORAGE_FULL': "\t[+] APP knows if DEVICE_STORAGE_FULL",
    'android.intent.action.DEVICE_STORAGE_NOT_FULL': "\t[+] APP knows if DEVICE_STORAGE_NOT_FULL",
    'android.intent.action.NEW_OUTGOING_CALL': "\t[+] APP knows when you have a NEW_OUTGOING_CALL",
    'android.intent.action.REBOOT': "\t[+] APP knows when you REBOOT",
    'android.intent.action.DOCK_EVENT': "\t[+] DOCK_EVENT intent-filter is present",
    'android.intent.action.MASTER_CLEAR_NOTIFICATION': "\t[+] MASTER_CLEAR_NOTIFICATION intent-filter is present",
    'android.intent.action.USER_ADDED': "\t[+] APP knows when USER_ADDED",
    'android.intent.action.USER_REMOVED': "\t[+] APP knows when USER_REMOVED",
    'android.intent.action.USER_STOPPED': "\t[+] APP knows when USER_STOPPED",
    'android.intent.action.USER_BACKGROUND': "\t[+] APP knows when USER_BACKGROUND",
    'android.intent.action.USER_FOREGROUND': "\t[+] APP knows when USER_FOREGROUND",
    'android.intent.action.USER_SWITCHED': "\t[+] APP knows when USER_SWITCHED",
    'android.intent.action.HEADSET_PLUG': "\t[+] APP knows when HEADSET_PLUG",
    'android.intent.action.ANALOG_AUDIO_DOCK_PLUG': "\t[+] APP knows when ANALOG_AUDIO_DOCK_PLUG",
    'android.intent.action.HDMI_AUDIO_PLUG': "\t[+] APP knows when HDMI_AUDIO_PLUG",
    'android.intent.action.USB_AUDIO_ACCESSORY_PLUG': "\t[+] APP knows when USB_AUDIO_ACCESSORY_PLUG",
    'android.intent.action.USB_AUDIO_DEVICE_PLUG': "\t[+] APP knows when USB_AUDIO_DEVICE_PLUG",
    'android.intent.action.CLEAR_DNS_CACHE': "\t[+] CLEAR_DNS_CACHE intent-filter is present",
    'android.intent.action.PROXY_CHANGE': "\t[+] PROXY_CHANGE intent-filter is present",
    'android.intent.action.DREAMING_STARTED': "\t[+] DREAMING_STARTED intent-filter is present",
    'android.intent.action.DREAMING_STOPPED': "\t[+] DREAMING_STOPPED intent-filter is present",
    'android.intent.action.ANY_DATA_STATE': "\t[+] ANY_DATA_STATE intent-filter is present",

    # android.os
    'android.os.UpdateLock.UPDATE_LOCK_CHANGED': "\t[+] UPDATE_LOCK_CHANGED intent-filter is present",

    # android.server
    'com.android.server.WifiManager.action.START_SCAN': "\t[+] START_SCAN intent-filter is present",
    'com.android.server.WifiManager.action.DELAYED_DRIVER_STOP': "\t[+] DELAYED_DRIVER_STOP intent-filter is present",

    # android.app.action
    'android.app.action.ENTER_CAR_MODE': "\t[+] ENTER_CAR_MODE intent-filter is present",
    'android.app.action.EXIT_CAR_MODE': "\t[+] EXIT_CAR_MODE intent-filter is present",
    'android.app.action.ENTER_DESK_MODE': "\t[+] ENTER_DESK_MODE intent-filter is present",
    'android.app.action.EXIT_DESK_MODE': "\t[+] EXIT_DESK_MODE intent-filter is present",

    # android.net.conn / android.net.wifi
    'android.net.conn.CONNECTIVITY_CHANGE': '\t[+] This APP can knows when CONNECTIVITY_CHANGE',
    'android.net.conn.CONNECTIVITY_CHANGE_IMMEDIATE': '\t[+] This APP can knows when CONNECTIVITY_CHANGE_IMMEDIATE',
    'android.net.conn.DATA_ACTIVITY_CHANGE': '\t[+] This APP can knows when DATA_ACTIVITY_CHANGE',
    'android.net.conn.BACKGROUND_DATA_SETTING_CHANGED': '\t[+] This APP can knows when BACKGROUND_DATA_SETTING_CHANGED',
    'android.net.conn.CAPTIVE_PORTAL_TEST_COMPLETED': '\t[+] This APP can knows when CAPTIVE_PORTAL_TEST_COMPLETED',
    'android.net.wifi.WIFI_STATE_CHANGED': '\t[+] This APP can knows when WIFI_STATE_CHANGED',
    'android.net.wifi.WIFI_AP_STATE_CHANGED': '\t[+] This APP can knows when WIFI_AP_STATE_CHANGED',
    'android.net.wifi.WIFI_SCAN_AVAILABLE': '\t[+] This APP can knows when WIFI_SCAN_AVAILABLE',
    'android.net.wifi.SCAN_RESULTS': '\t[+] SCAN_RESULTS wifi intent-filter is present',
    'android.net.wifi.RSSI_CHANGED': '\t[+] RSSI_CHANGED wifi intent-filter is present',
    'android.net.wifi.STATE_CHANGE': '\t[+] STATE_CHANGE wifi intent-filter is present',
    'android.net.wifi.LINK_CONFIGURATION_CHANGED': '\t[+] LINK_CONFIGURATION_CHANGED wifi intent-filter is present',
    'android.net.wifi.CONFIGURED_NETWORKS_CHANGE': '\t[+] CONFIGURED_NETWORKS_CHANGE wifi intent-filter is present',
    'android.net.wifi.supplicant.CONNECTION_CHANGE': '\t[+] CONNECTION_CHANGE wifi intent-filter is present',
    'android.net.wifi.supplicant.STATE_CHANGE': '\t[+] STATE_CHANGE wifi supplicant intent-filter is present',
    'android.net.wifi.p2p.STATE_CHANGED': '\t[+] STATE_CHANGED wifi p2p intent-filter is present',

    # android.nfc
    'android.nfc.action.LLCP_LINK_STATE_CHANGED': '\t[+] LLCP_LINK_STATE_CHANGED intent-filter is present',
    'com.android.nfc_extras.action.RF_FIELD_ON_DETECTED': '\t[+] RF_FIELD_ON_DETECTED intent-filter is present',
    'com.android.nfc_extras.action.RF_FIELD_OFF_DETECTED': '\t[+] RF_FIELD_OFF_DETECTED intent-filter is present',
    'com.android.nfc_extras.action.AID_SELECTED': '\t[+] AID_SELECTED intent-filter is present',
    'android.nfc.action.TRANSACTION_DETECTED': '\t[+] TRANSACTION_DETECTED intent-filter is present',

    # android.appwidget.action
    'android.appwidget.action.APPWIDGET_UPDATE_OPTIONS': "\t[+] APPWIDGET_UPDATE_OPTIONS intent-filter is present",
    'android.appwidget.action.APPWIDGET_DELETED': "\t[+] APPWIDGET_DELETED intent-filter is present",
    'android.appwidget.action.APPWIDGET_DISABLED': "\t[+] APPWIDGET_DISABLED intent-filter is present",
    'android.appwidget.action.APPWIDGET_ENABLED': "\t[+] APPWIDGET_ENABLED intent-filter is present",

    # android.backup.intent
    'android.backup.intent.RUN': "\t[+] RUN backup intent-filter is present",
    'android.backup.intent.CLEAR': "\t[+] CLEAR backup intent-filter is present",
    'android.backup.intent.INIT': "\t[+] INIT backup intent-filter is present",

    # android.bluetooth
    'android.bluetooth.adapter.action.STATE_CHANGED': "\t[+] STATE_CHANGED bluetooth adapter intent-filter is present",
    'android.bluetooth.adapter.action.SCAN_MODE_CHANGED': "\t[+] SCAN_MODE_CHANGED bluetooth adapter intent-filter is present",
    'android.bluetooth.adapter.action.DISCOVERY_STARTED': "\t[+] DISCOVERY_STARTED bluetooth adapter intent-filter is present",
    'android.bluetooth.adapter.action.DISCOVERY_FINISHED': "\t[+] DISCOVERY_FINISHED bluetooth adapter intent-filter is present",
    'android.bluetooth.adapter.action.LOCAL_NAME_CHANGED': "\t[+] LOCAL_NAME_CHANGED bluetooth adapter intent-filter is present",
    'android.bluetooth.adapter.action.CONNECTION_STATE_CHANGED': "\t[+] CONNECTION_STATE_CHANGED bluetooth adapter intent-filter is present",
    'android.bluetooth.device.action.FOUND': "\t[+] FOUND bluetooth device intent-filter is present",
    'android.bluetooth.device.action.DISAPPEARED': "\t[+] DISAPPEARED bluetooth device intent-filter is present",
    'android.bluetooth.device.action.CLASS_CHANGED': "\t[+] CLASS_CHANGED bluetooth device intent-filter is present",
    'android.bluetooth.device.action.ACL_CONNECTED': "\t[+] ACL_CONNECTED bluetooth device intent-filter is present",
    'android.bluetooth.device.action.ACL_DISCONNECT_REQUESTED': "\t[+] ACL_DISCONNECT_REQUESTED bluetooth device intent-filter is present",
    'android.bluetooth.device.action.ACL_DISCONNECTED': "\t[+] ACL_DISCONNECTED bluetooth device intent-filter is present",
    'android.bluetooth.device.action.NAME_CHANGED': "\t[+] NAME_CHANGED bluetooth device intent-filter is present",
    'android.bluetooth.device.action.BOND_STATE_CHANGED': "\t[+] BOND_STATE_CHANGED bluetooth device intent-filter is present",
    'android.bluetooth.device.action.NAME_FAILED': "\t[+] NAME_FAILED bluetooth device intent-filter is present",
    'android.bluetooth.device.action.PAIRING_REQUEST': "\t[+] PAIRING_REQUEST bluetooth device intent-filter is present",
    'android.bluetooth.device.action.PAIRING_CANCEL': "\t[+] PAIRING_CANCEL bluetooth device intent-filter is present",
    'android.bluetooth.device.action.CONNECTION_ACCESS_REPLY': "\t[+] CONNECTION_ACCESS_REPLY bluetooth device intent-filter is present",
    'android.bluetooth.headset.profile.action.AUDIO_STATE_CHANGED': "\t[+] AUDIO_STATE_CHANGED bluetooth headset intent-filter is present",
    'android.bluetooth.a2dp.profile.action.CONNECTION_STATE_CHANGED': "\t[+] CONNECTION_STATE_CHANGED bluetooth a2dp intent-filter is present",
    'android.bluetooth.input.profile.action.CONNECTION_STATE_CHANGED': "\t[+] CONNECTION_STATE_CHANGED bluetooth input intent-filter is present",
    'android.bluetooth.pan.profile.action.CONNECTION_STATE_CHANGED': "\t[+] CONNECTION_STATE_CHANGED bluetooth pan intent-filter is present",

    # android hardware
    'android.hardware.display.action.WIFI_DISPLAY_STATUS_CHANGED': '\t[+] WIFI_DISPLAY_STATUS_CHANGED hardware display intent-filter is present',
    'android.hardware.usb.action.USB_STATE': '\t[+] USB_STATE hardware usb intent-filter is present',
    'android.hardware.usb.action.USB_ACCESSORY_ATTACHED': '\t[+] USB_ACCESSORY_ATTACHED hardware usb intent-filter is present',
    'android.hardware.usb.action.USB_DEVICE_ATTACHED': '\t[+] USB_DEVICE_ATTACHED hardware usb intent-filter is present',
    'android.hardware.usb.action.USB_DEVICE_DETACHED': '\t[+] USB_DEVICE_DETACHED hardware usb intent-filter is present',
}

#TODO Add this filters
# 'android.net.wifi.p2p.DISCOVERY_STATE_CHANGE','android.net.wifi.p2p.THIS_DEVICE_CHANGED','android.net.wifi.p2p.PEERS_CHANGED','android.net.wifi.p2p.CONNECTION_STATE_CHANGE','android.net.wifi.p2p.PERSISTENT_GROUPS_CHANGED','android.net.conn.TETHER_STATE_CHANGED','android.net.conn.INET_CONDITION_ACTION','android.intent.action.EXTERNAL_APPLICATIONS_AVAILABLE','android.intent.action.EXTERNAL_APPLICATIONS_UNAVAILABLE','android.intent.action.AIRPLANE_MODE','android.intent.action.ADVANCED_SETTINGS','android.intent.action.BUGREPORT_FINISHED','android.intent.action.ACTION_IDLE_MAINTENANCE_START','android.intent.action.ACTION_IDLE_MAINTENANCE_END','android.intent.action.SERVICE_STATE','android.intent.action.RADIO_TECHNOLOGY','android.intent.action.EMERGENCY_CALLBACK_MODE_CHANGED','android.intent.action.SIG_STR','android.intent.action.ANY_DATA_STATE','android.intent.action.DATA_CONNECTION_FAILED','android.intent.action.SIM_STATE_CHANGED','android.intent.action.NETWORK_SET_TIME','android.intent.action.NETWORK_SET_TIMEZONE','android.intent.action.ACTION_SHOW_NOTICE_ECM_BLOCK_OTHERS','android.intent.action.ACTION_MDN_STATE_CHANGED','android.provider.Telephony.SPN_STRINGS_UPDATED','android.provider.Telephony.SIM_FULL','com.android.internal.telephony.data-restart-trysetup','com.android.internal.telephony.data-stall'
160 |
--------------------------------------------------------------------------------
/Tool/supportClasses/koodous.py:
--------------------------------------------------------------------------------
1 | '''
2 | Koodous is a free Antivirus for Android
3 | with contributions from the community.
4 |
5 | It will works as a quick analyzer
6 | '''
7 |
8 | import hashlib
9 | import requests
10 | import sys
11 | import os
12 | import json
13 | import time
14 | import pprint
15 |
16 | ####### RES API TOKEN FROM KOODOUS USER
17 | token = ''
18 |
19 |
class KoodousAnalyzer():
    '''
    Client for the Koodous REST API.

    Given an APK path it can fetch an existing analysis report and,
    optionally, upload the sample and wait for a fresh analysis.
    Requires the module-level `token` to hold a valid API token.
    '''

    def __init__(self, apk, upload):
        # Path to the APK file on disk
        self.apk = apk
        # Last report fetched from Koodous ('' until analyzeApk() succeeds)
        self.jsonOutput = ''
        # SHA256 of the APK: Koodous identifies samples by this hash
        self.hash = self.getSHA256()
        # Whether to upload the sample when Koodous doesn't know it yet
        self.upload = upload
        # Human-readable meaning of the API status codes.
        # NOTE: 401 was listed twice upstream ("Invalid token" and
        # "APK doesn't exist"), silently losing the first; merged here.
        self.status_code = {
            200: "All is done",
            201: "Created",
            415: "It's not an apk",
            412: "Policy exception",
            408: "The url is lapsed",
            409: "Apk already exist in our database",
            401: "Invalid token or APK doesn't exist",
            429: "Api limit reached",
            404: "Doesn't exist",
            405: "waiting finish analysis"
        }

    def _status_message(self, code):
        '''Return the description for an HTTP status code (KeyError-safe).'''
        return self.status_code.get(code, "Unknown status code: %s" % code)

    def getSHA256(self):
        '''
        Return the SHA256 hex digest of the APK file.
        Exits the process if the file cannot be read.
        '''
        try:
            sha256 = hashlib.sha256()
            # 'with' guarantees the handle is closed even on a read error
            # (the old code never closed it explicitly).
            with open(self.apk, 'rb') as f:
                # iter(..., b'') yields fixed-size chunks until EOF
                for data in iter(lambda: f.read(65536), b''):
                    sha256.update(data)
            return sha256.hexdigest()
        except Exception as e:
            print("[-] ERROR getting SHA256 from file: " + str(e))
            sys.exit(-1)

    def analyzeApk(self):
        '''
        Fetch the analysis report for this APK, uploading the sample first
        when unknown and self.upload is set. The report (or '') is left in
        self.jsonOutput.
        '''
        if token == '':
            # No API token configured: Koodous support is disabled
            return
        print("[+] Hash to analyze with koodous: " + self.hash)
        try:
            url_koodous = "https://api.koodous.com/apks/%s/analysis" % self.hash
            r = requests.get(url=url_koodous, headers={'Authorization': 'Token %s' % token})
            if r.status_code == 200:
                print("[+] Everything was okay")
                self.jsonOutput = r.json()
            elif r.status_code == 404:
                print("[+] That APK doesn't exist")
                if self.upload:
                    self.jsonOutput = self.upload_and_analyze()
                    print("Report to URL: ")
                    print(url_koodous)
                else:
                    self.jsonOutput = ''
            else:
                print("[-] There was a problem: " + str(r.text))
                print("[-] Koodous error: " + self._status_message(r.status_code))
                self.jsonOutput = ''
        except Exception as e:
            # Don't reference 'r' here: the request itself may have raised,
            # leaving it unbound (the old handler crashed on that and could
            # also KeyError on unknown status codes).
            print("[-] Error while getting koodous response: " + str(e))

    def upload_and_analyze(self):
        '''
        Request an upload URL, push the APK, trigger the analysis and poll
        until the report is ready.

        Returns the report dict, or None on any failure.
        '''
        if token == '':
            return None

        print("[+] Getting upload url from koodous")
        try:
            url_koodous = "https://api.koodous.com/apks/%s/get_upload_url" % self.hash
            r = requests.get(url=url_koodous, headers={'Authorization': 'Token %s' % token})
            if r.status_code != 200:
                print("[-] Koodous error: " + self._status_message(r.status_code))
                return None
            print("[+] Everything was okay getting url")

            j = r.json()
            print("[+] Uploading apk to: " + str(j['upload_url']))

            # 'with' makes sure the APK handle is closed after the upload
            # (the old code leaked the open file object).
            with open(self.apk, 'rb') as apk_file:
                s = requests.post(url=j['upload_url'], files={'file': apk_file})

            if s.status_code != 201:  # 201 when created the file
                print("[-] Koodous error: " + self._status_message(s.status_code))
                print('[-] Something went wrong uploading file, check your internet connectivity or API key')
                return None
            print('[+] Everything Okay uploading file')

            # now let start analysis
            print('[+] Let\'s start analysis')
            url_koodous = "https://api.koodous.com/apks/%s/analyze" % self.hash
            r = requests.get(url=url_koodous, headers={'Authorization': 'Token %s' % token})
            if r.status_code != 200:
                print("[-] Koodous error: " + self._status_message(r.status_code))
                return None
            print('[+] Analysis started')

            # check if the analysis has finished
            url_koodous = "https://api.koodous.com/apks/%s/analysis" % self.hash
            r = requests.get(url=url_koodous, headers={'Authorization': 'Token %s' % token})

            i = 0
            constant_string = 'Waiting for the report...'
            counter = 0
            # 405 means the analysis is still running: animate a progress
            # string and only re-poll every ~1500 iterations (0.2s each)
            # so the API isn't hammered.
            while r.status_code == 405:
                show_string = constant_string[0:i]
                sys.stdout.write(show_string)
                sys.stdout.flush()
                i += 1
                if i == (len(constant_string) + 1):
                    i = 0

                time.sleep(0.2)
                counter += 1
                sys.stdout.write("\033[K\r")
                sys.stdout.flush()

                if counter == 1500:  # I know maybe is too much, but you can't overwhelm the api
                    r = requests.get(url=url_koodous, headers={'Authorization': 'Token %s' % token})
                    counter = 0

            return r.json()
        except Exception as e:
            print("[-] There was an error in upload_and_analyze: " + str(e))
        return None
174 |
# Quick manual smoke test: analyze a sample APK and pretty-print the report.
if __name__ == "__main__":
    analyzer = KoodousAnalyzer('/tmp/ojete.apk', True)
    analyzer.analyzeApk()
    pprint.pprint(analyzer.jsonOutput, indent=4)
179 |
--------------------------------------------------------------------------------
/Tool/supportClasses/permissions.py:
--------------------------------------------------------------------------------
1 | '''
2 | Variables for permissions, we will use this
3 | file for apktool module
4 | '''
5 |
# Permissions that are common and usually harmless.
normal_things = {
    'ACCESS_NETWORK_STATE': '\t[+] Mmm want to access ACCESS_NETWORK_STATE',
    'ACCESS_WIFI_STATE': '\t[+] Mmm want to access ACCESS_WIFI_STATE',
    'CHANGE_WIFI_STATE': '\t[+] Mmm want to access CHANGE_WIFI_STATE',
}

# Permissions that deserve a closer look during analysis.
strange_things = {
    'CAMERA': '\t[+] Mmm want to access CAMERA, look for NSA spy',
    'hardware.camera': '\t[+] Mmm just for devices with CAMERAAAA',
    'READ_CONTACTS': '\t[+] Mmm want to READ_CONTACTS, take a look',
    'RECORD_AUDIO': '\t[+] Mmm want to RECORD_AUDIO, I hope you must press a button for that',
    'WRITE_SETTINGS': '\t[+] Ohh Want to WRITE_SETTINGS,bad...bad...bad',
    'RECEIVE_BOOT_COMPLETED': '\t[+] This process wants to run at boot completed',
    'ACTION_BOOT_COMPLETED': '\t[+] This process wants to run at boot completed',
    'READ_PHONE_STATE': '\t[+] READ_PHONE_STATE, wanna check phone constants?',
}

# Permissions that are outright dangerous / typical of malware.
problem_things = {
    'SEND_SMS': '\t[+] Ohh want to SEND_SMS',
    'RECEIVE_SMS': '\t[+] Ohh want to RECEIVE_SMS look for receiver in code',
    'READ_SMS': '\t[+] Ohh want to READ_SMS look for receiver in code',
    'WRITE_SMS': '\t[+] Ohh want to WRITE_SMS',
    'WRITE_CONTACTS': '\t[+] Why want to WRITE_CONTACTS ?',
    'CALL_PHONE': '\t[+] Oh really? accept CALL_PHONE',
    'PROCESS_OUTGOING_CALLS': '\t[+] Mother of Edward Snowden, PROCESS_OUTGOING_CALLS O.O',
    'KILL_BACKGROUND_PROCESSES': '\t[+] KILL_BACKGROUND_PROCESSES, even Demi Lovato wouldn\'t accept this app ',
    # message fixed: used to echo a misspelled "MOUNT_UMOUNT_FILESYSTEMS"
    'MOUNT_UNMOUNT_FILESYSTEMS': '\t[+] Wants to MOUNT_UNMOUNT_FILESYSTEMS... That\'s not good',
}
37 |
38 |
--------------------------------------------------------------------------------
/Tool/supportClasses/utilities.py:
--------------------------------------------------------------------------------
1 | '''
2 | Some useful methods, I will explain every method
3 | and where i call it
4 | '''
5 |
def parseObjDump(text, file_):
    '''
    Parse the symbol-table output of objdump (raw bytes), e.g.::

        0000dd75 g    DF .text  00000026 Java_com_..._nativesursumAppCall

    Called from readLibraries in androidSwissKnife.py.

    text  -- raw bytes captured from objdump's stdout.
    file_ -- name of the library the dump belongs to.

    Returns {"File": file_, "Methods": [...]} where each entry holds the
    symbol value, flag letter, human-readable kind, section, size and
    method (symbol) name.

    Fix vs. the old implementation: lines are now decoded with
    bytes.decode(). The old code used str(line) on a bytes object, which
    keeps the "b'...'" repr wrapper, so symbol_value came back with a
    leading "b'" and the method name with a trailing "'" (the
    replace('\\t', ' ') call was only a workaround for that repr).
    '''
    # objdump flag letters -> human-readable descriptions
    kind_names = {
        'l': 'local',
        'g': 'global',
        'u': 'unique global',
        '!': 'both or neither (global/local)',
        'w': 'weak or strong symbol',
        'C': 'Constructor',
        'W': 'Warning',
        'd': 'Debugging symbol',
        'D': 'Dynamic symbol',
        'F': 'Symbol is a Function name',
        'f': 'Symbol is a File name',
        'O': 'Symbol is a Object name',
    }

    output = []
    for raw_line in text.split(b'\n'):
        if len(raw_line) < 1:
            continue
        line = raw_line.decode('utf-8', errors='replace').strip()
        fields = line.split()
        if len(fields) < 6:
            # Header/blank rows don't have the 6 symbol-table columns
            # (the old code raised IndexError on them)
            continue

        dictionary = {}
        dictionary['symbol_value'] = fields[0]
        dictionary['symbols'] = fields[1]
        # Always present now; the old elif chain left the key missing
        # for unrecognized flag letters
        dictionary['kind_symbol'] = kind_names.get(fields[1], 'unknown')
        # fields[2] is the combined flag column (e.g. 'DF') and is skipped,
        # matching the original column selection
        dictionary['section'] = fields[3]
        dictionary['size'] = fields[4]
        dictionary['method'] = fields[5]

        output.append(dictionary)

    return {"File": file_, "Methods": output}
74 |
75 |
76 |
--------------------------------------------------------------------------------
/anemf.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Fare9/AndroidSwissKnife/96f2fd682acfe048b324db7937e43636d5747703/anemf.png
--------------------------------------------------------------------------------
/smalidea-0.03.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Fare9/AndroidSwissKnife/96f2fd682acfe048b324db7937e43636d5747703/smalidea-0.03.zip
--------------------------------------------------------------------------------