├── .gitignore ├── GNUmakefile ├── LICENSE ├── README.md ├── bld.sh ├── build.sh ├── config.sh ├── edit-prefs.sh ├── flash.sh ├── load-config.sh ├── mach ├── moz2d-record.sh ├── profile.sh ├── repo ├── run-adb-remote.sh ├── run-ddd.sh ├── run-emulator.sh ├── run-gdb.sh ├── run-valgrind.sh ├── scripts ├── bootstrap-mac.sh ├── code-drop.sh ├── fastxul.sh ├── generate-orangutan-script.py ├── homebrew │ └── gcc-4.6.rb ├── package-emulator.sh ├── profile-symbolicate.py ├── push-toolchain.sh ├── run-monkey.sh ├── toolchain.sh ├── trace.sh ├── updates.sh └── xpcshell.sh ├── setup.sh ├── test.sh ├── tools ├── fix_b2g_stack.py ├── get_about_memory.py ├── get_gc_cc_log.py ├── include │ ├── __init__.py │ └── device_utils.py ├── mach_b2g_bootstrap.py └── update-tools │ ├── .gitignore │ ├── README.md │ ├── bin │ ├── darwin-x86 │ │ └── adb │ ├── gonk │ │ ├── busybox-armv6l │ │ └── update-binary │ ├── linux-x86 │ │ └── adb │ └── signapk.jar │ ├── build-flash-fota.py │ ├── build-fota-mar.py │ ├── build-fota-zip.py │ ├── build-gecko-mar.py │ ├── build-update-xml.py │ ├── test-update.py │ ├── update_tools.py │ └── wrap-mar.py └── watch-procrank.sh /.gitignore: -------------------------------------------------------------------------------- 1 | .config 2 | .repo/ 3 | .userconfig* 4 | .var.profile 5 | *.swp 6 | *.pyc 7 | *.fix_b2g_stack.cache 8 | Adreno* 9 | Makefile 10 | abi/ 11 | backup-*/ 12 | bionic/ 13 | bootable/ 14 | brcm_usrlib/ 15 | build/ 16 | dalvik/ 17 | development/ 18 | device/ 19 | download-*/ 20 | external/ 21 | frameworks/ 22 | gaia/ 23 | gecko/ 24 | gonk-misc/ 25 | hardware/ 26 | kernel/ 27 | libcore/ 28 | libnativehelper/ 29 | librecovery/ 30 | ndk/ 31 | objdir-gecko*/ 32 | out/ 33 | prebuilt/ 34 | prebuilts/ 35 | rilproxy/ 36 | sdk/ 37 | system/ 38 | vendor/ 39 | b2g_sdk/ 40 | -------------------------------------------------------------------------------- /GNUmakefile: -------------------------------------------------------------------------------- 1 | ifeq 
("$(ANDROID_BUILD_TOP)","") 2 | $(error Must source setup.sh or build/envsetup.sh to use make directly) 3 | else 4 | include Makefile 5 | endif 6 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2012, Mozilla Foundation 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Boot to Gecko (B2G) 2 | 3 | Boot to Gecko aims to create a complete, standalone operating system for the open web. 4 | 5 | You can read more about B2G here: 6 | 7 | http://wiki.mozilla.org/B2G 8 | 9 | https://developer.mozilla.org/en-US/docs/Mozilla/B2G_OS 10 | 11 | Follow us on twitter: @Boot2Gecko 12 | 13 | http://twitter.com/Boot2Gecko 14 | 15 | Join the Mozilla Platform mailing list: 16 | 17 | http://groups.google.com/group/mozilla.dev.platform 18 | 19 | and talk to us on Matrix: 20 | 21 | https://chat.mozilla.org/#/room/#b2g:mozilla.org 22 | 23 | Discuss with Developers: 24 | 25 | Discourse: https://discourse.mozilla-community.org/c/b2g-os-participation 26 | 27 | # Building and running the android-10 emulator 28 | 29 | 1. Fetch the code: `REPO_INIT_FLAGS="--depth=1" ./config.sh emulator-10` 30 | 2. 
Setup your environment to fetch the custom NDK: `export LOCAL_NDK_BASE_URL='ftp://ftp.kaiostech.com/ndk/android-ndk'` 31 | 3. Install Gecko dependencies: `cd gecko && ./mach bootstrap`, choose option 4 (Android Geckoview). 32 | 4. Build: `./build.sh` 33 | 5. Run the emulator: `source build/envsetup.sh && lunch aosp_arm-userdebug && emulator -writable-system -selinux permissive` 34 | 35 | # Re-building your own NDK 36 | 37 | Because it's using a different c++ namespace than the AOSP base, we can't use the prebuilt NDK from Google. If you can't use the one built by KaiOS, here are the steps to build your own: 38 | 1. Download the ndk source: 39 | `repo init -u https://android.googlesource.com/platform/manifest -b ndk-release-r20` 40 | 2. change `__ndk` to `__` in `external/libcxx/include/__config`: 41 | ```diff 42 | -#define _LIBCPP_NAMESPACE _LIBCPP_CONCAT(__ndk,_LIBCPP_ABI_VERSION) 43 | +#define _LIBCPP_NAMESPACE _LIBCPP_CONCAT(__,_LIBCPP_ABI_VERSION) 44 | ``` 45 | 3. Build the ndk: 46 | `python ndk/checkbuild.py --no-build-tests` 47 | -------------------------------------------------------------------------------- /bld.sh: -------------------------------------------------------------------------------- 1 | build.sh -------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # We want to figure out if we need to re-run the firmware 4 | # extraction routine. The first time we run build.sh, we 5 | # store the hash of important files. On subsequent runs, 6 | # we check if the hash is the same as the previous run. 
7 | # If the hashes differ, we use a per-device script to redo 8 | # the firmware extraction 9 | function configure_device() { 10 | hash_file="$OUT/firmware.hash" 11 | 12 | # Make sure that our assumption that device codenames are unique 13 | # across vendors is true 14 | if [ $(ls -d device/*/$DEVICE 2> /dev/null | wc -l) -gt 1 ] ; then 15 | echo $DEVICE is ambiguous \"$(ls -d device/*/$DEVICE 2> /dev/null)\" 16 | return 1 17 | fi 18 | 19 | # Select which blob setup script to use, if any. We currently 20 | # assume that $DEVICE maps to the filesystem location, which is true 21 | # for the devices we support now (oct 2012) that do not require blobs. 22 | # The emulator uses a $DEVICE of 'emulator' but its device/ directory 23 | # uses the 'goldfish' name. 24 | if [ -f device/*/$DEVICE/download-blobs.sh ] ; then 25 | important_files="device/*/$DEVICE/download-blobs.sh" 26 | script="cd device/*/$DEVICE && ./download-blobs.sh" 27 | elif [ -f device/*/$DEVICE/extract-files.sh ] ; then 28 | important_files="device/*/$DEVICE/extract-files.sh" 29 | script="cd device/*/$DEVICE && ./extract-files.sh" 30 | else 31 | important_files= 32 | script= 33 | fi 34 | 35 | # If we have files that are important to look at, we need 36 | # to check if they've changed 37 | if [ -n "$important_files" ] ; then 38 | new_hash=$(cat $important_files | openssl sha1) 39 | if [ -f "$hash_file" ] ; then 40 | old_hash=$(cat "$hash_file") 41 | fi 42 | if [ "$old_hash" != "$new_hash" ] ; then 43 | echo Blob setup script has changed, re-running && 44 | sh -c "$script" && 45 | mkdir -p "$(dirname "$hash_file")" && 46 | echo "$new_hash" > "$hash_file" 47 | fi 48 | else 49 | rm -f $hash_file 50 | fi 51 | 52 | return $? 53 | } 54 | 55 | unset CDPATH 56 | . setup.sh && 57 | if [ -f patches/patch.sh ] ; then 58 | . patches/patch.sh 59 | fi && 60 | configure_device && 61 | time nice -n19 make $MAKE_FLAGS $@ 62 | 63 | ret=$? 64 | echo -ne \\a 65 | if [ $ret -ne 0 ]; then 66 | echo 67 | echo \> Build failed\! 
\< 68 | echo 69 | echo Build with \|./build.sh -j1\| for better messages 70 | echo If all else fails, use \|rm -rf objdir-gecko\| to clobber gecko and \|rm -rf out\| to clobber everything else. 71 | else 72 | case "$1" in 73 | "gecko") 74 | echo Run \|./build.sh snod\| to update the system image. 75 | echo Run \|./flash.sh gecko\| to update gecko. 76 | if echo $DEVICE | grep generic > /dev/null ; then 77 | echo $(tput setaf 1)$(tput bold)You must update the system image for emulator to pick up the updated gecko.$(tput sgr0) 78 | fi 79 | ;; 80 | *) 81 | if echo $DEVICE | grep generic > /dev/null ; then 82 | echo Run \|./run-emulator.sh\| to start the emulator 83 | else 84 | echo Run \|./flash.sh\| to flash all partitions of your device 85 | fi 86 | ;; 87 | esac 88 | exit 0 89 | fi 90 | 91 | exit $ret 92 | -------------------------------------------------------------------------------- /config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | REPO=${REPO:-./repo} 4 | sync_flags="" 5 | 6 | repo_sync() { 7 | rm -rf .repo/manifest* && 8 | $REPO init -u $GITREPO -b $BRANCH -m $1.xml $REPO_INIT_FLAGS && 9 | $REPO sync $sync_flags $REPO_SYNC_FLAGS 10 | ret=$? 
11 | if [ "$GITREPO" = "$GIT_TEMP_REPO" ]; then 12 | rm -rf $GIT_TEMP_REPO 13 | fi 14 | if [ $ret -ne 0 ]; then 15 | echo Repo sync failed 16 | exit -1 17 | fi 18 | } 19 | 20 | case `uname` in 21 | "Darwin") 22 | # Should also work on other BSDs 23 | CORE_COUNT=`sysctl -n hw.ncpu` 24 | ;; 25 | "Linux") 26 | CORE_COUNT=`grep processor /proc/cpuinfo | wc -l` 27 | ;; 28 | *) 29 | echo Unsupported platform: `uname` 30 | exit -1 31 | esac 32 | 33 | GITREPO=${GITREPO:-"https://github.com/kaiostech/manifests.git"} 34 | BRANCH=${BRANCH:-master} 35 | 36 | while [ $# -ge 1 ]; do 37 | case $1 in 38 | -d|-l|-f|-n|-c|-q|--force-sync|-j*) 39 | sync_flags="$sync_flags $1" 40 | if [ $1 = "-j" ]; then 41 | shift 42 | sync_flags+=" $1" 43 | fi 44 | shift 45 | ;; 46 | --help|-h) 47 | # The main case statement will give a usage message. 48 | break 49 | ;; 50 | -*) 51 | echo "$0: unrecognized option $1" >&2 52 | exit 1 53 | ;; 54 | *) 55 | break 56 | ;; 57 | esac 58 | done 59 | 60 | GIT_TEMP_REPO="tmp_manifest_repo" 61 | if [ -n "$2" ]; then 62 | GITREPO=$GIT_TEMP_REPO 63 | rm -rf $GITREPO && 64 | git init $GITREPO && 65 | cp $2 $GITREPO/$1.xml && 66 | cd $GITREPO && 67 | git add $1.xml && 68 | git commit -m "manifest" && 69 | git branch -m $BRANCH && 70 | cd .. 71 | fi 72 | 73 | echo MAKE_FLAGS=-j$((CORE_COUNT + 2)) > .tmp-config 74 | echo DEVICE_NAME=$1 >> .tmp-config 75 | 76 | case "$1" in 77 | "emulator-10-arm") 78 | echo PRODUCT_NAME=aosp_arm >> .tmp-config && 79 | echo TARGET_NAME=generic >> .tmp-config && 80 | repo_sync emulator-10 81 | ;; 82 | "emulator-10-x86_64") 83 | echo PRODUCT_NAME=aosp_x86_64 >> .tmp-config && 84 | echo TARGET_NAME=generic_x86_64 >> .tmp-config && 85 | echo BINSUFFIX=64 >> .tmp-config && 86 | repo_sync emulator-10 87 | ;; 88 | *) 89 | echo "Usage: $0 [-cdflnq] [-j ] [--force-sync] (device name)" 90 | echo "Flags are passed through to |./repo sync|." 
91 | echo 92 | echo Valid devices to configure are: 93 | echo - emulator-10-arm 94 | echo - emulator-10-x86_64 95 | exit -1 96 | ;; 97 | esac 98 | 99 | echo GECKO_OBJDIR=$PWD/objdir-gecko-\$PRODUCT_NAME >> .tmp-config 100 | 101 | if [ $? -ne 0 ]; then 102 | echo Configuration failed 103 | exit -1 104 | fi 105 | 106 | mv .tmp-config .config 107 | 108 | echo Run \|./build.sh\| to start building 109 | -------------------------------------------------------------------------------- /edit-prefs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | [ -z "$EDITOR" ] && EDITOR=vi 5 | 6 | PREFS_JS=$(adb shell echo -n "/data/b2g/mozilla/*.default")/prefs.js 7 | echo "Pulling preferences: $PREFS_JS" 8 | adb pull $PREFS_JS 9 | $EDITOR prefs.js 10 | echo "Pushing and restarting" 11 | adb shell stop b2g 12 | adb push prefs.js $PREFS_JS 13 | adb shell start b2g 14 | -------------------------------------------------------------------------------- /flash.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . load-config.sh 4 | test -f $DEVICE_DIR/flash.sh && . $DEVICE_DIR/flash.sh 5 | 6 | ADB=${ADB:-adb} 7 | FASTBOOT=${FASTBOOT:-fastboot} 8 | HEIMDALL=${HEIMDALL:-heimdall} 9 | VARIANT=${VARIANT:-eng} 10 | FULLFLASH=false 11 | 12 | if [ ! -f "`which \"$ADB\"`" ]; then 13 | ADB=out/host/`uname -s | tr "[[:upper:]]" "[[:lower:]]"`-x86/bin/adb 14 | fi 15 | if [ ! -f "`which \"$FASTBOOT\"`" ]; then 16 | FASTBOOT=out/host/`uname -s | tr "[[:upper:]]" "[[:lower:]]"`-x86/bin/fastboot 17 | fi 18 | 19 | run_adb() 20 | { 21 | $ADB $ADB_FLAGS $@ 22 | } 23 | 24 | run_fastboot() 25 | { 26 | if [ "$1" = "devices" ]; then 27 | $FASTBOOT $@ 28 | else 29 | $FASTBOOT $FASTBOOT_FLAGS $@ 30 | fi 31 | return $? 
32 | } 33 | 34 | update_time() 35 | { 36 | if [ `uname` = Darwin ]; then 37 | OFFSET=`date +%z` 38 | OFFSET=${OFFSET:0:3} 39 | TIMEZONE=`date +%Z$OFFSET|tr +- -+` 40 | else 41 | TIMEZONE=`date +%Z%:::z|tr +- -+` 42 | fi 43 | echo Attempting to set the time on the device 44 | run_adb wait-for-device && 45 | run_adb shell "toolbox date $(date +%s) || 46 | toolbox date -s $(date +%Y%m%d.%H%M%S)" && 47 | run_adb shell setprop persist.sys.timezone $TIMEZONE 48 | 49 | has_timekeep=$(run_adb shell ls /system/bin/timekeep | tr -d '\r\n') 50 | if [ "${has_timekeep}" = "/system/bin/timekeep" ]; then 51 | cur_date=$(date +%s) 52 | since_epoch=$(run_adb shell cat /sys/class/rtc/rtc0/since_epoch | tr -d '\r\n') 53 | timeadjust=$(echo "${cur_date}-${since_epoch}" | bc) 54 | run_adb shell setprop persist.sys.timeadjust ${timeadjust} 55 | run_adb shell timekeep restore 56 | fi; 57 | } 58 | 59 | fastboot_flash_image() 60 | { 61 | # $1 = {userdata,boot,system,recovery} the image filename 62 | # $2 = {userdata,boot,system,recovery} the fastboot partition target 63 | # if non specified, then $2 = $1 64 | PARTITION_FILE=$1 65 | PARTITION_NAME=$2 66 | 67 | if [ -z "${PARTITION_NAME}" ]; then 68 | PARTITION_NAME=${PARTITION_FILE} 69 | fi 70 | 71 | if [ "$DEVICE" == "flatfish" ] && [ "${PARTITION_NAME}" == "userdata" ]; then 72 | PARTITION_NAME="data" 73 | fi 74 | imgpath="out/target/product/$DEVICE/${PARTITION_FILE}.img" 75 | out="$(run_fastboot flash "${PARTITION_NAME}" "$imgpath" 2>&1)" 76 | rv="$?" 77 | echo "$out" 78 | 79 | if [[ "$rv" != "0" ]]; then 80 | # Print a nice error message if we understand what went wrong. 81 | if grep -q "too large" <(echo "$out"); then 82 | echo "" 83 | echo "Flashing $imgpath failed because the image was too large." 
84 | echo "Try re-flashing after running" 85 | echo " \$ rm -rf $(dirname "$imgpath")/data && ./build.sh" 86 | fi 87 | return $rv 88 | fi 89 | } 90 | 91 | fastboot_flash_image_if_exists() 92 | { 93 | if [ -e "out/target/product/$DEVICE/$1.img" ]; then 94 | fastboot_flash_image $1 95 | fi 96 | } 97 | 98 | 99 | flash_fastboot() 100 | { 101 | local lockedness=$1 project=$2 102 | case $lockedness in 103 | "unlock"|"nounlock") 104 | ;; 105 | *) 106 | echo "$0: $FUNCNAME: Invalid argument: $lockedness" 107 | return 1 108 | ;; 109 | esac 110 | case $project in 111 | "system"|"boot"|"userdata"|"cache"|"") 112 | ;; 113 | *) 114 | echo "$0: Unrecognized project/partition: $project" 115 | return 1 116 | ;; 117 | esac 118 | 119 | delete_single_variant_persist 120 | 121 | case $DEVICE in 122 | "helix") 123 | run_adb reboot oem-1 124 | ;; 125 | "flatfish") 126 | run_adb reboot boot-fastboot 127 | ;; 128 | *) 129 | run_adb reboot bootloader 130 | ;; 131 | esac 132 | 133 | if ! run_fastboot devices; then 134 | echo Couldn\'t setup fastboot 135 | return 1 136 | fi 137 | 138 | case $lockedness in 139 | "unlock") 140 | run_fastboot oem unlock || true 141 | ;; 142 | esac 143 | 144 | case $project in 145 | "system" | "boot" | "userdata" | "cache") 146 | fastboot_flash_image $project && 147 | run_fastboot reboot 148 | ;; 149 | 150 | "") 151 | VERB="erase" 152 | if [ "$DEVICE" == "hammerhead" ] || [ "$DEVICE" == "mako" ] || 153 | [ "$DEVICE" == "flo" ] || [ "$DEVICE" == "fugu" ]; then 154 | VERB="format" 155 | fi 156 | DATA_PART_NAME="userdata" 157 | if [ "$DEVICE" == "flatfish" ]; then 158 | DATA_PART_NAME="data" 159 | fi 160 | # helix/dolphin don't support erase command in fastboot mode. 161 | if [ "$DEVICE" != "helix" -a "$DEVICE_NAME" != "dolphin" ]; then 162 | run_fastboot $VERB cache && 163 | run_fastboot $VERB $DATA_PART_NAME 164 | if [ $? -ne 0 ]; then 165 | return $? 
166 | fi 167 | fi 168 | case ${DEVICE} in 169 | "aries"|"leo"|"scorpion"|"sirius"|"castor"|"castor_windy"|"honami"|"amami"|"tianchi"|"flamingo"|"eagle"|"seagull") 170 | fastboot_flash_image recovery FOTAKernel 171 | ;; 172 | esac 173 | fastboot_flash_image userdata && 174 | fastboot_flash_image_if_exists cache && 175 | fastboot_flash_image_if_exists boot && 176 | fastboot_flash_image system && 177 | run_fastboot reboot && 178 | update_time 179 | ;; 180 | esac 181 | echo -ne \\a 182 | } 183 | 184 | flash_heimdall() 185 | { 186 | local project=$1 187 | case $project in 188 | "system"|"kernel"|"") 189 | ;; 190 | *) 191 | echo "$0: Unrecognized project: $project" 192 | return 1 193 | ;; 194 | esac 195 | 196 | if [ ! -f "`which \"$HEIMDALL\"`" ]; then 197 | echo Couldn\'t find heimdall. 198 | echo Install Heimdall v1.3.1 from http://www.glassechidna.com.au/products/heimdall/ 199 | exit -1 200 | fi 201 | 202 | delete_single_variant_persist && 203 | run_adb reboot download && sleep 8 204 | if [ $? -ne 0 ]; then 205 | echo Couldn\'t reboot into download mode. Hope you\'re already in download mode 206 | fi 207 | 208 | case $project in 209 | "system") 210 | $HEIMDALL flash --factoryfs out/target/product/$DEVICE/$project.img 211 | ;; 212 | 213 | "kernel") 214 | $HEIMDALL flash --kernel device/samsung/$DEVICE/kernel 215 | ;; 216 | 217 | "") 218 | $HEIMDALL flash --factoryfs out/target/product/$DEVICE/system.img --kernel device/samsung/$DEVICE/kernel && 219 | update_time 220 | ;; 221 | esac 222 | 223 | ret=$? 224 | echo -ne \\a 225 | if [ $ret -ne 0 ]; then 226 | echo Heimdall flashing failed. 227 | case "`uname`" in 228 | "Darwin") 229 | if kextstat | grep com.devguru.driver.Samsung > /dev/null ; then 230 | echo Kies drivers found. 231 | echo Uninstall kies completely and restart your system. 232 | else 233 | echo Restart your system if you\'ve just installed heimdall. 
234 | fi 235 | ;; 236 | "Linux") 237 | echo Make sure you have a line like 238 | echo SUBSYSTEM==\"usb\", ATTRS{idVendor}==\"04e8\", MODE=\"0666\" 239 | echo in /etc/udev/rules.d/android.rules 240 | ;; 241 | esac 242 | exit -1 243 | fi 244 | 245 | echo Run \|./flash.sh gaia\| if you wish to install or update gaia. 246 | } 247 | 248 | # Delete files in the device's /system/b2g that aren't in 249 | # $GECKO_OBJDIR/dist/b2g. 250 | # 251 | # We do this for general cleanliness, but also because b2g.sh determines 252 | # whether to use DMD by looking for the presence of libdmd.so in /system/b2g. 253 | # If we switch from a DMD to a non-DMD build and then |flash.sh gecko|, we want 254 | # to disable DMD, so we have to delete libdmd.so. 255 | # 256 | # Note that we do not delete *folders* in /system/b2g. This is intentional, 257 | # because some user data is stored under /system/b2g (e.g. prefs), but it seems 258 | # to be stored only inside directories. 259 | delete_extra_gecko_files_on_device() 260 | { 261 | files_to_remove="$(cat <(ls "$GECKO_OBJDIR/dist/b2g") <(run_adb shell "ls /system/b2g" | tr -d '\r') | sort | uniq -u)" 262 | if [[ "$files_to_remove" != "" ]]; then 263 | # We expect errors from the call to rm below under two circumstances: 264 | # 265 | # - We ask rm to remove a directory (per above, we don't 266 | # actually want to remove directories, so rm is doing the 267 | # right thing by not removing dirs) 268 | # 269 | # - We ask rm to remove a file which isn't on the device (if 270 | # you squint at files_to_remove, you'll see that it will 271 | # contain files which are on the host but not on the device; 272 | # obviously we can't remove those files from the device). 
273 | for to_remove in $files_to_remove; do 274 | run_adb shell "rm /system/b2g/$to_remove" > /dev/null 275 | done 276 | fi 277 | return 0 278 | } 279 | 280 | delete_single_variant_persist() 281 | { 282 | run_adb shell 'rm -rf /persist/svoperapps' 283 | } 284 | 285 | flash_gecko() 286 | { 287 | delete_extra_gecko_files_on_device && 288 | run_adb push $GECKO_OBJDIR/dist/b2g /system/b2g && 289 | return 0 290 | } 291 | 292 | flash_gaia() 293 | { 294 | GAIA_MAKE_FLAGS="ADB=\"$ADB\"" 295 | USER_VARIANTS="user(debug)?" 296 | # We need to decide where to push the apps here. 297 | # If the VARIANTS is user or userdebug, send them to /system/b2g. 298 | # or, we will try to connect the phone and see where Gaia was installed 299 | # and try not to push to the wrong place. 300 | if [[ "$VARIANT" =~ $USER_VARIANTS ]]; then 301 | # Gaia's build takes care of remounting /system for production builds 302 | echo "Push to /system/b2g ..." 303 | GAIA_MAKE_FLAGS+=" GAIA_INSTALL_PARENT=/system/b2g" 304 | else 305 | echo "Detect GAIA_INSTALL_PARENT ..." 306 | # This part has been re-implemented in Gaia build script (bug 915484), 307 | # XXX: Remove this once we no longer support old Gaia branches. 308 | # Install to /system/b2g if webapps.json does not exist, or 309 | # points any installed app to /system/b2g. 310 | run_adb wait-for-device 311 | if run_adb shell 'cat /data/local/webapps/webapps.json || echo \"basePath\": \"/system\"' | grep -qs '"basePath": "/system' ; then 312 | echo "Push to /system/b2g ..." 313 | GAIA_MAKE_FLAGS+=" GAIA_INSTALL_PARENT=/system/b2g" 314 | else 315 | echo "Push to /data/local ..." 316 | GAIA_MAKE_FLAGS+=" GAIA_INSTALL_PARENT=/data/local" 317 | fi 318 | fi 319 | make -C gaia push $GAIA_MAKE_FLAGS 320 | 321 | # For older Gaia without |push| target, 322 | # run the original |install-gaia| target. 323 | # XXX: Remove this once we no longer support old Gaia branches. 324 | if [[ $? 
-ne 0 ]]; then 325 | make -C gaia install-gaia $GAIA_MAKE_FLAGS 326 | fi 327 | return $? 328 | } 329 | 330 | while [ $# -gt 0 ]; do 331 | case "$1" in 332 | "-s") 333 | ADB_FLAGS+="-s $2" 334 | FASTBOOT_FLAGS+="-s $2" 335 | shift 336 | ;; 337 | "-f") 338 | FULLFLASH=true 339 | ;; 340 | "-h"|"--help") 341 | echo "Usage: $0 [-s device] [-f] [project]" 342 | exit 0 343 | ;; 344 | "-"*) 345 | echo "$0: Unrecognized option: $1" 346 | exit 1 347 | ;; 348 | *) 349 | FULLFLASH=true 350 | PROJECT=$1 351 | ;; 352 | esac 353 | shift 354 | done 355 | 356 | case "$PROJECT" in 357 | "shallow") 358 | resp=`run_adb root` || exit $? 359 | [ "$resp" != "adbd is already running as root" ] && run_adb wait-for-device 360 | run_adb shell stop b2g && 361 | run_adb remount && 362 | flash_gecko && 363 | flash_gaia && 364 | update_time && 365 | echo Restarting B2G && 366 | run_adb shell start b2g 367 | exit $? 368 | ;; 369 | 370 | "gecko") 371 | resp=`run_adb root` || exit $? 372 | [ "$resp" != "adbd is already running as root" ] && run_adb wait-for-device 373 | run_adb shell stop b2g && 374 | run_adb remount && 375 | flash_gecko && 376 | echo Restarting B2G && 377 | run_adb shell start b2g 378 | exit $? 379 | ;; 380 | 381 | "gaia") 382 | flash_gaia 383 | exit $? 384 | ;; 385 | 386 | "time") 387 | update_time 388 | exit $? 389 | ;; 390 | esac 391 | 392 | case "$DEVICE" in 393 | "hamachi"|"helix"|"sp6821a_gonk") 394 | if $FULLFLASH; then 395 | flash_fastboot nounlock $PROJECT 396 | exit $? 397 | else 398 | run_adb root && 399 | run_adb shell stop b2g && 400 | run_adb remount && 401 | flash_gecko && 402 | flash_gaia && 403 | update_time && 404 | echo Restarting B2G && 405 | run_adb shell start b2g 406 | fi 407 | exit $? 
408 | ;; 409 | 410 | "flame"|"otoro"|"unagi"|"keon"|"peak"|"inari"|"wasabi"|"flatfish"|"aries"|"leo"|"scorpion"|"sirius"|"castor"|"castor_windy"|"honami"|"amami"|"tianchi"|"flamingo"|"eagle"|"seagull"|"scx15_sp7715"*|"zte_p821a10") 411 | flash_fastboot nounlock $PROJECT 412 | ;; 413 | 414 | "panda"|"maguro"|"crespo"|"crespo4g"|"mako"|"hammerhead"|"flo"|"shamu"|"FP2"|"fugu") 415 | flash_fastboot unlock $PROJECT 416 | ;; 417 | 418 | "galaxys2") 419 | flash_heimdall $PROJECT 420 | ;; 421 | 422 | *) 423 | if [[ $(type -t flash_${DEVICE}) = function ]]; then 424 | flash_${DEVICE} $PROJECT 425 | else 426 | echo Unsupported device \"$DEVICE\", can\'t flash 427 | exit -1 428 | fi 429 | ;; 430 | esac 431 | -------------------------------------------------------------------------------- /load-config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ ! -n "$B2G_DIR" ]]; then 4 | B2G_DIR=$(cd `dirname ${BASH_SOURCE[0]}`; pwd) 5 | fi 6 | 7 | . "$B2G_DIR/.config" 8 | if [ $? -ne 0 ]; then 9 | echo Could not load .config. Did you run config.sh? 10 | exit -1 11 | fi 12 | 13 | if [ -f "$B2G_DIR/.userconfig" ]; then 14 | . "$B2G_DIR/.userconfig" 15 | fi 16 | 17 | # Use default Gecko location if it's not provided in config files. 18 | if [ -z $GECKO_PATH ]; then 19 | GECKO_PATH=$B2G_DIR/gecko 20 | fi 21 | 22 | VARIANT=${VARIANT:-eng} 23 | PRODUCT_NAME=${PRODUCT_NAME:-full_${DEVICE}} 24 | DEVICE=${DEVICE:-${PRODUCT_NAME}} 25 | TARGET_NAME=${TARGET_NAME:-${PRODUCT_NAME}} 26 | LUNCH=${LUNCH:-${PRODUCT_NAME}-${VARIANT}} 27 | DEVICE_DIR=${DEVICE_DIR:-device/*/$DEVICE} 28 | BINSUFFIX=${BINSUFFIX:-} 29 | -------------------------------------------------------------------------------- /mach: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # This Source Code Form is subject to the terms of the Mozilla Public 3 | # License, v. 2.0. 
If a copy of the MPL was not distributed with this 4 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 5 | 6 | from __future__ import print_function, unicode_literals 7 | 8 | import os 9 | import sys 10 | 11 | def ancestors(path): 12 | while path: 13 | yield path 14 | (path, child) = os.path.split(path) 15 | if child == "": 16 | break 17 | 18 | def load_mach(b2g_home): 19 | sys.path[0:0] = [os.path.join(b2g_home, "tools")] 20 | import mach_b2g_bootstrap 21 | return mach_b2g_bootstrap.bootstrap(b2g_home) 22 | 23 | # Check whether the current directory is within a mach src or obj dir. 24 | for dir_path in ancestors(os.getcwd()): 25 | # If we find the mach bootstrap module, we are in the b2g_home dir. 26 | mach_path = os.path.join(dir_path, "tools/mach_b2g_bootstrap.py") 27 | if os.path.isfile(mach_path): 28 | mach = load_mach(dir_path) 29 | sys.exit(mach.run(sys.argv[1:])) 30 | 31 | print("Could not run mach: No mach source directory found") 32 | sys.exit(1) 33 | -------------------------------------------------------------------------------- /moz2d-record.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #set -x 4 | 5 | SCRIPT_NAME=$(basename $0) 6 | 7 | ADB=adb 8 | 9 | set_recording() { 10 | PREFS_JS=$(adb shell echo -n "/data/b2g/mozilla/*.default")/prefs.js 11 | $ADB pull $PREFS_JS 12 | echo "user_pref(\"gfx.2d.recording\", $1);" >> prefs.js 13 | $ADB push prefs.js $PREFS_JS 14 | } 15 | 16 | HELP_start="Restart b2g with moz2d draw call recording." 17 | cmd_start() { 18 | echo "Stopping b2g" 19 | $ADB shell stop b2g 20 | $ADB shell rm "/data/local/tmp/moz2drec_*.aer" 21 | set_recording "true" 22 | echo "Restarting" 23 | $ADB shell start b2g 24 | echo "TIP: Close the application before invoking moz2d-record.sh stop" 25 | } 26 | 27 | HELP_stop="Restart b2g without recording and pull the files." 
28 | cmd_stop() { 29 | echo "Stopping b2g" 30 | $ADB shell stop b2g 31 | echo "Pulling recording(s)" 32 | $ADB shell ls "/data/local/tmp/moz2drec_*.aer" | tr -d '\r' | xargs -n1 $ADB pull 33 | $ADB shell rm "/data/local/tmp/moz2drec_*.aer" 34 | set_recording "false" 35 | echo "Restarting" 36 | $ADB shell start b2g 37 | } 38 | 39 | HELP_clean="Clean the moz2drec files." 40 | cmd_clean() { 41 | rm moz2drec_*.aer 42 | } 43 | ########################################################################### 44 | # 45 | # Display a brief help message for each supported command 46 | # 47 | HELP_help="Shows these help messages" 48 | cmd_help() { 49 | if [ "$1" == "" ]; then 50 | echo "Usage: ${SCRIPT_NAME} command [args]" 51 | echo "where command is one of:" 52 | for command in ${allowed_commands}; do 53 | desc=HELP_${command} 54 | printf " %-11s %s\n" ${command} "${!desc}" 55 | done 56 | else 57 | command=$1 58 | if [ "${allowed_commands/*${command}*/${command}}" == "${command}" ]; then 59 | desc=HELP_${command} 60 | printf "%-11s %s\n" ${command} "${!desc}" 61 | else 62 | echo "Unrecognized command: '${command}'" 63 | fi 64 | fi 65 | } 66 | 67 | # 68 | # Determine if the first argument is a valid command and execute the 69 | # corresponding function if it is. 70 | # 71 | allowed_commands=$(declare -F | sed -ne 's/declare -f cmd_\(.*\)/\1/p' | tr "\n" " ") 72 | command=$1 73 | if [ "${command}" == "" ]; then 74 | cmd_help 75 | exit 0 76 | fi 77 | if [ "${allowed_commands/*${command}*/${command}}" == "${command}" ]; then 78 | shift 79 | cmd_${command} "$@" 80 | else 81 | echo "Unrecognized command: '${command}'" 82 | fi 83 | 84 | -------------------------------------------------------------------------------- /run-adb-remote.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #set -x 3 | 4 | ADB=${ADB:-adb} 5 | if [ ! 
-f "`which \"$ADB\"`" ]; then 6 | ADB=out/host/`uname -s | tr "[[:upper:]]" "[[:lower:]]"`-x86/bin/adb 7 | fi 8 | 9 | if [ "$1" = "" ] || [ "$1" = "connect" ]; then 10 | TARGET_IP=$($ADB shell netcfg | grep wlan0 | tr -s ' ' | cut -d' ' -f3 | cut -d'/' -f1) 11 | ADB_PORT=$((20000 + $(id -u) % 10000)) 12 | TARGET_DEV=$TARGET_IP:$ADB_PORT 13 | echo "Target IP: $TARGET_IP" 14 | echo "ADB port: $ADB_PORT" 15 | 16 | $ADB tcpip $ADB_PORT > /dev/null 17 | if [ "$?" != "0" ] || [ -n "$($ADB connect $TARGET_DEV | grep 'unable')" ]; then 18 | echo "Fail to establish the connection with TCP/IP!" 19 | exit 1 20 | else 21 | echo "Connected to the device at ${TARGET_DEV}." 22 | fi 23 | elif [ "$1" = "disconnect" ]; then 24 | TARGET_DEV=$($ADB devices | grep -oE "([[:digit:]]+\.){3}[[:digit:]]+:[[:digit:]]+") 25 | if [ "$TARGET_DEV" != "" ]; then 26 | $ADB disconnect $TARGET_DEV > /dev/null 27 | echo "Already disconnected at ${TARGET_DEV}." 28 | else 29 | echo "No device connected with TCP/IP!" 30 | fi 31 | fi 32 | 33 | -------------------------------------------------------------------------------- /run-ddd.sh: -------------------------------------------------------------------------------- 1 | run-gdb.sh -------------------------------------------------------------------------------- /run-emulator.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Get full path from where the script was executed, full path is needed to run emulator succesfully 4 | B2G_HOME=$(cd $(dirname $BASH_SOURCE); pwd) 5 | 6 | . 
$B2G_HOME/load-config.sh 7 | 8 | DEVICE=${DEVICE:-generic} 9 | 10 | TOOLS_PATH=$B2G_HOME/out/host/`uname -s | tr "[[:upper:]]" "[[:lower:]]"`-x86/bin 11 | 12 | DBG_CMD="" 13 | if [ x"$DBG" != x"" ]; then 14 | DBG_CMD="gdb -args" 15 | fi 16 | TAIL_ARGS="" 17 | if [ x"$GDBSERVER" != x"" ]; then 18 | TAIL_ARGS="$TAIL_ARGS -s -S" 19 | fi 20 | 21 | dns_servers="" 22 | if [ x"$B2G_DNS_SERVER" != x"" ]; then 23 | dns_servers=$B2G_DNS_SERVER 24 | fi 25 | 26 | # DNS servers from command line arg override ones from environment variable. 27 | while [ $# -gt 0 ]; do 28 | case "$1" in 29 | --dns-server) 30 | shift; dns_servers=$1 ;; 31 | *) 32 | break ;; 33 | esac 34 | shift 35 | done 36 | 37 | emu_extra_args="" 38 | if [ -n "$dns_servers" ]; then 39 | emu_extra_args="$emu_extra_args -dns-server $dns_servers" 40 | fi 41 | 42 | if [ "$DEVICE" = "generic_x86" ]; then 43 | EMULATOR=$TOOLS_PATH/emulator-x86 44 | KERNEL=$B2G_HOME/prebuilts/qemu-kernel/x86/kernel-qemu 45 | else 46 | EMULATOR=$TOOLS_PATH/emulator 47 | KERNEL=$B2G_HOME/prebuilts/qemu-kernel/arm/kernel-qemu-armv7 48 | TAIL_ARGS="$TAIL_ARGS -cpu cortex-a8" 49 | fi 50 | 51 | SDCARD_SIZE=${SDCARD_SIZE:-512M} 52 | SDCARD_IMG=${SDCARD_IMG:-${B2G_HOME}/out/target/product/${DEVICE}/sdcard.img} 53 | 54 | if [ ! -f "${SDCARD_IMG}" ]; then 55 | echo "Creating sdcard image file with size: ${SDCARD_SIZE} ..." 
#!/bin/bash
#set -x

# Look up the PID of a device-side process by name.
# Newer Android releases restrict the plain `ps` listing to the caller's own
# processes, so when the first query comes back empty we retry with `ps -A`.
get_pid_by_name() {
  pid=$($ADB shell "ps | grep '$1' | cut -b 14-19 | tr -d ' '")
  # BUG FIX: this used to test -n, which re-queried (and could clobber) an
  # already-found pid and never retried when the first lookup failed.
  if [ -z "$pid" ]; then
    pid=$($ADB shell "ps -A | grep '$1' | cut -b 14-19 | tr -d ' '")
  fi
  echo $pid
}

SCRIPT_NAME=$(basename $0)
. load-config.sh

# Prefer an adb from $PATH (or $ADB); fall back to the one built in-tree.
ADB=${ADB:-adb}
if [ ! -f "`which \"$ADB\" 2>/dev/null`" ]; then
    ADB=out/host/`uname -s | tr "[[:upper:]]" "[[:lower:]]"`-x86/bin/adb
fi
echo "ADB Location: " $ADB

# Make sure that the adb server is running and that it's compatible with the
# version of adb that we're using. Anytime you run adb it will kill the
# running server if there is a mismatch, and it will automatically start
# the server if it isn't running. Unfortunately, both of these activities
# cause information to be printed, which can screw up any further command
# which is trying to query information from the phone. So by starting
# the server explicitly here, then we'll cause that output to go to this
# command (which we ignore), and not wind up prepending output.
# For a clear example of why the server must be started explicitly, compare:
#
#   adb start-server
#   x1=$(adb shell echo test)
#   adb kill-server
#   x2=$(adb shell echo test)
#
# "$x1" is clean while "$x2" has the server-startup noise prepended.
$ADB start-server

# 64-bit targets ship gdbserver64 and lib64 paths.
case $DEVICE in
aosp_x86_64)
    BINSUFFIX=64
    ;;
*)
    BINSUFFIX=
    ;;
esac

HOST_OS=$(uname -s | tr "[[:upper:]]" "[[:lower:]]")
HOST_ARCH=$(uname -m | tr "[[:upper:]]" "[[:lower:]]")

# Locate a host gdb unless the caller provided one via GDB=.
if [ -z "${GDB}" ]; then
    # BUG FIX: the original tested the literal path string (always true),
    # so the error branch was unreachable; we must test that the NDK
    # directory actually exists with -d.
    if [ -d "${HOME}/.mozbuild/android-ndk-r20b-canary" ]; then
        GDB="${HOME}/.mozbuild/android-ndk-r20b-canary/prebuilt/${HOST_OS}-${HOST_ARCH}/bin/gdb"
    else
        echo "Not sure where gdb is located. Override using GDB= or fix the script."
        exit 1
    fi
fi

B2G_BIN=/system/b2g/b2g
GDBINIT=/tmp/b2g.gdbinit.$(whoami).$$

GONK_OBJDIR="out/target/product/$TARGET_NAME"
SYMDIR="$GONK_OBJDIR/symbols"

# Everything except core-file debugging needs a TCP port and a gdbserver.
if [ "$1" != "core" ] ; then
    GDBSERVER_PID=$(get_pid_by_name gdbserver$BINSUFFIX)

    if [ "$1" = "vgdb" -a -n "$2" ] ; then
        GDB_PORT="$2"
    elif [ "$1" = "attach" -a -n "$2" ] ; then
        B2G_PID=$2
        if [ -z "$($ADB ls /proc/$B2G_PID)" ] ; then
            # $2 was a process name rather than a PID; resolve it.
            ATTACH_TARGET=$B2G_PID
            B2G_PID=$(get_pid_by_name "$B2G_PID")
            if [ -z "$B2G_PID" ] ; then
                echo Error: PID $ATTACH_TARGET is invalid
                exit 1;
            fi
            echo "Found $ATTACH_TARGET PID: $B2G_PID"
        fi
        # Derive a per-process, per-user port so parallel sessions don't clash.
        PROCESS_PORT=$((10000 + ($B2G_PID + $(id -u)) % 50000))
        GDB_PORT=${GDB_PORT:-$PROCESS_PORT}
        # cmdline is null separated
        B2G_BIN=$($ADB shell "cat /proc/$B2G_PID/cmdline" | tr '\0' '\n' | head -1)
    else
        GDB_PORT=$((10000 + $(id -u) % 50000))
        B2G_PID=$(get_pid_by_name b2g)
    fi

    # Kill any stale gdbserver already bound to our port.
    for p in $GDBSERVER_PID ; do
        $ADB shell "cat /proc/$p/cmdline" | grep -q :$GDB_PORT && ( \
            echo ..killing gdbserver pid $p
            $ADB shell "kill $p"
        ) || echo ..ignoring gdbserver pid $p
    done

    $ADB forward tcp:$GDB_PORT tcp:$GDB_PORT
fi

if [ "$1" = "attach" ]; then
    if [ -z "$B2G_PID" ]; then
        echo Error: No PID to attach to. B2G not running?
        exit 1
    fi

    $ADB shell "gdbserver$BINSUFFIX :$GDB_PORT --attach $B2G_PID" &
elif [ "$1" == "core" ]; then
    # core [bin] <core file> -- with two args the first is the binary.
    if [ -z "$3" ]; then
        CORE_FILE=$2
    else
        B2G_BIN=$2
        CORE_FILE=$3
    fi

    if [ "$B2G_BIN" == "" -o "$CORE_FILE" == "" ]; then
        # NOTE(review): the "<core file>" placeholder appears to have been
        # stripped from the original usage string; restored here.
        echo "Usage: $SCRIPT_NAME core [bin] <core file>"
        exit 1
    fi

    if [ ! -f $CORE_FILE ]; then
        echo "Error: $CORE_FILE not found."
        exit 1
    fi
elif [ "$1" != "vgdb" ]; then
    # Default mode: (re)launch the binary under gdbserver.
    if [ -n "$1" ]; then
        B2G_BIN=$1
        shift
    fi
    [ -n "$MOZ_PROFILER_STARTUP" ] && GDBSERVER_ENV="$GDBSERVER_ENV MOZ_PROFILER_STARTUP=$MOZ_PROFILER_STARTUP "
    [ -n "$MOZ_DEBUG_CHILD_PROCESS" ] && GDBSERVER_ENV="$GDBSERVER_ENV MOZ_DEBUG_CHILD_PROCESS=$MOZ_DEBUG_CHILD_PROCESS "
    [ -n "$MOZ_DEBUG_APP_PROCESS" ] && GDBSERVER_ENV="$GDBSERVER_ENV MOZ_DEBUG_APP_PROCESS='$MOZ_DEBUG_APP_PROCESS' "
    [ -n "$MOZ_IPC_MESSAGE_LOG" ] && GDBSERVER_ENV="$GDBSERVER_ENV MOZ_IPC_MESSAGE_LOG=$MOZ_IPC_MESSAGE_LOG "

    [ -n "$B2G_PID" ] && $ADB shell "kill $B2G_PID"
    [ "$B2G_BIN" = "/system/b2g/b2g" ] && $ADB shell "stop b2g"

    # Preload DMD (dark matter detector) when the build ships it.
    if [ "$($ADB shell 'if [ -f /system/b2g/libdmd.so ]; then echo 1; fi')" != "" ]; then
        echo ""
        echo "Using DMD."
        echo ""
        dmd="1"
        ld_preload_extra="/system/b2g/libdmd.so"
    fi

    $ADB shell "DMD=$dmd LD_LIBRARY_PATH=\"/system/b2g:/apex/com.android.runtime/lib$BINSUFFIX:/system/apex/com.android.runtime.debug/lib$BINSUFFIX\" LD_PRELOAD=\"$ld_preload_extra /system/b2g/libmozglue.so\" TMPDIR=/data/local/tmp $GDBSERVER_ENV gdbserver$BINSUFFIX --multi :$GDB_PORT $B2G_BIN $@" &
fi

sleep 1
# Generate the gdbinit script.
# BUG FIX: the first write must truncate ('>') and the rest append ('>>').
# The original appended the SIGPIPE line first and then truncated the file
# on the next echo, losing "handle SIGPIPE nostop" entirely.
echo "handle SIGPIPE nostop" > $GDBINIT
echo "set solib-absolute-prefix $SYMDIR" >> $GDBINIT
echo "set solib-search-path $GECKO_OBJDIR/dist/bin:$SYMDIR:$SYMDIR/apex/com.android.runtime.debug/bin:$SYMDIR/apex/com.android.runtime.debug/lib:$SYMDIR/apex/com.android.runtime.debug/lib$BINSUFFIX:$SYMDIR/apex/com.android.runtime.debug/lib$BINSUFFIX/bionic" >> $GDBINIT
if [ "$1" == "vgdb" ] ; then
    echo "target remote :$GDB_PORT" >> $GDBINIT
elif [ "$1" != "core" ]; then
    echo "target extended-remote :$GDB_PORT" >> $GDBINIT
fi

# Prefer the freshly-built binary, then device symbols, then the device path.
PROG=$GECKO_OBJDIR/dist/bin/$(basename $B2G_BIN)
[ -f $PROG ] || PROG=${SYMDIR}/${B2G_BIN}
[ -f $PROG ] || PROG=${B2G_BIN}
if [ ! -f $PROG ]; then
    echo "Error: program to debug not found:"
    echo "   $GECKO_OBJDIR/dist/bin/$(basename $B2G_BIN)"
    echo "   $SYMDIR/$B2G_BIN"
    echo "   $B2G_BIN"
    exit 1
fi
178 | echo "----- Start of $GDBINIT -----" 179 | cat $GDBINIT 180 | echo "----- End of $GDBINIT -----" 181 | fi 182 | 183 | if [ "$SCRIPT_NAME" == "run-ddd.sh" ]; then 184 | echo "ddd --debugger \"$GDB -x $GDBINIT\" $PROG $CORE_FILE" 185 | ddd --debugger "$GDB -x $GDBINIT" $PROG $CORE_FILE 186 | else 187 | echo $GDB -x $GDBINIT $PROG $CORE_FILE 188 | $GDB -x $GDBINIT $PROG $CORE_FILE 189 | fi 190 | 191 | -------------------------------------------------------------------------------- /run-valgrind.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #set -xv 3 | 4 | SCRIPT_NAME=$(basename $0) 5 | 6 | . load-config.sh 7 | 8 | ADB=${ADB:-adb} 9 | if [ ! -f "`which \"$ADB\"`" ]; then 10 | ADB=out/host/`uname -s | tr "[[:upper:]]" "[[:lower:]]"`-x86/bin/adb 11 | fi 12 | echo "ADB Location: " $ADB 13 | B2G_DIR="/data/valgrind-b2g" 14 | 15 | HAS_VALGRIND=$($ADB shell 'test -e /system/bin/valgrind ; echo -n $?') 16 | 17 | # Make sure valgrind is actually on system 18 | if [ "$HAS_VALGRIND" -ne 0 ]; then 19 | echo "Platform does not have valgrind executable, did you build with B2G_VALGRIND=1 in your .userconfig?" 20 | exit 1 21 | fi 22 | 23 | LIBMOZGLUE="$GECKO_OBJDIR/mozglue/build/libmozglue.so" 24 | LIBXUL="$GECKO_OBJDIR/toolkit/library/build/libxul.so" 25 | if [ ! 
-e "$LIBXUL" ]; then 26 | # try the old location 27 | LIBXUL="$GECKO_OBJDIR/toolkit/library/libxul.so" 28 | fi 29 | 30 | # Load libxul 31 | if [ "$1" = "debuginfo" ]; then 32 | echo "Recompiling libxul.so with debug info (this can take a few minutes)" 33 | $ADB remount 34 | $ADB shell "rm -rf $B2G_DIR && cp -r /system/b2g $B2G_DIR" 35 | cp "$LIBXUL" "$GECKO_OBJDIR/toolkit/library/libxul.debuginfo.so" 36 | 37 | STRIP=prebuilts/gcc/`uname -s | tr "[[:upper:]]" "[[:lower:]]"`-x86/arm/arm-linux-androideabi-4.7/bin/arm-linux-androideabi-strip 38 | $STRIP -R .debug_info "$GECKO_OBJDIR/toolkit/library/libxul.debuginfo.so" 39 | echo "Pushing debug libxul to phone (this takes about a minute)" 40 | time $ADB push $GECKO_OBJDIR/toolkit/library/libxul.debuginfo.so $B2G_DIR/libxul.so 41 | shift 42 | elif [ "$1" = "nocopy" ]; then 43 | echo "Skipping libxul.so copy step and just running valgrind..." 44 | shift 45 | else 46 | $ADB remount 47 | $ADB shell "rm -rf $B2G_DIR && cp -r /system/b2g $B2G_DIR" 48 | 49 | # compress first, to limit amount of data pushed over the slow pipe 50 | # Compressing at level 3 or greater increases compression time a lot 51 | # without having much effect on resulting file size, and so is a net loss. 52 | echo "Compressing libxul.so..." 53 | time gzip -2 < "$LIBXUL" > $GECKO_OBJDIR/toolkit/library/libxul.so.gz 54 | 55 | echo "Pushing compressed debug libxul to device (this can take upwards of 5 minutes)" 56 | time $ADB push $GECKO_OBJDIR/toolkit/library/libxul.so.gz $B2G_DIR/libxul.so.gz 57 | time $ADB push "$LIBMOZGLUE" $B2G_DIR/libmozglue.so 58 | 59 | echo "Decompressing on phone..." 
60 | time $ADB shell "gzip -d $B2G_DIR/libxul.so.gz" 61 | $ADB shell "chmod 0755 $B2G_DIR/libxul.so" 62 | $ADB shell "chmod 0755 $B2G_DIR/libmozglue.so" 63 | fi 64 | 65 | #$ADB reboot 66 | $ADB wait-for-device 67 | $ADB shell stop b2g 68 | 69 | CORE_ARGS="" 70 | if [ "$1" = "vgdb" ]; then 71 | # delete stale vgdb pipes 72 | $ADB shell rm /data/local/tmp/vgdb* 73 | CORE_ARGS="--trace-children=no --vgdb-error=0 --vgdb=yes" 74 | else 75 | CORE_ARGS="--trace-children=yes" 76 | fi 77 | 78 | # Flags for the Valgrind core -- applicable to all tools. 79 | # 80 | # The --px- flags control precise exceptions. They are mandatory for Gecko. 81 | CORE_ARGS="$CORE_ARGS --px-default=allregs-at-mem-access" 82 | CORE_ARGS="$CORE_ARGS --px-file-backed=unwindregs-at-mem-access" 83 | # Show source paths relative to the source tree root 84 | CORE_ARGS="$CORE_ARGS --fullpath-after=`pwd`/" 85 | # Read full debuginfo for libraries, if available 86 | CORE_ARGS="$CORE_ARGS --extra-debuginfo-path=/sdcard/symbols-for-valgrind" 87 | # Specify what GPU the device has. 88 | CORE_ARGS="$CORE_ARGS --kernel-variant=android-gpu-adreno3xx" 89 | # Other flags 90 | CORE_ARGS="$CORE_ARGS --error-limit=no --fair-sched=yes" 91 | 92 | # Flags for Memcheck. 93 | # 94 | MC_ARGS="" 95 | # Not necessary for a --disable-jemalloc build, but .. 96 | MC_ARGS="$MC_ARGS --soname-synonyms=somalloc=libmozglueZdso" 97 | # Avoid a lot of pointless noise. 98 | MC_ARGS="$MC_ARGS --show-mismatched-frees=no --partial-loads-ok=yes" 99 | # Avoid segfaulting Gecko until such time as bug 1166724 is fixed 100 | MC_ARGS="$MC_ARGS --malloc-fill=0x00" 101 | # Say what we expect the average translation size to be. Getting this 102 | # right can save tens of megabytes of RAM on the phone. 103 | MC_ARGS="$MC_ARGS --avg-transtab-entry-size=400" 104 | # Request 11 sectors for translation storage. More sectors improves 105 | # performance but costs a lot of memory. 
106 | MC_ARGS="$MC_ARGS --num-transtab-sectors=11" 107 | 108 | # Gather them all together. EXTRA_ARGS allows users to pass in extra 109 | # arguments on the command line. 110 | VALGRIND_ARGS="$CORE_ARGS $MC_ARGS $EXTRA_ARGS" 111 | 112 | # Due to the fact that we follow forks, we can't log to a logfile. Expect the 113 | # user to redirect stdout. 114 | $ADB shell "B2G_DIR='/data/valgrind-b2g' HOSTNAME='b2g' LOGNAME='b2g' COMMAND_PREFIX='/system/bin/valgrind $VALGRIND_ARGS' exec /system/bin/b2g.sh" 115 | 116 | -------------------------------------------------------------------------------- /scripts/bootstrap-mac.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Boot2Gecko Mac OS X bootstrap script 4 | # See --help 5 | 6 | this_dir=$(cd `dirname $0`; pwd) 7 | 8 | print_usage() { 9 | cat << EOF 10 | Boot2Gecko Mac OS X Bootstrap 11 | 12 | This script attempts to bootstrap a "minimal" OS X installation 13 | with the tools necessary to build Boot2Gecko. 14 | 15 | The only requirement for running this script should be XCode 4.x / 3.x, 16 | and either OS X 10.6+ (Snow Leopard). 
# Parse the command-line flags into option_* globals; shows usage on --help.
parse_options() {
    option_dry_run=no
    option_auto_install=no
    option_help=no
    option_clone=no

    while [ $# -gt 0 ]; do
        case "$1" in
        --help|-h) option_help=yes;;
        --dry-run) option_dry_run=yes;;
        --auto-install) option_auto_install=yes;;
        --clone) option_clone=yes;;
        *) break;;
        esac
        shift
    done

    if [ "$option_help" = "yes" ]; then
        print_usage
    fi
}

# Ask a yes/no question; the upper-cased first letter of the reply is left
# in the global $answer. Re-prompts (recursively) on any other reply.
prompt_question() {
    question="$1"
    default_answer="$2"

    echo -n "$question"

    # using /dev/tty avoids slurping up STDIN when this script is piped to bash
    read full_answer < /dev/tty

    if [ "$full_answer" = "" ]; then
        answer=$default_answer
    else
        answer=${full_answer:0:1}
    fi
    answer=`echo $answer | tr '[[:lower:]]' '[[:upper:]]'`

    if [[ $answer != Y && $answer != N ]]; then
        echo "Error: invalid response $full_answer."
        echo "Expected \"y\", \"yes\", \"n\", or \"no\" (case insensitive)"
        echo ""
        prompt_question "$question" "$default_answer"
    fi
}

# Echo a command, then run it -- unless --dry-run was given, in which case
# it is only echoed with a "(dry-run)" prefix.
run_command() {
    if [ "$option_dry_run" = "yes" ]; then
        command_prefix="(dry-run) "
    fi

    echo "$command_prefix=> $@"

    if [ "$option_dry_run" = "no" ]; then
        # BUG FIX: was an unquoted $@, which re-split arguments on
        # whitespace and glob-expanded them, breaking callers that pass
        # quoted paths containing spaces (e.g. the Xcode dmg path).
        "$@"
    fi
}
-ne 0 ]; then 103 | homebrew_formulas+=" gpg:gpg" 104 | else 105 | echo "Found gpg: $gpg" 106 | fi 107 | 108 | ccache=`which ccache` 109 | if [ $? -ne 0 ]; then 110 | homebrew_formulas+=" ccache:ccache" 111 | else 112 | echo "Found ccache: $ccache" 113 | fi 114 | 115 | yasm=`which yasm` 116 | if [ $? -ne 0 ]; then 117 | homebrew_formulas+=" yasm:yasm" 118 | else 119 | echo "Found yasm: $yasm" 120 | fi 121 | 122 | cmake=`which cmake` 123 | if [ $? -ne 0 ]; then 124 | homebrew_formulas+=" cmake:cmake" 125 | else 126 | echo "Found cmake: $cmake" 127 | fi 128 | 129 | found_autoconf213=1 130 | autoconf213=`which autoconf213` 131 | if [ $? -ne 0 ]; then 132 | found_autoconf213=0 133 | 134 | # Try just "autoconf" and check the version 135 | autoconf=`which autoconf` 136 | if [ $? -eq 0 ]; then 137 | autoconf_version=`$autoconf --version | grep "2.13"` 138 | if [ $? -eq 0 ]; then 139 | autoconf213=$autoconf 140 | found_autoconf213=1 141 | fi 142 | fi 143 | fi 144 | 145 | if [ $found_autoconf213 -eq 0 ]; then 146 | autoconf213_formula="https://raw.github.com/Homebrew/homebrew-versions/master/autoconf213.rb" 147 | homebrew_formulas+=" autoconf-2.13:$autoconf213_formula" 148 | else 149 | echo "Found autoconf-2.13: $autoconf213" 150 | fi 151 | 152 | # We don't need additional toolchain since Mavericks (10.9) 153 | if [[ $osx_version =~ 10\.[6-8] ]]; then 154 | found_apple_gcc=0 155 | check_apple_gcc 156 | 157 | if [ $found_apple_gcc -eq 0 ]; then 158 | # No Apple gcc, probably because newer Xcode 4.3 only installed LLVM-backed gcc 159 | # Fall back to checking for / installing gcc-4.6 160 | 161 | found_gcc46=1 162 | gcc46=`which gcc-4.6` 163 | if [ $? 
# Find a GNU-compatible tar: try gnutar, gtar, then plain tar, keeping the
# first one that accepts the GNU-only options we need for code drops.
gnutar=
for prog in gnutar gtar tar; do
    _gnutar=`which $prog`
    if [ $? -eq 0 ]; then
        $_gnutar -c --owner=0 --group=0 --numeric-owner --mode=go-w -f - $this_dir > /dev/null 2>&1
        if [ $? -eq 0 ]; then
            gnutar=$_gnutar
            break
        fi
    fi
done

if [ ! -z "$gnutar" ]; then
    # BUG FIX: was `echo ""Found GNU tar: $gnutar` -- stray empty quotes and
    # an unquoted expansion; quote the whole message instead.
    echo "Found GNU tar: $gnutar"
else
    homebrew_formulas+=" gnu-tar:gnu-tar"
fi
231 | echo "For more details, see our documentation:" 232 | echo "" 233 | echo " https://developer.mozilla.org/en/Mozilla/Boot_to_Gecko/Preparing_for_your_first_B2G_build" 234 | echo "" 235 | fi 236 | } 237 | 238 | install_xcode() { 239 | if [[ $osx_version =~ 10\.([7-9]|[1-9][0-9]+) ]]; then 240 | # In OS X 10.7+, we open the Mac App Store for Xcode 241 | # Opening the App Store is annoying, so ignore option_auto_install here 242 | echo "You will need to install Xcode 4.3 or newer to build Boot to Gecko on your version of OS X." 243 | prompt_question "Do you want to open Xcode in the Mac App Store? [Y/n] " Y 244 | if [[ $answer = Y ]]; then 245 | # Xcode iTunes http URL: http://itunes.apple.com/us/app/xcode/id497799835?mt=12 246 | # Mac App Store URL: macappstore://itunes.apple.com/app/id497799835?mt=12 247 | run_command open macappstore://itunes.apple.com/app/id497799835\?mt\=12 248 | fi 249 | else 250 | echo "You will need to install \"Xcode 3.2.6 for Snow Leopard\" to build Boot2Gecko." 251 | echo "Note: This is a 4.1GB download, and requires a free Apple account." 252 | echo "" 253 | prompt_question "Do you want to download XCode 3.2.6 for Snow Leopard in your browser? [Y/n] " Y 254 | if [[ $answer = Y ]]; then 255 | run_command open https://developer.apple.com/downloads/download.action\?path=Developer_Tools/xcode_3.2.6_and_ios_sdk_4.3__final/xcode_3.2.6_and_ios_sdk_4.3.dmg 256 | fi 257 | fi 258 | echo "" 259 | echo "After installing Xcode, follow these steps:" 260 | echo " 1. Run Xcode to allow it to configure itself." 261 | echo " 2. If it asks for permission to update the command line tools, let it." 262 | echo " 3. If it doesn't, open the Preferences, go to the Downloads panel, and install them." 263 | echo "" 264 | echo "Then run this script again to continue configuring to build Boot2Gecko." 265 | } 266 | 267 | download_ten_six_sdk () { 268 | dmgpath="$1" 269 | echo "You need to download the 10.6 SDK. 
To do this, you'll need to download" 270 | echo "the Xcode 4.3 installer. Download it to the default location (~/Downloads)" 271 | prompt_question "Do you want to open the Apple Developer Site to download Xcode 4.3? [Y/n] " Y 272 | if [[ $answer = Y ]] ; then 273 | run_command open https://developer.apple.com/downloads/download.action\?path=Developer_Tools/xcode_4.3_for_lion_21266/xcode_43_lion.dmg 274 | fi 275 | echo "When this is downloaded, rerun this script. If you had to download to an" 276 | echo "alternate location, use the XCODE43_DMG_LOCATION environment variable" 277 | } 278 | 279 | install_ten_six_sdk () { 280 | dmgpath=${XCODE43_DMG_LOCATION:-~/Downloads/xcode_43_lion.dmg} 281 | if [[ ! -f $dmgpath ]] ; then 282 | download_ten_six_sdk $dmgpath 283 | exit 1 284 | fi 285 | echo "Installing 10.6 sdk" 286 | if [ "$option_dry_run" == "yes" ] ; then 287 | mp="no_tmp_file_for_dry_run" 288 | else 289 | mp=$(mktemp -d -t xcode43_mount) 290 | fi 291 | run_command hdiutil attach "$dmgpath" -mountpoint "$mp" 292 | sdk_dir="$mp/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.6.sdk" 293 | target="/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/" 294 | echo "Beginning to copy SDK over" 295 | run_command sudo cp -a "$sdk_dir" "$target" 296 | if [ $? -eq 0 ] ; then 297 | echo "Done" 298 | else 299 | echo "Failed to copy 10.6 SDK" 300 | fi 301 | run_command hdiutil unmount "$mp" && 302 | run_command rm -rf "$mp" 303 | if [ $? -ne 0 ] ; then 304 | echo "Failed to unmount $dmgpath from $mp" 305 | fi 306 | } 307 | 308 | check_xcode() { 309 | osx_version=`sw_vers -productVersion` 310 | 311 | # First, if xcode-select isn't around, we have no Xcode at all, or at least not 312 | # the command-line tools 313 | 314 | xcode_select=`which xcode-select` 315 | if [ $? 
-ne 0 ]; then 316 | install_xcode 317 | exit 1 318 | fi 319 | 320 | # Next, run xcode-select to be sure we don't get an error; sometimes it's there 321 | # even though Xcode is not (why!?) 322 | 323 | tmp_select=`xcode-select --print-path &> /dev/null` 324 | if [ $? -ne 0 ]; then 325 | install_xcode 326 | exit 1 327 | fi 328 | 329 | # Assume the old Xcode 3 location, then look elsewhere 330 | 331 | if [ -d "/Developer/Applications/Xcode.app" ]; then 332 | xcode_path=/Developer/Applications/Xcode.app 333 | osx_106_sdk=/Developer/SDKs/MacOSX10.6.sdk 334 | osx_sdk=$osx_106_sdk 335 | else 336 | xcode_dev_path=`xcode-select --print-path` 337 | xcode_path=${xcode_dev_path%/Contents/Developer} 338 | osx_106_sdk=$xcode_dev_path/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.6.sdk 339 | osx_107_sdk=$xcode_dev_path/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.7.sdk 340 | osx_108_sdk=$xcode_dev_path/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.8.sdk 341 | osx_109_sdk=$xcode_dev_path/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk 342 | osx_1010_sdk=$xcode_dev_path/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.10.sdk 343 | osx_1011_sdk=$xcode_dev_path/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk 344 | fi 345 | 346 | if [[ $osx_version =~ 10\.[6-8] ]]; then 347 | test -d $osx_106_sdk 348 | if [ $? -ne 0 ] ; then 349 | if [ "$option_auto_install" = "no" ]; then 350 | echo "You don't have the 10.6 SDK. This means that you're going to" 351 | echo "see lots of error messages and possibly have build issues." 352 | prompt_question "Do you want to install it? 
[Y/n] " Y 353 | else 354 | echo "Automatically install 10.6 SDK" 355 | answer=Y 356 | fi 357 | if [[ $answer == Y ]] ; then 358 | install_ten_six_sdk 359 | exit 1 360 | fi 361 | fi 362 | fi 363 | 364 | # Start with the 10.6 SDK and fall back toward newer and newer 365 | # ones until we find one 366 | 367 | if [ -d "$osx_106_sdk" ]; then 368 | osx_sdk=$osx_106_sdk 369 | elif [ -d "$osx_107_sdk" ]; then 370 | osx_sdk=$osx_107_sdk 371 | elif [ -d "$osx_108_sdk" ]; then 372 | osx_sdk=$osx_108_sdk 373 | elif [ -d "$osx_109_sdk" ]; then 374 | osx_sdk=$osx_109_sdk 375 | elif [ -d "$osx_1010_sdk" ]; then 376 | osx_sdk=$osx_1010_sdk 377 | elif [ -d "$osx_1011_sdk" ]; then 378 | osx_sdk=$osx_1011_sdk 379 | fi 380 | 381 | # Peel the OS X SDK version out of the path so we don't have to mess with it 382 | # by hand; courtesy Geoff Weiss 383 | if [[ $osx_sdk =~ MacOSX([0-9]+\.[0-9]+)\.sdk ]]; then 384 | sdk_ver=${BASH_REMATCH[1]} 385 | else 386 | sdk_ver=UNKNOWN 387 | fi 388 | 389 | if [ ! -d "$xcode_path" ]; then 390 | install_xcode 391 | exit 1 392 | else 393 | echo "Found Xcode: $xcode_path" 394 | if [ ! -d "$osx_sdk" ]; then 395 | echo "Error: Could not find Mac OS X $sdk_ver SDK in this location:" 396 | echo " $osx_sdk" 397 | echo "" 398 | exit 1 399 | else 400 | echo "Found Mac OS X $sdk_ver SDK: $osx_sdk" 401 | return 0 402 | fi 403 | fi 404 | 405 | exit 1 406 | } 407 | 408 | check_apple_gcc() { 409 | # Check for non-LLVM apple gcc 410 | gcc_path=`which gcc` 411 | gcc_regex="i686-apple-darwin1[01]-gcc-" 412 | 413 | if [ ! -f "$gcc_path" ]; then 414 | return 1 415 | fi 416 | 417 | version=`"$gcc_path" --version | sed '2,/end-/d' | sed 's/ .*//' ` 418 | echo $version | grep -q -E $gcc_regex 419 | if [ $? 
-eq 0 ]; then 420 | found_apple_gcc=1 421 | apple_gcc=$gcc_path 422 | echo "Found Apple gcc ($version): $gcc_path" 423 | return 0 424 | else 425 | echo "Warning: gcc reports version $version, will look for gcc-4.6" 1>&2 426 | return 1 427 | fi 428 | } 429 | 430 | prompt_install_homebrew() { 431 | if [ "$option_auto_install" = "no" ]; then 432 | echo "You don't seem to have the 'brew' command installed" 433 | echo "in your system. Homebrew is a free package manager for" 434 | echo "OS X that will greatly ease environment setup." 435 | echo "" 436 | 437 | prompt_question "Do you want to install Homebrew? (may require sudo) [Y/n] " Y 438 | else 439 | echo "Could not find 'brew', starting Homebrew installer..." 440 | answer=Y 441 | fi 442 | 443 | if [[ $answer = Y ]]; then 444 | install_homebrew 445 | else 446 | echo "Please manually install Homebrew, and put it in your PATH. For more, see:" 447 | echo "https://github.com/mxcl/homebrew/wiki/installation" 448 | echo "" 449 | fi 450 | } 451 | 452 | install_homebrew() { 453 | # This was taken and modified from the official Homebrew wiki: 454 | # https://github.com/mxcl/homebrew/wiki/installation 455 | # Update this in the future if Homebrew installation changes 456 | 457 | if [ "$option_dry_run" = "yes" ]; then 458 | # fake the tempfile creation for dry-runs 459 | echo "(dry-run)=> mktemp /tmp/b2g-boostrap.XXXXX" 460 | tmp_installer="/tmp/b2g-bootstrap.XXXXX" 461 | else 462 | tmp_installer=`mktemp /tmp/b2g-bootstrap.XXXXXX` || ( 463 | echo "Error: Could not make temporary file for Homebrew installer" && 464 | exit 1 465 | ) 466 | fi 467 | 468 | installer_url="https://raw.github.com/mxcl/homebrew/go/install" 469 | 470 | run_command curl -fsSL $installer_url -o $tmp_installer 471 | run_command ruby $tmp_installer 472 | } 473 | 474 | prompt_install_homebrew_formula() { 475 | name=$1 476 | formula=$2 477 | 478 | if [ "$option_auto_install" = "no" ]; then 479 | echo "$name wasn't found, but it looks like you have Homebrew" 480 
| echo "installed at $homebrew." 481 | echo "" 482 | echo "Do you want to install $name by running Homebrew?" 483 | prompt_question "[$homebrew install $formula] [Y/n] " Y 484 | else 485 | echo "Automatically installing $name" 486 | answer=Y 487 | fi 488 | 489 | if [[ $answer = Y ]]; then 490 | run_command $homebrew install $formula 491 | fi 492 | } 493 | 494 | clone_b2g() { 495 | if [ -d "$PWD/B2G" ]; then 496 | echo "Found existing B2G: $PWD/B2G" 497 | return 0 498 | fi 499 | 500 | run_command git clone git://github.com/mozilla-b2g/B2G.git 501 | } 502 | 503 | parse_options $@ 504 | bootstrap_mac 505 | -------------------------------------------------------------------------------- /scripts/code-drop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd .. 4 | 5 | . load-config.sh 6 | 7 | b2g_root=$(cd `dirname $0` ; pwd) 8 | b2g_basename=$(basename $b2g_root) 9 | b2g_parent=$(dirname $b2g_root) 10 | branch=$(cd $b2g_root/.repo/manifests && git rev-parse --abbrev-ref HEAD) 11 | manifest_file=sources.xml 12 | output="../B2G_${branch}_${DEVICE_NAME}.tar.gz" 13 | 14 | if [ -n "$OUT_DIR" -a -d $b2g_root/$OUT_DIR ] ; then 15 | gecko_exclude="--exclude=${b2g_basename}/$OUT_DIR" 16 | fi 17 | 18 | if [ -n "$GECKO_OBJDIR" -a -d $b2g_root/$GECKO_OBJDIR ] ; then 19 | gecko_exclude="--exclude=${b2g_basename}/$GECKO_OBJDIR" 20 | fi 21 | 22 | if [ -f "$EXCLUDE_FILE" ]; then 23 | extra_excludes="--exclude-from=$EXCLUDE_FILE" 24 | fi 25 | 26 | [ $DEVICE_NAME ] && [ $branch ] && 27 | echo Creating manifest && 28 | $b2g_root/gonk-misc/add-revision.py $b2g_root/.repo/manifest.xml \ 29 | --output $manifest_file --force --b2g-path $b2g_root --tags && 30 | echo Creating Tarball && 31 | nice tar zcf "$output" \ 32 | -C $b2g_parent \ 33 | --checkpoint=1000 \ 34 | --checkpoint-action=dot \ 35 | --transform="s,^$b2g_basename,B2G_${branch}_${DEVICE_NAME}," \ 36 | --exclude=".git" \ 37 | --exclude=".hg" \ 38 | 
--exclude="$b2g_basename/$output" \ 39 | --exclude="$b2g_basename/.repo" \ 40 | --exclude="$b2g_basename/repo" \ 41 | --exclude="$b2g_basename/out" \ 42 | --exclude="$b2g_basename/objdir-gecko" \ 43 | --exclude="B2G_*.tar.gz" \ 44 | $gecko_exclude \ 45 | $android_exclude \ 46 | $b2g_basename \ 47 | $extra_excludes && 48 | rm $manifest_file && 49 | mv $output $PWD && 50 | echo Done! && 51 | echo "{'output': '$b2g_root/$output'}" || 52 | echo "ERROR: Could not create tarball" 53 | 54 | -------------------------------------------------------------------------------- /scripts/fastxul.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Support running with PWD=script or b2g root 6 | [ -e load-config.sh ] || cd .. 7 | 8 | B2G_DIR=$PWD 9 | . ./load-config.sh 10 | 11 | SCRIPT_NAME=$(basename $0) 12 | 13 | make -C "${GECKO_OBJDIR}" $MAKE_FLAGS binaries 14 | make -C "${GECKO_OBJDIR}" $MAKE_FLAGS package 15 | 16 | echo "Compressing xul" 17 | gzip --best -c "${GECKO_OBJDIR}/dist/b2g/libxul.so" > /tmp/b2g_libxul.so.gz 18 | adb remount 19 | echo adb push /tmp/b2g_libxul.so.gz /system/b2g/libxul.so.gz 20 | adb push /tmp/b2g_libxul.so.gz /system/b2g/libxul.so.gz 21 | 22 | adb shell stop b2g 23 | echo adb shell "gzip -d /system/b2g/libxul.so" 24 | adb shell "gzip -d /system/b2g/libxul.so" 25 | echo Restarting B2G 26 | adb shell start b2g 27 | 28 | echo "Reminder: ${SCRIPT_NAME} is a helper script to quickly update XUL when making" 29 | echo "c++ only changes that *only* affect libxul.so." 
import argparse, sys
from itertools import *
from random import *

# Ported from Python 2 to Python 3: the `print` statements below were
# converted to print() calls. print('a ', b) and Python 2's `print 'a ', b`
# produce byte-identical output (space-separated args), so the generated
# orangutan scripts are unchanged.


def print_device_home(device):
    """Emit the orangutan commands that press the device's home key."""
    if device == 'unagi':
        print('tap 44 515 1 2000')  # long home key for unagi
    elif device == 'aries':
        print('tap 360 1240 1 2000')  # also a long software home press
    else:
        # 102 corresponds to the home button key (at least on flame devices)
        print('keydown 102')
        print('sleep 5')
        print('keyup 102')


def main():
    """Generate a random orangutan input script on stdout.

    Emits `steps` random tap/sleep/drag commands bounded by the device's
    screen size, with a home-key press every 1000 steps.
    """
    parser = argparse.ArgumentParser(description="Generate automated test script for orangutan")
    parser.add_argument("-s", "--steps", help="Number of steps")
    parser.add_argument("-d", "--device", help="Optional device name for defaults")
    parser.add_argument("--width", help="Device width in pixels")
    parser.add_argument("--height", help="Device height in pixels")

    args = parser.parse_args(sys.argv[1:])

    rndSeed = random()
    fuzzSeed = str(rndSeed)  # replace rndSeed here with the required seed
    # print('Current seed is: ' + fuzzSeed)
    rnd = Random(fuzzSeed)

    steps = int(args.steps) if args.steps else 10000
    device = args.device if args.device else 'flame'

    # Per-device screen bounds; explicit --width/--height override them.
    if device == 'unagi':
        maxX = 320
        # The home key is around (44, 515) on unagi.
        # Filed a followup bug (838267) for better key support.
        maxY = 520
    elif device == 'flame':
        maxX = 480
        maxY = 854
    elif device == 'aries':
        maxX = 720
        maxY = 1280
    else:
        maxX = 320
        maxY = 520
    if args.width:
        maxX = int(args.width)
    if args.height:
        maxY = int(args.height)

    count = 1
    sleepAllowed = 1  # never emit two sleeps in a row
    while count <= steps:
        if count % 1000 == 0:
            print_device_home(device)
            count = count + 1
            sleepAllowed = 1
            continue

        x = rnd.choice(['tap', 'sleep', 'drag'])
        if x == 'tap':
            # tap [x] [y] [num times] [duration of each tap in msec]
            print('tap ', rnd.randint(1, maxX), ' ', rnd.randint(1, maxY), ' ', rnd.randint(1, 3), rnd.randint(50, 1000))
            sleepAllowed = 1
            count = count + 1
        elif x == 'sleep':
            # sleep [duration in msec]
            if sleepAllowed:
                print('sleep', rnd.randint(100, 3000))
                count = count + 1
                sleepAllowed = 0
        else:
            # drag [start x] [start y] [end x] [end y] [num steps] [duration in msec]
            print('drag', rnd.randint(1, maxX), ' ', rnd.randint(1, maxY), ' ', rnd.randint(1, maxX), ' ', rnd.randint(1, maxY), ' ', rnd.randint(10, 20), ' ', rnd.randint(10, 350))
            sleepAllowed = 1
            count = count + 1


if __name__ == "__main__":
    main()
'--enable-objc' 21 | end 22 | 23 | def objcxx? 24 | ARGV.include? '--enable-objcxx' 25 | end 26 | 27 | def build_everything? 28 | ARGV.include? '--enable-all-languages' 29 | end 30 | 31 | def nls? 32 | ARGV.include? '--enable-nls' 33 | end 34 | 35 | def profiledbuild? 36 | ARGV.include? '--enable-profiled-build' 37 | end 38 | 39 | class Ecj < Formula 40 | # Little Known Fact: ecj, Eclipse Java Complier, is required in order to 41 | # produce a gcj compiler that can actually parse Java source code. 42 | url 'ftp://sourceware.org/pub/java/ecj-4.5.jar' 43 | md5 'd7cd6a27c8801e66cbaa964a039ecfdb' 44 | end 45 | 46 | class Gcc46 < Formula 47 | homepage 'http://gcc.gnu.org' 48 | url 'http://ftpmirror.gnu.org/gcc/gcc-4.6.3/gcc-4.6.3.tar.bz2' 49 | mirror 'http://ftp.gnu.org/gnu/gcc/gcc-4.6.3/gcc-4.6.3.tar.bz2' 50 | md5 '773092fe5194353b02bb0110052a972e' 51 | 52 | depends_on 'gmp' 53 | depends_on 'libmpc' 54 | depends_on 'mpfr' 55 | 56 | def patches 57 | { :p0 => ["http://gcc.gnu.org/bugzilla/attachment.cgi?id=25656"] } 58 | end 59 | 60 | def options 61 | [ 62 | ['--enable-cxx', 'Build the g++ compiler'], 63 | ['--enable-fortran', 'Build the gfortran compiler'], 64 | ['--enable-java', 'Buld the gcj compiler'], 65 | ['--enable-objc', 'Enable Objective-C language support'], 66 | ['--enable-objcxx', 'Enable Objective-C++ language support'], 67 | ['--enable-all-languages', 'Enable all compilers and languages, except Ada'], 68 | ['--enable-nls', 'Build with natural language support'], 69 | ['--enable-profiled-build', 'Make use of profile guided optimization when bootstrapping GCC'] 70 | ] 71 | end 72 | 73 | def install 74 | # Force 64-bit on systems that use it. Build failures reported for some 75 | # systems when this is not done. 76 | ENV.m64 if MacOS.prefer_64_bit? 77 | 78 | # GCC will suffer build errors if forced to use a particular linker. 
79 | ENV.delete 'LD' 80 | 81 | ENV.remove_from_cflags '-Qunused-arguments' 82 | ENV.set_cpu_flags 'core2' 83 | 84 | # This is required on systems running a version newer than 10.6, and 85 | # it's probably a good idea regardless. 86 | # 87 | # https://trac.macports.org/ticket/27237 88 | ENV.append 'CXXFLAGS', '-U_GLIBCXX_DEBUG -U_GLIBCXX_DEBUG_PEDANTIC' 89 | 90 | gmp = Formula.factory 'gmp' 91 | mpfr = Formula.factory 'mpfr' 92 | libmpc = Formula.factory 'libmpc' 93 | 94 | # Sandbox the GCC lib, libexec and include directories so they don't wander 95 | # around telling small children there is no Santa Claus. This results in a 96 | # partially keg-only brew following suggestions outlined in the "How to 97 | # install multiple versions of GCC" section of the GCC FAQ: 98 | # http://gcc.gnu.org/faq.html#multiple 99 | gcc_prefix = prefix + 'gcc' 100 | 101 | args = [ 102 | # Sandbox everything... 103 | "--prefix=#{gcc_prefix}", 104 | # ...except the stuff in share... 105 | "--datarootdir=#{share}", 106 | # ...and the binaries... 107 | "--bindir=#{bin}", 108 | # ...which are tagged with a suffix to distinguish them. 109 | "--program-suffix=-#{version.to_s.slice(/\d\.\d/)}", 110 | "--with-gmp=#{gmp.prefix}", 111 | "--with-mpfr=#{mpfr.prefix}", 112 | "--with-mpc=#{libmpc.prefix}", 113 | "--with-system-zlib", 114 | "--enable-stage1-checking", 115 | "--enable-plugin", 116 | "--enable-lto", 117 | "--disable-multilib" 118 | ] 119 | 120 | args << '--disable-nls' unless nls? 121 | 122 | if build_everything? 123 | # Everything but Ada, which requires a pre-existing GCC Ada compiler 124 | # (gnat) to bootstrap. GCC 4.6.0 add go as a language option, but it is 125 | # currently only compilable on Linux. 126 | languages = %w[c c++ fortran java objc obj-c++] 127 | else 128 | # The C compiler is always built, but additional defaults can be added 129 | # here. 130 | languages = %w[c] 131 | 132 | languages << 'c++' if cxx? 133 | languages << 'fortran' if fortran? 
134 | languages << 'java' if java? 135 | languages << 'objc' if objc? 136 | languages << 'obj-c++' if objcxx? 137 | end 138 | 139 | if java? or build_everything? 140 | source_dir = Pathname.new Dir.pwd 141 | 142 | Ecj.new.brew do |ecj| 143 | # Copying ecj.jar into the toplevel of the GCC source tree will cause 144 | # gcc to automagically package it into the installation. It *must* be 145 | # named ecj.jar and not ecj-version.jar in order for this to happen. 146 | mv "ecj-#{ecj.version}.jar", (source_dir + 'ecj.jar') 147 | end 148 | end 149 | 150 | Dir.mkdir 'build' 151 | Dir.chdir 'build' do 152 | system '../configure', "--enable-languages=#{languages.join(',')}", *args 153 | 154 | if profiledbuild? 155 | # Takes longer to build, may bug out. Provided for those who want to 156 | # optimise all the way to 11. 157 | system 'make profiledbootstrap' 158 | else 159 | system 'make bootstrap' 160 | end 161 | 162 | # At this point `make check` could be invoked to run the testsuite. The 163 | # deja-gnu and autogen formulae must be installed in order to do this. 164 | 165 | system 'make install' 166 | end 167 | end 168 | end 169 | -------------------------------------------------------------------------------- /scripts/package-emulator.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set - 3 | cd .. 4 | . 
setup.sh 5 | PRODUCT_OUT=$(get_build_var PRODUCT_OUT) 6 | HOST_OUT=$(get_build_var HOST_OUT) 7 | OUT_DIR=$(get_abs_build_var OUT_DIR) 8 | 9 | EMULATOR_FILES=(\ 10 | .config \ 11 | load-config.sh \ 12 | run-emulator.sh \ 13 | ${HOST_OUT}/bin/adb \ 14 | ${HOST_OUT}/bin/emulator \ 15 | ${HOST_OUT}/bin/emulator-arm \ 16 | ${HOST_OUT}/bin/mksdcard \ 17 | ${HOST_OUT}/bin/qemu-android-x86 \ 18 | ${HOST_OUT}/lib \ 19 | ${HOST_OUT}/usr \ 20 | development/tools/emulator/skins \ 21 | prebuilts/qemu-kernel/arm/kernel-qemu-armv7 \ 22 | ${PRODUCT_OUT}/system/build.prop \ 23 | ${PRODUCT_OUT}/system.img \ 24 | ${PRODUCT_OUT}/userdata.img \ 25 | ${PRODUCT_OUT}/ramdisk.img) 26 | 27 | EMULATOR_ARCHIVE="${OUT_DIR}/emulator.tar.gz" 28 | 29 | echo "Creating emulator archive at $EMULATOR_ARCHIVE" 30 | 31 | rm -rf $EMULATOR_ARCHIVE 32 | tar -cvzf $EMULATOR_ARCHIVE --transform 's,^,b2g-distro/,S' --show-transformed-names ${EMULATOR_FILES[@]} 33 | 34 | -------------------------------------------------------------------------------- /scripts/profile-symbolicate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse, bisect, json, os, subprocess, sys 4 | import os.path, re, urllib2 5 | 6 | gSpecialLibs = { 7 | # The [vectors] is a special section used for functions which can really 8 | # only be implemented in kernel space. 
See arch/arm/kernel/entry-armv.S 9 | "[vectors]" : { 10 | "0xffff0f60": "__kernel_cmpxchg64", 11 | "0xffff0fa0": "__kernel_dmb", 12 | "0xffff0fc0": "__kernel_cmpxchg", 13 | "0xffff0fe0": "__kernel_get_tls", 14 | "0xffff0ffc": "__kernel_helper_version" 15 | } 16 | } 17 | 18 | symbol_path_matcher = re.compile(r"^(.+) \[(.*)]$") 19 | 20 | def fixupAddress(lib, address): 21 | lib_address = int(address, 0) - lib.start + lib.offset 22 | return (lib_address & ~1) - 1 23 | 24 | def formatAddress(address): 25 | return "0x{:X}".format(address) 26 | 27 | def get_tools_prefix(): 28 | if "GECKO_TOOLS_PREFIX" in os.environ: 29 | return os.environ["GECKO_TOOLS_PREFIX"] 30 | 31 | if "TARGET_TOOLS_PREFIX" in os.environ: 32 | return os.environ["TARGET_TOOLS_PREFIX"] 33 | 34 | return None 35 | 36 | ############################################################################### 37 | # 38 | # Library class. There is an instance of this for each library in the profile. 39 | # 40 | ############################################################################### 41 | 42 | class Library: 43 | def __init__(self, lib_dict, verbose=False, symbols_path=None): 44 | """lib_dict will be the JSON dictionary from the profile""" 45 | self.start = lib_dict["start"] 46 | self.end = lib_dict["end"] 47 | self.offset = lib_dict["offset"] 48 | self.target_name = lib_dict["name"] 49 | self.id = lib_dict["breakpadId"] 50 | self.verbose = verbose 51 | self.host_name = None 52 | self.located = False 53 | self.symbols = {} 54 | self.symbol_table = None 55 | self.symbol_table_addresses = None 56 | self.symbols_path = symbols_path 57 | 58 | def AddressToSymbol(self, address_str): 59 | """Attempts to convert an address into a symbol.""" 60 | return self.AddressesToSymbols([address_str])[0] 61 | 62 | def AddressesToSymbols(self, addresses_strs): 63 | """Converts multiple addresses into symbols.""" 64 | if not self.located: 65 | self.Locate() 66 | if self.symbol_table: 67 | return 
self.LookupAddressesInSymbolTable(addresses_strs) 68 | if not self.host_name: 69 | unknown = "Unknown (in " + self.target_name + ")" 70 | return [unknown for i in range(len(addresses_strs))] 71 | syms = self.LookupAddressesInBreakpad(addresses_strs) 72 | if syms is not None: 73 | return syms 74 | target_tools_prefix = get_tools_prefix() 75 | if target_tools_prefix is None: 76 | target_tools_prefix = "arm-eabi-" 77 | args = [target_tools_prefix + "addr2line", "-C", "-f", "-e", self.host_name] 78 | nm_args = ["gecko/tools/profiler/nm-symbolicate.py", self.host_name] 79 | for address_str in addresses_strs: 80 | lib_address = int(address_str, 0) - self.start + self.offset 81 | if self.verbose: 82 | print "Address %s maps to library '%s' offset 0x%08x" % (address_str, self.host_name, lib_address) 83 | # Fix up addresses from stack frames; they're for the insn after 84 | # the call, which might be a different function thanks to inlining: 85 | adj_address = max(0, (lib_address & ~1) - 1) 86 | args.append("0x%08x" % adj_address) 87 | nm_args.append("0x%08x" % adj_address) 88 | # Calling addr2line will return 2 lines for each address. The output will be something 89 | # like the following: 90 | # PR_IntervalNow 91 | # /home/work/B2G-profiler/mozilla-inbound/nsprpub/pr/src/misc/prinrval.c:43 92 | # PR_Unlock 93 | # /home/work/B2G-profiler/mozilla-inbound/nsprpub/pr/src/pthreads/ptsynch.c:191 94 | syms_and_lines = subprocess.check_output(args).split("\n") 95 | 96 | # Check if we had no useful output from addr2line. If so well try using the symbol table 97 | # from nm. 98 | has_good_line = False 99 | for line in syms_and_lines: 100 | if line != "??" 
def ContainsAddress(self, address):
    """Return True when *address* lies within this library's [start, end) range."""
    return self.start <= address < self.end
We avoid the dist tree since 147 | # those are stripped. 148 | if "GECKO_OBJDIR" in os.environ: 149 | gecko_objdir = os.environ["GECKO_OBJDIR"] 150 | else: 151 | gecko_objdir = "objdir-gecko" 152 | if not os.path.isdir(gecko_objdir): 153 | if self.symbols_path is None: 154 | print(gecko_objdir, "isn't a directory"); 155 | sys.exit(1) 156 | self.host_name = os.path.basename(self.target_name) 157 | self.located = True 158 | return 159 | lib_name = self.FindLibInTree(basename, gecko_objdir, exclude_dir="dist") 160 | if not lib_name: 161 | # Probably an android library 162 | # Look in /symbols first, fallback to /system for partial symbols 163 | if "PRODUCT_OUT" in os.environ: 164 | product_out = os.environ["PRODUCT_OUT"] + "/symbols" 165 | else: 166 | product_out = "out/target/product/symbols" 167 | if not os.path.isdir(product_out): 168 | print(product_out, "isn't a directory"); 169 | sys.exit(1) 170 | lib_name = self.FindLibInTree(basename, product_out) 171 | if not lib_name: 172 | if "PRODUCT_OUT" in os.environ: 173 | product_out = os.environ["PRODUCT_OUT"] + "/system" 174 | else: 175 | product_out = "out/target/product/system" 176 | if not os.path.isdir(product_out): 177 | print(product_out, "isn't a directory"); 178 | sys.exit(1) 179 | lib_name = self.FindLibInTree(basename, product_out) 180 | if lib_name: 181 | self.host_name = lib_name 182 | if self.verbose: 183 | print "Found '" + self.host_name + "' for '" + self.target_name + "'" 184 | elif self.target_name in gSpecialLibs: 185 | self.symbol_table = gSpecialLibs[self.target_name] 186 | self.symbol_table_addresses = sorted(self.symbol_table.keys()) 187 | elif self.target_name[:1] == "/": # Absolute paths. 
def LookupAddressInSymbolTable(self, address_str):
    """Resolve one address via the special (string-keyed) symbol table.

    The table keys are fixed-width "0x..." strings, so string ordering is
    address ordering and bisect finds the nearest preceding entry.
    """
    idx = bisect.bisect(self.symbol_table_addresses, address_str)
    if idx:
        idx -= 1
    candidate = self.symbol_table_addresses[idx]
    if address_str >= candidate:
        name = self.symbol_table[candidate]
    else:
        # Address precedes every table entry.
        name = "Unknown"
    return "%s (in %s)" % (name, self.target_name)
240 | # When the symbol is resolved with source path, 241 | # the symbol format is: 242 | # [:] 243 | match = symbol_path_matcher.match(symbol) 244 | if match: 245 | symbol, source_path = match.group(1, 2) 246 | source_file = os.path.basename(source_path) 247 | symbol += " @ " + source_file 248 | return symbol + " (in " + self.target_name + ")" 249 | 250 | return map(lambda address: fixSymbol(address), addresses) 251 | 252 | def ResolveSymbols(self, progress=False): 253 | """Tries to convert all of the symbols into symbolic equivalents.""" 254 | if len(self.symbols) == 0: 255 | return 256 | addresses_strs = self.symbols.keys() 257 | for i in range(0,len(addresses_strs), 256): 258 | slice = addresses_strs[i:i+256] 259 | if progress: 260 | print "Resolving symbols for", self.target_name, len(slice), "addresses" 261 | syms = self.AddressesToSymbols(slice) 262 | for j in range(len(syms)): 263 | self.symbols[addresses_strs[i+j]] = syms[j] 264 | 265 | ############################################################################### 266 | # 267 | # Libraries class. Encapsulates the collection of libraries. 
268 | # 269 | ############################################################################### 270 | 271 | class Libraries: 272 | def __init__(self, profile, verbose=False, symbols_path=None): 273 | lib_dicts = json.loads(profile["libs"]) 274 | lib_dicts = sorted(lib_dicts, key=lambda lib: lib["start"]) 275 | self.libs = [Library(lib_dict, verbose=verbose, 276 | symbols_path=symbols_path) for lib_dict in lib_dicts] 277 | # Create a sorted list of just the start addresses so that we can use 278 | # bisect to lookup addresses 279 | self.libs_start = [lib.start for lib in self.libs] 280 | self.profile = profile 281 | self.last_lib = None 282 | self.symbols_path = symbols_path 283 | 284 | def Dump(self): 285 | """Dumps out some information about all of the libraries that we're tracking.""" 286 | for lib in self.libs: 287 | lib.Dump() 288 | 289 | def DumpSymbols(self): 290 | """Dumps out the symbols for all of the libraries that we're tracking.""" 291 | for lib in self.libs: 292 | lib.DumpSymbols() 293 | 294 | def AddressToLib(self, address): 295 | """Does a binary search through our ordered collection of libraries.""" 296 | i = bisect.bisect(self.libs_start, address) 297 | if i: 298 | i = i - 1 299 | if i < len(self.libs_start): 300 | lib = self.libs[i] 301 | if lib.ContainsAddress(address): 302 | return lib 303 | 304 | def Lookup(self, address): 305 | """Figures out which library a given address comes from.""" 306 | if not (self.last_lib and self.last_lib.ContainsAddress(address)): 307 | self.last_lib = self.AddressToLib(address) 308 | return self.last_lib 309 | 310 | def ResolveSymbols(self, progress=True): 311 | """Tries to convert all of the symbols into symbolic equivalents.""" 312 | if not self.symbols_path or not self.symbols_path.startswith('http'): 313 | for lib in self.libs: 314 | lib.ResolveSymbols(progress=progress) 315 | return 316 | 317 | # We were given a url address as the symbols path, 318 | # it must be a symbolapi server address 319 | addresses = [] 
320 | memory_map = [] 321 | libs = [] 322 | index = 0 323 | address_map = {} 324 | 325 | for lib in self.libs: 326 | # skip fake binaries 327 | if not lib.target_name.startswith("["): 328 | libname = os.path.basename(lib.target_name) 329 | memory_map.append((libname, lib.id)) 330 | libs.append(lib) 331 | for address in lib.symbols.keys(): 332 | adj_address = fixupAddress(lib, address) 333 | address_map[adj_address] = address 334 | addresses.append((index, adj_address)) 335 | index += 1 336 | 337 | symbolication_request = { 338 | "stacks": [addresses], 339 | "memoryMap": memory_map, 340 | "version": 3, 341 | "symbolSources": ["B2G", "Firefox"] 342 | } 343 | 344 | request_data = json.dumps(symbolication_request) 345 | 346 | headers = { 347 | "Content-Type": "application/json", 348 | "Content-Length": len(request_data), 349 | "Connection": "close" 350 | } 351 | 352 | request = urllib2.Request(url=self.symbols_path, data=request_data, headers=headers) 353 | r = urllib2.urlopen(request) 354 | if r.getcode() != 200: 355 | raise Exception("Bad request: " + str(r.status_code)) 356 | 357 | content = json.load(r) 358 | 359 | for sym, address in zip(content[0], addresses): 360 | index = address[0] 361 | lib = libs[index] 362 | original_address = address_map[address[1]] 363 | lib.symbols[original_address] = sym 364 | 365 | def SearchUnresolvedAddresses(self, progress=False): 366 | """Search and build a set of unresolved addresses for each library.""" 367 | if progress: 368 | print "Scanning for unresolved addresses..." 369 | 370 | def getUnresolvedAddressesV2(): 371 | last_location = None 372 | for thread in self.profile["threads"]: 373 | samples = thread["samples"] 374 | for sample in samples: 375 | frames = sample["frames"] 376 | for frame in frames: 377 | location = frame["location"] 378 | if location[:2] == "0x": 379 | # Quick optimization since lots of times the same address appears 380 | # many times in a row. We only need to add each address once. 
def SymbolicationTable(self):
    """Merge every library's address->symbol map into a single dictionary.

    On duplicate addresses the library listed later wins, same as
    successive dict.update calls would behave.
    """
    return {addr: sym
            for lib in self.libs
            for addr, sym in lib.symbols.items()}
args.symbols_path: 431 | if "GECKO_OBJDIR" not in os.environ: 432 | print "'GECKO_OBJDIR' needs to be defined in the environment" 433 | sys.exit(1) 434 | 435 | if get_tools_prefix() is None: 436 | print "'{GECKO|TARGET}_TOOLS_PREFIX' needs to be defined in the environment" 437 | sys.exit(1) 438 | 439 | if "PRODUCT_OUT" not in os.environ: 440 | print "'PRODUCT_OUT' needs to be defined in the environment" 441 | sys.exit(1) 442 | 443 | def print_var(var): 444 | if var in os.environ: 445 | print var + " = '" + os.environ[var] + "'" 446 | 447 | 448 | if verbose: 449 | print "Filename =", args.filename 450 | print_var("GECKO_OBJDIR") 451 | if "GECKO_TOOLS_PREFIX" in os.environ: 452 | print_var("GECKO_TOOLS_PREFIX") 453 | else: 454 | print_var("TARGET_TOOLS_PREFIX") 455 | print_var("PRODUCT_OUT") 456 | 457 | # Read in the JSON file created by the profiler. 458 | if progress: 459 | print "Reading profiler file", args.filename, "..." 460 | profile = json.load(open(args.filename, "rb")) 461 | 462 | libs = Libraries(profile, verbose, args.symbols_path) 463 | if args.dump_libs: 464 | libs.Dump() 465 | 466 | if args.lookup: 467 | address_str = args.lookup 468 | address = int(address_str, 0) 469 | lib = libs.Lookup(address) 470 | if lib: 471 | lib.Locate() 472 | print("Address 0x%08x maps to symbol '%s'" % (address, lib.AddressToSymbol(address_str))) 473 | else: 474 | print("Address 0x%08x not found in a library" % address) 475 | else: 476 | libs.SearchUnresolvedAddresses(progress=progress) 477 | libs.ResolveSymbols(progress=progress) 478 | if args.dump_syms: 479 | libs.DumpSymbols() 480 | else: 481 | sym_profile = {"format": "profileJSONWithSymbolicationTable,1", 482 | "profileJSON": profile, 483 | "symbolicationTable": libs.SymbolicationTable()} 484 | if args.output: 485 | sym_filename = args.output 486 | else: 487 | sym_filename = args.filename + ".syms" 488 | if progress: 489 | print "Writing symbolicated results to", sym_filename, "..." 
490 | json.dump(sym_profile, open(sym_filename, "wb")) 491 | if progress: 492 | print "Done" 493 | 494 | if __name__ == "__main__": 495 | main() 496 | -------------------------------------------------------------------------------- /scripts/push-toolchain.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This simple script knows how to take a toolchain, add the packaging 3 | # jazz needed to get it onto TBPL builders and show the commands that 4 | # will upload. This script requires ssh access to the remote host 5 | # and generates a file that needs to be landed into mozilla-central 6 | set -e 7 | 8 | cd .. 9 | 10 | # if [ -z $REMOTE_HOST ] ; then REMOTE_HOST=runtime-binaries.pvt.build.mozilla.com ; fi 11 | if [ -z $REMOTE_HOST ] ; then REMOTE_HOST=relengweb1.dmz.scl3.mozilla.com ; fi 12 | if [ -z $REMOTE_PATH ] ; then REMOTE_PATH=/var/www/html/runtime-binaries/tooltool/ ; fi 13 | if [ -z $ALGORITHM ] ; then ALGORITHM=sha512 ; fi 14 | if [ -z $VERSION ] ; then 15 | echo "ERROR: must specify a version" 16 | exit 1 17 | fi 18 | if [ ! -f gonk-toolchain-$VERSION.tar.bz2 ] ; then 19 | echo "ERROR: missing the toolchain file" 20 | pwd && ls 21 | exit 1 22 | fi 23 | 24 | cat > setup.sh << EOF 25 | #!/bin/bash 26 | # This script knows how to set up a gonk toolchain in a given builder's 27 | # directory. 28 | set -xe 29 | rm -rf gonk-toolchain 30 | tar jxf gonk-toolchain-$VERSION.tar.bz2 31 | mv gonk-toolchain-$VERSION gonk-toolchain 32 | EOF 33 | 34 | if [ ! 
-f tooltool/tooltool.py ] ; then 35 | git clone github.com:jhford/tooltool 36 | else 37 | (cd tooltool && git fetch && git merge origin/master) 38 | fi 39 | rm -f new.manifest 40 | python tooltool/tooltool.py -d $ALGORITHM -m new.manifest add gonk-toolchain-$VERSION.tar.bz2 setup.sh 41 | 42 | toolchainh=$(openssl dgst -$ALGORITHM < gonk-toolchain-$VERSION.tar.bz2 | sed "s/^(stdin)= //") 43 | setuph=$(openssl dgst -$ALGORITHM < setup.sh | sed "s/^(stdin)= //") 44 | 45 | echo "These commands will upload the toolchain to the tool server" 46 | echo "scp $PWD/gonk-toolchain-$VERSION.tar.bz2 $REMOTE_HOST:$REMOTE_PATH/$ALGORITHM/$toolchainh" 47 | echo "scp $PWD/setup.sh $REMOTE_HOST:$REMOTE_PATH/$ALGORITHM/$setuph" 48 | echo "ssh $REMOTE_HOST chmod 644 $REMOTE_PATH/$ALGORITHM/$setuph $REMOTE_PATH/$ALGORITHM/$toolchainh" 49 | 50 | echo "The file \"new.manifest\" contains a manifest that points to your toolchain" 51 | echo "It need to be landed in b2g/config/tooltool-manifests/ with a filename that matches" 52 | echo "what releng tells you. Currently, ics.manifest" 53 | echo "Contents:" 54 | echo ===== 55 | cat new.manifest 56 | echo ===== 57 | -------------------------------------------------------------------------------- /scripts/run-monkey.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "* Automated testing with orangutan *" 4 | echo "You might want to disable phone calls and sending sms:" 5 | echo "Look at https://github.com/gregorwagner/gaia/tree/monkey" 6 | 7 | SCRIPT_NAME=orangutan-script 8 | ORNG_PATH=/data 9 | SCRIPT_PATH=/data/local 10 | ADB=${ADB:-adb} 11 | orangutan="$ORNG_PATH/orng" 12 | ifstmt="test -x $orangutan && echo '1'" 13 | status="$(${ADB} shell $ifstmt)" 14 | if [ -z "$status" ]; then 15 | echo "$orangutan does not exist! 
Install from https://github.com/wlach/orangutan and push orng to /data" 16 | exit 17 | fi 18 | 19 | if [ $# -gt 0 ]; then 20 | device=$1 21 | else 22 | device=flame 23 | fi 24 | 25 | if [ $# -eq 2 ]; then 26 | steps=$2 27 | else 28 | steps=100000 29 | fi 30 | 31 | if [ $device == "aries" ]; then 32 | event_device=/dev/input/event1 33 | else 34 | event_device=/dev/input/event0 35 | fi 36 | 37 | 38 | PYTHON=${PYTHON:-`which python`} 39 | $PYTHON generate-orangutan-script.py -d $device --steps $steps >$SCRIPT_NAME 40 | $ADB push $SCRIPT_NAME $SCRIPT_PATH 41 | echo "Running the script..." 42 | $ADB shell $orangutan $event_device $SCRIPT_PATH/$SCRIPT_NAME 43 | $ADB shell rm $SCRIPT_PATH/$SCRIPT_NAME 44 | echo "Done" 45 | -------------------------------------------------------------------------------- /scripts/toolchain.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # This script will probably live in /scripts/ but life's easier 5 | # when work is done in the actual B2G root 6 | cd .. 7 | 8 | OUT_DIR=out 9 | 10 | . load-config.sh 11 | 12 | if [ -z $1 ] ; then 13 | echo "Usage: $0 []" 1>&2 14 | exit -1 15 | fi 16 | 17 | output=gonk-toolchain-$1 18 | manifest_file=sources.xml 19 | toolchain_target=linux-x86 20 | if [ $2 ] ; then 21 | toolchain_target=$2 22 | fi 23 | 24 | rm -rf $output ; mkdir -p $output 25 | ./gonk-misc/add-revision.py .repo/manifest.xml \ 26 | --output $manifest_file --force --b2g-path $B2G_DIR --tags 27 | 28 | if [ ! 
-d $OUT_DIR/target/product/$DEVICE ] ; then 29 | echo "ERROR: you must build B2G before building a toolchain" 1>&2 30 | exit -1 31 | fi 32 | 33 | # Important Directories 34 | for i in \ 35 | bionic \ 36 | dalvik/libnativehelper/include/nativehelper \ 37 | external/stlport/stlport \ 38 | external/bluetooth/bluez \ 39 | external/dbus \ 40 | external/libpng \ 41 | frameworks/base/include \ 42 | frameworks/base/media/libstagefright \ 43 | frameworks/base/native/include \ 44 | frameworks/base/opengl/include \ 45 | frameworks/base/services/sensorservice \ 46 | frameworks/base/services/camera/libcameraservice \ 47 | hardware/libhardware/include \ 48 | hardware/libhardware_legacy/include \ 49 | ndk/sources/android/cpufeatures \ 50 | ndk/sources/cxx-stl/system/include \ 51 | ndk/sources/cxx-stl/stlport/stlport \ 52 | ndk/sources/cxx-stl/gabi++/include \ 53 | out/target/product/$DEVICE/obj/lib \ 54 | prebuilt/$toolchain_target/toolchain/arm-linux-androideabi-4.4.x \ 55 | system/core/include \ 56 | system/media/wilhelm/include 57 | do 58 | mkdir -p $output/$i 59 | cp -r $i/* $output/$i 60 | done 61 | 62 | # Important Files 63 | for i in \ 64 | gonk-misc/Unicode.h \ 65 | system/vold/ResponseCode.h 66 | do 67 | directory=$(dirname $i) 68 | mkdir -p $output/$directory 69 | cp $i $output/$directory/ 70 | done 71 | 72 | tar cjf $output.tar.bz2 $output $manifest_file 73 | rm -rf $output 74 | 75 | echo "{ \"toolchain_tarball\": \"$(dirname `pwd`)/$output.tar.bz2\" }" 76 | 77 | -------------------------------------------------------------------------------- /scripts/trace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ############################################################################### 4 | # Option variables 5 | 6 | opt_adb=adb 7 | opt_adb_device= 8 | opt_buffer_size=2048 9 | opt_keep=no 10 | opt_trace_name=trace.log 11 | action= 12 | 13 | 
############################################################################### 14 | # Globals 15 | 16 | ADB= # adb command complete with parameters 17 | 18 | ############################################################################### 19 | # Print the help message 20 | 21 | print_help() { 22 | printf "Usage: `basename $0` [OPTION...] [ACTION] 23 | Traces application running on the phone. 24 | 25 | Tracing is enabled by invoking the script with the --start command. The device 26 | can be disconnected after this point. Once the trace is stopped with the --stop 27 | a summary of the applications having run in the meantime is printed out. 28 | 29 | Actions: 30 | -h, --help Show this help message 31 | -s, --start Start tracing 32 | -t, --stop Stop tracing and pull the trace 33 | 34 | Application options: 35 | -b, --buffer-size Size of the tracing buffer in KiB 36 | -k, --keep Don't delete the trace 37 | 38 | ADB options: 39 | --adb Path to the adb command (default: $opt_adb) 40 | --emulator Trace the emulator 41 | --device [serial] Trace a device (the serial number is optional) 42 | " 43 | } 44 | 45 | suggest_help() { 46 | printf "Try \``basename $0` --help' for more information\n" 47 | } 48 | 49 | ############################################################################### 50 | # Parse the program arguments 51 | 52 | parse_args() { 53 | # If no arguments were given print the help message 54 | if [ $# -eq 0 ]; then 55 | action=help 56 | return 57 | fi 58 | 59 | # Set the options variables depending on the passed arguments 60 | while [ $# -gt 0 ]; do 61 | if [ `expr "$1" : "^-"` -eq 1 ]; then 62 | if [ $1 = "-h" ] || [ $1 = "--help" ]; then 63 | action=help 64 | shift 1 65 | elif [ $1 = "-s" ] || [ $1 = "--start" ]; then 66 | action=start 67 | shift 1 68 | elif [ $1 = "-t" ] || [ $1 = "--stop" ]; then 69 | action=stop 70 | shift 1 71 | elif [ $1 = "-b" ] || [ $1 = "--buffer-size" ]; then 72 | if [ $# -le 1 ]; then 73 | printf "error: No argument specified for 
$1\n" 74 | suggest_help 75 | exit 1 76 | else 77 | opt_buffer_size="$2" 78 | shift 2 79 | fi 80 | elif [ $1 = "-k" ] || [ $1 = "--keep" ]; then 81 | opt_keep=yes 82 | shift 1 83 | elif [ $1 = "-l" ] || [ $1 = "--duration" ]; then 84 | if [ $# -le 1 ]; then 85 | printf "error: No argument specified for $1\n" 86 | suggest_help 87 | exit 1 88 | else 89 | opt_duration="$2" 90 | shift 2 91 | fi 92 | elif [ $1 = "-d" ] || [ $1 = "--delay" ]; then 93 | if [ $# -le 1 ]; then 94 | printf "error: No argument specified for $1\n" 95 | suggest_help 96 | exit 1 97 | else 98 | opt_delay="$2" 99 | shift 2 100 | fi 101 | elif [ $1 = "--adb" ]; then 102 | if [ $# -le 1 ]; then 103 | printf "error: No argument specified for $1\n" 104 | suggest_help 105 | exit 1 106 | else 107 | opt_adb="$2" 108 | shift 2 109 | fi 110 | elif [ $1 = "--emulator" ]; then 111 | opt_adb_device="-e" 112 | shift 1 113 | elif [ $1 = "--device" ]; then 114 | if [ $# -le 1 ] || [ `expr "$2" : "^-"` -eq 1 ]; then 115 | opt_adb_device="-d" 116 | shift 1 117 | else 118 | opt_adb_device="-s $2" 119 | shift 2 120 | fi 121 | else 122 | printf "error: Unknown option $1\n" 123 | suggest_help 124 | exit 1 125 | fi 126 | else 127 | printf "error: Unknown option $1\n" 128 | suggest_help 129 | exit 1 130 | fi 131 | done 132 | } 133 | 134 | ############################################################################### 135 | # Prepare the adb command 136 | 137 | prepare_adb() { 138 | ADB="$opt_adb $opt_adb_device" 139 | } 140 | 141 | ############################################################################### 142 | # Find a place where to store the trace on the phone 143 | 144 | get_trace_path() { 145 | sdcard=$( 146 | $ADB shell "vdc volume list" | tr -d '\r' \ 147 | | grep -m 1 sdcard \ 148 | | cut -d' ' -f4 149 | ) 150 | 151 | if [ -n "$sdcard" ]; then 152 | printf "$sdcard/$opt_trace_name" 153 | else 154 | printf "/data/local/tmp/$opt_trace_name" 155 | fi 156 | } 157 | 158 | 
############################################################################### 159 | # Setup tracing and start it (this also clears the previous trace) 160 | 161 | start_tracing() { 162 | # Enlarge the trace ring buffer 163 | $ADB shell "echo $opt_buffer_size > /sys/kernel/debug/tracing/buffer_size_kb" && 164 | # Clear the existing buffer 165 | $ADB shell "echo '' > /sys/kernel/debug/tracing/trace" && 166 | # Enable the scheduling runtime stats event tracer 167 | $ADB shell "echo 1 > /sys/kernel/debug/tracing/events/sched/sched_stat_runtime/enable" && 168 | # Enable the scheduling wakeups event tracer 169 | $ADB shell "echo 1 > /sys/kernel/debug/tracing/events/sched/sched_wakeup/enable" && 170 | # Enable the scheduling new wakeups event tracer 171 | $ADB shell "echo 1 > /sys/kernel/debug/tracing/events/sched/sched_wakeup_new/enable" && 172 | # Enable tracing 173 | $ADB shell "echo 1 > /sys/kernel/debug/tracing/tracing_enabled" 174 | } 175 | 176 | ############################################################################### 177 | # Stop tracing and dump the trace to the SD-card 178 | 179 | stop_tracing() { 180 | trace_path=$(get_trace_path) 181 | # Disable tracing 182 | $ADB shell "echo 0 > /sys/kernel/debug/tracing/tracing_enabled" && 183 | # Disable the scheduling runtime stats event tracer 184 | $ADB shell "echo 0 > /sys/kernel/debug/tracing/events/sched/sched_stat_runtime/enable" && 185 | # Disable the scheduling wakeups event tracer 186 | $ADB shell "echo 0 > /sys/kernel/debug/tracing/events/sched/sched_wakeup/enable" && 187 | # Disable the scheduling new wakeups event tracer 188 | $ADB shell "echo 0 > /sys/kernel/debug/tracing/events/sched/sched_wakeup_new/enable" && 189 | # Dump the trace 190 | $ADB shell "cat /sys/kernel/debug/tracing/trace > '$trace_path'" 191 | } 192 | 193 | ############################################################################### 194 | # Pull the trace from the SD-card 195 | 196 | transfer_trace() { 197 | 
trace_path=$(get_trace_path) 198 | $ADB pull "$trace_path" 1>/dev/null 2>/dev/null && 199 | if [ $opt_keep != yes ]; then 200 | $ADB shell rm "$trace_path" 201 | fi 202 | } 203 | 204 | ############################################################################### 205 | # Get the PIDs of all FxOS applications 206 | 207 | get_pids() { 208 | $ADB shell 'ps | grep /system/b2g/ | while read user pid ppid rest ; do 209 | echo $pid 210 | done' | tr -d '\r' 211 | } 212 | 213 | ############################################################################### 214 | # Process trace 215 | 216 | process_trace() { 217 | printf "%16s %5s %7s %12s %3s\n" "NAME" "PID" "WAKEUPS" "RUNTIME" "NEW" 218 | for pid in $(get_pids) ; do 219 | name=$($ADB shell cat /proc/$pid/comm | tr -d '\r') 220 | total_runtime=0 221 | wakeups=0 222 | new=no 223 | 224 | grep " pid=$pid " "$opt_trace_name" | { 225 | while read -r line; do 226 | # Accumulate each time slice 227 | runtime=$(expr "$line" : ".* sched_stat_runtime:.* runtime=\([0-9]*\).*") 228 | total_runtime=$((total_runtime + runtime)) 229 | 230 | # Count the number of wakeups 231 | wakeup=$(expr "$line" : ".* sched_wakeup:.* success=1.*") 232 | if [ $wakeup -gt 0 ]; then 233 | wakeups=$((wakeups + 1)) 234 | fi 235 | 236 | # Check if the app was created while we were tracing 237 | wakeup_new=$(expr "$line" : ".* sched_wakeup_new:.* success=1.*") 238 | if [ $wakeup_new -gt 0 ]; then 239 | new=yes 240 | fi 241 | done 242 | 243 | total_runtime=$((total_runtime / 1000000)) 244 | printf "%16s %5u %7u %9u ms %3s\n" \ 245 | "$name" $pid $wakeups $total_runtime $new 246 | } 247 | done 248 | 249 | if [ $opt_keep != yes ]; then 250 | rm -f "$opt_trace_name" 251 | fi 252 | } 253 | 254 | ############################################################################### 255 | # Main script 256 | 257 | parse_args "$@" 258 | prepare_adb 259 | 260 | case $action in 261 | help) 262 | print_help 263 | ;; 264 | start) 265 | start_tracing 266 | ;; 267 | stop) 268 | 
stop_tracing && 269 | transfer_trace && 270 | process_trace 271 | ;; 272 | *) 273 | printf "error: Unknown action $action\n" 274 | suggest_help 275 | exit 1 276 | ;; 277 | esac 278 | -------------------------------------------------------------------------------- /scripts/updates.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Determine the absolute path of our location. 4 | B2G_DIR=$(cd `dirname $0`/..; pwd) 5 | . $B2G_DIR/setup.sh 6 | 7 | # Run standard set of tests by default. Command line arguments can be 8 | # specified to run specific tests (an individual test file, a directory, 9 | # or an .ini file). 10 | TEST_PATH=$GECKO_PATH/testing/marionette/client/marionette/tests/update-tests.ini 11 | MARIONETTE_FLAGS+=" --homedir=$B2G_DIR --type=b2g-smoketest" 12 | 13 | while [ $# -gt 0 ]; do 14 | case "$1" in 15 | --*) 16 | MARIONETTE_FLAGS+=" $1" ;; 17 | *) 18 | MARIONETTE_TESTS+=" $1" ;; 19 | esac 20 | shift 21 | done 22 | 23 | MARIONETTE_TESTS=${MARIONETTE_TESTS:-$TEST_PATH} 24 | echo "Running tests: $MARIONETTE_TESTS" 25 | 26 | SCRIPT=$GECKO_PATH/testing/marionette/client/marionette/venv_b2g_update_test.sh 27 | PYTHON=${PYTHON:-`which python`} 28 | 29 | echo bash $SCRIPT "$PYTHON" $MARIONETTE_FLAGS $MARIONETTE_TESTS 30 | bash $SCRIPT "$PYTHON" $MARIONETTE_FLAGS $MARIONETTE_TESTS 31 | -------------------------------------------------------------------------------- /scripts/xpcshell.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | B2G_DIR=$(cd `dirname $0`/..; pwd) 4 | . 
$B2G_DIR/load-config.sh 5 | 6 | VIRTUAL_ENV_VERSION="49f40128a9ca3824ebf253eca408596e135cf893" 7 | BUSYBOX=$B2G_DIR/gaia/build/busybox-armv6l 8 | TEST_PACKAGE_STAGE_DIR=$GECKO_OBJDIR/dist/test-package-stage 9 | TESTING_MODULES_DIR=$TEST_PACKAGE_STAGE_DIR/modules 10 | 11 | XPCSHELL_FLAGS+=" --b2gpath $B2G_DIR \ 12 | --use-device-libs \ 13 | --busybox $BUSYBOX \ 14 | --testing-modules-dir $TESTING_MODULES_DIR" 15 | 16 | OUT_HOST=$B2G_DIR/out/host/`uname -s | tr "[[:upper:]]" "[[:lower:]]"`-x86 17 | ADB=${ADB:-$OUT_HOST/bin/adb} 18 | XPCSHELL_FLAGS+=" --adbpath $ADB" 19 | 20 | if [ "$DEVICE" = "generic" ]; then 21 | XPCSHELL_FLAGS+=" --emulator arm" 22 | elif [ "$DEVICE" = "generic_x86" ]; then 23 | XPCSHELL_FLAGS+=" --emulator x86" 24 | fi 25 | 26 | XPCSHELL_MANIFEST=tests/xpcshell_b2g.ini 27 | 28 | while [ $# -gt 0 ]; do 29 | case "$1" in 30 | --manifest) 31 | shift; XPCSHELL_MANIFEST=$1 ;; 32 | --manifest=*) 33 | XPCSHELL_MANIFEST=${1:11} ;; 34 | *) 35 | XPCSHELL_FLAGS+=" $1" ;; 36 | esac 37 | shift 38 | done 39 | 40 | XPCSHELL_FLAGS+=" --manifest $XPCSHELL_MANIFEST" 41 | MARIONETTE_HOME=$GECKO_PATH/testing/marionette/client/ 42 | PYTHON=`which python` 43 | 44 | VENV_DIR="marionette_venv" 45 | if [ -z $GECKO_OBJDIR ] 46 | then 47 | VENV_DIR="$MARIONETTE_DIR/$VENV_DIR" 48 | else 49 | VENV_DIR="$GECKO_OBJDIR/$VENV_DIR" 50 | fi 51 | 52 | if [ -d $VENV_DIR ] 53 | then 54 | echo "Using virtual environment in $VENV_DIR" 55 | else 56 | echo "Creating a virtual environment in $VENV_DIR" 57 | curl https://raw.github.com/pypa/virtualenv/${VIRTUAL_ENV_VERSION}/virtualenv.py | ${PYTHON} - $VENV_DIR 58 | fi 59 | . $VENV_DIR/bin/activate 60 | 61 | cd $MARIONETTE_HOME 62 | python setup.py develop 63 | 64 | set -e 65 | if [ ! 
-d "$TEST_PACKAGE_STAGE_DIR" ]; then 66 | cd $GECKO_OBJDIR 67 | make package-tests 68 | fi 69 | 70 | set -x 71 | cd $TEST_PACKAGE_STAGE_DIR/xpcshell 72 | $VENV_DIR/bin/python runtestsb2g.py $XPCSHELL_FLAGS $@ 73 | -------------------------------------------------------------------------------- /setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . load-config.sh 4 | 5 | export USE_CCACHE=yes && 6 | export GECKO_PATH && 7 | export GAIA_PATH && 8 | export GAIA_DOMAIN && 9 | export GAIA_PORT && 10 | export GAIA_DEBUG && 11 | export GECKO_OBJDIR && 12 | export B2G_NOOPT && 13 | export B2G_DEBUG && 14 | export MOZ_CHROME_MULTILOCALE && 15 | export L10NBASEDIR && 16 | export MOZ_B2G_DSDS && 17 | . build/envsetup.sh && 18 | lunch $LUNCH 19 | -------------------------------------------------------------------------------- /test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | B2G_HOME=$(dirname $BASH_SOURCE) 4 | 5 | usage() { 6 | echo "Usage: $0 [marionette|mochitest|updates|xpcshell] (frontend-args)" 7 | echo "" 8 | echo "'marionette' is the default frontend" 9 | } 10 | 11 | FRONTEND=$1 12 | if [ -z "$FRONTEND" ]; then 13 | FRONTEND=marionette 14 | else 15 | shift 16 | fi 17 | 18 | case "$FRONTEND" in 19 | mochitest) 20 | echo "Use ./mach mochitest-remote to run tests;" 21 | echo "use ./mach help mochitest-remote for options." ;; 22 | marionette) 23 | echo "Use ./mach marionette-webapi to run tests;" 24 | echo "use ./mach help mochitest-webapi for options." 
;; 25 | updates) 26 | SCRIPT=$B2G_HOME/scripts/updates.sh ;; 27 | xpcshell) 28 | SCRIPT=$B2G_HOME/scripts/xpcshell.sh ;; 29 | --help|-h|help) 30 | usage 31 | exit 0;; 32 | *) 33 | usage 34 | echo "Error: Unknown test frontend: $FRONTEND" 1>&2 35 | exit 1 36 | esac 37 | 38 | echo $SCRIPT $@ 39 | $SCRIPT $@ 40 | -------------------------------------------------------------------------------- /tools/fix_b2g_stack.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """This script uses addr2line (part of binutils) to post-process the entries 4 | produced by NS_FormatCodeAddress(). 5 | 6 | This is an analog to fix_linux_stack.py and is functionally similar to 7 | $B2G_ROOT/scripts/profile-symbolicate.py. 8 | 9 | """ 10 | 11 | from __future__ import print_function 12 | 13 | import sys 14 | if sys.version_info < (2, 7): 15 | # We need Python 2.7 because we import argparse. 16 | print('This script requires Python 2.7.', file=sys.stderr) 17 | sys.exit(1) 18 | 19 | import os 20 | import re 21 | import subprocess 22 | import itertools 23 | import argparse 24 | import platform 25 | import textwrap 26 | import threading 27 | import cPickle as pickle 28 | import fcntl 29 | from os.path import dirname 30 | from collections import defaultdict 31 | from gzip import GzipFile 32 | 33 | 34 | def first(pred, itr): 35 | """Return the first element of itr which matches the predicate pred, or 36 | return None if no such element exists. 37 | 38 | This function avoids running pred unnecessarily, so is suitable for use 39 | when pred is expensive. 40 | 41 | """ 42 | try: 43 | return itertools.ifilter(pred, itr).next() 44 | except StopIteration: 45 | return None 46 | 47 | 48 | def pump(dst, src): 49 | """Pump the file dst into the file src. When src hits EOF, close dst. 50 | 51 | Returns a thread object, so you can e.g. join() on the result. 
52 | 53 | """ 54 | class Pumper(threading.Thread): 55 | def run(self): 56 | while True: 57 | bytes = src.read(4096) 58 | if not bytes: 59 | break 60 | dst.write(bytes) 61 | dst.close() 62 | p = Pumper() 63 | p.start() 64 | return p 65 | 66 | 67 | def _none_factory(): 68 | return None 69 | 70 | 71 | def _defaultdict_none_factory(): 72 | return defaultdict(_none_factory) 73 | 74 | 75 | class FixB2GStacksOptions(object): 76 | """Encapsulates arguments used in fix_b2g_stacks_in_file. 77 | 78 | The args argument to __init__() specifies options passed to 79 | fix_b2g_stacks_in_file. 80 | 81 | All of the args are optional, but see the caveat on the |product| arg. 82 | 83 | We look for the following properties on |args| and define corresponding 84 | properties on |self|. (All default paths are relative to this source 85 | file's location, not to the current working directory.) 86 | 87 | * toolchain_prefix: The cross-toolchain binary prefix. 88 | Default: 'arm-linux-androideabi-'. 89 | 90 | * toolchain_dir: The directory in which the cross-toolchain binaries 91 | live. Default: 92 | ../prebuilt/PLATFORM-x86/toolchain/arm-linux-android-eabi-4.4.x/bin 93 | 94 | * gecko_objdir: The gecko object directory. Default: ../objdir-gecko. 95 | 96 | * gonk_objdir: The gonk object directory. Default: ../out. 97 | 98 | * product: The device we're targeting. Default: the one directory 99 | inside gonk_objdir/target/product. If gonk_objdir/target/product 100 | is empty or has multiple sub-directories and the |product| arg was 101 | not specified, we raise an exception. 102 | 103 | * remove_cache: If true, delete fix_b2g_stack.py's persistent 104 | addr2line cache when we start running fix_b2g_stacks_in_file. 105 | 106 | In addition, this class defines two additional properties on itself based 107 | on the parameters received in __init__. 
108 | 109 | * lib_search_dirs: [$gecko_objdir, $gonk_objdir/target/product/$product] 110 | 111 | * cross_bin(bin_name): Returns a path to the given cross-toolchain 112 | program. For example, cross_bin('nm') returns a path to the 113 | cross-toolchain's nm binary. 114 | 115 | """ 116 | def __init__(self, args): 117 | def get_arg(arg, default=None): 118 | try: 119 | if getattr(args, arg): 120 | return getattr(args, arg) 121 | except TypeError: 122 | pass 123 | 124 | try: 125 | if arg in args and args[arg]: 126 | return args[arg] 127 | except TypeError: 128 | pass 129 | 130 | try: 131 | return default() 132 | except TypeError: 133 | pass 134 | 135 | return default 136 | 137 | self.toolchain_prefix = get_arg('toolchain_prefix', 'arm-linux-androideabi-') 138 | self.toolchain_dir = get_arg('toolchain_dir', self._guess_toolchain_dir) 139 | self.remove_cache = get_arg('remove_cache', False) 140 | 141 | self.gecko_objdir = get_arg( 142 | 'gecko_objdir', os.path.join(dirname(__file__), '../objdir-gecko')) 143 | self.gonk_objdir = get_arg( 144 | 'gonk_objdir', os.path.join(dirname(__file__), '../out')) 145 | 146 | product = get_arg('product') 147 | if product: 148 | product_dir = os.path.join(self.gonk_objdir, 'target/product', product) 149 | else: 150 | product_dir = self._guess_gonk_product(self.gonk_objdir) 151 | 152 | self.lib_search_dirs = [self.gecko_objdir, product_dir] 153 | 154 | def cross_bin(self, bin_name): 155 | return os.path.join(self.toolchain_dir, self.toolchain_prefix + bin_name) 156 | 157 | @staticmethod 158 | def _guess_toolchain_dir(): 159 | patterns = [ 160 | 'prebuilt/%(host)s/toolchain/%(target)s-4.4.x', 161 | 'prebuilts/gcc/%(host)s/%(target_arch)s/%(target)s-4.7', 162 | ] 163 | # FIXME, bug 1032524: this shouldn't assume an ARM target. 
164 | args = { 165 | 'host': platform.system().lower() + "-x86", 166 | 'target': 'arm-linux-androideabi', 167 | 'target_arch': 'arm', 168 | } 169 | for pattern in patterns: 170 | maybe_dir = os.path.join(dirname(__file__), '..', (pattern % args), 'bin') 171 | if os.path.exists(maybe_dir): 172 | return maybe_dir 173 | raise Exception("No toolchain directory found") 174 | 175 | @staticmethod 176 | def _guess_gonk_product(gonk_objdir): 177 | products_dir = os.path.join(gonk_objdir, 'target/product') 178 | products = os.listdir(products_dir) 179 | if not products: 180 | raise Exception("Couldn't auto-detect a product, because %s is empty." % 181 | products_dir) 182 | if len(products) == 1: 183 | return os.path.join(products_dir, products[0]) 184 | 185 | raise Exception(textwrap.dedent( 186 | '''Couldn't auto-detect a product because %s has multiple entries. 187 | 188 | Please re-run with --product. Your options are %s.''' % 189 | (products_dir, products))) 190 | 191 | 192 | class StackFixerCache(): 193 | """A cache for StackFixer which occasionally serializes itself to disk. 194 | 195 | This cache stores (lib, offset) --> string mappings, so we can avoid 196 | calling addr2line. After every so many puts, we write the cache out to 197 | disk. 198 | 199 | Please be kind and call flush() on this object when you're done with it. 200 | That gives us one last chance to write our cache out to disk. 201 | 202 | When the cache is read in from disk, we check that the libraries' sizes, 203 | mtimes, and ctimes haven't changed. If they have, we throw out the cached 204 | mappings. 205 | 206 | In theory you can safely access this cache from multiple processes, because 207 | we use fcntl locking on the cache file. (We never block on acquiring a 208 | lock on the cache file; if we can't immediately access the file, we simply 209 | give up.) But note that I have not tested that this locking works as 210 | intended. 
211 | 212 | """ 213 | def __init__(self, options): 214 | self._initialized = False 215 | self._lib_lookups = None 216 | self._lib_metadata = None 217 | self._put_counter = 0 218 | 219 | # Write the cache file after this many puts. 220 | self._write_cache_after_puts = 500 221 | 222 | def _ensure_initialized(self): 223 | if self._initialized: 224 | return 225 | cache = self._read_cache_from_disk() 226 | if cache: 227 | self._lib_lookups = cache['lookups'] 228 | self._lib_metadata = cache['metadata'] 229 | self._validate_lib_metadata() 230 | else: 231 | self._lib_lookups = defaultdict(_defaultdict_none_factory) 232 | self._lib_metadata = {} 233 | self._initialized = True 234 | 235 | @staticmethod 236 | def cache_filename(): 237 | """Get the filename of our cache.""" 238 | return os.path.join(dirname(__file__), '.fix_b2g_stack.cache') 239 | 240 | @staticmethod 241 | def _read_cache_from_disk(): 242 | try: 243 | with open(StackFixerCache.cache_filename(), 'rb') as cache_file: 244 | try: 245 | fcntl.lockf(cache_file.fileno(), fcntl.LOCK_SH | fcntl.LOCK_NB) 246 | return pickle.load(cache_file) 247 | finally: 248 | try: 249 | fcntl.lockf(cache_file.fileno(), fcntl.LOCK_UN) 250 | except IOError: 251 | pass 252 | except (EOFError, IOError, pickle.PickleError) as e: 253 | pass 254 | return None 255 | 256 | def flush(self): 257 | if self._put_counter: 258 | self._write_cache_to_disk() 259 | 260 | def _write_cache_to_disk(self): 261 | try: 262 | with open(StackFixerCache.cache_filename(), 'wb') as cache_file: 263 | try: 264 | fcntl.lockf(cache_file.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB) 265 | pickle.dump({'lookups': self._lib_lookups, 266 | 'metadata': self._lib_metadata}, 267 | cache_file, 268 | pickle.HIGHEST_PROTOCOL) 269 | self._put_counter = 0 270 | return True 271 | finally: 272 | try: 273 | fcntl.lockf(cache_file.fileno(), fcntl.LOCK_UN) 274 | except IOError: 275 | pass 276 | except IOError, pickle.PickleError: 277 | pass 278 | return False 279 | 280 | def 
_validate_lib_metadata(self): 281 | for (lib_path, cached_metadata) in self._lib_metadata.items(): 282 | real_metadata = self._get_lib_metadata(lib_path) 283 | if real_metadata != cached_metadata or not real_metadata: 284 | self._lib_metadata[lib_path] = real_metadata 285 | try: 286 | del self._lib_lookups[lib_path] 287 | except KeyError: 288 | pass 289 | 290 | @staticmethod 291 | def _get_lib_metadata(lib_path): 292 | try: 293 | st = os.stat(lib_path) 294 | return (os.path.normpath(os.path.abspath(lib_path)), 295 | st.st_size, st.st_mtime, st.st_ctime) 296 | except: 297 | return None 298 | 299 | def get(self, lib_path, offset): 300 | self._ensure_initialized() 301 | return self._lib_lookups[lib_path][offset] 302 | 303 | def put(self, lib_path, offset, result): 304 | self._ensure_initialized() 305 | if lib_path not in self._lib_metadata: 306 | self._lib_metadata[lib_path] = self._get_lib_metadata(lib_path) 307 | self._lib_lookups[lib_path][offset] = result 308 | 309 | self._put_counter += 1 310 | if self._put_counter == self._write_cache_after_puts: 311 | self._write_cache_to_disk() 312 | 313 | # Reset the put counter even if the cache write above fails; if 314 | # this write failed, it's likely that our next write will fail too, 315 | # and we don't want to waste our time writing and failing over and 316 | # over again. 317 | self._put_counter = 0 318 | 319 | def get_maybe_set(self, lib_path, offset, result): 320 | """Get the addr2line result for (lib_path, offset). 321 | 322 | If (lib_path, offset) is not in our cache, insert |result()| or 323 | |result|, depending on whether |result| is callable. 
324 | 325 | """ 326 | self._ensure_initialized() 327 | if not self._lib_lookups[lib_path][offset]: 328 | if callable(result): 329 | self.put(lib_path, offset, result()) 330 | else: 331 | self.put(lib_path, offset, result) 332 | return self._lib_lookups[lib_path][offset] 333 | 334 | 335 | class StackFixer(object): 336 | """An object used for translating (lib, offset) tuples into function+file 337 | names, using addr2line and a cache. 338 | 339 | Here and elsewhere we adopt the convention that |lib| is a library's 340 | basename (e.g. 'libxul.so'), while lib_path is a relative path from 341 | dirname(__file__) (i.e., this file's directory) to the library. 342 | 343 | Please be kind and call close() once you're done with this object. That 344 | gives us a chance to flush the cache to disk, making future invocations 345 | faster. 346 | 347 | """ 348 | 349 | _addr2line_procs = {} 350 | 351 | def __init__(self, options): 352 | self._lib_path_cache = defaultdict(list) 353 | self._cache = StackFixerCache(options) 354 | self._options = options 355 | 356 | def translate(self, fn_guess, lib, offset): 357 | """Translate the given offset (an integer) into the given library (e.g. 358 | 'libxul.so') into a human-readable string and return that string. 359 | 360 | fn_guess is a hint to make the output look nicer; we don't use 361 | it to look up lib+offsets. 362 | 363 | """ 364 | lib_path = self._find_lib(lib) 365 | return self._cache.get_maybe_set(lib_path, offset, 366 | lambda: self._addr2line(lib, offset, fn_guess)) 367 | 368 | def close(self): 369 | self._cache.flush() 370 | 371 | def _init_lib_path_cache(self): 372 | """Initialize self._lib_path_cache by walking all of the subdirectories 373 | of self._options.lib_search_dirs and finding all the '*.so', 'b2g', and 374 | 'plugin-container' files therein. 
375 | 376 | """ 377 | for root, _, files in itertools.chain(*[os.walk(dir) for dir in 378 | self._options.lib_search_dirs]): 379 | for f in files: 380 | if f.endswith('.so') or f == 'b2g' or f == 'plugin-container': 381 | self._lib_path_cache[f].append(os.path.join(root, f)) 382 | 383 | def _find_lib(self, lib): 384 | """Get a path to the given lib (e.g. 'libxul.so'). 385 | 386 | We prefer unstripped versions of the lib, but if all we can find is a 387 | stripped version, we'll return that. 388 | 389 | If we can't find the lib, we return None. 390 | 391 | """ 392 | if not self._lib_path_cache: 393 | self._init_lib_path_cache() 394 | 395 | lib_paths = self._lib_path_cache[lib] 396 | if not lib_paths: 397 | return None 398 | if len(lib_paths) == 1: 399 | return lib_paths[0] 400 | 401 | lib_path = first(self._lib_has_symbols, lib_paths) 402 | if not lib_path: 403 | lib_path = self._lib_path_cache[lib][0] 404 | self._lib_path_cache[lib] = [lib_path] 405 | return lib_path 406 | 407 | def _lib_has_symbols(self, lib_path): 408 | """Check if the given lib_path has symbols. 409 | 410 | We do this by running nm on the library. If it's stripped, nm will not 411 | output anything to stdout. 412 | 413 | """ 414 | proc = subprocess.Popen( 415 | [self._options.cross_bin('nm'), lib_path], 416 | stdout=subprocess.PIPE, stderr=subprocess.PIPE) 417 | try: 418 | return not not proc.stdout.readline() 419 | except IOError: 420 | return False 421 | finally: 422 | proc.kill() 423 | 424 | def _addr2line(self, lib, offset, fn_guess): 425 | """Use addr2line to translate the given lib+offset. 426 | 427 | If addr2line can't resolve a lib+offset, you may still have a guess as 428 | to what function lives there. (For example, NS_StackWalk is sometimes 429 | able to resolve function names that addr2line can't.) fn_guess should 430 | be this guess, if you have one. 
431 | 432 | """ 433 | def addr_str(): 434 | return '(%s+0x%x)' % (lib, offset) 435 | 436 | def fallback_str(): 437 | _fn_guess = fn_guess + ' ' if fn_guess and fn_guess != '???' else '' 438 | return '%s%s' % (_fn_guess, addr_str()) 439 | 440 | if lib not in StackFixer._addr2line_procs: 441 | lib_path = self._find_lib(lib) 442 | if not lib_path: 443 | return "%s (can't find lib)" % fallback_str() 444 | StackFixer._addr2line_procs[lib] = subprocess.Popen( 445 | [self._options.cross_bin('addr2line'), '-Cfe', lib_path], 446 | stdin=subprocess.PIPE, stdout=subprocess.PIPE) 447 | 448 | proc = StackFixer._addr2line_procs[lib] 449 | try: 450 | proc.stdin.write('0x%x\n' % offset) 451 | proc.stdin.flush() 452 | 453 | # addr2line returns two lines for every address we give it. The 454 | # first line is of the form "foo()", and the second line is of the 455 | # form "foo.cpp:123". 456 | func = proc.stdout.readline().strip() 457 | file_name = os.path.normpath(proc.stdout.readline().strip()) 458 | if func == '??' and file_name == '??:0': 459 | # addr2line wasn't helpful here. 460 | return '%s (no addr2line)' % fallback_str() 461 | return '%s %s %s' % (func, file_name, addr_str()) 462 | except IOError as e: 463 | # If our addr2line process dies, don't try to restart it. Just 464 | # leave it in a dead state and presumably every time we read/write 465 | # to/from it, we'll hit this case. 466 | return '%s (addr2line exception)' % fallback_str() 467 | 468 | 469 | # Matches lines produced by DMD before bug 1062709 landed. 470 | old_line_re = re.compile( 471 | r'''(\s+) # leading whitespace 472 | ([^ ][^\]]*) # either '???' or mangled fn signature 473 | \[ 474 | (\S+) # library name 475 | \s+ 476 | \+(0x[0-9a-fA-F]+) # offset into lib 477 | \] 478 | (\s+0x[0-9a-fA-F]+.*) # program counter and anything else 479 | ''', 480 | re.VERBOSE) 481 | 482 | # Matches lines produced by DMD (via NS_FormatCodeAddress()) after bug 1062709 483 | # landed. 
484 | line_re = re.compile("^(.*#\d+: )(.+)\[(.+) \+(0x[0-9A-Fa-f]+)\](.*)$") 485 | 486 | def fixSymbols(line, fixer): 487 | # Try parsing it as if it's the new stack frame format. 488 | result = line_re.match(line) 489 | if result is not None: 490 | (before, fn, lib, offset, after) = result.groups() 491 | return before + fixer.translate(fn, lib, int(offset, 16)) + after + '\n' 492 | 493 | # Try parsing it as if it's the old stack frame format. 494 | result = old_line_re.match(line) 495 | if result is not None: 496 | (before, fn, lib, offset, after) = result.groups() 497 | return before + fixer.translate(fn, lib, int(offset, 16)) + after + '\n' 498 | 499 | return line 500 | 501 | 502 | def fix_b2g_stacks_in_file(infile, outfile, args={}, **kwargs): 503 | """Read lines from infile and output those lines to outfile with their 504 | stack frames rewritten. 505 | 506 | infile and outfile may be a files or file-like objects. For example, to 507 | read/write from strings, pass StringIO objects. 508 | 509 | args or kwargs will be passed to FixB2GStacksOptions (you may not specify 510 | both). See the docs on FixB2GStacksOptions for the supported argument 511 | names. 512 | 513 | """ 514 | if args and kwargs: 515 | raise Exception("Can't pass args and kwargs to fix_b2g_stacks_in_file.") 516 | options = FixB2GStacksOptions(args if args else kwargs) 517 | 518 | if options.remove_cache: 519 | try: 520 | os.remove(StackFixerCache.cache_filename()) 521 | except Exception: 522 | pass 523 | 524 | fixer = StackFixer(options) 525 | 526 | # Filter our output through c++filt. Pumping on a separate thread is 527 | # *much* faster than filtering line-by-line. 528 | # 529 | # On Mac OS, the native c++filt doesn't filter our output correctly, so we 530 | # use the cross-compiled one. (I don't know if the system c++filt works 531 | # properly on Linux, though I imagine it does.) 
532 | cppfilt = subprocess.Popen([options.cross_bin('c++filt')], 533 | stdin=subprocess.PIPE, 534 | stdout=subprocess.PIPE) 535 | try: 536 | p = pump(outfile, cppfilt.stdout) 537 | for line in infile: 538 | cppfilt.stdin.write(fixSymbols(line, fixer)) 539 | finally: 540 | cppfilt.stdin.close() 541 | p.join() 542 | fixer.close() 543 | 544 | 545 | def add_argparse_arguments(parser): 546 | """Add arguments to an argparse parser which make the parser's result 547 | suitable for passing to fix_b2g_stacks_in_file. 548 | 549 | You might use this in your code as something like: 550 | 551 | parser = argparse.ArgumentParser() 552 | b2g_stack_group = parser.add_argument_group(...) 553 | fix_b2g_stack.add_argparse_arguments(b2g_stack_group) 554 | 555 | """ 556 | parser.add_argument('--toolchain-dir', metavar='DIR', 557 | help='Directory containing toolchain binaries') 558 | parser.add_argument('--toolchain-prefix', metavar='PREFIX', 559 | help='Toolchain binary prefix (e.g. "arm-linux-androideabi-")') 560 | parser.add_argument('--gecko-objdir', metavar='DIR', 561 | help='Path to gecko objdir (default: ../objdir-gecko)') 562 | parser.add_argument('--gonk-objdir', metavar='DIR', 563 | help='Path to gonk objdir (default: $B2G_ROOT/out)') 564 | parser.add_argument('--product', metavar='PRODUCT', 565 | help='Product being built (e.g. "otoro"). ' 566 | 'We try to detect this automatically.') 567 | parser.add_argument('--remove-cache', action='store_true', 568 | help="Delete the persistent addr2line cache before running.") 569 | 570 | if __name__ == '__main__': 571 | parser = argparse.ArgumentParser( 572 | description=__doc__, 573 | formatter_class=argparse.RawDescriptionHelpFormatter) 574 | parser.add_argument('infile', metavar='INFILE', nargs='?', 575 | help='File to read from (default: stdin). gz files are OK.') 576 | parser.add_argument('--outfile', metavar='FILE', 577 | help=textwrap.dedent('''\ 578 | File to write output to (default: stdout). 
    parser.add_argument('--outfile', metavar='FILE',
                        help=textwrap.dedent('''\
                            File to write output to (default: stdout). If name
                            ends with ".gz", we will gzip the file.'''))
    add_argparse_arguments(parser)
    args = parser.parse_args()

    # Input defaults to stdin; a ".gz" suffix selects transparent
    # gzip decompression.
    infile = sys.stdin
    if args.infile:
        if args.infile.endswith('.gz'):
            infile = GzipFile(args.infile, 'r')
        else:
            infile = open(args.infile, 'r')

    # Output defaults to stdout; a ".gz" suffix selects gzip compression.
    outfile = sys.stdout
    if args.outfile:
        if args.outfile.endswith('.gz'):
            outfile = GzipFile(args.outfile, 'w')
        else:
            outfile = open(args.outfile, 'w')

    fix_b2g_stacks_in_file(infile, outfile, args)

# ----------------------------------------------------------------------------
# /tools/get_about_memory.py:
# ----------------------------------------------------------------------------

#!/usr/bin/env python

"""Get a dump of about:memory from all the processes running on your device.

You can then view these dumps using a recent Firefox nightly on your desktop by
opening about:memory and using the button at the bottom of the page to load the
memory-reports file that this script creates.

By default this script also gets gc/cc logs from all B2G processes. This takes
a while, and these logs are large, so you can turn it off if you like.

This script also saves the output of b2g-procrank and a few other diagnostic
programs. If you compiled with DMD and have it enabled, we'll also pull the
DMD reports.
"""

from __future__ import print_function

import sys

if sys.version_info < (2, 7):
    # We need Python 2.7 because we import argparse.
    print('This script requires Python 2.7.', file=sys.stderr)
    sys.exit(1)

import os
import re
import textwrap
import argparse
import json
import urllib
import shutil
import subprocess
import tarfile
import traceback
from datetime import datetime
from gzip import GzipFile

import include.device_utils as utils
import fix_b2g_stack


def process_dmd_files(dmd_files, args):
    """Run fix_b2g_stack.py on each of these files."""
    if not dmd_files or args.no_dmd:
        return

    print()
    print('Processing DMD files. This may take a minute or two.')
    try:
        process_dmd_files_impl(dmd_files, args)
        print('Done processing DMD files. Have a look in %s.' %
              os.path.dirname(dmd_files[0]))
    except Exception as e:
        print('')
        print(textwrap.dedent('''\
            An error occurred while processing the DMD dumps. Not to worry!
            The raw dumps are still there; just run fix_b2g_stack.py on
            them.
            '''), file=sys.stderr)
        # NOTE(review): traceback.print_exc's first parameter is 'limit', not
        # the exception object; this call passes the exception as the limit.
        # Plain traceback.print_exc() is probably what was meant -- confirm.
        traceback.print_exc(e)


def get_proc_names(out_dir):
    """
    Retrieves a mapping of process names to their PID as well as the raw
    output of b2g-procrank.
    """
    # b2g-procrank output was saved to out_dir earlier (see pull_procrank_etc).
    with open(os.path.join(out_dir, 'b2g-procrank'), 'r') as f:
        procrank = f.read().split('\n')
    proc_names = {}
    for line in procrank:
        # App names may contain spaces and special characters (e.g.
        # '(Preallocated app)'). But for our purposes here, it's easier to
        # look at only the first word, and to strip off any special characters.
        #
        # We also assume that if an app name contains numbers, it contains them
        # only in the first word.
        match = re.match(r'^(\S+)\s+\D*(\d+)', line)
        if not match:
            continue
        proc_names[int(match.group(2))] = re.sub('\W', '', match.group(1)).lower()
    return proc_names, procrank


def get_objdir_and_product(args):
    """Attempts to figure out the objdir and device name using the load-config.sh script"""
    if args.gecko_objdir and args.product:
        # User already specified objdir and product.
        return

    load_config_script = os.path.join(os.path.dirname(__file__), '../load-config.sh')
    try:
        # Run load-config.sh in a bash shell and spit out the config vars we
        # care about as a comma separated list when exiting.
        variables = subprocess.Popen(
            ["bash", "-c",
             "trap 'echo -n \"${GECKO_OBJDIR}\",\"${DEVICE_NAME}\"' exit; source \"$1\" > /dev/null 2>&1",
             "_", load_config_script],
            shell=False, stdout=subprocess.PIPE).communicate()[0].split(',')

        if not args.gecko_objdir and variables[0]:
            args.gecko_objdir = variables[0]

        if not args.product and variables[1]:
            args.product = variables[1]

    except Exception as e:
        # Best-effort: if load-config.sh can't be run we simply leave the
        # args unset.
        pass


def process_dmd_files_impl(dmd_files, args):
    """Worker for process_dmd_files: rewrite each DMD dump's stacks."""
    out_dir = os.path.dirname(dmd_files[0])

    proc_names, procrank = get_proc_names(out_dir)
    get_objdir_and_product(args)

    for f in dmd_files:
        # Extract the PID (e.g. 111) and UNIX time (e.g. 9999999) and the file
        # kind ('txt' or 'json', depending on the version) from the name
        # of the dmd file (e.g. dmd-9999999-111.json.gz).
        basename = os.path.basename(f)
        dmd_filename_match = re.match(r'^dmd-(\d+)-(\d+).(txt|json)', basename)
        if dmd_filename_match:
            creation_time = datetime.fromtimestamp(int(dmd_filename_match.group(1)))
            pid = int(dmd_filename_match.group(2))
            kind = dmd_filename_match.group(3)
            if pid in proc_names:
                proc_name = proc_names[pid]
                outfile_name = 'dmd-%s-%d.%s' % (proc_name, pid, kind)
            else:
                proc_name = None
                outfile_name = 'dmd-%d.%s' % (pid, kind)
        else:
            # Unrecognized name: keep it, prefixed, minus any .gz suffix.
            pid = None
            creation_time = None
            outfile_name = 'processed-' + basename
            if outfile_name.endswith(".gz"):
                outfile_name = outfile_name[:-3]

        outfile_path = os.path.join(out_dir, outfile_name)
        with GzipFile(outfile_path + '.gz', 'w') if args.compress_dmd_logs else \
                open(outfile_path, 'w') as outfile:
            with GzipFile(f, 'r') as infile:
                fix_b2g_stack.fix_b2g_stacks_in_file(infile, outfile, args)

        if not args.keep_individual_reports:
            os.remove(f)


def get_kgsl_files(out_dir):
    """Retrieves kgsl graphics memory usage files."""
    print()
    print('Processing kgsl files.')

    proc_names, _ = get_proc_names(out_dir)

    try:
        kgsl_pids = utils.remote_ls('/d/kgsl/proc/', verbose=False)
    except subprocess.CalledProcessError:
        # Probably not a kgsl device.
        print('kgsl graphics memory logs not available for this device.')
        return

    # Pull /d/kgsl/proc/<pid>/mem for each pid, naming the local copy after
    # the process when we know it.
    for pid in filter(None, kgsl_pids):
        name = proc_names[int(pid)] if int(pid) in proc_names else pid
        remote_file = '/d/kgsl/proc/%s/mem' % pid
        dest_file = os.path.join(out_dir, 'kgsl-%s-mem' % name)
        try:
            utils.pull_remote_file(remote_file, dest_file)
        except subprocess.CalledProcessError:
            print('Unable to retrieve kgsl file: %s' % remote_file, file=sys.stderr)

    print('Done processing kgsl files.')


def merge_files(dir, files):
    """Merge the given memory reporter dump files into one giant file."""
    dumps = [json.load(GzipFile(os.path.join(dir, f))) for f in files]

    merged_dump = dumps[0]
    for dump in dumps[1:]:
        # All of the properties other than 'reports' must be identical in all
        # dumps, otherwise we can't merge them.
        if set(dump.keys()) != set(merged_dump.keys()):
            print("Can't merge dumps because they don't have the "
                  "same set of properties.", file=sys.stderr)
            return
        for prop in merged_dump:
            if prop != 'reports' and dump[prop] != merged_dump[prop]:
                # NOTE(review): unlike the key-set check above, a value
                # mismatch only warns and the merge continues -- confirm
                # this is intentional.
                print("Can't merge dumps because they don't have the "
                      "same value for property '%s'" % prop, file=sys.stderr)

        merged_dump['reports'] += dump['reports']

    merged_reports_path = os.path.join (dir, 'memory-reports')
    json.dump(merged_dump,
              open(merged_reports_path, 'w'),
              indent=2)
    return merged_reports_path


def get_dumps(args):
    """Trigger memory reports on the device and pull them into a local
    directory. Returns (out_dir, merged_reports_path, dmd_file_paths)."""
    if args.output_directory:
        out_dir = utils.create_specific_output_dir(args.output_directory)
    else:
        out_dir = utils.create_new_output_dir('about-memory-')
    args.output_directory = out_dir

    # Do this function inside a try/catch which will delete out_dir if the
    # function throws and out_dir is empty.
    def do_work():
        fifo_msg = 'memory report' if not args.minimize_memory_usage else \
            'minimize memory report'
        new_files = utils.notify_and_pull_files(
            fifo_msg=fifo_msg,
            outfiles_prefixes=['memory-report-'],
            remove_outfiles_from_device=not args.leave_on_device,
            out_dir=out_dir,
            optional_outfiles_prefixes=['dmd-'])

        memory_report_files = [f for f in new_files
                               if f.startswith('memory-report-') or
                               f.startswith('unified-memory-report-')]
        dmd_files = [f for f in new_files if f.startswith('dmd-')]
        if memory_report_files:
            merged_reports_path = os.path.abspath(merge_files(out_dir, memory_report_files))
        else:
            # NB: It's possible this can happen if all child processes
            # die/restart during measurement.
            merged_reports_path = None

        utils.pull_procrank_etc(out_dir)

        if not args.keep_individual_reports:
            for f in memory_report_files:
                os.remove(os.path.join(out_dir, f))

        return (out_dir,
                merged_reports_path,
                [os.path.join(out_dir, f) for f in dmd_files])

    return utils.run_and_delete_dir_on_exception(do_work, out_dir)


def get_and_show_info(args):
    """Pull the reports, then open/display them and run post-processing."""
    (out_dir, merged_reports_path, dmd_files) = get_dumps(args)

    if dmd_files and not args.no_dmd:
        print('Got %d DMD dump(s).' % len(dmd_files))

    if merged_reports_path:
        # Try to open the dump in Firefox.
        about_memory_url = "about:memory?file=%s" % urllib.quote(merged_reports_path)

        opened_in_firefox = False
        if args.open_in_firefox:
            try:
                # Open about_memory_url in Firefox, but don't display stdout or stderr.
                # This isn't necessary if Firefox is already running (which it
                # probably is), because in that case our |firefox| invocation will
                # open a new tab in the existing process and then immediately exit.
                # But if Firefox isn't already running, we don't want to pollute
                # our terminal with its output.

                # If we wanted to be platform-independent, we might be able to use
                # "NUL" on Windows. But the rest of this script already isn't
                # platform-independent, so whatever.
                fnull = open('/dev/null', 'w')
                subprocess.Popen(['firefox', about_memory_url], stdout=fnull, stderr=fnull)
                opened_in_firefox = True

                print()
                print(textwrap.fill(textwrap.dedent('''\
                    I just tried to open the memory report in Firefox. If that
                    didn't work for some reason, or if you want to open this report
                    at a later time, open the following URL in a Firefox nightly build:
                    ''')) + '\n\n  ' + about_memory_url)
            except (subprocess.CalledProcessError, OSError):
                # Best-effort: fall through to the manual instructions below.
                pass

        # If we didn't open in Firefox, output the message below.
        if not opened_in_firefox:
            print()
            print(textwrap.fill(textwrap.dedent('''\
                To view this report, open Firefox on this machine and load the
                following URL:
                ''')) + '\n\n  ' + about_memory_url)
    else:
        print('')
        print("Failed to retrieve memory reports")

    # Get GC/CC logs if necessary.
    if args.get_gc_cc_logs:
        import get_gc_cc_log
        print('')
        print('Pulling GC/CC logs...')
        get_gc_cc_log.get_logs(args, out_dir=out_dir, get_procrank_etc=False)

    process_dmd_files(dmd_files, args)

    if not args.no_kgsl_logs:
        get_kgsl_files(out_dir)

    if args.create_archive:
        print('Archiving logs...')
        archive_path = utils.get_archive_path(out_dir)
        with tarfile.open(archive_path, 'w:bz2') as archive:
            archive.add(out_dir)
        shutil.rmtree(out_dir, ignore_errors=True)


def main():
    """Parse command-line arguments and run get_and_show_info."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument(
        '--minimize', '-m', dest='minimize_memory_usage',
        action='store_true', default=False,
        help='Minimize memory usage before collecting the memory reports.')

    parser.add_argument(
        '--directory', '-d', dest='output_directory',
        action='store', metavar='DIR',
        help=textwrap.dedent('''\
            The directory to store the reports in. By default, we'll store the
            reports in the directory about-memory-N, for some N.'''))

    parser.add_argument(
        '--archive',
        dest='create_archive',
        action='store_true', default=False,
        help=textwrap.dedent('''\
            Package the reports into an archive and remove the intermediate
            directory. A bz2 tar archive will be created with the name
            .tar.bz2'''))

    parser.add_argument(
        '--leave-on-device', '-l', dest='leave_on_device',
        action='store_true', default=False,
        help='Leave the reports on the device after pulling them.')

    parser.add_argument(
        '--no-auto-open', '-o', dest='open_in_firefox',
        action='store_false', default=True,
        help=textwrap.dedent("""\
            By default, we try to open the memory report we fetch in Firefox.
            Specify this option prevent this."""))

    parser.add_argument(
        '--keep-individual-reports',
        dest='keep_individual_reports',
        action='store_true', default=False,
        help=textwrap.dedent('''\
            Don't delete the individual memory reports which we merge to create
            the memory-reports file. You shouldn't need to pass this parameter
            except for debugging.'''))

    gc_log_group = parser.add_mutually_exclusive_group()

    gc_log_group.add_argument(
        '--no-gc-cc-log',
        dest='get_gc_cc_logs',
        action='store_false',
        default=True,
        help="Don't get a gc/cc log.")

    gc_log_group.add_argument(
        '--abbreviated-gc-cc-log',
        dest='abbreviated_gc_cc_log',
        action='store_true',
        default=False,
        help='Get an abbreviated GC/CC log, instead of a full one.')

    parser.add_argument(
        '--uncompressed-gc-cc-log',
        dest='compress_gc_cc_logs',
        action='store_false', default=True,
        help='Do not compress the individual GC/CC logs.')

    parser.add_argument('--no-kgsl-logs',
                        action='store_true',
                        default=False,
                        help='''Don't get the kgsl graphics memory logs.''')

    parser.add_argument(
        '--no-dmd', action='store_true', default=False,
        help='''Don't process DMD logs, even if they're available.''')

    parser.add_argument(
        '--uncompressed-dmd-logs',
        dest='compress_dmd_logs',
        action='store_false', default=True,
        help=textwrap.dedent('''\
            Do not compress each individual DMD report after processing.'''))

    dmd_group = parser.add_argument_group(
        'optional DMD args (passed to fix_b2g_stack)',
        textwrap.dedent('''\
            You only need to worry about these options if you're running DMD on
            your device.
            These options get passed to fix_b2g_stack.'''))
    fix_b2g_stack.add_argparse_arguments(dmd_group)

    args = parser.parse_args()
    get_and_show_info(args)

if __name__ == '__main__':
    main()

# ----------------------------------------------------------------------------
# /tools/get_gc_cc_log.py:
# ----------------------------------------------------------------------------

#!/usr/bin/env python

'''This script pulls GC and CC logs from all B2G processes on a device. These
logs are primarily used by leak-checking tools.

This script also saves the output of b2g-procrank and a few other diagnostic
programs.

'''

from __future__ import print_function

import sys
if sys.version_info < (2,7):
    # We need Python 2.7 because we import argparse.
    print('This script requires Python 2.7.', file=sys.stderr)
    sys.exit(1)

import os
import sys
import re
import argparse
import textwrap
import gzip
from multiprocessing import Pool

import include.device_utils as utils


def gzip_compress(to_compress):
    """Gzip the file at path to_compress to to_compress + '.gz', then remove
    the original file."""
    with open(to_compress, mode='rb') as f_in:
        with gzip.open(to_compress + '.gz', mode='wb') as f_out:
            f_out.writelines(f_in)

    os.remove(to_compress)


def compress_logs(log_filenames, out_dir):
    """Rename (when unambiguous) and gzip the pulled logs in out_dir."""
    print('Compressing logs...')

    # Compress in parallel. While we're at it, we also strip off the
    # long identifier from the filenames, if we can. (The filename is
    # something like gc-log.PID.IDENTIFIER.log, where the identifier is
    # something like the number of seconds since the epoch when the log was
    # triggered.)
    to_compress = []
    for f in log_filenames:
        # Rename the log file if we can.
        match = re.match(r'^([a-zA-Z-]+\.[0-9]+)\.[0-9]+.log$', f)
        if match:
            if not os.path.exists(os.path.join(out_dir, match.group(1))):
                new_name = match.group(1) + '.log'
                os.rename(os.path.join(out_dir, f),
                          os.path.join(out_dir, new_name))
                f = new_name

        to_compress.append(os.path.join(out_dir, f))

    # Start compressing.
    pool = Pool()
    pool.map(gzip_compress, to_compress)
    pool.close()


def get_logs(args, out_dir=None, get_procrank_etc=True):
    """Trigger a gc/cc log dump on the device and pull the logs into out_dir
    (created from args when not given)."""
    if not out_dir:
        if args.output_directory:
            out_dir = utils.create_specific_output_dir(args.output_directory)
        else:
            out_dir = utils.create_new_output_dir('gc-cc-logs-')

    if args.abbreviated_gc_cc_log:
        fifo_msg='abbreviated gc log'
    else:
        fifo_msg='gc log'

    def do_work():
        log_filenames = utils.notify_and_pull_files(
            fifo_msg=fifo_msg,
            outfiles_prefixes=['cc-edges.', 'gc-edges.'],
            remove_outfiles_from_device=not args.leave_on_device,
            out_dir=out_dir)

        if get_procrank_etc:
            utils.pull_procrank_etc(out_dir)

        if args.compress_gc_cc_logs:
            compress_logs(log_filenames, out_dir)

    utils.run_and_delete_dir_on_exception(do_work, out_dir)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument('--directory', '-d', dest='output_directory',
                        action='store', metavar='DIR',
                        help=textwrap.dedent('''\
                            The directory to store the logs in. By default, we'll store the
                            reports in the directory gc-cc-logs-N, for some N.'''))

    parser.add_argument('--leave-on-device', '-l', dest='leave_on_device',
                        action='store_true', default=False,
                        help=textwrap.dedent('''\
                            Leave the logs on the device after pulling them. (Note: These logs
                            can take up tens of megabytes and are stored uncompressed on the
                            device!)'''))

    parser.add_argument('--abbreviated', dest='abbreviated_gc_cc_log',
                        action='store_true', default=False,
                        help=textwrap.dedent('''\
                            Get an abbreviated CC log instead of a full (i.e., all-traces) log.
                            An abbreviated log doesn't trace through objects that the cycle
                            collector knows must be reachable (e.g. DOM nodes whose window is
                            alive).'''))

    parser.add_argument('--uncompressed', dest='compress_gc_cc_logs',
                        action='store_false', default=True,
                        help='Do not compress the individual logs.')

    args = parser.parse_args()
    get_logs(args)

# ----------------------------------------------------------------------------
# /tools/include/device_utils.py:
# ----------------------------------------------------------------------------

"""Utilities for interacting with a remote device."""

from __future__ import print_function
from __future__ import division

import os
import sys
import re
import subprocess
import textwrap
from time import sleep
def remote_shell(cmd, verbose=True):
    """Run the given command on on the device and return stdout. Throw an
    exception if the remote command returns a non-zero return code.

    Don't use this command for programs included in /system/bin/toolbox, such
    as ls and ps; instead, use remote_toolbox_cmd.

    adb shell doesn't check the remote command's error code. So to check this
    ourselves, we echo $? after running the command and then strip that off
    before returning the command's output.

    """
    out = shell(r"""adb shell '%s; echo -n "|$?"'""" % cmd)

    # The final '|' in |out| separates the command output from the return
    # code. (There's no newline after the return code because we did echo -n.)
    (cmd_out, _, retcode) = out.rpartition('|')
    retcode = retcode.strip()

    if retcode == '0':
        return cmd_out

    if verbose:
        print('Remote command %s failed with error code %s' % (cmd, retcode),
              file=sys.stderr)
        if cmd_out:
            print(cmd_out, file=sys.stderr)

    # NOTE(review): retcode is a string here, not an int; callers in this
    # repo only catch the exception type, so we preserve that behavior.
    raise subprocess.CalledProcessError(retcode, cmd, cmd_out)


def remote_toolbox_cmd(cmd, args='', verbose=True):
    """Run the given command from /system/bin/toolbox on the device. Pass
    args, if specified, and return stdout. Throw an exception if the command
    returns a non-zero return code.

    cmd must be a command that's part of /system/bin/toolbox. If you want to
    run an arbitrary command, use remote_shell.

    Use remote_toolbox_cmd instead of remote_shell if you're invoking a program
    that's included in /system/bin/toolbox. remote_toolbox_cmd will ensure
    that we use the toolbox's version, instead of busybox's version, even if
    busybox is installed on the system. This will ensure that we get
    the same output regardless of whether busybox is installed.

    """
    return remote_shell('/system/bin/toolbox "%s" %s' % (cmd, args), verbose)


def remote_ls(dir, verbose=True):
    """Run ls on the remote device, and return a set containing the results."""
    return {f.strip() for f in remote_toolbox_cmd('ls', dir, verbose).split('\n')}


def shell(cmd, cwd=None, show_errors=True):
    """Run the given command as a shell script on the host machine.

    If cwd is specified, we run the command from that directory; otherwise, we
    run the command from the current working directory.

    Raises subprocess.CalledProcessError if the command exits non-zero.

    """
    proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (out, err) = proc.communicate()
    if proc.returncode:
        if show_errors:
            print('Command %s failed with error code %d' %
                  (cmd, proc.returncode), file=sys.stderr)
            if err:
                print(err, file=sys.stderr)
        raise subprocess.CalledProcessError(proc.returncode, cmd, err)
    return out


def get_archive_path(out_dir, extension='.tar.bz2'):
    """Gets the full path for an archive that would contain the given out_dir"""
    return out_dir.rstrip(os.path.sep) + extension


def create_specific_output_dir(out_dir):
    """Create the given directory if it doesn't exist.

    Throw an exception if a non-directory file exists with the same name.

    """
    if os.path.exists(out_dir):
        if os.path.isdir(out_dir):
            # Directory already exists; we're all good.
            return out_dir
        else:
            raise Exception(textwrap.dedent('''\
                Can't use %s as output directory; something that's not a
                directory already exists with that name.''' % out_dir))
    os.mkdir(out_dir)
    return out_dir


def create_new_output_dir(out_dir_prefix):
    """Create a new directory whose name begins with out_dir_prefix."""
    for i in range(0, 1024):
        dir = '%s%d' % (out_dir_prefix, i)
        try:
            # Skip candidates whose archive already exists; os.mkdir raises
            # if the directory itself exists, sending us to the next suffix.
            if not os.path.isfile(get_archive_path(dir)):
                os.mkdir(dir)
                return dir
        except OSError:
            # Narrowed from a bare 'except': only a failed mkdir/stat should
            # move us on to the next candidate name, not e.g. KeyboardInterrupt.
            pass
    raise Exception("Couldn't create output directory.")
def get_remote_b2g_pids():
    """Get the pids of all gecko processes running on the device.

    Returns a tuple (master_pid, child_pids), where child_pids is a list.

    """
    procs = remote_toolbox_cmd('ps').split('\n')
    master_pid = None
    child_pids = []
    b2g_pids = {}
    for line in procs:
        # Collect pid -> ppid for the main b2g binary and plugin-container
        # processes.
        if re.search(r'/b2g|plugin-container\s*$', line):
            pids = line.split()[1:3]
            pid = int(pids[0])
            ppid = int(pids[1])
            b2g_pids[pid] = ppid
    # A pid whose parent is also a b2g process is a child; the remaining one
    # must be the master.
    for pid in b2g_pids:
        ppid = b2g_pids[pid]
        if ppid in b2g_pids:
            child_pids.append(pid)
        else:
            if master_pid:
                raise Exception('Two copies of b2g process found?')
            master_pid = pid

    if not master_pid:
        raise Exception('b2g does not appear to be running on the device.')

    return (master_pid, child_pids)


def is_using_nuwa():
    """Determines if Nuwa is being used"""
    return "(Nuwa)" in remote_shell('b2g-ps', False)

def pull_procrank_etc(out_dir):
    """Get the output of procrank and a few other diagnostic programs and save
    it into out_dir.

    """
    shell('adb shell b2g-info > b2g-info', cwd=out_dir)
    shell('adb shell procrank > procrank', cwd=out_dir)
    shell('adb shell b2g-ps > b2g-ps', cwd=out_dir)
    shell('adb shell b2g-procrank > b2g-procrank', cwd=out_dir)


def run_and_delete_dir_on_exception(fun, dir):
    """Run the given function and, if it throws an exception, delete the given
    directory, if it's empty, before re-throwing the exception.

    You might want to wrap your call to send_signal_and_pull_files in this
    function."""
    try:
        return fun()
    except:
        # os.rmdir will throw if the directory is non-empty, and a simple
        # 'raise' will re-throw the exception from os.rmdir (if that throws),
        # so we need to explicitly save the exception info here. See
        # http://nedbatchelder.com/blog/200711/rethrowing_exceptions_in_python.html
        exception_info = sys.exc_info()

        try:
            # Throws if the directory is not empty.
            os.rmdir(dir)
        except OSError:
            pass

        # Raise the original exception. (Python 2 three-expression raise;
        # this module targets Python 2.)
        raise exception_info[1], None, exception_info[2]


# NOTE(review): the ignore_nuwa default below is evaluated once, at module
# import time, and is_using_nuwa() shells out to adb -- merely importing this
# module talks to the device. Consider a None sentinel resolved inside the
# function body instead.
def notify_and_pull_files(outfiles_prefixes,
                          remove_outfiles_from_device,
                          out_dir,
                          optional_outfiles_prefixes=[],
                          fifo_msg=None,
                          signal=None,
                          ignore_nuwa=is_using_nuwa()):
    """Send a message to the main B2G process (either by sending it a signal or
    by writing to a fifo that it monitors) and pull files created as a result.

    Exactly one of fifo_msg or signal must be non-null; otherwise, we throw
    an exception.

    If fifo_msg is non-null, we write fifo_msg to
    /data/local/debug_info_trigger. When this comment was written, B2G
    understood the messages 'memory report', 'minimize memory report', and 'gc
    log'. See nsMemoryInfoDumper.cpp's FifoWatcher.

    If signal is non-null, we send the given signal (which may be either a
    number or a string of the form 'SIGRTn', which we interpret as the signal
    SIGRTMIN + n).

    After writing to the fifo or sending the signal, we pull the files
    generated into out_dir on the host machine. We only pull files which were
    created after the signal was sent.

    When we're done, we remove the files from the device if
    remote_outfiles_from_device is true.

    outfiles_prefixes must be a list containing the beginnings of the files we
    expect to be created as a result of the signal. For example, if we expect
    to see files named 'foo-XXX' and 'bar-YYY', we'd set outfiles_prefixes to
    ['foo-', 'bar-'].

    We expect to pull len(outfiles_prefixes) * (# b2g processes) files from the
    device. If that succeeds, we then pull all files which match
    optional_outfiles_prefixes.

    """

    if (fifo_msg is None) == (signal is None):
        raise ValueError("Exactly one of the fifo_msg and "
                         "signal kw args must be non-null.")

    # Check if we should override the ignore_nuwa value.
    if not ignore_nuwa and os.getenv("MOZ_IGNORE_NUWA_PROCESS", "0") != "0":
        ignore_nuwa = True

    # Newer b2g builds may emit a single 'unified-<prefix>' file instead of
    # one file per process; watch for both naming schemes.
    unified_outfiles_prefixes = ['unified-' + pfx for pfx in outfiles_prefixes]
    all_outfiles_prefixes = outfiles_prefixes + optional_outfiles_prefixes \
        + unified_outfiles_prefixes

    (master_pid, child_pids) = get_remote_b2g_pids()
    child_pids = set(child_pids)
    # Snapshot pre-existing files so we only pull files created by this
    # trigger.
    old_files = _list_remote_temp_files(outfiles_prefixes + unified_outfiles_prefixes)

    if signal:
        _send_remote_signal(signal, master_pid)
    else:
        _write_to_remote_file('/data/local/debug_info_trigger', fifo_msg)

    num_expected_responses = 1 + len(child_pids)
    if ignore_nuwa:
        num_expected_responses -= 1
    num_expected_files = len(outfiles_prefixes) * num_expected_responses
    num_unified_expected = len(unified_outfiles_prefixes)

    # Poll the device for up to max_wait seconds until the expected number of
    # files appears.
    max_wait = 60 * 2
    wait_interval = 1.0
    for i in range(0, int(max_wait / wait_interval)):
        new_files = _list_remote_temp_files(outfiles_prefixes) - old_files
        new_unified_files = _list_remote_temp_files(unified_outfiles_prefixes) - old_files
        if new_unified_files:
            files_gotten = len(new_unified_files)
            files_expected = num_unified_expected
        else:
            files_gotten = len(new_files)
            files_expected = num_expected_files
        sys.stdout.write('\rGot %d/%d files.' % (files_gotten, files_expected))
        sys.stdout.flush()

        if files_gotten >= files_expected:
            print('')
            if files_gotten > files_expected:
                print("WARNING: Got more files than expected!", file=sys.stderr)
                print("(Is MOZ_IGNORE_NUWA_PROCESS set incorrectly?)", file=sys.stderr)
            break

        sleep(wait_interval)

    # Some pids may have gone away before reporting memory. This can happen
    # normally if the triggering of memory reporting causes some old
    # children to OOM. (Bug 931198)
    dead_child_pids = child_pids - set(get_remote_b2g_pids()[1])
    if len(dead_child_pids):
        for pid in dead_child_pids:
            print("\rWarning: Child %u exited during memory reporting" % pid, file=sys.stderr)
        child_pids -= dead_child_pids
        num_expected_files -= len(outfiles_prefixes) * len(dead_child_pids)

    # NOTE(review): num_expected_files is reduced for dead children above,
    # but files_expected (captured inside the loop) is not re-derived before
    # this check -- confirm that's intended.
    if files_gotten < files_expected:
        print('')
        print("We've waited %ds but the only relevant files we see are" % max_wait, file=sys.stderr)
        print('\n'.join(['  ' + f for f in new_files | new_unified_files]), file=sys.stderr)
        print('We expected %d but see only %d files. Giving up...' %
              (files_expected, files_gotten), file=sys.stderr)
        raise Exception("Unable to pull some files.")

    new_files = _pull_remote_files(all_outfiles_prefixes, old_files, out_dir)
    if remove_outfiles_from_device:
        _remove_files_from_device(all_outfiles_prefixes, old_files)
    return [os.path.basename(f) for f in new_files]


def pull_remote_file(remote_file, dest_file):
    """Copies a file from the device."""
    shell('adb pull "%s" "%s"' % (remote_file, dest_file))


# You probably don't need to call the functions below from outside this module,
# but hey, maybe you do.

def _send_remote_signal(signal, pid):
    """Send a signal to a process on the device.
319 | 320 | signal can be either an integer or a string of the form 'SIGRTn' where n is 321 | an integer. We interpret SIGRTn to mean the signal SIGRTMIN + n. 322 | 323 | """ 324 | # killer is a program we put on the device which is like kill(1), except it 325 | # accepts signals above 31. It also understands "SIGRTn" per above. 326 | remote_shell("killer %s %d" % (signal, pid)) 327 | 328 | 329 | def _write_to_remote_file(file, msg): 330 | """Write a message to a file on the device. 331 | 332 | Note that echo is a shell built-in, so we use remote_shell, not 333 | remote_toolbox_cmd, here. 334 | 335 | Also, due to ghetto string escaping in remote_shell, we must use " and not 336 | ' in this command. 337 | 338 | """ 339 | remote_shell('echo -n "%s" > "%s"' % (msg, file)) 340 | 341 | 342 | def _list_remote_temp_files(prefixes): 343 | """Return a set of absolute filenames in the device's temp directory which 344 | start with one of the given prefixes.""" 345 | 346 | # Look for files in both /data/local/tmp/ and 347 | # /data/local/tmp/memory-reports. New versions of b2g dump everything into 348 | # /data/local/tmp/memory-reports, but old versions use /data/local/tmp for 349 | # some things (e.g. gc/cc logs). 350 | tmpdir = '/data/local/tmp/' 351 | outdirs = [d for d in [tmpdir, os.path.join(tmpdir, 'memory-reports')] if 352 | os.path.basename(d) in remote_ls(os.path.dirname(d))] 353 | 354 | found_files = set() 355 | for d in outdirs: 356 | found_files |= {os.path.join(d, file) for file in remote_ls(d) 357 | if any(file.startswith(prefix) for prefix in prefixes)} 358 | 359 | return found_files 360 | 361 | 362 | def _pull_remote_files(outfiles_prefixes, old_files, out_dir): 363 | """Pull files from the remote device's temp directory into out_dir. 364 | 365 | We pull each file in the temp directory whose name begins with one of the 366 | elements of outfiles_prefixes and which isn't listed in old_files. 
367 | 368 | """ 369 | new_files = _list_remote_temp_files(outfiles_prefixes) - old_files 370 | for f in new_files: 371 | shell('adb pull %s' % f, cwd=out_dir) 372 | pass 373 | print("Pulled files into %s." % out_dir) 374 | return new_files 375 | 376 | 377 | def _remove_files_from_device(outfiles_prefixes, old_files): 378 | """Remove files from the remote device's temp directory. 379 | 380 | We remove all files starting with one of the elements of outfiles_prefixes 381 | which aren't listed in old_files. 382 | 383 | """ 384 | files_to_remove = _list_remote_temp_files(outfiles_prefixes) - old_files 385 | 386 | for f in files_to_remove: 387 | remote_toolbox_cmd('rm', f) 388 | -------------------------------------------------------------------------------- /tools/mach_b2g_bootstrap.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this 3 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | from __future__ import print_function, unicode_literals 6 | 7 | import imp 8 | import os 9 | import pickle 10 | import platform 11 | import subprocess 12 | import sys 13 | import tempfile 14 | import time 15 | import urlparse 16 | 17 | STATE_DIR_FIRST_RUN = ''' 18 | mach and the build system store shared state in a common directory on the 19 | filesystem. The following directory will be created: 20 | 21 | {userdir} 22 | 23 | If you would like to use a different directory, hit CTRL+c and set the 24 | MOZBUILD_STATE_PATH environment variable to the directory you would like to 25 | use and re-run mach. For this change to take effect forever, you'll likely 26 | want to export this environment variable from your shell's init scripts. 27 | '''.lstrip() 28 | 29 | MACH_NOT_FOUND = ''' 30 | The mach module could not be found on your system. 
Either configure the B2G 31 | repo, so the copy in gecko can be used, or install it from the Python package 32 | index. 33 | 34 | To install mach from pypi, run: 35 | 36 | $ wget https://raw.github.com/pypa/pip/master/contrib/get-pip.py -O - | python 37 | $ pip install mach 38 | '''.lstrip() 39 | 40 | MACH_DUPLICATES = ''' 41 | Warning: two copies of mach were detected. Using the copy in '%s'. To remove 42 | the obsolete copy in '%s', run `pip uninstall mach`. 43 | '''.lstrip() 44 | 45 | LOAD_CONFIG_FAILED = ''' 46 | An error occured when trying to source load-config.sh. Make sure there are 47 | no problems with your .userconfig file and try again. The following output was 48 | received: 49 | 50 | %s 51 | 52 | If you think this is an error in the mach driver itself, please file a bug under 53 | Boot2Gecko/Builds. 54 | '''.lstrip() 55 | 56 | # TODO Bug 794506 Integrate with the in-tree virtualenv configuration. 57 | SEARCH_PATHS = [] 58 | 59 | # Individual files providing mach commands. 60 | MACH_MODULES = [ 61 | 'python/mach/mach/commands/commandinfo.py', 62 | ] 63 | 64 | CATEGORIES = { 65 | 'build': { 66 | 'short': 'Build Commands', 67 | 'long': 'Interact with the build system', 68 | 'priority': 80, 69 | }, 70 | 'post-build': { 71 | 'short': 'Post-build Commands', 72 | 'long': 'Common actions performed after completing a build.', 73 | 'priority': 70, 74 | }, 75 | 'testing': { 76 | 'short': 'Testing', 77 | 'long': 'Run tests.', 78 | 'priority': 60, 79 | }, 80 | 'ci': { 81 | 'short': 'CI', 82 | 'long': 'Taskcluster commands', 83 | 'priority': 59 84 | }, 85 | 'devenv': { 86 | 'short': 'Development Environment', 87 | 'long': 'Set up and configure your development environment.', 88 | 'priority': 50, 89 | }, 90 | 'build-dev': { 91 | 'short': 'Low-level Build System Interaction', 92 | 'long': 'Interact with specific parts of the build system.', 93 | 'priority': 20, 94 | }, 95 | 'misc': { 96 | 'short': 'Potpourri', 97 | 'long': 'Potent potables and assorted snacks.', 98 | 
        'priority': 10,
    },
    'disabled': {
        'short': 'Disabled',
        'long': 'These commands are unavailable for your current context, run "mach " to see why.',
        'priority': 0,
    }
}

def download_b2g_sdk(b2g_sdk):
    """Download and extract a prebuilt B2G desktop build into b2g_sdk.

    Returns the path to the extracted "b2g" directory inside b2g_sdk.  The
    download and extraction are skipped if that directory already exists.
    Raises on unsupported host platforms.
    """
    system = platform.system()
    # Per-platform prebuilt archive URLs (pinned snapshots).
    # NOTE(review): these hosts (queue.taskcluster.net, ftp.mozilla.org paths)
    # are historical and may no longer serve these artifacts -- verify.
    if system == "Linux":
        url = "https://queue.taskcluster.net/v1/task/YamDhuDgTWa_kWXcSedDHA/artifacts/public/build/target.linux-x86_64.tar.bz2"
    elif system == "Darwin":
        url = "http://ftp.mozilla.org/pub/mozilla.org/b2g/nightly/2015/09/2015-09-02-03-02-03-mozilla-central/b2g-43.0a1.en-US.mac64.dmg"
    elif system == "Windows":
        url = "http://ftp.mozilla.org/pub/mozilla.org/b2g/nightly/2015/09/2015-09-02-03-02-03-mozilla-central/b2g-43.0a1.en-US.win32.zip"
    else:
        raise Exception('Unable to download b2g_sdk for %s' % system)

    if not os.path.isdir(b2g_sdk):
        os.mkdir(b2g_sdk)

    b2g_path = os.path.join(b2g_sdk, "b2g")
    if not os.path.isdir(b2g_path):
        # Name the local archive after the last URL path component.
        file_path = os.path.join(b2g_sdk, os.path.basename(urlparse.urlparse(url).path))

        # Imported lazily so the module loads even without requests installed.
        import requests

        with open(file_path, "wb") as b2g:
            print("Downloading %s" % url)
            response = requests.get(url, stream=True)
            total_length = response.headers.get("content-length")

            if total_length is None: # no content length header
                b2g.write(response.content)
            else:
                # Stream in 8 KiB chunks and render a progress line.
                download_length = 0
                total_length = int(total_length)
                for data in response.iter_content(8192):
                    download_length += len(data)
                    b2g.write(data)
                    print("\r%10d / %10d [%3.2f%%]" %
                          (download_length,
                           total_length,
                           download_length * 100. / total_length),
                          end = "")
            # NOTE(review): redundant -- the `with` block closes the file.
            b2g.close()

        print()
        print("Extract %s..."
              % file_path)

        import mozinstall
        mozinstall.install(file_path, os.path.join(b2g_sdk))

        # On Mac the archive is an app bundle; link the binaries dir to the
        # expected "b2g" location.
        if system == "Darwin":
            os.symlink(os.path.join(b2g_sdk, "B2G.app", "Contents", "MacOS"), b2g_path)

    return b2g_path

def bootstrap(b2g_home):
    """Set up and return a configured mach instance rooted at b2g_home."""
    # Ensure we are running Python 2.7+. We put this check here so we generate a
    # user-friendly error message rather than a cryptic stack trace on module
    # import.
    if sys.version_info[0] != 2 or sys.version_info[1] < 7:
        print('Python 2.7 or above (but not Python 3) is required to run mach.')
        print('You are running Python', platform.python_version())
        sys.exit(1)

    # Global build system and mach state is stored in a central directory. By
    # default, this is ~/.mozbuild. However, it can be defined via an
    # environment variable. We detect first run (by lack of this directory
    # existing) and notify the user that it will be created. The logic for
    # creation is much simpler for the "advanced" environment variable use
    # case. For default behavior, we educate users and give them an opportunity
    # to react. We always exit after creating the directory because users don't
    # like surprises.
    state_user_dir = os.path.expanduser('~/.mozbuild')
    state_env_dir = os.environ.get('MOZBUILD_STATE_PATH', None)
    if state_env_dir:
        if not os.path.exists(state_env_dir):
            print('Creating global state directory from environment variable: %s'
                  % state_env_dir)
            os.makedirs(state_env_dir, mode=0o770)
            print('Please re-run mach.')
            sys.exit(1)
        state_dir = state_env_dir
    else:
        if not os.path.exists(state_user_dir):
            print(STATE_DIR_FIRST_RUN.format(userdir=state_user_dir))
            # Count down from 20 so the user has a chance to CTRL+c out
            # before the directory is created.
            try:
                for i in range(20, -1, -1):
                    time.sleep(1)
                    sys.stdout.write('%d ' % i)
                    sys.stdout.flush()
            except KeyboardInterrupt:
                sys.exit(1)

            print('\nCreating default state directory: %s' % state_user_dir)
            os.mkdir(state_user_dir)
            print('Please re-run mach.')
            sys.exit(1)
        state_dir = state_user_dir

    if os.path.isfile(os.path.join(b2g_home, '.config')):
        # Load the configuration created by the build system.
        # We need to call set -a because load-config doesn't
        # export the variables it creates.
        f = tempfile.NamedTemporaryFile()
        # Source load-config.sh in a subshell, redirect its chatter to the
        # temp file, then pickle the resulting environment back to us on
        # stdout so we can merge it into os.environ.
        cmd = ['/usr/bin/env', 'bash', '-c',
               'set -a && source %s > %s && python -c "import pickle,os;print(pickle.dumps(os.environ))"'
               % (os.path.join(b2g_home, 'load-config.sh'), f.name)]
        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, cwd=b2g_home)
            os.environ.update(pickle.loads(output))
        except subprocess.CalledProcessError, e:
            print(LOAD_CONFIG_FAILED % e.output.strip())
            sys.exit(1)

        # Echo anything load-config.sh printed while being sourced.
        output = f.read()
        if output:
            print(output)
        f.close()

    # Absolutize GECKO_OBJDIR here, since otherwise mach will try to
    # absolutize it relative to the topsrcdir, which might be different
    # if GECKO_PATH is in use.
    if os.environ.get('GECKO_OBJDIR') is not None:
        os.environ['GECKO_OBJDIR'] = os.path.join(b2g_home, os.environ['GECKO_OBJDIR'])

    # If a gecko source tree is detected, its mach modules are also
    # loaded.
    gecko_dir = os.environ.get('GECKO_PATH', os.path.join(b2g_home, 'gecko'))
    gecko_bootstrap_dir = os.path.join(gecko_dir, 'build')
    if os.path.isdir(gecko_bootstrap_dir):
        path = os.path.join(gecko_bootstrap_dir, 'mach_bootstrap.py')
        with open(path, 'r') as fh:
            imp.load_module('mach_bootstrap', fh, path,
                            ('.py', 'r', imp.PY_SOURCE))

        import mach_bootstrap

        # Extend the module-level lists with gecko's own search paths and
        # command modules, rebased relative to the current directory.
        global SEARCH_PATHS
        global MACH_MODULES
        relpath = os.path.relpath(gecko_dir)
        SEARCH_PATHS += [os.path.join(relpath, p)
                         for p in mach_bootstrap.SEARCH_PATHS]
        MACH_MODULES += [os.path.join(relpath, p)
                         for p in mach_bootstrap.MACH_MODULES]

    # Remember where a pip-installed mach lives (if any) so we can warn
    # about duplicates below.
    mach_package = None
    try:
        mach_package = imp.find_module('mach')[1]
    except:
        pass

    try:
        sys.path[0:0] = [os.path.join(b2g_home, path) for path in SEARCH_PATHS]
        import mach.main
    except ImportError:
        print(MACH_NOT_FOUND)
        sys.exit(1)

    mach_gecko = os.path.join(gecko_dir, 'python', 'mach')
    if mach_package and os.path.isdir(mach_gecko):
        print(MACH_DUPLICATES % (mach_gecko, mach_package))

    # The build system doesn't provide a mechanism to use
    # a different mozconfig.
    os.environ['MOZCONFIG'] = os.path.join(b2g_home, 'gonk-misc',
                                           'default-gecko-config')

    xre_path = download_b2g_sdk(os.path.join(os.getcwd(), "b2g_sdk"))

    def get_build_var(name):
        # Ask the Android build system (make) to dump the named variable.
        env = os.environ.copy()
        env.update({'CALLED_FROM_SETUP': 'true',
                    'BUILD_SYSTEM': 'build/core'})
        command = ['make', '--no-print-directory',
                   '-C', b2g_home,
                   '-f', 'build/core/config.mk',
                   'dumpvar-abs-%s' % name]
        DEVNULL = open(os.devnull, 'wb')
        return subprocess.check_output(command, env=env, stderr=DEVNULL).strip()

    def populate_context(context):
        # Invoked by mach to seed per-command context attributes.
        context.state_dir = state_dir
        context.topdir = gecko_dir
        context.b2g_home = b2g_home
        context.xre_path = xre_path
        # device name is set from load configuration step above
        context.device_name = os.environ.get('DEVICE_NAME', '').rstrip()
        context.device = os.environ.get('DEVICE', '').rstrip()
        context.target_out = os.path.join(
            get_build_var('TARGET_PRODUCT_OUT_ROOT'),
            context.device)
        context.get_build_var = get_build_var

    # Note: rebinds the local name `mach` from the module to an instance.
    mach = mach.main.Mach(b2g_home)
    mach.populate_context_handler = populate_context
    mach.require_conditions = True

    for category, meta in CATEGORIES.items():
        mach.define_category(category, meta['short'], meta['long'],
                             meta['priority'])

    for path in MACH_MODULES:
        module = os.path.join(b2g_home, path)
        if os.path.isfile(module):
            mach.load_commands_from_file(os.path.join(b2g_home, path))

    # Newer mach versions can discover commands via setuptools entry points.
    if hasattr(mach, 'load_commands_from_entry_point'):
        mach.load_commands_from_entry_point('mach.b2g.providers')

    return mach
--------------------------------------------------------------------------------
/tools/update-tools/.gitignore:
--------------------------------------------------------------------------------
*.pyc
-------------------------------------------------------------------------------- /tools/update-tools/README.md: -------------------------------------------------------------------------------- 1 | Tools for packaging and testing FxOS system updates 2 | 3 | Acknowledgements 4 | ================ 5 | For convenience, these scripts depend on a number of prebuilt binaries to 6 | minimize local build dependencies: 7 | 8 | * bin/$HOST/adb is from AOSP, and is licensed under the Apache Public License v2. 9 | Source code can be found [here](https://github.com/android/platform_system_core/tree/master/adb) 10 | 11 | * bin/$HOST/mar is from Mozilla, and is licensed under the Mozilla Public License 2.0. 12 | Source code can be found [here](http://hg.mozilla.org/mozilla-central/file/tip/modules/libmar) 13 | 14 | * bin/gonk/busybox-armv6l is from Busybox, and is licensed under the GNU GPL v2. 15 | Source code can be found [here](http://www.busybox.net/downloads/) 16 | 17 | * bin/gonk/update-binary is from AOSP and is licensed under the Apache Public License v2. 18 | Source code can be found [here](https://android.googlesource.com/platform/bootable/recovery.git) 19 | 20 | * bin/signapk.jar is from AOSP, and is licensed under the Apache Public License v2. 
21 | Source code can be found [here](https://github.com/android/platform_build/tree/master/tools/signapk) 22 | -------------------------------------------------------------------------------- /tools/update-tools/bin/darwin-x86/adb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaiostech/B2G/99f4ff316f2a7dbfdfe155d1b014e299b1371d51/tools/update-tools/bin/darwin-x86/adb -------------------------------------------------------------------------------- /tools/update-tools/bin/gonk/busybox-armv6l: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaiostech/B2G/99f4ff316f2a7dbfdfe155d1b014e299b1371d51/tools/update-tools/bin/gonk/busybox-armv6l -------------------------------------------------------------------------------- /tools/update-tools/bin/gonk/update-binary: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaiostech/B2G/99f4ff316f2a7dbfdfe155d1b014e299b1371d51/tools/update-tools/bin/gonk/update-binary -------------------------------------------------------------------------------- /tools/update-tools/bin/linux-x86/adb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaiostech/B2G/99f4ff316f2a7dbfdfe155d1b014e299b1371d51/tools/update-tools/bin/linux-x86/adb -------------------------------------------------------------------------------- /tools/update-tools/bin/signapk.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaiostech/B2G/99f4ff316f2a7dbfdfe155d1b014e299b1371d51/tools/update-tools/bin/signapk.jar -------------------------------------------------------------------------------- /tools/update-tools/build-flash-fota.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 
#
# Copyright (C) 2012 Mozilla Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Build A FOTA update zip that flashes the system partition

import argparse
import os
import sys
import tempfile
import update_tools

def build_flash_fota(args):
    """Build a signed FOTA flash zip from the parsed command-line args.

    Keys default to the dev keys in build/target/product/security unless
    overridden by --public-key/--private-key.
    """
    security_dir = os.path.join(update_tools.b2g_dir, "build", "target",
                                "product", "security")
    public_key = args.public_key or os.path.join(security_dir,
                                                 args.dev_key + ".x509.pem")
    private_key = args.private_key or os.path.join(security_dir,
                                                   args.dev_key + ".pk8")
    output_zip = args.output or "flash.zip"
    update_bin = args.update_bin or os.path.join(update_tools.b2g_dir, "tools",
                                                 "update-tools", "bin", "gonk", "update-binary")

    builder = update_tools.FlashFotaBuilder(fstab=args.system_fstab,
                                            sdk=args.sdk_version)

    builder.fota_type = args.fota_type
    builder.fota_dirs = []
    builder.fota_files = []
    # A partial FOTA carries an explicit list of dirs/files to include.
    # NOTE(review): the file opened here is never closed explicitly.
    if args.fota_type == 'partial':
        builder.fota_dirs = args.fota_dirs.split(' ')
        builder.fota_files = [line.rstrip() for line in open(args.fota_files, 'r')]

    builder.fota_sdcard = args.fota_sdcard or "/sdcard"

    builder.fota_check_device_name = args.fota_check_device_name
    builder.fota_check_gonk_version = args.fota_check_gonk_version
    builder.system_dir = args.system_dir

    # NOTE(review): str.split(' ') never returns an empty list ("" -> ['']),
    # so `or []` is dead code and an empty flag value yields [''] -- confirm
    # FlashFotaBuilder tolerates that.
    builder.fota_partitions = args.fota_partitions.split(' ') or []
    builder.fota_format_partitions = args.fota_format_partitions.split(' ') or []

    builder.build_flash_fota(args.system_dir, public_key, private_key,
                             output_zip, update_bin)
    print "FOTA Flash ZIP generated: %s" % output_zip

def main():
    # Command-line front end; argument groups mirror the builder fields above.
    parser = argparse.ArgumentParser(usage="%(prog)s [options]",
        epilog="Note: java is required to be on your PATH to sign the update.zip")

    system_group = parser.add_argument_group("system options")
    system_group.add_argument("--system-dir", dest="system_dir",
        required=True, help="path to system directory. required")
    system_group.add_argument("--system-fstab", dest="system_fstab",
        default=None, required=True, help="path to the recovery fstab. required")

    fota_group = parser.add_argument_group("fota options")
    fota_group.add_argument("--fota-type", dest="fota_type",
        required=False, default="full",
        help="'partial', 'full' or 'fullimg' fota.
'partial' requires a file list")
    fota_group.add_argument("--fota-dirs", dest="fota_dirs",
        required=False, default="",
        help="space-separated string containing list of dirs to include, to delete files")
    fota_group.add_argument("--fota-files", dest="fota_files",
        required=False, default="",
        help="file containing list of files in /system to include")
    fota_group.add_argument("--fota-sdcard", dest="fota_sdcard",
        required=False, default="/sdcard",
        help="sdcard mountpoint in recovery mode (RECOVERY_EXTERNAL_STORAGE)")
    fota_group.add_argument("--fota-partitions", dest="fota_partitions",
        required=False, default="",
        help="space-separated string containing list of partitions to flash")
    fota_group.add_argument("--fota-format-partitions", dest="fota_format_partitions",
        default="", required=False,
        help="space-separated list of partitions mount point that we allow to format")

    fota_checks_group = parser.add_argument_group("fota_checks_group")
    # NOTE(review): the help string below starts with a stray apostrophe.
    fota_checks_group.add_argument("--fota-check-device-name", dest="fota_check_device_name",
        default=None, required=False,
        help="'add a check to prevent the update from being installed on a device different from TARGET_DEVICE")
    fota_checks_group.add_argument("--fota-check-gonk-version", dest="fota_check_gonk_version",
        default=False, required=False, action="store_true",
        help="add checks to verify that the device's libraries match the ones the update depends on")

    signing_group = parser.add_argument_group("signing options")
    signing_group.add_argument("-d", "--dev-key", dest="dev_key",
        metavar="KEYNAME", default="testkey",
        help="Use the named dev key pair in build/target/product/security. " +
             "Possible keys: media, platform, shared, testkey. Default: testkey")

    signing_group.add_argument("-k", "--private-key", dest="private_key",
        metavar="PRIVATE_KEY", default=None,
        help="Private key used for signing the update.zip. Overrides --dev-key.")

    signing_group.add_argument("-K", "--public-key", dest="public_key",
        metavar="PUBLIC_KEY", default=None,
        help="Public key used for signing the update.zip. Overrides --dev-key.")

    parser.add_argument("-u", "--update-bin", dest="update_bin",
        required=False, default=None,
        help="Specify update-binary to be used in update.zip.")

    parser.add_argument("-s", "--sdk-version", dest="sdk_version",
        required=False, default=15, type=int,
        help="Specify the target SDK version (defaulting to SDK 15, ICS) when producing update.zip.")

    parser.add_argument("-o", "--output", dest="output", metavar="ZIP",
        help="Output to ZIP. Default: flash.zip", default=None)

    # validate_env may add environment-related errors before parsing.
    update_tools.validate_env(parser)
    try:
        build_flash_fota(parser.parse_args())
    except update_tools.UpdateException, e:
        print >>sys.stderr, "Error: %s" % e
        sys.exit(1)

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/tools/update-tools/build-fota-mar.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
#
# Copyright (C) 2012 Mozilla Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | # Build FxOS FOTA update MARs that contain an AOSP update.zip 18 | 19 | import argparse 20 | import os 21 | import sys 22 | import update_tools 23 | 24 | def build_fota_mar(update_zip, output_mar): 25 | try: 26 | builder = update_tools.FotaMarBuilder() 27 | builder.build_mar(update_zip, output_mar) 28 | print "FOTA Update MAR generated: %s" % output_mar 29 | except Exception, e: 30 | print >>sys.stderr, "Error: %s" % e 31 | sys.exit(1) 32 | 33 | def main(): 34 | parser = argparse.ArgumentParser(usage="%(prog)s [options] update.zip", 35 | epilog="Note: update.zip must be a signed FOTA update.zip") 36 | 37 | parser.add_argument("-o", "--output", dest="output", metavar="MAR", 38 | default=None, 39 | help="Output to update MAR. Default: replace '.zip' with '.mar'") 40 | 41 | update_tools.validate_env(parser) 42 | options, args = parser.parse_known_args() 43 | if len(args) == 0: 44 | parser.print_help() 45 | print >>sys.stderr, "Error: update.zip not specified" 46 | sys.exit(1) 47 | 48 | update_zip = args[0] 49 | if not os.path.exists(update_zip): 50 | print >>sys.stderr, \ 51 | "Error: update.zip does not exist: %s" % update_zip 52 | sys.exit(1) 53 | 54 | output_mar = options.output 55 | if not output_mar: 56 | if ".zip" in update_zip: 57 | output_mar = update_zip.replace(".zip", ".mar") 58 | else: 59 | output_mar = "update.mar" 60 | 61 | build_fota_mar(update_zip, output_mar) 62 | 63 | if __name__ == "__main__": 64 | main() 65 | -------------------------------------------------------------------------------- /tools/update-tools/build-fota-zip.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Copyright (C) 2012 Mozilla Foundation 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | # Build FOTA update zips for testing. Production zips should be built w/ AOSP tools. 18 | 19 | import argparse 20 | import os 21 | import sys 22 | import tempfile 23 | import update_tools 24 | 25 | def build_fota_zip(update_dir, public_key, private_key, output_zip, update_bin): 26 | try: 27 | builder = update_tools.FotaZipBuilder() 28 | 29 | stage_dir = tempfile.mkdtemp() 30 | unsigned_zip = os.path.join(stage_dir, "update-unsigned.zip") 31 | 32 | builder.build_unsigned_zip(update_dir, unsigned_zip, update_bin) 33 | print "Public key: %s" % public_key 34 | print "Private key: %s" % private_key 35 | 36 | builder.sign_zip(unsigned_zip, public_key, private_key, output_zip) 37 | print "FOTA Update ZIP generated: %s" % output_zip 38 | except Exception, e: 39 | print >>sys.stderr, "Error: %s" % e 40 | sys.exit(1) 41 | 42 | def main(): 43 | parser = argparse.ArgumentParser(usage="%(prog)s [options] update-dir", 44 | epilog="Note: java is required to be on your PATH to sign the update.zip") 45 | 46 | parser.add_argument("-d", "--dev-key", dest="dev_key", metavar="KEYNAME", 47 | default="testkey", 48 | help="Use the named dev key pair in build/target/product/security. " + 49 | "Possible keys: media, platform, shared, testkey. Default: testkey") 50 | 51 | parser.add_argument("-k", "--private-key", dest="private_key", 52 | metavar="PRIVATE_KEY", default=None, 53 | help="Private key used for signing the update.zip. 
Overrides --dev-key.") 54 | 55 | parser.add_argument("-K", "--public-key", dest="public_key", 56 | metavar="PUBLIC_KEY", default=None, 57 | help="Public key used for signing the update.zip. Overrides --dev-key.") 58 | 59 | parser.add_argument("-u", "--update-bin", dest="update_bin", 60 | required=False, default=None, 61 | help="Specify update-binary to be used in update.zip.") 62 | 63 | parser.add_argument("-o", "--output", dest="output", metavar="ZIP", 64 | help="Output to ZIP. Default: update-dir.zip", default=None) 65 | 66 | update_tools.validate_env(parser) 67 | options, args = parser.parse_known_args() 68 | if len(args) == 0: 69 | parser.print_help() 70 | print >>sys.stderr, "Error: update-dir not specified" 71 | sys.exit(1) 72 | 73 | update_dir = args[0] 74 | if not os.path.isdir(update_dir): 75 | print >>sys.stderr, \ 76 | "Error: update-dir is not a directory: %s" % update_dir 77 | sys.exit(1) 78 | 79 | security_dir = os.path.join(update_tools.b2g_dir, "build", "target", 80 | "product", "security") 81 | public_key = options.public_key or os.path.join(security_dir, 82 | options.dev_key + ".x509.pem") 83 | private_key = options.private_key or os.path.join(security_dir, 84 | options.dev_key + ".pk8") 85 | update_bin = args.update_bin or os.path.join(update_tools.b2g_dir, "tools", 86 | "update-tools", "bin", "gonk", "update-binary") 87 | 88 | output_zip = options.output or update_dir + ".zip" 89 | build_fota_zip(update_dir, public_key, private_key, output_zip, update_bin) 90 | 91 | if __name__ == "__main__": 92 | main() 93 | -------------------------------------------------------------------------------- /tools/update-tools/build-gecko-mar.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Copyright (C) 2012 Mozilla Foundation 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Build a gecko (OTA) full or incremental MAR

import argparse
import os
import shutil
import tempfile
import update_tools

def unwrap_mar(mar, verbose):
    """Extract a bzip2-compressed MAR into a fresh temp dir and return it.

    The caller owns (and must remove) the returned directory.
    """
    print "Extracting MAR for incremental update: %s" % mar
    tmpdir = tempfile.mkdtemp()
    bz2_mar = update_tools.BZip2Mar(mar, verbose=verbose)
    bz2_mar.extract(tmpdir)
    return tmpdir

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("mar", metavar="MAR", help="Destination MAR file")
    parser.add_argument("--dir", metavar="DIR", default=None,
        help="Source directory. When building an \"incremental\" MAR, this can " +
             "also be a MAR for convenience. Default: $PWD")

    parser.add_argument("--to", metavar="TO", default=None,
        help="This is a synonym for --dir")
    parser.add_argument("--from", metavar="FROM", dest="from_dir", default=None,
        help="The base directory or MAR to build an incremental MAR from. This " +
             "will build an incremental update MAR between FROM and TO")
    parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
        default=False, help="Enable verbose logging")

    args = parser.parse_args()

    if os.path.isdir(args.mar):
        parser.error("MAR destination is a directory: %s" % args.mar)

    # --dir/--to are synonyms; fall back to the current directory.
    if not args.dir:
        args.dir = args.to if args.to else os.getcwd()

    if not args.from_dir and not os.path.isdir(args.dir):
        parser.error("Path is not a directory: %s" % args.dir)

    # For incremental builds, either endpoint may be given as a MAR file;
    # unwrap such MARs into temp dirs and remember them for cleanup.
    to_tmpdir = from_tmpdir = None
    if args.from_dir and os.path.isfile(args.dir):
        to_tmpdir = unwrap_mar(args.dir, args.verbose)
        args.dir = to_tmpdir

    if args.from_dir and os.path.isfile(args.from_dir):
        from_tmpdir = unwrap_mar(args.from_dir, args.verbose)
        args.from_dir = from_tmpdir

    try:
        builder = update_tools.GeckoMarBuilder()
        builder.build_gecko_mar(args.dir, args.mar, from_dir=args.from_dir)
        update_type = "incremental" if args.from_dir else "full"

        print "Built %s update MAR: %s" % (update_type, args.mar)
    except Exception, e:
        parser.error(e)
    finally:
        # Always clean up any temp extraction dirs, even on error.
        if to_tmpdir:
            shutil.rmtree(to_tmpdir)
        if from_tmpdir:
            shutil.rmtree(from_tmpdir)

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/tools/update-tools/build-update-xml.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
#
# Copyright (C) 2012 Mozilla Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
def main():
    """Build a FxOS update.xml (for testing) from a MAR.

    All inputs come from update_tools.UpdateXmlOptions (argparse-based).
    The generated XML is written to the configured output file when one is
    given, otherwise printed to stdout. Any failure is reported on stderr
    and exits with status 1.
    """
    options = update_tools.UpdateXmlOptions()
    options.parse_args()
    output_xml = options.get_output_xml()

    try:
        xml = options.build_xml()
        if output_xml:
            with open(output_xml, "w") as out_file:
                out_file.write(xml)
        else:
            print(xml)
    except Exception as e:
        sys.stderr.write("Error: %s\n" % e)
        sys.exit(1)

if __name__ == "__main__":
    main()
def main():
    """Exercise an on-device FxOS update using locally built MAR(s).

    Builds an update.xml from the given complete and/or partial MAR,
    serves it (via Busybox pushed to the device, or a local HTTP directory
    when --update-dir is used), points the device's update URL at it and
    restarts B2G. Errors are reported on stderr with exit status 1.
    """
    options = UpdateXmlOptions(output_arg=False)
    options.add_argument("--update-dir", dest="update_dir", metavar="DIR",
        default=None, help="Use a local http directory instead of pushing " +
        "Busybox to the device. Also requires --url-template")
    options.parse_args()

    try:
        test_update = TestUpdate(options.build_xml(),
                                 complete_mar=options.get_complete_mar(),
                                 partial_mar=options.get_partial_mar(),
                                 url_template=options.get_url_template(),
                                 update_dir=options.options.update_dir)
        test_update.test_update()
    except Exception as e:
        sys.stderr.write("Error: %s\n" % e)
        sys.exit(1)

if __name__ == "__main__":
    main()
def main():
    """Wrap or unwrap an AUS MAR whose entries are bz2 compressed.

    The default action wraps DIR into MAR; -u/--unwrap extracts MAR into
    DIR instead. Note: re-wrapping loses any metadata (e.g. signatures)
    that existed in the original MAR.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("mar", metavar="MAR", help="MAR archive to (un)wrap")
    parser.add_argument("dir", metavar="DIR", help="Source or destination " +
        "directory for (un)wrapping MAR.")
    parser.add_argument("-u", "--unwrap", dest="unwrap", action="store_true",
        default=False, help="Unwrap MAR to DIR")
    parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
        default=False, help="Verbose (un)wrapping")

    args = parser.parse_args()
    # A regular file cannot serve as either the source or destination dir.
    if os.path.isfile(args.dir):
        parser.error("Path is not a directory: %s" % args.dir)

    try:
        mar = update_tools.BZip2Mar(args.mar, verbose=args.verbose)
        action = mar.extract if args.unwrap else mar.create
        action(args.dir)

        if args.unwrap:
            print("Unwrapped MAR to %s" % args.dir)
        else:
            print("Wrapped MAR to %s" % args.mar)

    except Exception as e:
        # parser.error prints the message and exits with status 2.
        parser.error(str(e))

if __name__ == "__main__":
    main()