├── master ├── public_html │ ├── favicon.ico │ ├── bg_gradient.jpg │ ├── robots.txt │ └── default.css ├── repository.py ├── templates │ └── README.txt ├── patches │ ├── 0013-Add-instance-id-to-build-properties.patch │ ├── 0019-Enable-run-time-AMI-determination.patch │ ├── 0002-Add-isIdle-helper.patch │ ├── README.md │ ├── 0001-Treat-RETRY-as-FAILURE.patch │ ├── 0005-Soft-disconnect-when-build_wait_timeout-0.patch │ ├── 0009-Move-spot-price-historical-averaging-to-its-own-func.patch │ ├── 0017-Allow-control-over-environment-logging-MasterShellCo.patch │ ├── 0003-Set-tags-for-latent-ec2-buildslaves.patch │ ├── 0018-Better-handling-for-instance-termination.patch │ ├── 0012-Add-debug-logging.patch │ ├── 0008-Allow-slaves-to-substantiate-in-parallel.patch │ ├── 0014-Remove-default-values-for-keypair-and-security-names.patch │ ├── 0006-Force-root-volumes-to-be-deleted-upon-termination-on.patch │ ├── 0011-Properly-handle-a-stale-broker-connection-on-ping.patch │ ├── 0007-Create-volumes.patch │ ├── 0010-Allow-independant-EC2-price_multiplier-or-max_spot_p.patch │ ├── 0004-Retry-on-EC2-NotFound-errors.patch │ ├── 0016-Add-support-for-block-devices-to-EC2LatentBuildSlave.patch │ └── 0015-Add-VPC-support-to-EC2LatentBuildSlave.patch ├── buildbot.tac ├── password.py.sample ├── github.py └── buildslaves.py ├── scripts ├── pts-test-profiles.tar.gz ├── bb-build-linux.sh ├── bb-cleanup.sh ├── bb-test-ztest.sh ├── bb-build.sh ├── ZTS-exceptions.md ├── runurl ├── bb-test-zfstests.sh ├── bb-test-pts.sh ├── bb-test-prepare.sh ├── openzfs-merge.sh ├── bb-test-cleanup.sh ├── openzfs-tracking.sh ├── bb-dependencies.sh └── known-issues.sh ├── .gitignore └── LICENSE /master/public_html/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openzfs/zfs-buildbot/HEAD/master/public_html/favicon.ico -------------------------------------------------------------------------------- /scripts/pts-test-profiles.tar.gz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/openzfs/zfs-buildbot/HEAD/scripts/pts-test-profiles.tar.gz -------------------------------------------------------------------------------- /master/public_html/bg_gradient.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openzfs/zfs-buildbot/HEAD/master/public_html/bg_gradient.jpg -------------------------------------------------------------------------------- /master/public_html/robots.txt: -------------------------------------------------------------------------------- 1 | User-agent: * 2 | Disallow: /waterfall 3 | Disallow: /builders 4 | Disallow: /changes 5 | Disallow: /buildslaves 6 | Disallow: /schedulers 7 | Disallow: /one_line_per_build 8 | Disallow: /grid 9 | Disallow: /tgrid 10 | Disallow: /json 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | master/gitpoller* 3 | master/*_BUILD_ 4 | master/*_TEST_ 5 | master/http.* 6 | master/config.py 7 | master/runtests 8 | master/state.sqlite 9 | master/twistd.log* 10 | master/twistd.pid 11 | master/password.py 12 | master/password.pyc 13 | master/builderinfo.pyc 14 | master/public_html/*.rpm 15 | -------------------------------------------------------------------------------- /master/repository.py: -------------------------------------------------------------------------------- 1 | # -*- python -*- 2 | # ex: set syntax=python: 3 | 4 | zfs_repo = "https://github.com/openzfs/zfs.git" 5 | linux_repo = "https://github.com/torvalds/linux.git" 6 | 7 | all_repositories = { 8 | "https://github.com/torvalds/linux" : 'linux', 9 | "https://github.com/openzfs/zfs" : 'zfs', 10 | "https://github.com/torvalds/linux.git" : 'linux', 11 | "https://github.com/openzfs/zfs.git" : 'zfs', 12 | } 13 | 
-------------------------------------------------------------------------------- /master/templates/README.txt: -------------------------------------------------------------------------------- 1 | This is the directory to place customized templates for webstatus. 2 | 3 | You can find the sources for the templates used in: 4 | buildbot/status/web/templates 5 | 6 | You can copy any of those files to this directory, make changes, and buildbot will automatically 7 | use your modified templates. 8 | 9 | Also of note is that you will not need to restart/reconfig buildbot master to have these changes take effect. 10 | 11 | The syntax of the templates is Jinja2: 12 | http://jinja.pocoo.org/ 13 | -------------------------------------------------------------------------------- /master/patches/0013-Add-instance-id-to-build-properties.patch: -------------------------------------------------------------------------------- 1 | From 222b228779ad4625cc2bd104f451a968ea2f3651 Mon Sep 17 00:00:00 2001 2 | From: Brian Behlendorf 3 | Date: Wed, 20 Jul 2016 10:58:38 -0700 4 | Subject: [PATCH 13/18] Add instance id to build properties 5 | 6 | Signed-off-by: Brian Behlendorf 7 | --- 8 | master/buildbot/buildslave/ec2.py | 1 + 9 | 1 file changed, 1 insertion(+) 10 | 11 | diff --git a/master/buildbot/buildslave/ec2.py b/master/buildbot/buildslave/ec2.py 12 | index 1e8442e9e..9f24df51d 100644 13 | --- a/master/buildbot/buildslave/ec2.py 14 | +++ b/master/buildbot/buildslave/ec2.py 15 | @@ -471,6 +471,7 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 16 | else: 17 | raise 18 | if self.instance.state == RUNNING: 19 | + self.properties.setProperty("instance", self.instance.id, "BuildSlave") 20 | self.output = self.instance.get_console_output() 21 | minutes = duration // 60 22 | seconds = duration % 60 23 | -- 24 | 2.14.3 25 | 26 | -------------------------------------------------------------------------------- /master/buildbot.tac: 
-------------------------------------------------------------------------------- 1 | import os 2 | 3 | from twisted.application import service 4 | from buildbot.master import BuildMaster 5 | 6 | basedir = '/home/buildbot/zfs-buildbot/master' 7 | rotateLength = 10000000 8 | maxRotatedFiles = 10 9 | configfile = 'master.cfg' 10 | 11 | # Default umask for server 12 | umask = None 13 | 14 | # if this is a relocatable tac file, get the directory containing the TAC 15 | if basedir == '.': 16 | import os.path 17 | basedir = os.path.abspath(os.path.dirname(__file__)) 18 | 19 | # note: this line is matched against to check that this is a buildmaster 20 | # directory; do not edit it. 21 | application = service.Application('buildmaster') 22 | from twisted.python.logfile import LogFile 23 | from twisted.python.log import ILogObserver, FileLogObserver 24 | logfile = LogFile.fromFullPath(os.path.join(basedir, "twistd.log"), rotateLength=rotateLength, 25 | maxRotatedFiles=maxRotatedFiles) 26 | application.setComponent(ILogObserver, FileLogObserver(logfile).emit) 27 | 28 | m = BuildMaster(basedir, configfile, umask) 29 | m.setServiceParent(application) 30 | m.log_rotation.rotateLength = rotateLength 31 | m.log_rotation.maxRotatedFiles = maxRotatedFiles 32 | -------------------------------------------------------------------------------- /master/patches/0019-Enable-run-time-AMI-determination.patch: -------------------------------------------------------------------------------- 1 | diff --git a/master/buildbot/buildslave/ec2.py b/master/buildbot/buildslave/ec2.py 2 | index 13e5a08a6..aa7dd4633 100644 3 | --- a/master/buildbot/buildslave/ec2.py 4 | +++ b/master/buildbot/buildslave/ec2.py 5 | @@ -476,6 +476,7 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 6 | bid_price = self.max_spot_price 7 | log.msg('%s %s requesting spot instance with price %0.2f.' 
% 8 | (self.__class__.__name__, self.slavename, bid_price)) 9 | + image = self.get_image() 10 | reservations = self.conn.request_spot_instances( 11 | bid_price, self.ami, key_name=self.keypair_name, 12 | security_groups=self.classic_security_groups, 13 | @@ -489,7 +490,7 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 14 | instance_id = request.instance_id 15 | reservations = self.conn.get_all_instances(instance_ids=[instance_id]) 16 | self.instance = reservations[0].instances[0] 17 | - return self._wait_for_instance(self.get_image()) 18 | + return self._wait_for_instance(image) 19 | 20 | def _wait_for_instance(self, image): 21 | log.msg('%s %s waiting for instance %s to start' % 22 | -------------------------------------------------------------------------------- /master/patches/0002-Add-isIdle-helper.patch: -------------------------------------------------------------------------------- 1 | From b242612cbc4f4830c693a45f2ad5490856e8b352 Mon Sep 17 00:00:00 2001 2 | From: Brian Behlendorf 3 | Date: Tue, 3 Nov 2015 11:04:10 -0800 4 | Subject: [PATCH 02/18] Add isIdle helper 5 | 6 | Checks if a buildslave is available and currently idle. In the case 7 | of a latent buildslave it may be available but not substantiated. This 8 | helper allows us to determine if a build can be immediately serviced 9 | without the cost of substantiating a new buildslave. 10 | 11 | Signed-off-by: Brian Behlendorf 12 | --- 13 | master/buildbot/process/slavebuilder.py | 3 +++ 14 | 1 file changed, 3 insertions(+) 15 | 16 | diff --git a/master/buildbot/process/slavebuilder.py b/master/buildbot/process/slavebuilder.py 17 | index 86f427d3c..42f050e5b 100644 18 | --- a/master/buildbot/process/slavebuilder.py 19 | +++ b/master/buildbot/process/slavebuilder.py 20 | @@ -72,6 +72,9 @@ class AbstractSlaveBuilder(pb.Referenceable): 21 | # no slave? not very available. 
22 | return False 23 | 24 | + def isIdle(self): 25 | + return self.isAvailable() and self.state is IDLE 26 | + 27 | def isBusy(self): 28 | return self.state not in (IDLE, LATENT) 29 | 30 | -- 31 | 2.14.3 32 | 33 | -------------------------------------------------------------------------------- /scripts/bb-build-linux.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # Example usage: 4 | # 5 | # export LINUX_DIR="$HOME/src/linux" 6 | # export ZFS_DIR="$HOME/src/zfs" 7 | # ./bb-build-linux.sh 8 | # 9 | 10 | LINUX_DIR=${LINUX_DIR:-$(readlink -f .)} 11 | ZFS_DIR=${ZFS_DIR:-$(readlink -f ../zfs)} 12 | MAKE_LOG="$LINUX_DIR/make.log" 13 | 14 | set -x 15 | cd $LINUX_DIR 16 | 17 | # Configure the kernel for a default build. 18 | sed -i '/EXTRAVERSION = / s/$/.zfs/' Makefile 19 | make mrproper >>$MAKE_LOG 2>&1 || exit 1 20 | make defconfig >>$MAKE_LOG 2>&1 || exit 1 21 | 22 | # Enable ZFS and additional dependencies. 23 | cat >>.config <>$MAKE_LOG 2>&1 || exit 1 32 | 33 | # Configure ZFS and add it to the kernel tree. 34 | cd $ZFS_DIR 35 | ./autogen.sh >>$MAKE_LOG 2>&1 || exit 1 36 | ./configure --enable-linux-builtin --with-linux=$LINUX_DIR \ 37 | --with-linux-obj=$LINUX_DIR >>$MAKE_LOG 2>&1 || exit 1 38 | ./copy-builtin $LINUX_DIR >>$MAKE_LOG 2>&1 || exit 1 39 | 40 | # Build the kernel. 41 | cd $LINUX_DIR 42 | # if we don't do this, make prints a warning 43 | sed -i '/CONFIG_ZFS/d;$aCONFIG_ZFS=y' .config 44 | make -kj$(nproc) >>$MAKE_LOG 2>&1 || exit 1 45 | make -kj$(nproc) modules >>$MAKE_LOG 2>&1 || exit 1 46 | 47 | exit 0 48 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, Lawrence Livermore National Security, LLC (LLNS) 2 | All rights reserved. 
3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 15 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | -------------------------------------------------------------------------------- /master/patches/README.md: -------------------------------------------------------------------------------- 1 | # ZFS on Linux Buildbot Patches 2 | 3 | This directory contains patches which have been applied to the ZFS on 4 | Linux buildbot master. Some of the patches were required to improve 5 | reliability, others are bug fixes and finally several are to modify 6 | the default behavior to facilitate the testing of kernel modules. 7 | Each patch is fully described in its commit comment. 
8 | 9 | ``` 10 | 0001-Treat-RETRY-as-FAILURE.patch 11 | 0002-Add-isIdle-helper.patch 12 | 0003-Set-tags-for-latent-ec2-buildslaves.patch 13 | 0004-Retry-on-EC2-NotFound-errors.patch 14 | 0005-Soft-disconnect-when-build_wait_timeout-0.patch 15 | 0006-Force-root-volumes-to-be-deleted-upon-termination-on.patch 16 | 0007-Create-volumes.patch 17 | 0008-Allow-slaves-to-substantiate-in-parallel.patch 18 | 0009-Move-spot-price-historical-averaging-to-its-own-func.patch 19 | 0010-Allow-independant-EC2-price_multiplier-or-max_spot_p.patch 20 | 0011-Properly-handle-a-stale-broker-connection-on-ping.patch 21 | 0012-Add-debug-logging.patch 22 | 0013-Add-instance-id-to-build-properties.patch 23 | 0014-Remove-default-values-for-keypair-and-security-names.patch 24 | 0015-Add-VPC-support-to-EC2LatentBuildSlave.patch 25 | 0016-Add-support-for-block-devices-to-EC2LatentBuildSlave.patch 26 | 0017-Allow-control-over-environment-logging-MasterShellCo.patch 27 | 0018-Better-handling-for-instance-termination.patch 28 | ``` 29 | 30 | The patches cleanly apply on top of `9df5d7d2a4db811fde4780cc1555453ee0f12649` 31 | in the buildbot Git repository, as well the buildbot 0.8.14 release. 32 | -------------------------------------------------------------------------------- /master/password.py.sample: -------------------------------------------------------------------------------- 1 | # -*- python -*- 2 | # ex: set syntax=python: 3 | 4 | # Build slaves configured with mkBuildSlave() must have a name and password 5 | # entry. The names must match the names in mkBuildSlave() and the passwords 6 | # should be distributed to the static builders for authentication. 7 | slave_userpass = [ 8 | ("slave1", "password1"), 9 | ("slave2", "password2"), 10 | ] 11 | 12 | # Web users are authenticated using a basic login and password. From the 13 | # web interface pending builds may be canceled or resubmitted. 
14 | web_userpass = [ 15 | ("user1", "password1"), 16 | ("user2", "password2"), 17 | ] 18 | 19 | # Buildbot allows for requests to be submitted using the 'buildbot try' 20 | # command. This interface is mainly useful for injecting builds when 21 | # working on the buildbot infrastructure itself. The vast majority of 22 | # build requested should be submitted via pull requests. 23 | try_userpass = [ 24 | ("user1", "password1"), 25 | ("user2", "password2"), 26 | ] 27 | 28 | # Amazon ec2 credentials which are needed to create latent build slaves. 29 | ec2_default_access = "access" 30 | ec2_default_secret = "secret" 31 | 32 | # An Amazon ec2 key pair which should be installed for the default login 33 | # to enable access to the instance. This is optional. 34 | ec2_default_keypair_name = "buildbot" 35 | 36 | # Github credentials, these allow for an increased number of API calls 37 | # to be made without throttling and for status updates to be pushed for 38 | # pull requests. 39 | github_secret = "secret" 40 | github_token = "token" 41 | -------------------------------------------------------------------------------- /master/patches/0001-Treat-RETRY-as-FAILURE.patch: -------------------------------------------------------------------------------- 1 | From a09450c1c5fb08acf1cf8944ed84af3fd0ac6785 Mon Sep 17 00:00:00 2001 2 | From: Brian Behlendorf 3 | Date: Thu, 3 Dec 2015 10:27:24 -0800 4 | Subject: [PATCH 01/18] Treat RETRY as FAILURE 5 | 6 | When testing kernel modules with buildbot it's entirely possible that 7 | the system may panic resulting in a disconnect. For the purposes of 8 | automated testing we want to consider this a FAILURE and immediately 9 | move on to testing the next change. 10 | 11 | This change should not be pushed upstream until the behavior is made 12 | more flexible. For example, this could be done using a 'flunkOnRetry' 13 | build step option. 
14 | 15 | Signed-off-by: Brian Behlendorf 16 | --- 17 | master/buildbot/process/build.py | 5 ++++- 18 | 1 file changed, 4 insertions(+), 1 deletion(-) 19 | 20 | diff --git a/master/buildbot/process/build.py b/master/buildbot/process/build.py 21 | index fc720d924..e0e3ffacd 100644 22 | --- a/master/buildbot/process/build.py 23 | +++ b/master/buildbot/process/build.py 24 | @@ -454,7 +454,10 @@ class Build(properties.PropertiesMixin): 25 | possible_overall_result = WARNINGS 26 | if step.flunkOnWarnings: 27 | possible_overall_result = FAILURE 28 | - elif result in (EXCEPTION, RETRY): 29 | + elif result == RETRY: 30 | + possible_overall_result = FAILURE 31 | + terminate = True 32 | + elif result == EXCEPTION: 33 | terminate = True 34 | 35 | # if we skipped this step, then don't adjust the build status 36 | -- 37 | 2.14.3 38 | 39 | -------------------------------------------------------------------------------- /master/patches/0005-Soft-disconnect-when-build_wait_timeout-0.patch: -------------------------------------------------------------------------------- 1 | From 85132f84526add0248459ad78ea1136f4de2bd51 Mon Sep 17 00:00:00 2001 2 | From: Brian Behlendorf 3 | Date: Thu, 3 Dec 2015 12:47:08 -0800 4 | Subject: [PATCH 05/18] Soft disconnect when 'build_wait_timeout==0' 5 | 6 | Insubstantiating the latent build slave without cleanly disconnecting 7 | it can result in a ping failure if a new latent slave is immediately 8 | instantiated. To prevent this the slave should disconnect cleanly 9 | before being insubstantiated. 10 | 11 | [Broker,2,52.25.93.92] ping finished: failure 12 | [Broker,2,52.25.93.92] slave ping failed; re-queueing the request 13 | 14 | This is easily reproducible by running back to back builds using EC2. 
15 | 16 | Signed-off-by: Brian Behlendorf 17 | --- 18 | master/buildbot/buildslave/base.py | 2 +- 19 | 1 file changed, 1 insertion(+), 1 deletion(-) 20 | 21 | diff --git a/master/buildbot/buildslave/base.py b/master/buildbot/buildslave/base.py 22 | index 07c077b35..c28cf5043 100644 23 | --- a/master/buildbot/buildslave/base.py 24 | +++ b/master/buildbot/buildslave/base.py 25 | @@ -966,7 +966,7 @@ class AbstractLatentBuildSlave(AbstractBuildSlave): 26 | self.building.remove(sb.builder_name) 27 | if not self.building: 28 | if self.build_wait_timeout == 0: 29 | - d = self.insubstantiate() 30 | + d = self._soft_disconnect() 31 | # try starting builds for this slave after insubstantiating; 32 | # this will cause the slave to re-substantiate immediately if 33 | # there are pending build requests. 34 | -- 35 | 2.14.3 36 | 37 | -------------------------------------------------------------------------------- /master/patches/0009-Move-spot-price-historical-averaging-to-its-own-func.patch: -------------------------------------------------------------------------------- 1 | From 14b9ecba56ce3f673aca2b85f006292978261a76 Mon Sep 17 00:00:00 2001 2 | From: "Christopher J. 
Morrone" 3 | Date: Thu, 9 Jun 2016 15:03:07 -0700 4 | Subject: [PATCH 09/18] Move spot price historical averaging to its own 5 | function 6 | 7 | --- 8 | master/buildbot/buildslave/ec2.py | 6 +++++- 9 | 1 file changed, 5 insertions(+), 1 deletion(-) 10 | 11 | diff --git a/master/buildbot/buildslave/ec2.py b/master/buildbot/buildslave/ec2.py 12 | index d787f194f..3c77804f5 100644 13 | --- a/master/buildbot/buildslave/ec2.py 14 | +++ b/master/buildbot/buildslave/ec2.py 15 | @@ -401,7 +401,7 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 16 | (self.__class__.__name__, self.slavename, 17 | instance.id, goal, duration // 60, duration % 60)) 18 | 19 | - def _request_spot_instance(self): 20 | + def _bid_price_from_spot_price_history(self): 21 | timestamp_yesterday = time.gmtime(int(time.time() - 86400)) 22 | spot_history_starttime = time.strftime( 23 | '%Y-%m-%dT%H:%M:%SZ', timestamp_yesterday) 24 | @@ -419,6 +419,10 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 25 | target_price = 0.02 26 | else: 27 | target_price = (price_sum / price_count) * self.price_multiplier 28 | + return target_price 29 | + 30 | + def _request_spot_instance(self): 31 | + target_price = self._bid_price_from_spot_price_history() 32 | if target_price > self.max_spot_price: 33 | log.msg('%s %s calculated spot price %0.2f exceeds ' 34 | 'configured maximum of %0.2f' % 35 | -- 36 | 2.14.3 37 | 38 | -------------------------------------------------------------------------------- /master/patches/0017-Allow-control-over-environment-logging-MasterShellCo.patch: -------------------------------------------------------------------------------- 1 | From 1ecdf1f980bd02bfda20ee127742164608fcd2a2 Mon Sep 17 00:00:00 2001 2 | From: Brian Behlendorf 3 | Date: Tue, 17 Apr 2018 13:14:15 -0700 4 | Subject: [PATCH 17/18] Allow control over environment logging 5 | MasterShellCommand 6 | 7 | Signed-off-by: Giuseppe Di Natale 8 | --- 9 | master/buildbot/steps/master.py | 5 ++++- 10 | 1 file changed, 4 
insertions(+), 1 deletion(-) 11 | 12 | diff --git a/master/buildbot/steps/master.py b/master/buildbot/steps/master.py 13 | index d2be1364e..2d980f155 100644 14 | --- a/master/buildbot/steps/master.py 15 | +++ b/master/buildbot/steps/master.py 16 | @@ -51,6 +51,7 @@ class MasterShellCommand(BuildStep): 17 | 18 | def __init__(self, command, 19 | env=None, path=None, usePTY=0, interruptSignal="KILL", 20 | + logEnviron=True, 21 | **kwargs): 22 | BuildStep.__init__(self, **kwargs) 23 | 24 | @@ -59,6 +60,7 @@ class MasterShellCommand(BuildStep): 25 | self.path = path 26 | self.usePTY = usePTY 27 | self.interruptSignal = interruptSignal 28 | + self.logEnviron = logEnviron 29 | 30 | class LocalPP(ProcessProtocol): 31 | 32 | @@ -144,7 +146,8 @@ class MasterShellCommand(BuildStep): 33 | "lists; key '%s' is incorrect" % (key,)) 34 | newenv[key] = p.sub(subst, env[key]) 35 | env = newenv 36 | - stdio_log.addHeader(" env: %r\n" % (env,)) 37 | + if self.logEnviron: 38 | + stdio_log.addHeader(" env: %r\n" % (env,)) 39 | 40 | # TODO add a timeout? 41 | self.process = reactor.spawnProcess(self.LocalPP(self), argv[0], argv, 42 | -- 43 | 2.14.3 44 | 45 | -------------------------------------------------------------------------------- /master/patches/0003-Set-tags-for-latent-ec2-buildslaves.patch: -------------------------------------------------------------------------------- 1 | From e6a80f20b7917dafe0e11fa43a575a2d3bb0f953 Mon Sep 17 00:00:00 2001 2 | From: Brian Behlendorf 3 | Date: Thu, 3 Dec 2015 10:37:06 -0800 4 | Subject: [PATCH 03/18] Set tags for latent ec2 buildslaves 5 | 6 | EC2LatentBuildSlave does not respect the tags argument to __init__() 7 | when launching spot instances. Apply the tags in _wait_for_instance() 8 | identical to what is done for attaching volumes. 
9 | 10 | http://trac.buildbot.net/ticket/2903 11 | --- 12 | master/buildbot/buildslave/ec2.py | 4 ++-- 13 | 1 file changed, 2 insertions(+), 2 deletions(-) 14 | 15 | diff --git a/master/buildbot/buildslave/ec2.py b/master/buildbot/buildslave/ec2.py 16 | index 191999ea6..e16b6372b 100644 17 | --- a/master/buildbot/buildslave/ec2.py 18 | +++ b/master/buildbot/buildslave/ec2.py 19 | @@ -274,8 +274,6 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 20 | instance_id, image_id, start_time = self._wait_for_instance( 21 | reservation) 22 | if None not in [instance_id, image_id, start_time]: 23 | - if len(self.tags) > 0: 24 | - self.conn.create_tags(instance_id, self.tags) 25 | return [instance_id, image_id, start_time] 26 | else: 27 | log.msg('%s %s failed to start instance %s (%s)' % 28 | @@ -398,6 +396,8 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 29 | minutes // 60, minutes % 60, seconds) 30 | if len(self.volumes) > 0: 31 | self._attach_volumes() 32 | + if len(self.tags) > 0: 33 | + self.conn.create_tags(self.instance.id, self.tags) 34 | return self.instance.id, image.id, start_time 35 | else: 36 | return None, None, None 37 | -- 38 | 2.14.3 39 | 40 | -------------------------------------------------------------------------------- /scripts/bb-cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Check for a local cached configuration. 4 | if test -f /etc/buildslave; then 5 | . 
/etc/buildslave 6 | else 7 | echo "Missing configuration /etc/buildslave" 8 | exit 1 9 | fi 10 | 11 | BUILT_PACKAGE=${BUILT_PACKAGE:-""} 12 | 13 | set -x 14 | 15 | case "$BB_NAME" in 16 | Amazon*) 17 | if test "$BUILT_PACKAGE" = "zfs"; then 18 | sudo -E yum -y remove '(zfs-dkms.*|kmod-zfs.*)' \ 19 | libnvpair1 libuutil1 libzfs2 libzpool2 libzfs2-devel \ 20 | zfs zfs-debuginfo zfs-kmod-debuginfo zfs-dracut zfs-test 21 | fi 22 | ;; 23 | 24 | CentOS*) 25 | if test "$BUILT_PACKAGE" = "zfs"; then 26 | sudo -E yum -y remove '(zfs-dkms.*|kmod-zfs.*)' \ 27 | libnvpair1 libuutil1 libzfs2 libzpool2 libzfs2-devel \ 28 | zfs zfs-debuginfo zfs-kmod-debuginfo zfs-dracut zfs-test 29 | fi 30 | ;; 31 | 32 | Debian*) 33 | if test "$BUILT_PACKAGE" = "zfs"; then 34 | sudo -E apt-get --yes purge '(zfs-dkms.*|kmod-zfs.*)' \ 35 | libnvpair1 libuutil1 libzfs2 libzpool2 libzfs2-devel \ 36 | zfs zfs-initramfs zfs-dracut zfs-test 37 | fi 38 | ;; 39 | 40 | Fedora*) 41 | if test "$BUILT_PACKAGE" = "zfs"; then 42 | sudo -E dnf -y remove '(zfs-dkms.*|kmod-zfs.*)' \ 43 | libnvpair1 libuutil1 libzfs2 libzpool2 libzfs2-devel \ 44 | zfs zfs-debuginfo zfs-kmod-debuginfo zfs-dracut zfs-test 45 | fi 46 | ;; 47 | 48 | Ubuntu*) 49 | if test "$BUILT_PACKAGE" = "zfs"; then 50 | sudo -E apt-get --yes purge '(zfs-dkms.*|kmod-zfs.*)' \ 51 | libnvpair1 libuutil1 libzfs2 libzpool2 libzfs2-devel \ 52 | zfs zfs-initramfs zfs-dracut zfs-test 53 | fi 54 | ;; 55 | 56 | *) 57 | echo "$BB_NAME unknown platform" 58 | ;; 59 | esac 60 | 61 | exit 0 62 | -------------------------------------------------------------------------------- /master/patches/0018-Better-handling-for-instance-termination.patch: -------------------------------------------------------------------------------- 1 | From 28598e70a562feb536e19c98af18269c8cc92786 Mon Sep 17 00:00:00 2001 2 | From: Brian Behlendorf 3 | Date: Tue, 17 Apr 2018 13:16:16 -0700 4 | Subject: [PATCH 18/18] Better handling for instance termination 5 | 6 | Signed-off-by: Brian 
Behlendorf 7 | --- 8 | master/buildbot/buildslave/ec2.py | 12 ++++++++++-- 9 | 1 file changed, 10 insertions(+), 2 deletions(-) 10 | 11 | diff --git a/master/buildbot/buildslave/ec2.py b/master/buildbot/buildslave/ec2.py 12 | index bbf5d4b88..8a576ee8a 100644 13 | --- a/master/buildbot/buildslave/ec2.py 14 | +++ b/master/buildbot/buildslave/ec2.py 15 | @@ -407,14 +407,22 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 16 | try: 17 | instance.update() 18 | except boto.exception.EC2ResponseError, e: 19 | - log.msg('%s %s cannot find instance %s to terminate' % 20 | + log.msg('%s %s cannot find instance %s to update for termination' % 21 | (self.__class__.__name__, self.slavename, instance.id)) 22 | if e.error_code == 'InvalidInstanceID.NotFound': 23 | return 24 | else: 25 | raise 26 | if instance.state not in (SHUTTINGDOWN, TERMINATED): 27 | - instance.terminate() 28 | + try: 29 | + instance.terminate() 30 | + except boto.exception.EC2ResponseError, e: 31 | + log.msg('%s %s cannot find instance %s to terminate' % 32 | + (self.__class__.__name__, self.slavename, instance.id)) 33 | + if e.error_code == 'InvalidInstanceID.NotFound': 34 | + return 35 | + else: 36 | + raise 37 | log.msg('%s %s terminating instance %s' % 38 | (self.__class__.__name__, self.slavename, instance.id)) 39 | duration = 0 40 | -- 41 | 2.14.3 42 | 43 | -------------------------------------------------------------------------------- /master/patches/0012-Add-debug-logging.patch: -------------------------------------------------------------------------------- 1 | From 7ff371cb809d122b57bf1d4c108ebbe70d492862 Mon Sep 17 00:00:00 2001 2 | From: Brian Behlendorf 3 | Date: Tue, 19 Jul 2016 15:55:52 -0700 4 | Subject: [PATCH 12/18] Add debug logging 5 | 6 | --- 7 | master/buildbot/buildslave/base.py | 3 +++ 8 | master/buildbot/process/builder.py | 5 ++++- 9 | 2 files changed, 7 insertions(+), 1 deletion(-) 10 | 11 | diff --git a/master/buildbot/buildslave/base.py 
b/master/buildbot/buildslave/base.py 12 | index c28cf5043..87876f7b3 100644 13 | --- a/master/buildbot/buildslave/base.py 14 | +++ b/master/buildbot/buildslave/base.py 15 | @@ -873,8 +873,11 @@ class AbstractLatentBuildSlave(AbstractBuildSlave): 16 | self.substantiation_deferred = defer.Deferred() 17 | self.substantiation_build = build 18 | if self.slave is None: 19 | + log.msg("Slave %s is not substantiated" % (self.slavename)) 20 | d = self._substantiate(build) # start up instance 21 | d.addErrback(log.err, "while substantiating") 22 | + else: 23 | + log.msg("Slave %s is substantiated, waiting for detach" % (self.slavename)) 24 | # else: we're waiting for an old one to detach. the _substantiate 25 | # will be done in ``detached`` below. 26 | return self.substantiation_deferred 27 | diff --git a/master/buildbot/process/builder.py b/master/buildbot/process/builder.py 28 | index 112b5c105..ff8480c72 100644 29 | --- a/master/buildbot/process/builder.py 30 | +++ b/master/buildbot/process/builder.py 31 | @@ -322,7 +322,10 @@ class Builder(config.ReconfigurableServiceMixin, 32 | 33 | # set up locks 34 | build.setLocks(self.config.locks) 35 | - cleanups.append(lambda: slavebuilder.slave.releaseLocks()) 36 | + def lockCleanup(): 37 | + if slavebuilder.slave is not None: 38 | + slavebuilder.slave.releaseLocks() 39 | + cleanups.append(lockCleanup) 40 | 41 | if len(self.config.env) > 0: 42 | build.setSlaveEnvironment(self.config.env) 43 | -- 44 | 2.14.3 45 | 46 | -------------------------------------------------------------------------------- /master/patches/0008-Allow-slaves-to-substantiate-in-parallel.patch: -------------------------------------------------------------------------------- 1 | From ecdac0b8ecdf4695d7118366ef1186508da10c72 Mon Sep 17 00:00:00 2001 2 | From: "Christopher J. 
Morrone" 3 | Date: Thu, 21 Apr 2016 02:39:36 +0000 4 | Subject: [PATCH 08/18] Allow slaves to substantiate in parallel 5 | 6 | BuildRequestDistributor's _maybeStartBuildsOnBuilder() method is 7 | decorated with @defer.inlineCallbacks. This means that when it 8 | uses "yield bldr.maybeStartBuild", the entire slave substantiation 9 | process is sequentialized at that point. 10 | 11 | To enable parallel slave substantiation, we need to move the error 12 | handling code into a callback function that can be added to the 13 | Deferred that is returned by bldr.maybeStartBuild(). 14 | 15 | Fixes #4 16 | 17 | Signed-off-by: Christopher J. Morrone 18 | --- 19 | master/buildbot/process/buildrequestdistributor.py | 17 +++++++++-------- 20 | 1 file changed, 9 insertions(+), 8 deletions(-) 21 | 22 | diff --git a/master/buildbot/process/buildrequestdistributor.py b/master/buildbot/process/buildrequestdistributor.py 23 | index 2848510ab..a6d88cee0 100644 24 | --- a/master/buildbot/process/buildrequestdistributor.py 25 | +++ b/master/buildbot/process/buildrequestdistributor.py 26 | @@ -531,14 +531,15 @@ class BuildRequestDistributor(service.Service): 27 | bc = self.createBuildChooser(bldr, self.master) 28 | continue 29 | 30 | - buildStarted = yield bldr.maybeStartBuild(slave, breqs) 31 | - 32 | - if not buildStarted: 33 | - yield self.master.db.buildrequests.unclaimBuildRequests(brids) 34 | - 35 | - # and try starting builds again. If we still have a working slave, 36 | - # then this may re-claim the same buildrequests 37 | - self.botmaster.maybeStartBuildsForBuilder(self.name) 38 | + d = bldr.maybeStartBuild(slave, breqs) 39 | + @defer.inlineCallbacks 40 | + def checkBuildStart(buildStarted, slavename, buildername, brids): 41 | + if not buildStarted: 42 | + yield self.master.db.buildrequests.unclaimBuildRequests(brids) 43 | + # and try starting builds again. 
If we still have a working slave, 44 | + # then this may re-claim the same buildrequests 45 | + self.botmaster.maybeStartBuildsForBuilder(buildername) 46 | + d.addCallback(checkBuildStart, slave.slave.slavename, bldr.name, brids) 47 | 48 | def createBuildChooser(self, bldr, master): 49 | # just instantiate the build chooser requested 50 | -- 51 | 2.14.3 52 | 53 | -------------------------------------------------------------------------------- /master/patches/0014-Remove-default-values-for-keypair-and-security-names.patch: -------------------------------------------------------------------------------- 1 | From b7051bc715c0f8943c281418b22625f5d622f5cf Mon Sep 17 00:00:00 2001 2 | From: Neal Gompa 3 | Date: Fri, 30 Mar 2018 17:01:14 -0400 4 | Subject: [PATCH 14/18] Remove default values for keypair and security names 5 | for EC2LatentBuildSlave 6 | 7 | This is a simpler/trivial adaptation of 68a9267d5fff06e0ff7c6ea8a82ab66fcf6a359c in buildbot 0.9.x, 8 | originally authored by Vladimir Rutsky . 9 | 10 | There are no test adaptations for this, as ultimate behavior has not yet changed. 
11 | 12 | Signed-off-by: Neal Gompa 13 | --- 14 | master/buildbot/buildslave/ec2.py | 11 ++++++++--- 15 | 1 file changed, 8 insertions(+), 3 deletions(-) 16 | 17 | diff --git a/master/buildbot/buildslave/ec2.py b/master/buildbot/buildslave/ec2.py 18 | index 9f24df51d..780c4ef05 100644 19 | --- a/master/buildbot/buildslave/ec2.py 20 | +++ b/master/buildbot/buildslave/ec2.py 21 | @@ -61,8 +61,8 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 22 | valid_ami_owners=None, valid_ami_location_regex=None, 23 | elastic_ip=None, identifier=None, secret_identifier=None, 24 | aws_id_file_path=None, user_data=None, region=None, 25 | - keypair_name='latent_buildbot_slave', 26 | - security_name='latent_buildbot_slave', 27 | + keypair_name=None, 28 | + security_name=None, 29 | max_builds=None, notify_on_missing=[], missing_timeout=60 * 20, 30 | build_wait_timeout=60 * 10, properties={}, locks=None, 31 | spot_instance=False, max_spot_price=1.6, volumes=[], 32 | @@ -95,10 +95,15 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 33 | else: 34 | # verify that regex will compile 35 | re.compile(valid_ami_location_regex) 36 | + if keypair_name is None: 37 | + keypair_name = 'latent_buildbot_slave' 38 | + log.msg('Using default keypair name, since none is set') 39 | + if security_name is None: 40 | + security_name = 'latent_buildbot_slave' 41 | + log.msg('Using default keypair name, since none is set') 42 | if spot_instance and price_multiplier is None and max_spot_price is None: 43 | raise ValueError('You must provide either one, or both, of ' 44 | 'price_multiplier or max_spot_price') 45 | - 46 | self.valid_ami_owners = valid_ami_owners 47 | self.valid_ami_location_regex = valid_ami_location_regex 48 | self.instance_type = instance_type 49 | -- 50 | 2.14.3 51 | 52 | -------------------------------------------------------------------------------- /scripts/bb-test-ztest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 
| if test -f /etc/buildslave; then 4 | . /etc/buildslave 5 | fi 6 | 7 | if test -f ./TEST; then 8 | . ./TEST 9 | else 10 | echo "Missing $PWD/TEST configuration file" 11 | exit 1 12 | fi 13 | 14 | TEST_ZTEST_SKIP=${TEST_ZTEST_SKIP:-"Yes"} 15 | if echo "$TEST_ZTEST_SKIP" | grep -Eiq "^yes$|^on$|^true$|^1$"; then 16 | echo "Skipping disabled test" 17 | exit 3 18 | fi 19 | 20 | cleanup() 21 | { 22 | # Preserve the results directory for future analysis, as: 23 | # //ztest/ztest-.tar.xz 24 | if test -n "$UPLOAD_DIR"; then 25 | BUILDER="$(echo $BB_NAME | cut -f1-3 -d'-')" 26 | mkdir -p "$UPLOAD_DIR/$BUILDER/ztest" 27 | 28 | # Optionally remove the zloop-run directory, normally this contains 29 | # logs and vdev from successful runs and thus is removed by default. 30 | if echo "$TEST_ZTEST_KEEP_RUN_DIR" | grep -Eiq "^no$|^off$|^false$|^0$"; then 31 | rm -Rf "$TEST_ZTEST_DIR/zloop-run" 32 | fi 33 | 34 | # Optionally remove the core directory, this contains logs and vdevs 35 | # from failed run and is kept by default. 36 | if echo "$TEST_ZTEST_KEEP_CORE_DIR"|grep -Eiq "^no$|^off$|^false$|^0$"; then 37 | sudo -E rm -Rf "$TEST_ZTEST_DIR/core" 38 | fi 39 | 40 | # Convenience symlinks will no longer reference the correct locations 41 | # and are removed so they're not included in the archive. 42 | rm -f $TEST_ZTEST_DIR/ztest.core.* 43 | sudo -E mv ztest.* "$TEST_ZTEST_DIR" 44 | 45 | sudo -E tar -C "$(dirname $TEST_ZTEST_DIR)" -cJ \ 46 | -f "$UPLOAD_DIR/$BUILDER/ztest/$(basename $TEST_ZTEST_DIR).tar.xz" \ 47 | "$(basename $TEST_ZTEST_DIR)" 48 | fi 49 | 50 | sudo -E $ZFS_SH -u 51 | } 52 | trap cleanup EXIT TERM 53 | 54 | DATE="$(date +%Y%m%dT%H%M%S)" 55 | set -x 56 | 57 | TEST_ZTEST_OPTIONS=${TEST_ZTEST_OPTIONS:-"-l -m3"} 58 | TEST_ZTEST_TIMEOUT=${TEST_ZTEST_TIMEOUT:-300} 59 | TEST_ZTEST_DIR=${TEST_ZTEST_DIR:-"/mnt/ztest-${DATE}"} 60 | TEST_ZTEST_KEEP_RUN_DIR="No" 61 | TEST_ZTEST_KEEP_CORE_DIR="Yes" 62 | 63 | set +x 64 | 65 | case $(uname) in 66 | FreeBSD) 67 | if ! 
kldstat -qn openzfs; then 68 | sudo -E $ZFS_SH 69 | fi 70 | sudo -E sysctl kern.threads.max_threads_per_proc=5000 >/dev/null 71 | ;; 72 | Linux) 73 | if ! test -e /sys/module/zfs; then 74 | sudo -E $ZFS_SH 75 | fi 76 | ;; 77 | *) 78 | sudo -E $ZFS_SH 79 | ;; 80 | esac 81 | 82 | sudo -E mkdir -p "$TEST_ZTEST_DIR" 83 | sudo -E $ZLOOP_SH $TEST_ZTEST_OPTIONS \ 84 | -t $TEST_ZTEST_TIMEOUT \ 85 | -f $TEST_ZTEST_DIR \ 86 | -c $TEST_ZTEST_DIR/core 87 | RESULT=$? 88 | 89 | sudo -E chown -R $USER "$TEST_ZTEST_DIR" 90 | 91 | if test $RESULT != 0; then 92 | echo "Exited ztest with error $RESULT" 93 | exit 1 94 | fi 95 | 96 | exit $RESULT 97 | -------------------------------------------------------------------------------- /scripts/bb-build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if test -f /etc/buildslave; then 4 | . /etc/buildslave 5 | fi 6 | 7 | case "$BB_NAME" in 8 | FreeBSD*) 9 | MAKE="gmake WITH_DEBUG=true" 10 | if sysctl -n kern.conftxt | grep -Fqx $'options\tINVARIANTS'; then 11 | MAKE="$MAKE WITH_INVARIANTS=true" 12 | fi 13 | NCPU=$(sysctl -n hw.ncpu) 14 | ;; 15 | Amazon*|CentOS*|Debian*|Fedora*|SUSE*|Ubuntu*) 16 | MAKE=make 17 | NCPU=$(nproc) 18 | ;; 19 | *) 20 | echo "Unknown BB_NAME, assuming Linux" 21 | MAKE=make 22 | NCPU=$(nproc) 23 | ;; 24 | esac 25 | export MAKE 26 | 27 | LINUX_OPTIONS=${LINUX_OPTIONS:-""} 28 | CONFIG_OPTIONS=${CONFIG_OPTIONS:-""} 29 | MAKE_OPTIONS=${MAKE_OPTIONS:-"-j$NCPU"} 30 | MAKE_TARGETS_KMOD=${MAKE_TARGETS_KMOD:-"pkg-kmod pkg-utils"} 31 | MAKE_TARGETS_DKMS=${MAKE_TARGETS_DKMS:-"pkg-dkms pkg-utils"} 32 | INSTALL_METHOD=${INSTALL_METHOD:-"none"} 33 | 34 | CONFIG_LOG="configure.log" 35 | MAKE_LOG="make.log" 36 | INSTALL_LOG="install.log" 37 | 38 | # Expect a custom Linux build in the ../linux/ directory. 
39 | if [ "$LINUX_CUSTOM" = "yes" ]; then 40 | LINUX_DIR=$(readlink -f ../linux) 41 | LINUX_OPTIONS="$LINUX_OPTIONS --with-linux=$LINUX_DIR " \ 42 | "--with-linux-obj=$LINUX_DIR" 43 | fi 44 | 45 | set -x 46 | 47 | ./autogen.sh >>$CONFIG_LOG 2>&1 || exit 1 48 | 49 | case "$INSTALL_METHOD" in 50 | packages|kmod|pkg-kmod|dkms|dkms-kmod) 51 | 52 | ./configure $CONFIG_OPTIONS $LINUX_OPTIONS >>$CONFIG_LOG 2>&1 || exit 1 53 | 54 | case "$INSTALL_METHOD" in 55 | packages|kmod|pkg-kmod) 56 | $MAKE $MAKE_TARGETS_KMOD >>$MAKE_LOG 2>&1 || exit 1 57 | ;; 58 | dkms|pkg-dkms) 59 | $MAKE $MAKE_TARGETS_DKMS >>$MAKE_LOG 2>&1 || exit 1 60 | ;; 61 | esac 62 | 63 | sudo -E rm *.src.rpm 64 | 65 | # Preserve TEST and PERF packages which may be needed to investigate 66 | # test failures. BUILD packages are discarded. 67 | if test "$BB_MODE" = "TEST" -o "$BB_MODE" = "PERF"; then 68 | if test -n "$UPLOAD_DIR"; then 69 | BUILDER="$(echo $BB_NAME | cut -f1-3 -d'-')" 70 | mkdir -p "$UPLOAD_DIR/$BUILDER/packages" 71 | cp *.deb *.rpm $UPLOAD_DIR/$BUILDER/packages 72 | fi 73 | fi 74 | 75 | case "$BB_NAME" in 76 | Amazon*) 77 | sudo -E yum -y localinstall *.rpm >$INSTALL_LOG 2>&1 || exit 1 78 | ;; 79 | CentOS*) 80 | sudo -E yum -y localinstall *.rpm >$INSTALL_LOG 2>&1 || exit 1 81 | ;; 82 | Debian*) 83 | sudo -E apt-get -y install ./*.deb >$INSTALL_LOG 2>&1 || exit 1 84 | ;; 85 | Fedora*) 86 | sudo -E dnf -y localinstall *.rpm >$INSTALL_LOG 2>&1 || exit 1 87 | ;; 88 | Ubuntu-14.04*) 89 | for file in *.deb; do 90 | sudo -E gdebi -n $file >$INSTALL_LOG 2>&1 || exit 1 91 | done 92 | ;; 93 | Ubuntu*) 94 | sudo -E apt-get -y install ./*.deb >$INSTALL_LOG 2>&1 || exit 1 95 | ;; 96 | *) 97 | echo "$BB_NAME unknown platform" >$INSTALL_LOG 2>&1 98 | ;; 99 | esac 100 | ;; 101 | in-tree) 102 | ./configure $CONFIG_OPTIONS $LINUX_OPTIONS >>$CONFIG_LOG 2>&1 || exit 1 103 | $MAKE $MAKE_OPTIONS >>$MAKE_LOG 2>&1 || exit 1 104 | ./scripts/zfs-tests.sh -cv >>$INSTALL_LOG 2>&1 105 | sudo -E scripts/zfs-helpers.sh -iv 
>>$INSTALL_LOG 2>&1 106 | ;; 107 | system) 108 | ./configure $CONFIG_OPTIONS >>$CONFIG_LOG 2>&1 || exit 1 109 | $MAKE $MAKE_OPTIONS >>$MAKE_LOG 2>&1 || exit 1 110 | sudo -E $MAKE install >>$INSTALL_LOG 2>&1 || exit 1 111 | ;; 112 | none) 113 | ./configure $CONFIG_OPTIONS $LINUX_OPTIONS >>$CONFIG_LOG 2>&1 || exit 1 114 | $MAKE $MAKE_OPTIONS >>$MAKE_LOG 2>&1 || exit 1 115 | ;; 116 | *) 117 | echo "Unknown INSTALL_METHOD: $INSTALL_METHOD" 118 | exit 1 119 | ;; 120 | esac 121 | 122 | exit 0 123 | -------------------------------------------------------------------------------- /master/patches/0006-Force-root-volumes-to-be-deleted-upon-termination-on.patch: -------------------------------------------------------------------------------- 1 | From 21fd70b3a905797542a36299da5c699a20c906bb Mon Sep 17 00:00:00 2001 2 | From: Brian Behlendorf 3 | Date: Thu, 25 Feb 2016 09:16:47 -0800 4 | Subject: [PATCH 06/18] Force root volumes to be deleted upon termination on 5 | EC2 volumes. 6 | 7 | This patch is for buildbot. It introduces the ability to set all root 8 | volumes of an instance to be deleted upon termination. This change 9 | does not impact additionally attached volumes. 
10 | 11 | Signed-off-by: Giuseppe Di Natale 12 | Signed-off-by: Brian Behlendorf 13 | --- 14 | master/buildbot/buildslave/ec2.py | 30 +++++++++++++++++++++++++++--- 15 | 1 file changed, 27 insertions(+), 3 deletions(-) 16 | 17 | diff --git a/master/buildbot/buildslave/ec2.py b/master/buildbot/buildslave/ec2.py 18 | index ae40cf181..00ead8a5d 100644 19 | --- a/master/buildbot/buildslave/ec2.py 20 | +++ b/master/buildbot/buildslave/ec2.py 21 | @@ -60,7 +60,8 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 22 | max_builds=None, notify_on_missing=[], missing_timeout=60 * 20, 23 | build_wait_timeout=60 * 10, properties={}, locks=None, 24 | spot_instance=False, max_spot_price=1.6, volumes=[], 25 | - placement=None, price_multiplier=1.2, tags={}): 26 | + placement=None, price_multiplier=1.2, tags={}, 27 | + delete_vol_term=True): 28 | 29 | AbstractLatentBuildSlave.__init__( 30 | self, name, password, max_builds, notify_on_missing, 31 | @@ -98,6 +99,8 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 32 | self.max_spot_price = max_spot_price 33 | self.volumes = volumes 34 | self.price_multiplier = price_multiplier 35 | + self.delete_vol_term = delete_vol_term 36 | + 37 | if None not in [placement, region]: 38 | self.placement = '%s%s' % (region, placement) 39 | else: 40 | @@ -294,12 +297,34 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 41 | return threads.deferToThread( 42 | self._stop_instance, instance, fast) 43 | 44 | + def _handle_delete_on_term(self): 45 | + if self.delete_vol_term is False: 46 | + return 47 | + 48 | + block_map = self.conn.get_instance_attribute(self.instance.id, attribute='blockDeviceMapping') 49 | + log.msg("%s: %s" % (self.instance.id, block_map)) 50 | + 51 | + del_on_term = [] 52 | + for devname in block_map['blockDeviceMapping']: 53 | + del_on_term.append('%s=true' % devname) 54 | + 55 | + if del_on_term: 56 | + log.msg(str(del_on_term)) 57 | + if not self.conn.modify_instance_attribute(self.instance.id, 
'blockDeviceMapping', del_on_term): 58 | + log.msg("Failed to set deletion on termination") 59 | + 60 | def _attach_volumes(self): 61 | for volume_id, device_node in self.volumes: 62 | self.conn.attach_volume(volume_id, self.instance.id, device_node) 63 | log.msg('Attaching EBS volume %s to %s.' % 64 | (volume_id, device_node)) 65 | 66 | + def _handle_volumes(self): 67 | + self._handle_delete_on_term() 68 | + 69 | + if len(self.volumes) > 0: 70 | + self._attach_volumes() 71 | + 72 | def _stop_instance(self, instance, fast): 73 | if self.elastic_ip is not None: 74 | self.conn.disassociate_address(self.elastic_ip.public_ip) 75 | @@ -412,8 +437,7 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 76 | self.instance.use_ip(self.elastic_ip) 77 | start_time = '%02d:%02d:%02d' % ( 78 | minutes // 60, minutes % 60, seconds) 79 | - if len(self.volumes) > 0: 80 | - self._attach_volumes() 81 | + self._handle_volumes() 82 | if len(self.tags) > 0: 83 | self.conn.create_tags(self.instance.id, self.tags) 84 | return self.instance.id, image.id, start_time 85 | -- 86 | 2.14.3 87 | 88 | -------------------------------------------------------------------------------- /scripts/ZTS-exceptions.md: -------------------------------------------------------------------------------- 1 | ZTS test name | status/ZFS issue | comment 2 | ---|---|--- 3 | KNOWN-COMMON |- | 4 | casenorm/mixed_none_lookup_ci |- | 5 | casenorm/mixed_formd_lookup_ci |- | 6 | cli_root/zfs_unshare/zfs_unshare_002_pos |- | 7 | cli_root/zfs_unshare/zfs_unshare_006_pos |- | 8 | cli_user/misc/zfs_share_001_neg |- | 9 | cli_user/misc/zfs_unshare_001_neg |- | 10 | refreserv/refreserv_004_pos |- | 11 | rsend/rsend_008_pos |- | 12 | vdev_zaps/vdev_zaps_007_pos |- | 13 | 14 | KNOWN-FREEBSD |- | 15 | cli_root/zpool_wait/zpool_wait_trim_basic |- | 16 | cli_root/zpool_wait/zpool_wait_trim_cancel |- | 17 | cli_root/zpool_wait/zpool_wait_trim_flag |- | 18 | link_count/link_count_001 |- | 19 | 20 | KNOWN-LINUX |- | 21 | 
casenorm/mixed_formd_lookup |- | 22 | casenorm/mixed_formd_delete |- | 23 | casenorm/sensitive_formd_lookup |- | 24 | casenorm/sensitive_formd_delete |- | 25 | removal/removal_with_zdb |- | 26 | 27 | MAYBE-COMMON |- | 28 | cli_root/zdb/zdb_006_pos |- | 29 | cli_root/zfs_get/zfs_get_004_pos |- | 30 | cli_root/zfs_get/zfs_get_009_pos |- | 31 | cli_root/zfs_snapshot/zfs_snapshot_002_neg |- | 32 | cli_root/zpool_add/zpool_add_004_pos |- | 33 | cli_root/zpool_destroy/zpool_destroy_001_pos |- | 34 | cli_root/zpool_import/import_rewind_device_replaced |- | 35 | cli_root/zpool_import/import_rewind_config_changed |- | 36 | cli_root/zpool_import/zpool_import_missing_003_pos |- | 37 | cli_root/zpool_initialize/zpool_initialize_import_export |- | 38 | cli_root/zpool_upgrade/zpool_upgrade_004_pos |- | 39 | history/history_004_pos |- | 40 | history/history_005_neg |- | 41 | history/history_006_neg |- | 42 | history/history_008_pos |- | 43 | history/history_010_pos |- | 44 | io/mmap |- | 45 | l2arc/persist_l2arc_007_pos |- | 46 | largest_pool/largest_pool_001_pos |- | 47 | mmp/mmp_on_uberblocks |- | 48 | pool_checkpoint/checkpoint_discard_busy |- | 49 | pyzfs/pyzfs_unittest |- | 50 | no_space/enospc_002_pos |- | 51 | redundancy/redundancy_004_neg |- | 52 | redundancy/redundancy_draid_spare3 |- | 53 | reservation/reservation_008_pos |- | 54 | reservation/reservation_018_pos |- | 55 | rsend/rsend_019_pos |- | 56 | rsend/rsend_020_pos |- | 57 | rsend/rsend_021_pos |- | 58 | rsend/rsend_024_pos |- | 59 | rsend/send-c_volume |- | 60 | rsend/send_partial_dataset |- | 61 | snapshot/clone_001_pos |- | 62 | snapshot/snapshot_009_pos |- | 63 | snapshot/snapshot_010_pos |- | 64 | snapused/snapused_004_pos |- | 65 | threadsappend/threadsappend_001_pos |- | 66 | upgrade/upgrade_projectquota_001_pos |- | 67 | vdev_zaps/vdev_zaps_004_pos |- | 68 | zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos |- | 69 | 70 | MAYBE-FREEBSD |- | 71 | cli_root/zfs_copies/zfs_copies_002_pos |- | 72 | 
cli_root/zfs_inherit/zfs_inherit_001_neg |- | 73 | cli_root/zfs_share/zfs_share_011_pos |- | 74 | cli_root/zfs_share/zfs_share_concurrent_shares |- | 75 | cli_root/zpool_import/zpool_import_012_pos |- | 76 | cli_root/zpool_import/zpool_import_features_001_pos |- | 77 | cli_root/zpool_import/zpool_import_features_002_neg |- | 78 | cli_root/zpool_import/zpool_import_features_003_pos |- | 79 | delegate/zfs_allow_003_pos |- | 80 | inheritance/inherit_001_pos |- | 81 | pool_checkpoint/checkpoint_zhack_feat |- | 82 | resilver/resilver_restart_001 |- | 83 | zvol/zvol_misc/zvol_misc_volmode |- | 84 | 85 | MAYBE-LINUX |- | 86 | alloc_class/alloc_class_009_pos |- | 87 | alloc_class/alloc_class_010_pos |- | 88 | alloc_class/alloc_class_011_neg |- | 89 | alloc_class/alloc_class_013_pos |- | 90 | cli_root/zfs_rename/zfs_rename_002_pos |- | 91 | cli_root/zpool_expand/zpool_expand_001_pos |- | 92 | cli_root/zpool_expand/zpool_expand_005_pos |- | 93 | cli_root/zpool_reopen/zpool_reopen_003_pos |- | 94 | fault/auto_spare_shared |- | 95 | io/io_uring |- | 96 | limits/filesystem_limit |- | 97 | limits/snapshot_limit |- | 98 | mmp/mmp_exported_import |- | 99 | mmp/mmp_inactive_import |- | 100 | refreserv/refreserv_raidz |- | 101 | rsend/rsend_007_pos |- | 102 | rsend/rsend_010_pos |- | 103 | rsend/rsend_011_pos |- | 104 | snapshot/rollback_003_pos |- | 105 | -------------------------------------------------------------------------------- /master/patches/0011-Properly-handle-a-stale-broker-connection-on-ping.patch: -------------------------------------------------------------------------------- 1 | From aa80ab9b1cf702bd6376962ff072836262e4e222 Mon Sep 17 00:00:00 2001 2 | From: Giuseppe Di Natale 3 | Date: Fri, 1 Jul 2016 17:00:10 +0000 4 | Subject: [PATCH 11/18] Properly handle a stale broker connection on ping 5 | 6 | When a slave is pinged, it is possible for the underlying 7 | broker connection to be stale. 
callRemote in that case, will 8 | raise an exception instead of returning a defer.succeed(False). 9 | Went ahead and added exception handling specifically for the 10 | DeadReferenceError and return defer.succeed(False). 11 | --- 12 | master/buildbot/process/builder.py | 3 +++ 13 | master/buildbot/process/buildrequestdistributor.py | 5 +++++ 14 | master/buildbot/process/slavebuilder.py | 18 ++++++++++++------ 15 | 3 files changed, 20 insertions(+), 6 deletions(-) 16 | 17 | diff --git a/master/buildbot/process/builder.py b/master/buildbot/process/builder.py 18 | index 2e5df6b35..112b5c105 100644 19 | --- a/master/buildbot/process/builder.py 20 | +++ b/master/buildbot/process/builder.py 21 | @@ -317,6 +317,9 @@ class Builder(config.ReconfigurableServiceMixin, 22 | build.setBuilder(self) 23 | log.msg("starting build %s using slave %s" % (build, slavebuilder)) 24 | 25 | + from pprint import pprint 26 | + pprint (vars(slavebuilder)) 27 | + 28 | # set up locks 29 | build.setLocks(self.config.locks) 30 | cleanups.append(lambda: slavebuilder.slave.releaseLocks()) 31 | diff --git a/master/buildbot/process/buildrequestdistributor.py b/master/buildbot/process/buildrequestdistributor.py 32 | index a6d88cee0..eb7229ca6 100644 33 | --- a/master/buildbot/process/buildrequestdistributor.py 34 | +++ b/master/buildbot/process/buildrequestdistributor.py 35 | @@ -275,6 +275,7 @@ class BasicBuildChooser(BuildChooserBase): 36 | # use 'preferred' slaves first, if we have some ready 37 | if self.preferredSlaves: 38 | slave = self.preferredSlaves.pop(0) 39 | + log.msg("BuildChooser: chose preferred slave %s for build" % (slave.slave.slavename)) 40 | defer.returnValue(slave) 41 | return 42 | 43 | @@ -286,22 +287,26 @@ class BasicBuildChooser(BuildChooserBase): 44 | 45 | if not slave or slave not in self.slavepool: 46 | # bad slave or no slave returned 47 | + log.msg("BuildChooser: found a bad/no slave") 48 | break 49 | 50 | self.slavepool.remove(slave) 51 | 52 | canStart = yield 
self.bldr.canStartWithSlavebuilder(slave) 53 | if canStart: 54 | + log.msg("BuildChooser: chose slave %s for build" % (slave.slave.slavename)) 55 | defer.returnValue(slave) 56 | return 57 | 58 | # save as a last resort, just in case we need them later 59 | if self.rejectedSlaves is not None: 60 | + log.msg("BuildChooser: placed slave %s in the rejected list" % (slave.slave.slavename)) 61 | self.rejectedSlaves.append(slave) 62 | 63 | # if we chewed through them all, use as last resort: 64 | if self.rejectedSlaves: 65 | slave = self.rejectedSlaves.pop(0) 66 | + log.msg("BuildChooser: chose rejected slave %s for build" % (slave.slave.slavename)) 67 | defer.returnValue(slave) 68 | return 69 | 70 | diff --git a/master/buildbot/process/slavebuilder.py b/master/buildbot/process/slavebuilder.py 71 | index 42f050e5b..0866720a3 100644 72 | --- a/master/buildbot/process/slavebuilder.py 73 | +++ b/master/buildbot/process/slavebuilder.py 74 | @@ -195,12 +195,18 @@ class Ping: 75 | return defer.succeed(False) 76 | self.running = True 77 | log.msg("sending ping") 78 | - self.d = defer.Deferred() 79 | - # TODO: add a distinct 'ping' command on the slave.. using 'print' 80 | - # for this purpose is kind of silly. 81 | - remote.callRemote("print", "ping").addCallbacks(self._pong, 82 | - self._ping_failed, 83 | - errbackArgs=(remote,)) 84 | + try: 85 | + self.d = defer.Deferred() 86 | + # TODO: add a distinct 'ping' command on the slave.. using 'print' 87 | + # for this purpose is kind of silly. 
88 | + remote.callRemote("print", "ping").addCallbacks(self._pong, 89 | + self._ping_failed, 90 | + errbackArgs=(remote,)) 91 | + except pb.DeadReferenceError as drerr: 92 | + log.msg("ping failed: stale broker connection") 93 | + self.d = None 94 | + return defer.succeed(False) 95 | + 96 | return self.d 97 | 98 | def _pong(self, res): 99 | -- 100 | 2.14.3 101 | 102 | -------------------------------------------------------------------------------- /master/patches/0007-Create-volumes.patch: -------------------------------------------------------------------------------- 1 | From 4b4dbca300f6f58be60d7e398fd41ccdb2ad20de Mon Sep 17 00:00:00 2001 2 | From: Brian Behlendorf 3 | Date: Mon, 7 Mar 2016 14:13:11 -0800 4 | Subject: [PATCH 07/18] Create volumes 5 | 6 | --- 7 | master/buildbot/buildslave/ec2.py | 45 ++++++++++++++++++++++++++++++++++++--- 8 | 1 file changed, 42 insertions(+), 3 deletions(-) 9 | 10 | diff --git a/master/buildbot/buildslave/ec2.py b/master/buildbot/buildslave/ec2.py 11 | index 00ead8a5d..d787f194f 100644 12 | --- a/master/buildbot/buildslave/ec2.py 13 | +++ b/master/buildbot/buildslave/ec2.py 14 | @@ -44,6 +44,12 @@ SPOT_REQUEST_PENDING_STATES = ['pending-evaluation', 'pending-fulfillment'] 15 | FULFILLED = 'fulfilled' 16 | PRICE_TOO_LOW = 'price-too-low' 17 | 18 | +VOLUME_CREATING = 'creating' 19 | +VOLUME_AVAILABLE = 'available' 20 | +VOLUME_ISUSE = 'in-use' 21 | +VOLUME_DELETING = 'deleting' 22 | +VOLUME_DELETED = 'deleted' 23 | +VOLUME_ERROR = 'error' 24 | 25 | class EC2LatentBuildSlave(AbstractLatentBuildSlave): 26 | 27 | @@ -61,7 +67,7 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 28 | build_wait_timeout=60 * 10, properties={}, locks=None, 29 | spot_instance=False, max_spot_price=1.6, volumes=[], 30 | placement=None, price_multiplier=1.2, tags={}, 31 | - delete_vol_term=True): 32 | + delete_vol_term=True, create_volumes=[]): 33 | 34 | AbstractLatentBuildSlave.__init__( 35 | self, name, password, max_builds, notify_on_missing, 36 
| @@ -100,6 +106,7 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 37 | self.volumes = volumes 38 | self.price_multiplier = price_multiplier 39 | self.delete_vol_term = delete_vol_term 40 | + self.create_volumes = create_volumes 41 | 42 | if None not in [placement, region]: 43 | self.placement = '%s%s' % (region, placement) 44 | @@ -301,8 +308,8 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 45 | if self.delete_vol_term is False: 46 | return 47 | 48 | - block_map = self.conn.get_instance_attribute(self.instance.id, attribute='blockDeviceMapping') 49 | - log.msg("%s: %s" % (self.instance.id, block_map)) 50 | + block_map = self.conn.get_instance_attribute(self.instance.id, 51 | + attribute='blockDeviceMapping') 52 | 53 | del_on_term = [] 54 | for devname in block_map['blockDeviceMapping']: 55 | @@ -313,6 +320,31 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 56 | if not self.conn.modify_instance_attribute(self.instance.id, 'blockDeviceMapping', del_on_term): 57 | log.msg("Failed to set deletion on termination") 58 | 59 | + def _create_volumes(self): 60 | + for device_node, volume_size, region in self.create_volumes: 61 | + new_vol = self.conn.create_volume(volume_size, region) 62 | + duration = 0 63 | + interval = self._poll_resolution 64 | + 65 | + vol = self.conn.get_all_volumes([new_vol.id])[0] 66 | + while vol.status not in (VOLUME_AVAILABLE): 67 | + time.sleep(interval) 68 | + duration += interval 69 | + if duration % 60 == 0: 70 | + log.msg( 71 | + '%s %s has waited %d minutes for volume %s creation' % 72 | + (self.__class__.__name__, self.slavename, 73 | + duration // 60, new_vol)) 74 | + try: 75 | + vol = self.conn.get_all_volumes([new_vol.id])[0] 76 | + except boto.exception.EC2ResponseError, e: 77 | + log.msg('%s %s failed to get all volumes' % 78 | + (self.__class__.__name__, self.slavename)) 79 | + raise 80 | + 81 | + log.msg('Attaching EBS volume %s to %s.' 
% (vol.id, device_node)) 82 | + self.conn.attach_volume(vol.id, self.instance.id, device_node) 83 | + 84 | def _attach_volumes(self): 85 | for volume_id, device_node in self.volumes: 86 | self.conn.attach_volume(volume_id, self.instance.id, device_node) 87 | @@ -320,11 +352,18 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 88 | (volume_id, device_node)) 89 | 90 | def _handle_volumes(self): 91 | + if len(self.create_volumes) > 0: 92 | + self._create_volumes() 93 | + 94 | self._handle_delete_on_term() 95 | 96 | if len(self.volumes) > 0: 97 | self._attach_volumes() 98 | 99 | + block_map = self.conn.get_instance_attribute(self.instance.id, 100 | + attribute='blockDeviceMapping') 101 | + log.msg("%s: blockDeviceMapping=%s" % (self.instance.id, block_map)) 102 | + 103 | def _stop_instance(self, instance, fast): 104 | if self.elastic_ip is not None: 105 | self.conn.disassociate_address(self.elastic_ip.public_ip) 106 | -- 107 | 2.14.3 108 | 109 | -------------------------------------------------------------------------------- /scripts/runurl: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # runurl - Download a URL and run as a program, passing in arguments 4 | # 5 | # Copyright (C) 2009 Eric Hammond 6 | # 7 | 8 | error() { echo "$@" 1>&2; } 9 | fail() { [ $# -eq 0 ] || error "${BNAME}:" "$1"; exit ${2:-1}; } 10 | debug() { [ "$DEBUG" = "0" ] || error "${BNAME}:" "$@"; } 11 | cleanup() { [ -z "${TEMP_D}" -o ! 
-d "${TEMP_D}" ] || rm -Rf "${TEMP_D}"; } 12 | 13 | DEBUG="0" 14 | TEMP_D="" 15 | BNAME=${0##*/} 16 | 17 | while [ $# -gt 0 ]; do 18 | case $1 in 19 | -\?|--help) pod2text "${0}"; exit 0;; 20 | -d|--debug) DEBUG=1; shift 1 ;; 21 | -*) fail "Unrecognized option: $1 (try --help)";; 22 | *) url="$1"; shift 1; break ;; 23 | esac 24 | done 25 | 26 | [ -n "$url" ] || fail "Missing URL specification (try --help)" 27 | 28 | trap cleanup 0 29 | 30 | TEMP_D=$(mktemp -d ${TEMPDIR:-/tmp}/${BNAME}.XXXXXX) && 31 | runfile="${TEMP_D}/runfile" && wgetfile="${TEMP_D}/wget.out" || 32 | fail "failed to make tempdir" 33 | 34 | debug "downloading $url" 35 | 36 | wget \ 37 | --retry-connrefused \ 38 | --tries=20 \ 39 | "--output-document=$runfile" \ 40 | "--output-file=$wgetfile" \ 41 | "$url" 42 | 43 | wgetstatus=$? 44 | if [ $wgetstatus != 0 ]; then 45 | cat $wgetfile >&2 46 | fail "wget failed: $wgetstatus" "${wgetstatus}" 47 | fi 48 | 49 | chmod 700 "${runfile}" || fail "failed to change perms of ${runfile}" 50 | 51 | debug "running" 52 | $runfile "$@" 53 | 54 | exitstatus=$? 55 | 56 | debug "exit status $exitstatus" 57 | 58 | exit $exitstatus 59 | 60 | # 61 | # To read the documentation in this file use the command: 62 | # 63 | # perldoc runurl 64 | # 65 | 66 | =head1 NAME 67 | 68 | runurl - Download a URL and run as a program, passing in arguments 69 | 70 | =head1 SYNOPSYS 71 | 72 | runurl [I] I [I]... 73 | 74 | =head1 OPTIONS 75 | 76 | =over 8 77 | 78 | =item B<-?> B<--help> 79 | 80 | Debug mode 81 | 82 | =item B<-d> B<--debug> 83 | 84 | Debug mode 85 | 86 | =back 87 | 88 | =head1 ARGUMENTS 89 | 90 | =over 8 91 | 92 | =item B 93 | 94 | The URL of the progran to download and run 95 | 96 | =item B 97 | 98 | Options and arguments for the downloaded program 99 | 100 | =back 101 | 102 | =head1 DESCRIPTION 103 | 104 | The B command is a simple tool that downloads a program (or 105 | script) from the specified URL and runs it. 
106 | 107 | The first argument to the B command is the URL of a script or 108 | program that should be run. Any leading "http://" may be omitted, but 109 | "https://" or "ftp://" and the like must still be specified. 110 | 111 | All remaining arguments listed after the URL (including ones which 112 | look like options) are passed verbatim to the program as its own 113 | options and arguments when it is run. 114 | 115 | The exit code of B is the exit code of the program, unless the 116 | original download of the URL failed, in which case that error is 117 | returned. 118 | 119 | =head1 EXAMPLES 120 | 121 | If the following content is stored at http://run.alestic.com/demo/hello 122 | 123 | #!/bin/bash 124 | echo "hello, $1" 125 | 126 | then this command: 127 | 128 | runurl run.alestic.com/demo/hello world 129 | 130 | will itself output: 131 | 132 | hello, world 133 | 134 | =head1 CAVEATS 135 | 136 | Only run content that you control or completely trust. 137 | 138 | Just because you like the content of a URL when you look at it in your 139 | browser does not mean that it will still look like that when B 140 | goes to run it. It could change at any point to something that is 141 | broken or even malicious unless it is under your control. 142 | 143 | Realize that you are depending on the network for commands to succeed. 144 | If the content is temporarily unavailable or has been moved, then the 145 | B command will fail. 
146 | 147 | =head1 DEPENDENCIES 148 | 149 | This program requires that the following already be installed: 150 | 151 | wget 152 | 153 | =head1 SEE ALSO 154 | 155 | The B project site: 156 | 157 | https://launchpad.net/runurl 158 | 159 | Article about using B for initial configuration of Amazon EC2 160 | instances: 161 | 162 | http://alestic.com/2009/08/runurl 163 | 164 | =head1 INSTALLATION 165 | 166 | On most Ubuntu releases, the B package can be installed 167 | directly from the Alestic.com PPA using the following commands: 168 | 169 | code=$(lsb_release -cs) 170 | echo "deb http://ppa.launchpad.net/alestic/ppa/ubuntu $code main"| 171 | sudo tee /etc/apt/sources.list.d/alestic-ppa.list 172 | sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys BE09C571 173 | sudo apt-get update 174 | sudo apt-get install -y runurl 175 | 176 | =head1 BUGS 177 | 178 | Please report bugs at https://bugs.launchpad.net/runurl 179 | 180 | =head1 AUTHOR 181 | 182 | Eric Hammond 183 | 184 | =head1 LICENSE 185 | 186 | Copyright 2009 Eric Hammond 187 | 188 | Licensed under the Apache License, Version 2.0 (the "License"); 189 | you may not use this file except in compliance with the License. 190 | You may obtain a copy of the License at 191 | 192 | http://www.apache.org/licenses/LICENSE-2.0 193 | 194 | Unless required by applicable law or agreed to in writing, software 195 | distributed under the License is distributed on an "AS IS" BASIS, 196 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 197 | See the License for the specific language governing permissions and 198 | limitations under the License. 
199 | 200 | =cut 201 | -------------------------------------------------------------------------------- /master/patches/0010-Allow-independant-EC2-price_multiplier-or-max_spot_p.patch: -------------------------------------------------------------------------------- 1 | From 8b6f056e6505554c3725f48077c3046ec2375ec3 Mon Sep 17 00:00:00 2001 2 | From: "Christopher J. Morrone" 3 | Date: Tue, 7 Jun 2016 13:51:41 -0700 4 | Subject: [PATCH 10/18] Allow independant EC2 price_multiplier or 5 | max_spot_price usage 6 | 7 | Change the EC2LatentBuildSlave to allow the use of just one 8 | of either max_spot_price or price_multiplier. In other words, 9 | either value can be set to None so that only the other value 10 | is used. When spot instances are used, at least one of those two 11 | parameters must be set. 12 | 13 | If only max_spot_price is set, that is the value that is used 14 | as the bid price. There is not need to look up the historical 15 | bid prices in that case. It seems likely that in the real world 16 | most people will want to set the maximum price they are willing 17 | to pay. Artificially bidding lower than one is willing to pay 18 | based on historical information is probably not a very common use 19 | case. 20 | 21 | Also, change the logic to use as the bid price the minumum of either 22 | the multiple of the historical average or the max_spot_price. The 23 | previous behavior was to raise an exception if the multiple was in 24 | excess of the max_spot_price. That doesn't make much sense. The 25 | historical average tells us little about the price _now_, so it probably 26 | never makes sense to abort early without at least making an attempt 27 | to substantiate with the max_spot_price. 28 | 29 | Fixes #2898 problems 1 and 3. 
30 | --- 31 | master/buildbot/buildslave/ec2.py | 29 ++++++++++++++++------------- 32 | master/docs/manual/cfg-buildslaves.rst | 5 +++++ 33 | 2 files changed, 21 insertions(+), 13 deletions(-) 34 | 35 | diff --git a/master/buildbot/buildslave/ec2.py b/master/buildbot/buildslave/ec2.py 36 | index 3c77804f5..1e8442e9e 100644 37 | --- a/master/buildbot/buildslave/ec2.py 38 | +++ b/master/buildbot/buildslave/ec2.py 39 | @@ -95,6 +95,10 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 40 | else: 41 | # verify that regex will compile 42 | re.compile(valid_ami_location_regex) 43 | + if spot_instance and price_multiplier is None and max_spot_price is None: 44 | + raise ValueError('You must provide either one, or both, of ' 45 | + 'price_multiplier or max_spot_price') 46 | + 47 | self.valid_ami_owners = valid_ami_owners 48 | self.valid_ami_location_regex = valid_ami_location_regex 49 | self.instance_type = instance_type 50 | @@ -416,24 +420,23 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 51 | price_sum += price.price 52 | price_count += 1 53 | if price_count == 0: 54 | - target_price = 0.02 55 | + bid_price = 0.02 56 | else: 57 | - target_price = (price_sum / price_count) * self.price_multiplier 58 | - return target_price 59 | + bid_price = (price_sum / price_count) * self.price_multiplier 60 | + return bid_price 61 | 62 | def _request_spot_instance(self): 63 | - target_price = self._bid_price_from_spot_price_history() 64 | - if target_price > self.max_spot_price: 65 | - log.msg('%s %s calculated spot price %0.2f exceeds ' 66 | - 'configured maximum of %0.2f' % 67 | - (self.__class__.__name__, self.slavename, 68 | - target_price, self.max_spot_price)) 69 | - raise interfaces.LatentBuildSlaveFailedToSubstantiate() 70 | + if self.price_multiplier is None: 71 | + bid_price = self.max_spot_price 72 | else: 73 | - log.msg('%s %s requesting spot instance with price %0.2f.' 
% 74 | - (self.__class__.__name__, self.slavename, target_price)) 75 | + bid_price = self._bid_price_from_spot_price_history() 76 | + if self.max_spot_price is not None \ 77 | + and bid_price > self.max_spot_price: 78 | + bid_price = self.max_spot_price 79 | + log.msg('%s %s requesting spot instance with price %0.2f.' % 80 | + (self.__class__.__name__, self.slavename, bid_price)) 81 | reservations = self.conn.request_spot_instances( 82 | - target_price, self.ami, key_name=self.keypair_name, 83 | + bid_price, self.ami, key_name=self.keypair_name, 84 | security_groups=[ 85 | self.security_name], 86 | instance_type=self.instance_type, 87 | diff --git a/master/docs/manual/cfg-buildslaves.rst b/master/docs/manual/cfg-buildslaves.rst 88 | index d8ddd1884..ef70c3fb2 100644 89 | --- a/master/docs/manual/cfg-buildslaves.rst 90 | +++ b/master/docs/manual/cfg-buildslaves.rst 91 | @@ -372,6 +372,11 @@ Additionally, you may want to specify ``max_spot_price`` and ``price_multiplier` 92 | This example would attempt to create a m1.large spot instance in the us-west-2b region costing no more than $0.09/hour. 93 | The spot prices for that region in the last 24 hours will be averaged and multiplied by the ``price_multiplier`` parameter, then a spot request will be sent to Amazon with the above details. 94 | If the spot request is rejected, an error message will be logged with the final status. 95 | +If the multiple exceeds the ``max_spot_price``, the bid price will be the ``max_spot_price``. 96 | + 97 | +Either ``max_spot_price`` or ``price_multiplier``, but not both, may be None. 98 | +If ``price_multiplier`` is None, then no historical price information is retrieved; the bid price is simply the specified ``max_spot_price``. 99 | +If the ``max_spot_price`` is None, then the multiple of the historical average spot prices is used as the bid price with no limit. 100 | 101 | .. 
index:: 102 | libvirt 103 | -- 104 | 2.14.3 105 | 106 | -------------------------------------------------------------------------------- /scripts/bb-test-zfstests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if test -f /etc/buildslave; then 4 | . /etc/buildslave 5 | fi 6 | 7 | if test -f ./TEST; then 8 | . ./TEST 9 | else 10 | echo "Missing $PWD/TEST configuration file" 11 | exit 1 12 | fi 13 | 14 | TEST_ZFSTESTS_SKIP=${TEST_ZFSTESTS_SKIP:-"No"} 15 | if echo "$TEST_ZFSTESTS_SKIP" | grep -Eiq "^yes$|^on$|^true$|^1$"; then 16 | echo "Skipping disabled test" 17 | exit 3 18 | fi 19 | 20 | CONSOLE_LOG="$PWD/console.log" 21 | SUMMARY_LOG="$PWD/summary.log" 22 | TEST_LOG="$PWD/test.log" 23 | FULL_LOG="$PWD/full.log" 24 | KMEMLEAK_LOG="$PWD/kmemleak.log" 25 | KMEMLEAK_FILE="/sys/kernel/debug/kmemleak" 26 | DMESG_PID="0" 27 | RESULT=0 28 | 29 | cleanup() 30 | { 31 | if [ -f "$TEST_LOG" ]; then 32 | RESULTS_DIR=$(awk '/^Log directory/ { print $3; exit 0 }' "$TEST_LOG") 33 | if [ -d "$RESULTS_DIR" ]; then 34 | # Generate a summary of results and place them in a different file. 35 | grep -A 1000 "Results Summary" "$TEST_LOG" > $SUMMARY_LOG 36 | echo "" >> $SUMMARY_LOG 37 | awk '/\[FAIL\]|\[KILLED\]/{ show=1; print; next; } 38 | /\[SKIP\]|\[PASS\]/{ show=0; } show' $FULL_LOG >> $SUMMARY_LOG 39 | 40 | # Preserve the results directory for future analysis, as: 41 | # //zts/zts--.tar.xz 42 | if test -n "$UPLOAD_DIR"; then 43 | BUILDER="$(echo $BB_NAME | cut -f1-3 -d'-')" 44 | mkdir -p "$UPLOAD_DIR/$BUILDER/zts" 45 | 46 | RESULTS_DATE=$(basename $RESULTS_DIR) 47 | RESULTS_NAME="zts-$TEST_ZFSTESTS_RUNFILE-$RESULTS_DATE" 48 | RESULTS_DIRNAME=$(dirname $RESULTS_DIR) 49 | 50 | # Rename the results to include the run file name and date. 51 | # Then compress the renamed directory for upload. 
52 | mv "$RESULTS_DIR" "$RESULTS_DIRNAME/$RESULTS_NAME" 53 | tar -C "$RESULTS_DIRNAME" -cJ \ 54 | -f "$UPLOAD_DIR/$BUILDER/zts/$RESULTS_NAME.tar.xz" \ 55 | "$RESULTS_NAME" 56 | fi 57 | fi 58 | fi 59 | 60 | sudo -E $ZFS_SH -u 61 | 62 | if [ "$DMESG_PID" = "0" ]; then 63 | dmesg >$CONSOLE_LOG 64 | else 65 | kill $DMESG_PID 66 | fi 67 | } 68 | trap cleanup EXIT TERM 69 | 70 | set -x 71 | 72 | # If our environment specifies a runfile or set of disks, use those. 73 | DEFAULT_ZFSTESTS_RUNFILE=${DEFAULT_ZFSTESTS_RUNFILE:-""} 74 | DEFAULT_ZFSTESTS_DISKS=${DEFAULT_ZFSTESTS_DISKS:-""} 75 | DEFAULT_ZFSTESTS_DISKSIZE=${DEFAULT_ZFSTESTS_DISKSIZE:-""} 76 | DEFAULT_ZFSTESTS_TAGS=${DEFAULT_ZFSTESTS_TAGS:-"functional"} 77 | DEFAULT_ZFSTESTS_PERF_RUNTIME=${DEFAULT_ZFSTESTS_PERF_RUNTIME:-180} 78 | DEFAULT_ZFSTESTS_PERF_FS_OPTS=${DEFAULT_ZFSTESTS_PERF_FS_OPTS:-"-o recsize=1M -o compress=lz4"} 79 | 80 | TEST_ZFSTESTS_DIR=${TEST_ZFSTESTS_DIR:-"/mnt/"} 81 | TEST_ZFSTESTS_DISKS=${TEST_ZFSTESTS_DISKS:-"$DEFAULT_ZFSTESTS_DISKS"} 82 | TEST_ZFSTESTS_DISKSIZE=${TEST_ZFSTESTS_DISKSIZE:-"$DEFAULT_ZFSTESTS_DISKSIZE"} 83 | TEST_ZFSTESTS_ITERS=${TEST_ZFSTESTS_ITERS:-"1"} 84 | TEST_ZFSTESTS_OPTIONS=${TEST_ZFSTESTS_OPTIONS:-"-vxR"} 85 | TEST_ZFSTESTS_RUNFILE=${TEST_ZFSTESTS_RUNFILE:-"$DEFAULT_ZFSTESTS_RUNFILE"} 86 | TEST_ZFSTESTS_TAGS=${TEST_ZFSTESTS_TAGS:-"$DEFAULT_ZFSTESTS_TAGS"} 87 | TEST_ZFSTESTS_PROFILE=${TEST_ZFSTESTS_PROFILE:-"No"} 88 | 89 | # Environment variables which control the performance test suite. 90 | PERF_RUNTIME=${TEST_ZFSTESTS_PERF_RUNTIME:-$DEFAULT_ZFSTESTS_PERF_RUNTIME} 91 | PERF_FS_OPTS=${TEST_ZFSTESTS_PERF_FS_OPTS:-"$DEFAULT_ZFSTESTS_PERF_FS_OPTS"} 92 | 93 | set +x 94 | 95 | case $(uname) in 96 | FreeBSD) 97 | if ! kldstat -qn openzfs; then 98 | sudo -E $ZFS_SH 99 | fi 100 | ;; 101 | Linux) 102 | if ! 
test -e /sys/module/zfs; then 103 | sudo -E $ZFS_SH 104 | fi 105 | ;; 106 | *) 107 | sudo -E $ZFS_SH 108 | ;; 109 | esac 110 | 111 | # Performance profiling disabled by default due to size of profiling data. 112 | if echo "$TEST_ZFSTESTS_PROFILE" | grep -Eiq "^yes$|^on$|^true$|^1$"; then 113 | case "$BB_NAME" in 114 | Amazon*) 115 | sudo yum -y install perf 116 | ;; 117 | *) 118 | echo "Performance profiling with 'perf' disabled." 119 | ;; 120 | esac 121 | fi 122 | 123 | export PERF_RUNTIME PERF_FS_OPTS 124 | 125 | # Default to loopback devices created by zfs-tests.sh when not specified. 126 | # If disks are given then optionally partition them if a size is provided. 127 | if [ -n "$TEST_ZFSTESTS_DISKS" ]; then 128 | if [ -n "$TEST_ZFSTESTS_DISKSIZE" ]; then 129 | DISKS="" 130 | for disk in $TEST_ZFSTESTS_DISKS; do 131 | set -x 132 | sudo -E parted --script /dev/$disk mklabel gpt 133 | sudo -E parted --script /dev/$disk mkpart logical 1MiB $TEST_ZFSTESTS_DISKSIZE 134 | set +x 135 | DISKS="$DISKS ${disk}1" 136 | done 137 | else 138 | DISKS="$TEST_ZFSTESTS_DISKS" 139 | fi 140 | export DISKS 141 | fi 142 | 143 | if $(sudo -E test -e "$KMEMLEAK_FILE"); then 144 | echo "Kmemleak enabled. Disabling scan thread and clearing log" 145 | sudo -E sh -c "echo scan=off >$KMEMLEAK_FILE" 146 | sudo -E sh -c "echo clear >$KMEMLEAK_FILE" 147 | fi 148 | 149 | sudo -E chmod 777 $TEST_ZFSTESTS_DIR 150 | sudo -E dmesg -c >/dev/null 151 | 152 | if $(dmesg -h 2>/dev/null | grep -qe '-w'); then 153 | dmesg -w >$CONSOLE_LOG & 154 | DMESG_PID=$! 155 | else 156 | touch $CONSOLE_LOG 157 | fi 158 | 159 | ln -s /var/tmp/test_results/current/log $FULL_LOG 160 | 161 | set -x 162 | $ZFS_TESTS_SH $TEST_ZFSTESTS_OPTIONS \ 163 | ${TEST_ZFSTESTS_RUNFILE:+-r $TEST_ZFSTESTS_RUNFILE} \ 164 | -d $TEST_ZFSTESTS_DIR \ 165 | -I $TEST_ZFSTESTS_ITERS \ 166 | -T $TEST_ZFSTESTS_TAGS > $TEST_LOG 2>&1 167 | RESULT=$? 
168 | set +x 169 | 170 | if $(dmesg | grep "oom-killer"); then 171 | echo "Out-of-memory (OOM) killer invocation detected" 172 | [ $RESULT -eq 0 ] && RESULT=2 173 | fi 174 | 175 | if $(sudo -E test -e "$KMEMLEAK_FILE"); then 176 | # Scan must be run twice to ensure all leaks are detected. 177 | sudo -E sh -c "echo scan >$KMEMLEAK_FILE" 178 | sudo -E sh -c "echo scan >$KMEMLEAK_FILE" 179 | sudo -E cat $KMEMLEAK_FILE >$KMEMLEAK_LOG 180 | 181 | if [ -s "$KMEMLEAK_LOG" ]; then 182 | echo "Kmemleak detected see $KMEMLEAK_LOG" 183 | [ $RESULT -eq 0 ] && RESULT=2 184 | else 185 | echo "Kmemleak detected no leaks" >$KMEMLEAK_LOG 186 | fi 187 | fi 188 | 189 | exit $RESULT 190 | -------------------------------------------------------------------------------- /master/patches/0004-Retry-on-EC2-NotFound-errors.patch: -------------------------------------------------------------------------------- 1 | From a39b26825cb078c1a8402b0d8c2d96330cc0cc7e Mon Sep 17 00:00:00 2001 2 | From: Brian Behlendorf 3 | Date: Thu, 3 Dec 2015 11:15:43 -0800 4 | Subject: [PATCH 04/18] Retry on EC2 'NotFound' errors 5 | 6 | Due to EC2 eventual consistency model 'NotFound' errors can returned 7 | even after the successful creation of a resource. This spurious errors 8 | should be handled by retrying the operation according to the Amazon 9 | documentation. 
10 | 11 | http://docs.aws.amazon.com/AWSEC2/latest/APIReference/query-api-troubleshooting.html#eventual-consistency 12 | Signed-off-by: Brian Behlendorf 13 | --- 14 | master/buildbot/buildslave/base.py | 1 + 15 | master/buildbot/buildslave/ec2.py | 60 +++++++++++++++++++++++++++++++------- 16 | 2 files changed, 51 insertions(+), 10 deletions(-) 17 | 18 | diff --git a/master/buildbot/buildslave/base.py b/master/buildbot/buildslave/base.py 19 | index 98b58814a..07c077b35 100644 20 | --- a/master/buildbot/buildslave/base.py 21 | +++ b/master/buildbot/buildslave/base.py 22 | @@ -998,6 +998,7 @@ class AbstractLatentBuildSlave(AbstractBuildSlave): 23 | del self._shutdown_callback_handle 24 | reactor.removeSystemEventTrigger(handle) 25 | self.substantiated = False 26 | + self.slave = None 27 | self.building.clear() # just to be sure 28 | yield d 29 | self.insubstantiating = False 30 | diff --git a/master/buildbot/buildslave/ec2.py b/master/buildbot/buildslave/ec2.py 31 | index e16b6372b..ae40cf181 100644 32 | --- a/master/buildbot/buildslave/ec2.py 33 | +++ b/master/buildbot/buildslave/ec2.py 34 | @@ -49,6 +49,7 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 35 | 36 | instance = image = None 37 | _poll_resolution = 5 # hook point for tests 38 | + _poll_retry = 10 39 | 40 | def __init__(self, name, password, instance_type, ami=None, 41 | valid_ami_owners=None, valid_ami_location_regex=None, 42 | @@ -302,7 +303,15 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 43 | def _stop_instance(self, instance, fast): 44 | if self.elastic_ip is not None: 45 | self.conn.disassociate_address(self.elastic_ip.public_ip) 46 | - instance.update() 47 | + try: 48 | + instance.update() 49 | + except boto.exception.EC2ResponseError, e: 50 | + log.msg('%s %s cannot find instance %s to terminate' % 51 | + (self.__class__.__name__, self.slavename, instance.id)) 52 | + if e.error_code == 'InvalidInstanceID.NotFound': 53 | + return 54 | + else: 55 | + raise 56 | if instance.state 
not in (SHUTTINGDOWN, TERMINATED): 57 | instance.terminate() 58 | log.msg('%s %s terminating instance %s' % 59 | @@ -380,7 +389,16 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 60 | log.msg('%s %s has waited %d minutes for instance %s' % 61 | (self.__class__.__name__, self.slavename, duration // 60, 62 | self.instance.id)) 63 | - self.instance.update() 64 | + try: 65 | + self.instance.update() 66 | + except boto.exception.EC2ResponseError, e: 67 | + log.msg('%s %s failed to find instance %s' % 68 | + (self.__class__.__name__, self.slavename, 69 | + self.instance.id)) 70 | + if e.error_code == 'InvalidInstanceID.NotFound': 71 | + continue 72 | + else: 73 | + raise 74 | if self.instance.state == RUNNING: 75 | self.output = self.instance.get_console_output() 76 | minutes = duration // 60 77 | @@ -406,11 +424,25 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 78 | log.msg('%s %s requesting spot instance' % 79 | (self.__class__.__name__, self.slavename)) 80 | duration = 0 81 | + attempts = 0 82 | interval = self._poll_resolution 83 | - requests = self.conn.get_all_spot_instance_requests( 84 | - request_ids=[reservation.id]) 85 | - request = requests[0] 86 | - request_status = request.status.code 87 | + while attempts < self._poll_retry: 88 | + try: 89 | + requests = self.conn.get_all_spot_instance_requests( 90 | + request_ids=[reservation.id]) 91 | + request = requests[0] 92 | + request_status = request.status.code 93 | + break 94 | + except boto.exception.EC2ResponseError, e: 95 | + attempts += 1 96 | + log.msg('%s %s failed to find spot request %s' % 97 | + (self.__class__.__name__, self.slavename, 98 | + reservation.id)) 99 | + if e.error_code == 'InvalidSpotInstanceRequestID.NotFound': 100 | + time.sleep(interval) 101 | + continue 102 | + else: 103 | + raise 104 | while request_status in SPOT_REQUEST_PENDING_STATES: 105 | time.sleep(interval) 106 | duration += interval 107 | @@ -418,10 +450,18 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 
108 | log.msg('%s %s has waited %d minutes for spot request %s' % 109 | (self.__class__.__name__, self.slavename, duration // 60, 110 | request.id)) 111 | - requests = self.conn.get_all_spot_instance_requests( 112 | - request_ids=[request.id]) 113 | - request = requests[0] 114 | - request_status = request.status.code 115 | + try: 116 | + requests = self.conn.get_all_spot_instance_requests( 117 | + request_ids=[request.id]) 118 | + request = requests[0] 119 | + request_status = request.status.code 120 | + except boto.exception.EC2ResponseError, e: 121 | + log.msg('%s %s failed to find spot request %s' % 122 | + (self.__class__.__name__, self.slavename, request.id)) 123 | + if e.error_code == 'InvalidSpotInstanceRequestID.NotFound': 124 | + continue 125 | + else: 126 | + raise 127 | if request_status == FULFILLED: 128 | minutes = duration // 60 129 | seconds = duration % 60 130 | -- 131 | 2.14.3 132 | 133 | -------------------------------------------------------------------------------- /scripts/bb-test-pts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check for a local cached configuration. 4 | if test -f /etc/buildslave; then 5 | . /etc/buildslave 6 | fi 7 | 8 | # Custom test options will be saved in the tests directory. 9 | if test -f "./TEST"; then 10 | . ./TEST 11 | fi 12 | 13 | TEST_PTS_SKIP=${TEST_PTS_SKIP:-"No"} 14 | if echo "$TEST_PTS_SKIP" | grep -Eiq "^yes$|^on$|^true$|^1$"; then 15 | echo "Skipping disabled test" 16 | exit 3 17 | fi 18 | 19 | ZPOOL=${ZPOOL:-"zpool"} 20 | ZFS=${ZFS:-"zfs"} 21 | PTS=${PTS:-"phoronix-test-suite"} 22 | CONFIG_LOG="$PWD/config.log" 23 | 24 | # Cleanup the pool and restore any modified system state. The console log 25 | # is dumped twice to maximize the odds of preserving debug information. 
26 | cleanup() 27 | { 28 | # Preserve the results directory for future analysis, as: 29 | # //pts/pts-.tar.xz 30 | if test -n "$UPLOAD_DIR"; then 31 | BUILDER="$(echo $BB_NAME | cut -f1-3 -d'-')" 32 | mkdir -p "$UPLOAD_DIR/$BUILDER/pts" 33 | 34 | tar -C "$RESULTS_DIR" -cJ \ 35 | -f "$UPLOAD_DIR/$BUILDER/pts/${TEST_RESULTS_NAME}.tar.xz" \ 36 | "$TEST_RESULTS_NAME" 37 | fi 38 | 39 | sudo modprobe -r brd 40 | sudo -E $ZFS_SH -u 41 | } 42 | trap cleanup EXIT SIGTERM 43 | 44 | set -x 45 | 46 | TEST_PTS_URL=${TEST_PTS_URL:-"https://github.com/phoronix-test-suite/phoronix-test-suite/archive/"} 47 | TEST_PTS_VER=${TEST_PTS_VER:-"master.tar.gz"} 48 | TEST_PTS_POOL=${TEST_PTS_POOL:-"perf"} 49 | TEST_PTS_POOL_OPTIONS=${TEST_PTS_POOL_OPTIONS:-""} 50 | TEST_PTS_FS=${TEST_PTS_FS:-"fs"} 51 | TEST_PTS_FS_OPTIONS=${TEST_PTS_FS_OPTIONS:-""} 52 | TEST_PTS_TEST_PROFILE_URL=${TEST_PTS_TEST_PROFILE_URL:-"https://raw.githubusercontent.com/openzfs/zfs-buildbot/master/scripts/"} 53 | TEST_PTS_TEST_PROFILE_VER=${TEST_PTS_TEST_PROFILE_VER:-"pts-test-profiles.tar.gz"} 54 | 55 | # Test cases to run. 56 | TEST_PTS_BENCHMARKS=${TEST_PTS_BENCHMARKS:-" \ 57 | zfs/aio-stress \ 58 | zfs/compilebench \ 59 | zfs/dbench \ 60 | zfs/postmark \ 61 | zfs/sqlite \ 62 | zfs/unpack-linux \ 63 | "} 64 | 65 | set +x 66 | 67 | sudo modprobe brd rd_nr=2 rd_size=2097152 68 | 69 | # Performance testing is done on an AWS EC2 d2.xlarge instance type: 70 | # 4 vCPUS 71 | # 30.5 GB of Memory 72 | # 3x2TB HDDs (xvdb, xvdc, xvdd) 73 | # 74 | # Ramdisks are used to simulate fast SSDs for log and cache devices: 75 | # 2x2GB SSDs (ram0, ram1) 76 | # 77 | RAIDZS="raidz xvdb xvdc xvdd" 78 | MIRRORS="mirror xvdb xvdc" 79 | LOGS="log ram0" 80 | CACHES="cache ram1" 81 | 82 | # Configurations to test. 
83 | TEST_PTS_CONFIGS=( \ 84 | "RAIDZ1 1x3-way:$RAIDZS" \ 85 | "RAIDZ1 1x3-way+log+cache:$RAIDZS $LOGS $CACHES" \ 86 | "MIRROR 1x2-way:$MIRRORS" \ 87 | "MIRROR 1x2-way+log+cache:$MIRRORS $LOGS $CACHES" \ 88 | ) 89 | 90 | TEST_DIR="/$TEST_PTS_POOL/$TEST_PTS_FS" 91 | RESULTS_DIR="$HOME/.phoronix-test-suite/test-results/" 92 | PROFILES_DIR="$HOME/.phoronix-test-suite/test-profiles/" 93 | 94 | # Install and configure PTS is not already installed. 95 | # 96 | if ! type $PTS > /dev/null 2>&1; then 97 | case "$BB_NAME" in 98 | Amazon*) 99 | sudo yum -y install --enablerepo=epel phoronix-test-suite 100 | sudo yum -y install popt-devel 101 | ;; 102 | *) 103 | wget -qO${TEST_PTS_VER} ${TEST_PTS_URL}${TEST_PTS_VER} || exit 1 104 | tar -xzf ${TEST_PTS_VER} || exit 1 105 | rm ${TEST_PTS_VER} 106 | 107 | cd phoronix-test-suite* 108 | sudo ./install-sh >>$CONFIG_LOG 2>&1 || exit 1 109 | cd .. 110 | esac 111 | 112 | $PTS enterprise-setup >>$CONFIG_LOG 2>&1 113 | fi 114 | 115 | # Refresh the download cache. 116 | $PTS make-download-cache $TEST_PTS_BENCHMARKS >>CONFIG_LOG 2>&1 117 | 118 | # Install the custom zfs test profiles. 
119 | rm -Rf $PROFILES_DIR/zfs 120 | wget -qO- ${TEST_PTS_TEST_PROFILE_URL}${TEST_PTS_TEST_PROFILE_VER} | \ 121 | tar xz -C $PROFILES_DIR 122 | 123 | # Configure PTS and pool to start with a clean slate 124 | $PTS user-config-set EnvironmentDirectory="$TEST_DIR" >>$CONFIG_LOG 2>&1 125 | $PTS user-config-set ResultsDirectory="$RESULTS_DIR" >>$CONFIG_LOG 2>&1 126 | $PTS user-config-set UploadResults="FALSE" >>$CONFIG_LOG 2>&1 127 | $PTS user-config-set AnonymousUsageReporting="FALSE" >>$CONFIG_LOG 2>&1 128 | $PTS user-config-set AnonymousSoftwareReporting="FALSE" >>$CONFIG_LOG 2>&1 129 | $PTS user-config-set AnonymousHardwareReporting="FALSE" >>$CONFIG_LOG 2>&1 130 | $PTS user-config-set SaveSystemLogs="TRUE" >>$CONFIG_LOG 2>&1 131 | $PTS user-config-set SaveTestLogs="TRUE" >>$CONFIG_LOG 2>&1 132 | $PTS user-config-set PromptForTestIdentifier="FALSE" >>$CONFIG_LOG 2>&1 133 | $PTS user-config-set PromptForTestDescription="FALSE" >>$CONFIG_LOG 2>&1 134 | $PTS user-config-set PromptSaveName="FALSE" >>$CONFIG_LOG 2>&1 135 | $PTS user-config-set Configured="TRUE" >>$CONFIG_LOG 2>&1 136 | 137 | case $(uname) in 138 | FreeBSD) 139 | if ! kldstat -qn openzfs; then 140 | sudo -E $ZFS_SH 141 | fi 142 | ;; 143 | Linux) 144 | if ! test -e /sys/module/zfs; then 145 | sudo -E $ZFS_SH 146 | fi 147 | ;; 148 | *) 149 | sudo -E $ZFS_SH 150 | ;; 151 | esac 152 | 153 | # If not given, read the zfs version and trim the hash to 7 characters. 154 | # This ensures that once merged all the test results will be in the same 155 | # directory. Current git versions truncate to 9 characters by default. 
156 | if [ -z "$ZFS_VERSION" ]; then 157 | ZFS_VERSION="zfs-$(cat /sys/module/zfs/version)" 158 | if [[ "$ZFS_VERSION" =~ zfs-[0-9.]*-[0-9]*_g[0-9a-f]{9}$ ]]; then 159 | ZFS_VERSION="${ZFS_VERSION%??}" 160 | fi 161 | export ZFS_VERSION 162 | fi 163 | 164 | export TEST_RESULTS_NAME="pts-$(date +%Y%m%dt%H%M%S)" 165 | export TEST_RESULTS_DESCRIPTION="Buildbot automated testing results" 166 | export SYSTEM_LOGS="$RESULTS_DIR/$TEST_RESULTS_NAME/system-logs" 167 | 168 | rm -Rf "$RESULTS_DIR/*" 169 | 170 | for CONFIG in "${TEST_PTS_CONFIGS[@]}"; do 171 | ID=$(echo "$CONFIG" | cut -f1 -d':') 172 | VDEVS=$(echo "$CONFIG" | cut -f2 -d':') 173 | 174 | export TEST_RESULTS_IDENTIFIER="${ZFS_VERSION} $ID" 175 | 176 | sudo -E dmesg -c >/dev/null 177 | set -x 178 | sudo -E $ZPOOL create -f $TEST_PTS_POOL \ 179 | $TEST_PTS_POOL_OPTIONS $VDEVS || exit 1 180 | sudo -E $ZFS create $TEST_PTS_POOL/$TEST_PTS_FS \ 181 | $TEST_PTS_FS_OPTIONS || exit 1 182 | sudo -E chmod 777 $TEST_DIR 183 | 184 | $PTS batch-benchmark $TEST_PTS_BENCHMARKS 185 | 186 | LOG_DIR="$SYSTEM_LOGS/$TEST_RESULTS_IDENTIFIER" 187 | mkdir -p "$LOG_DIR" 188 | sudo -E $ZPOOL status -v >"$LOG_DIR/zpool-status.log" 189 | sudo -E $ZPOOL list -v >"$LOG_DIR/zpool-list.log" 190 | sudo -E $ZPOOL get all >"$LOG_DIR/zpool-get.log" 191 | sudo -E $ZPOOL iostat -rv >"$LOG_DIR/zpool-iostat-request-histogram.log" 192 | sudo -E $ZPOOL iostat -wv >"$LOG_DIR/zpool-iostat-latency-histogram.log" 193 | sudo -E $ZFS list >"$LOG_DIR/zfs-list.log" 194 | sudo -E $ZFS get all >"$LOG_DIR/zfs-get.log" 195 | sudo -E $ZPOOL destroy $TEST_PTS_POOL 196 | set +x 197 | 198 | dmesg >"$LOG_DIR/console.log" 199 | done 200 | 201 | exit 0 202 | -------------------------------------------------------------------------------- /scripts/bb-test-prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if test -f /etc/buildslave; then 4 | . 
/etc/buildslave 5 | fi 6 | 7 | TEST_PREPARE_SKIP=${TEST_PREPARE_SKIP:-"No"} 8 | if echo "$TEST_PREPARE_SKIP" | grep -Eiq "^yes$|^on$|^true$|^1$"; then 9 | echo "Skipping disabled test" 10 | exit 3 11 | fi 12 | 13 | ZFS_BUILD_DIR=$(readlink -f ../zfs) 14 | TEST_DIR="$PWD" 15 | TEST_FILE="${TEST_DIR}/TEST" 16 | 17 | # Attempt to set oom_score_adj for buildslave to prevent 18 | # it from being targeted by the oom-killer 19 | if test -f "$BB_DIR/twistd.pid"; then 20 | pid=$(cat "$BB_DIR/twistd.pid") 21 | if test -f "/proc/${pid}/oom_score_adj"; then 22 | sudo -E echo -1000 > /proc/${pid}/oom_score_adj 23 | fi 24 | fi 25 | 26 | # Create a TEST file which includes parameters which may appear in a top 27 | # level TEST file or the most recent git commit message. 28 | rm -f $TEST_FILE 29 | 30 | if test -d "$ZFS_BUILD_DIR"; then 31 | cd "$ZFS_BUILD_DIR" 32 | 33 | if test -f TEST; then 34 | cat TEST >>$TEST_FILE 35 | fi 36 | 37 | git log -1 | sed "s/^ *//g" | grep ^TEST_ >>$TEST_FILE 38 | cd "$TEST_DIR" 39 | fi 40 | 41 | cat << EOF >> $TEST_FILE 42 | 43 | ### 44 | # 45 | # Additional environment variables for use by bb-test-* scripts. 46 | # 47 | ZFS_BUILD_DIR=$ZFS_BUILD_DIR 48 | TEST_DIR=$TEST_DIR 49 | TEST_METHOD=$TEST_METHOD 50 | 51 | EOF 52 | 53 | # Add environment variables for "packages" or "in-tree" testing. 
54 | TEST_METHOD=${TEST_METHOD:-"packages"} 55 | case "$TEST_METHOD" in 56 | packages|kmod|pkg-kmod|dkms|dkms-kmod|system) 57 | cat << EOF >> $TEST_FILE 58 | ZPOOL=${ZPOOL:-"zpool"} 59 | ZFS=${ZFS:-"zfs"} 60 | 61 | ZFS_SH=${ZFS_SH:-"zfs.sh"} 62 | ZFS_TESTS_SH=${ZFS_TESTS_SH:-"zfs-tests.sh"} 63 | ZLOOP_SH=${ZLOOP_SH:-"zloop.sh"} 64 | EOF 65 | ;; 66 | in-tree) 67 | cat << EOF >> $TEST_FILE 68 | ZPOOL=${ZPOOL:-"\$ZFS_BUILD_DIR/bin/zpool"} 69 | ZFS=${ZFS:-"\$ZFS_BUILD_DIR/bin/zfs"} 70 | 71 | ZFS_SH=${ZFS_SH:-"\$ZFS_BUILD_DIR/scripts/zfs.sh"} 72 | ZFS_TESTS_SH=${ZFS_TESTS_SH:-"\$ZFS_BUILD_DIR/scripts/zfs-tests.sh"} 73 | ZLOOP_SH=${ZLOOP_SH:-"\$ZFS_BUILD_DIR/scripts/zloop.sh"} 74 | EOF 75 | ;; 76 | *) 77 | cat << EOF >> $TEST_FILE 78 | echo "Unknown TEST_METHOD: $TEST_METHOD" 79 | exit 1 80 | EOF 81 | ;; 82 | esac 83 | 84 | # Uncomment when abreviated test runs are needed. 85 | #cat << EOF >> $TEST_FILE 86 | #TEST_ZTEST_SKIP="no" 87 | #TEST_ZFSTESTS_SKIP="no" 88 | #TEST_PTS_SKIP="no" 89 | # 90 | #case "$BB_MODE" in 91 | #TEST) 92 | # TEST_ZTEST_TIMEOUT=60 93 | # TEST_ZFSTESTS_TAGS="checksum" 94 | # ;; 95 | #PERF) 96 | # TEST_ZFSTESTS_PERF_RUNTIME=5 97 | # TEST_ZFSTESTS_DISKSIZE=32G 98 | # TEST_PTS_BENCHMARKS="zfs/unpack-linux" 99 | # ;; 100 | #esac 101 | #EOF 102 | 103 | . $TEST_FILE 104 | 105 | set -x 106 | 107 | # Preserve the results directory for future analysis. The contents 108 | # of this directory will be uploaded the the build master after all 109 | # of the requested tests have completed. 110 | # //*/*.tar.xz 111 | mkdir -p "${UPLOAD_DIR}" 112 | 113 | if test -n "$UPLOAD_DIR"; then 114 | BUILDER="$(echo $BB_NAME | cut -f1-3 -d'-')" 115 | mkdir -p "$UPLOAD_DIR/$BUILDER" 116 | fi 117 | 118 | # Start the Linux kernel watchdog so the system will panic in the case of a 119 | # lockup. This helps prevent one bad test run from stalling the builder. 
120 | TEST_PREPARE_WATCHDOG=${TEST_PREPARE_WATCHDOG:-"Yes"} 121 | if echo "$TEST_PREPARE_WATCHDOG" | grep -Eiq "^yes$|^on$|^true$|^1$"; then 122 | case "$BB_NAME" in 123 | Amazon*) 124 | sudo -E systemctl start watchdog 125 | ;; 126 | 127 | CentOS*) 128 | if cat /etc/redhat-release | grep -Eq "release 6."; then 129 | sudo -E /etc/init.d/watchdog start 130 | elif cat /etc/redhat-release | grep -Eq "release [7|8|9]"; then 131 | sudo -E systemctl start watchdog 132 | fi 133 | ;; 134 | 135 | Debian*) 136 | sudo -E systemctl start watchdog 137 | ;; 138 | 139 | Fedora*) 140 | sudo -E systemctl start watchdog 141 | ;; 142 | 143 | FreeBSD*) 144 | sudo -E service watchdogd onestart 145 | ;; 146 | 147 | Ubuntu*) 148 | sudo -E apt-get install watchdog 149 | sudo -E service watchdog start 150 | ;; 151 | 152 | *) 153 | echo "$BB_NAME unknown platform" 154 | ;; 155 | esac 156 | fi 157 | 158 | # Start both NFS and Samba servers, needed by the ZFS Test Suite to run 159 | # zfs_share and zfs_unshare scripts. 
160 | TEST_PREPARE_SHARES=${TEST_PREPARE_SHARES:-"Yes"} 161 | if echo "$TEST_PREPARE_SHARES" | grep -Eiq "^yes$|^on$|^true$|^1$"; then 162 | case "$BB_NAME" in 163 | Amazon*) 164 | sudo -E systemctl start nfs-server 165 | sudo -E systemctl start smb 166 | ;; 167 | 168 | CentOS*) 169 | if cat /etc/redhat-release | grep -Eq "release 6."; then 170 | sudo -E /etc/init.d/rpcbind start 171 | sudo -E /etc/init.d/nfs start 172 | sudo -E /etc/init.d/smb start 173 | elif cat /etc/redhat-release | grep -Eq "release [7|8|9]"; then 174 | sudo -E systemctl start nfs-server 175 | sudo -E systemctl start smb 176 | fi 177 | ;; 178 | 179 | Debian*) 180 | sudo -E systemctl start nfs-kernel-server 181 | sudo -E systemctl start samba 182 | ;; 183 | 184 | Fedora*) 185 | sudo -E systemctl start nfs-server 186 | sudo -E systemctl start smb 187 | ;; 188 | 189 | FreeBSD*) 190 | sudo -E touch /etc/zfs/exports 191 | sudo -E sysrc mountd_flags="/etc/zfs/exports" 192 | sudo -E service nfsd onestart 193 | echo '[global]' | sudo -E tee /usr/local/etc/smb4.conf >/dev/null 194 | sudo -E service samba_server onestart 195 | ;; 196 | 197 | Ubuntu*) 198 | sudo -E service nfs-kernel-server start 199 | sudo -E service smbd start 200 | ;; 201 | 202 | *) 203 | echo "$BB_NAME unknown platform" 204 | ;; 205 | esac 206 | fi 207 | 208 | # Ensure the default zfs-zed daemon is disabled during testing since 209 | # it may interfere with some of the ZTS test cases. 210 | case "$BB_NAME" in 211 | Amazon*|CentOS*|Debian*|Fedora*|Ubuntu*) 212 | sudo -E systemctl stop zfs-zed 213 | ;; 214 | FreeBSD*) 215 | ;; 216 | *) 217 | echo "$BB_NAME unknown platform" 218 | ;; 219 | esac 220 | 221 | # Latent slaves, which set BB_SHUTDOWN="Yes" in /etc/buildslave when 222 | # bootstrapping should be automatically shutdown after 8 hours. This 223 | # is done to ensure if the buildmaster terminates unexpectedly any 224 | # running latent slaves will terminate in a reasonable amount of time. 
225 | # 226 | # Due to shutdowns not working reliably in CentOS 6 and Amazon they are 227 | # excluded from the scheduled shutdown. The coverage builder is allowed 228 | # 16 hours because the required debug kernel reduces overall performance. 229 | if echo "$BB_SHUTDOWN" | grep -Eiq "^yes$|^on$|^true$|^1$"; then 230 | case "$BB_NAME" in 231 | Amazon*|CentOS-6*) 232 | echo "Skipping scheduled shutdown" 233 | ;; 234 | *coverage*) 235 | echo "Scheduling shutdown" 236 | sudo -E shutdown +960 237 | ;; 238 | FreeBSD*) 239 | ;; 240 | *) 241 | echo "Scheduling shutdown" 242 | sudo -E shutdown +480 243 | ;; 244 | esac 245 | fi 246 | 247 | # Log mounted filesystems and available free space. 248 | df -h 249 | 250 | # Unload modules just in case they are still loaded from a previous test 251 | if [ -x $ZFS_SH ]; then 252 | sudo -E $ZFS_SH -vu 253 | fi 254 | 255 | exit 0 256 | -------------------------------------------------------------------------------- /scripts/openzfs-merge.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script tries to merge OpenZFS commits to ZoL. 4 | # 5 | # Instruction: 6 | # 7 | # Repository setup must be similar with openzfs-tracking.sh 8 | # requirements. 9 | # 10 | # Repository setup for valid compilation check: 11 | # ./autogen.sh 12 | # ./configure --enable-debug 13 | # 14 | # mandatory git settings: 15 | # [merge] 16 | # renameLimit = 999999 17 | # [user] 18 | # email = mail@gmelikov.ru 19 | # name = George Melikov 20 | # 21 | # Copyright (c) 2016 George Melikov. All rights reserved. 22 | # 23 | 24 | SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 25 | 26 | REPOSITORY_PATH='.' 
27 | TMP_FILE='/tmp/gittmpmessage.txt' 28 | 29 | # list with potential OpenZFS commits 30 | UNPORTED_COMMITS_FILE=$SCRIPTDIR'/hashes.txt' 31 | 32 | # Next files will generate automatically 33 | # list with commits git can't merge automatically 34 | EXCEPTIONS_GIT=$SCRIPTDIR'/exceptions.txt' 35 | # list with commits which can't be compiled without errors 36 | EXCEPTIONS_COMPILE=$SCRIPTDIR'/uncompiled.txt' 37 | # list with commits which has cstyle error 38 | EXCEPTIONS_CSTYLE=$SCRIPTDIR'/uncstyled.txt' 39 | # list with merged 40 | EXCEPTIONS_MERGED=$SCRIPTDIR'/merged.txt' 41 | 42 | LGREEN='\033[1;32m' # ${LGREEN} 43 | LRED='\033[1;31m' # ${LRED} 44 | NORMAL='\033[0m' # ${NORMAL} 45 | COUNT_MERGED=0 46 | LIST_MERGED= 47 | COUNT_COMPILE=0 48 | LIST_COMPILE= 49 | COUNT_CSTYLE=0 50 | LIST_CSTYLE= 51 | PUSH_NEW_BRANCH=0 52 | FAIL_STYLE=0 53 | 54 | 55 | usage() { 56 | cat << EOF 57 | USAGE: 58 | $0 [-hp] [-d directory] [-i commits.txt] [-c commit hash] [-g commit hash] 59 | 60 | DESCRIPTION: 61 | Auto merge OpenZFS commits to ZFS on Linux git 62 | repositories. 
63 | Result - git branch with name 'autoport-oz#issue' 64 | 65 | OPTIONS: 66 | -h Show this message 67 | -d directory Git repo with openzfs and zfsonlinux remotes 68 | -i commits.txt File with OpenZFS commit hashes to merge 69 | (one hash per row) 70 | -c commit hash Prepare branch and try to merge this commit by hash, 71 | leaves branch for manual merge if not successfull 72 | -g commit hash Generate commit description for existing commit in 73 | branch generated by -i parameter 74 | -p force push new branch to Github 75 | -s Fail if style checks do not succeed after merge 76 | 77 | EOF 78 | } 79 | 80 | clean_unmerged() { 81 | git cherry-pick --abort 82 | git checkout master 83 | git branch -D "autoport-oz$OPENZFS_ISSUE" > /dev/null 2>&1 84 | rm -f "$TMP_FILE" 85 | } 86 | 87 | prepare_git() { 88 | cd "$REPOSITORY_PATH" 89 | rm -f "$TMP_FILE" 90 | git checkout master 91 | git fetch --all 92 | git rebase zfsonlinux/master 93 | git log --remotes=openzfs/master --format=%B -n 1 $OPENZFS_COMMIT > "$TMP_FILE" 94 | OPENZFS_ISSUE=$(grep -oP '^[^0-9]*\K[0-9]+' -m 1 "$TMP_FILE") 95 | } 96 | 97 | push_to_github() { 98 | if [ $PUSH_NEW_BRANCH -ne 0 ]; then 99 | git push origin autoport-oz$OPENZFS_ISSUE -f 100 | fi 101 | } 102 | 103 | generate_desc() { 104 | USER_NAME=$(git config user.name) 105 | USER_MAIL=$(git config user.email) 106 | OPENZFS_COMMIT_AUTHOR=$(git log --format="%aN <%aE>" --remotes=openzfs/master -n 1 $OPENZFS_COMMIT) 107 | sed -i '/^$/d' "$TMP_FILE" 108 | 109 | # handle github keywords 110 | sed -i '/^closes #\|^close #\|^closed #/Id' "$TMP_FILE" 111 | sed -i '/^fixes #\|^fix #\|^fixed #/Id' "$TMP_FILE" 112 | sed -i '/^resolves #\|^resolve #\|^resolved #/Id' "$TMP_FILE" 113 | 114 | sed -i "1s/^$OPENZFS_ISSUE/OpenZFS $OPENZFS_ISSUE -/" "$TMP_FILE" 115 | sed -i "1 a Authored by: $OPENZFS_COMMIT_AUTHOR" "$TMP_FILE" 116 | sed -i -e '1a\\' "$TMP_FILE" 117 | 118 | echo 'Ported-by: '$USER_NAME' <'$USER_MAIL'>' >> "$TMP_FILE" 119 | echo '' >> "$TMP_FILE" 120 | echo 
'OpenZFS-issue: https://www.illumos.org/issues/'$OPENZFS_ISSUE >> "$TMP_FILE" 121 | echo 'OpenZFS-commit: https://github.com/openzfs/openzfs/commit/'$OPENZFS_COMMIT >> "$TMP_FILE" 122 | } 123 | 124 | #add description to commit 125 | add_desc_to_commit() { 126 | git commit --amend -F "$TMP_FILE" 127 | } 128 | 129 | # perform cherry-pick of patch 130 | cherry-pick() { 131 | prepare_git 132 | 133 | echo -e "${LGREEN}OpenZFS Issue #$OPENZFS_ISSUE ($OPENZFS_COMMIT)${NORMAL}" 134 | echo -e "${LGREEN}Checkout new branch${NORMAL}" 135 | git branch -D "autoport-oz$OPENZFS_ISSUE" > /dev/null 2>&1 136 | git checkout -b "autoport-oz$OPENZFS_ISSUE" 137 | 138 | echo -e "${LGREEN}Performing cherry-pick of ${OPENZFS_COMMIT}${NORMAL}" 139 | if ! git cherry-pick $OPENZFS_COMMIT; then 140 | printf 'cherry-pick failed\n' >&2 141 | echo $OPENZFS_COMMIT >> "$EXCEPTIONS_GIT" 142 | return 1 143 | fi 144 | 145 | return 0 146 | } 147 | 148 | merge() { 149 | ERR=0 150 | 151 | if ! cherry-pick ; then 152 | return 1 153 | fi 154 | 155 | echo -e "${LGREEN}compile... ${NORMAL}" 156 | if ! make -s -j$(nproc); then 157 | printf 'compilation failed\n' >&2 158 | echo $OPENZFS_COMMIT >> "$EXCEPTIONS_COMPILE" 159 | COUNT_COMPILE=$(($COUNT_COMPILE+1)) 160 | LIST_COMPILE="$LIST_COMPILE 161 | autoport-oz$OPENZFS_ISSUE" 162 | ERR=1 163 | fi 164 | 165 | echo -e "${LGREEN}cstyle... ${NORMAL}" 166 | if ! 
make cstyle; then 167 | printf 'style check failed\n' >&2 168 | echo $OPENZFS_COMMIT >> "$EXCEPTIONS_CSTYLE" 169 | COUNT_CSTYLE=$(($COUNT_CSTYLE+1)) 170 | LIST_CSTYLE="$LIST_CSTYLE 171 | autoport-oz$OPENZFS_ISSUE" 172 | if [ $FAIL_STYLE -ne 0 ]; then 173 | ERR=1 174 | fi 175 | fi 176 | 177 | generate_desc 178 | add_desc_to_commit 179 | 180 | if [ "$ERR" -eq "0" ]; then 181 | push_to_github 182 | echo $OPENZFS_COMMIT >> $EXCEPTIONS_MERGED 183 | echo -e "${LGREEN}$OPENZFS_COMMIT merged without warnings${NORMAL}" 184 | COUNT_MERGED=$(($COUNT_MERGED+1)) 185 | LIST_MERGED="$LIST_MERGED 186 | autoport-oz$OPENZFS_ISSUE" 187 | fi 188 | 189 | return 0 190 | } 191 | 192 | iterate_merge() { 193 | while read p; do 194 | OPENZFS_COMMIT=$p 195 | 196 | #if commit wasn't tried earlier 197 | EXCEPTION=$(grep -s -E "^$OPENZFS_COMMIT" "$EXCEPTIONS_GIT" \ 198 | "$EXCEPTIONS_COMPILE" "$EXCEPTIONS_CSTYLE" \ 199 | "$EXCEPTIONS_MERGED") 200 | 201 | if [ -n "$EXCEPTION" ]; then 202 | continue 203 | fi 204 | 205 | if ! merge ; then 206 | clean_unmerged 207 | fi 208 | done <$UNPORTED_COMMITS_FILE 209 | } 210 | 211 | prepare_manual() { 212 | if ! 
cherry-pick ; then 213 | echo -e "${LRED}$OPENZFS_COMMIT has merge conflicts${NORMAL}" 214 | return 1 215 | fi 216 | 217 | generate_desc 218 | add_desc_to_commit 219 | push_to_github 220 | 221 | echo -e "${LGREEN}$OPENZFS_COMMIT cherry-pick was successful${NORMAL}" 222 | return 0 223 | } 224 | 225 | while getopts 'hpd:i:c:g:s' OPTION; do 226 | case $OPTION in 227 | h) 228 | usage 229 | exit 1 230 | ;; 231 | d) 232 | REPOSITORY_PATH="$OPTARG" 233 | ;; 234 | i) 235 | UNPORTED_COMMITS_FILE=$OPTARG 236 | ;; 237 | c) 238 | OPENZFS_COMMIT=$OPTARG 239 | ;; 240 | p) 241 | PUSH_NEW_BRANCH=1 242 | ;; 243 | g) 244 | OPENZFS_COMMIT=$OPTARG 245 | prepare_git 246 | git checkout "autoport-oz$OPENZFS_ISSUE" 247 | generate_desc 248 | add_desc_to_commit 249 | push_to_github 250 | exit 0 251 | ;; 252 | s) 253 | FAIL_STYLE=1 254 | ;; 255 | esac 256 | done 257 | 258 | # process the single commit if it was provided 259 | if [ -n "$OPENZFS_COMMIT" ]; then 260 | if ! prepare_manual ; then 261 | exit 1 262 | fi 263 | 264 | exit 0 265 | fi 266 | 267 | iterate_merge 268 | 269 | rm -f "$TMP_FILE" 270 | 271 | #show results 272 | echo ' ' 273 | if [ "$COUNT_MERGED" -gt "0" ]; then 274 | echo -e "${LGREEN}$COUNT_MERGED successfully merged commits:${NORMAL}" 275 | echo $LIST_MERGED 276 | fi 277 | if [ "$COUNT_COMPILE" -gt "0" ]; then 278 | echo -e "${LGREEN}$COUNT_COMPILE commits with compile errors:${NORMAL}" 279 | echo $LIST_COMPILE 280 | fi 281 | if [ "$COUNT_CSTYLE" -gt "0" ]; then 282 | echo -e "${LGREEN}$COUNT_CSTYLE commits with cstyle warnings:${NORMAL}" 283 | echo $LIST_CSTYLE 284 | fi 285 | -------------------------------------------------------------------------------- /scripts/bb-test-cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if test -f /etc/buildslave; then 4 | . /etc/buildslave 5 | fi 6 | 7 | if test -f ./TEST; then 8 | . 
./TEST 9 | else 10 | echo "Missing $PWD/TEST configuration file" 11 | exit 1 12 | fi 13 | 14 | WORKDIR=$(readlink -f .) 15 | GCOV_KERNEL="/sys/kernel/debug/gcov" 16 | ZFS_BUILD=$(readlink -f ../zfs) 17 | 18 | if $(sudo -E test ! -e "$GCOV_KERNEL"); then 19 | echo "Kernel Gcov disabled. Skipping test cleanup." 20 | exit 3 21 | fi 22 | 23 | if [ -z "$CODECOV_TOKEN" -o \ 24 | -z "$BUILDER_NAME" -o \ 25 | -z "$BUILD_NUMBER" -o \ 26 | -z "$ZFS_REVISION" -o \ 27 | -z "$BASE_BRANCH" ]; then 28 | echo "Missing a required environment variable." 29 | exit 1 30 | fi 31 | 32 | urlencode () 33 | { 34 | python -c "import urllib; print(urllib.quote('$1'));" 35 | } 36 | 37 | echo_build_url () 38 | { 39 | local NAME=$(urlencode "$BUILDER_NAME") 40 | local NUMBER=$(urlencode "$BUILD_NUMBER") 41 | echo "http://build.zfsonlinux.org/builders/$NAME/builds/$NUMBER" 42 | } 43 | 44 | upload_codecov_report_with_flag () 45 | { 46 | local PR_OR_BRANCH_OPT 47 | 48 | if [ -z "$1" ]; then 49 | echo "Can't upload Codecov report without a flag." 50 | exit 1 51 | fi 52 | 53 | # 54 | # When a PR number is specified, we prioritized that value, and 55 | # use the PR number as the branch name. If we don't specify a branch 56 | # name at all, the Codecov uploader script will automatically detect 57 | # a branch name to use when performing the upload (and what it 58 | # auto-detects is incorrect). Codecov support suggested we use 59 | # "pull/N" as the branch name for pull requests, which is what we're 60 | # doing here. 61 | # 62 | # When uploading a coverage report for a commit on an actual branch 63 | # of the main project repository, the PR_NUMBER should be the empty 64 | # string (since it's not a PR commit). Thus, when PR_NUMBER is 65 | # empty, we specify the branch only, when uploading the report; this 66 | # way, the commit is properly associated with the correct branch in 67 | # the Codecov UI. 
68 | # 69 | if [ -n "$PR_NUMBER" ]; then 70 | PR_OR_BRANCH_OPT="-B pull/$PR_NUMBER -P $PR_NUMBER" 71 | elif [ -n "$BASE_BRANCH" ]; then 72 | PR_OR_BRANCH_OPT="-B $BASE_BRANCH" 73 | else 74 | # 75 | # We should have already caught this error using the environment 76 | # variable checking at the start of this cript, but it doesn't 77 | # hurt to double check that assumption. 78 | # 79 | echo "Missing PR_NUMBER or BASE_BRANCH environment variables." 80 | exit 1 81 | fi 82 | 83 | # 84 | # This configures the directory name that will be used for the HTML 85 | # report that will be generated. Since we'll create multiple 86 | # different reports, and we want to capture and log all of them 87 | # via Buildbot, we use a different directory name for each one. 88 | # 89 | export CODE_COVERAGE_OUTPUT_DIRECTORY="coverage-$1" 90 | 91 | make V=1 code-coverage-capture 92 | curl -s https://codecov.io/bash | bash -s - \ 93 | -c -Z -X gcov -X py -X xcode \ 94 | -n "$BUILDER_NAME" \ 95 | -b "$BUILD_NUMBER" \ 96 | -C "$ZFS_REVISION" \ 97 | -F "$1" \ 98 | $PR_OR_BRANCH_OPT 99 | 100 | # 101 | # The compressed tarball will be collected and stored on the 102 | # Buildbot "master". To try and reduce the amount of storage 103 | # required to store these coverage reports, we do our best to 104 | # compress the tarball as much possible. We measured xz to 105 | # result in a file that's roughly half the size when compared 106 | # to the output when using gzip; no other algorithms were tested. 107 | # 108 | # Additionally, this function is run from inside the "zfs" build 109 | # directory, so we must specify the full path to the output tarball 110 | # such that it can be easily found and consumed by Buildbot (i.e. 111 | # Buildbot will look for these files in $WORKDIR, not in the "zfs" 112 | # build directory). 
113 | # 114 | if [ -n "$TEST_CODE_COVERAGE_HTML" ]; then 115 | tar -cf - "$CODE_COVERAGE_OUTPUT_DIRECTORY" | \ 116 | xz -9e > "${WORKDIR}/${CODE_COVERAGE_OUTPUT_DIRECTORY}.tar.xz" 117 | fi 118 | } 119 | 120 | copy_kernel_gcov_data_files () 121 | { 122 | # Allow access to gcov files as a non-root user 123 | sudo chmod -R a+rx /sys/kernel/debug/gcov 124 | sudo chmod a+rx /sys/kernel/debug 125 | 126 | # 127 | # For the kernel modules, the ".gcda" and ".gcno" files will be 128 | # contained in the debugfs location specified by the $GCOV_KERNEL 129 | # variable, and then the path to the files will mimic the directory 130 | # structure used when building the modules. 131 | # 132 | # We're copying these gcov data files files out of the debugfs 133 | # directory, and into the $ZFS_BUILD directory; this way the gcov 134 | # data files for the kernel modules will be in the build directory 135 | # just like they are for the userspace files. 136 | # 137 | # By doing this, we don't have to differentiate between userspace 138 | # files and kernel module files when generating the gcov reports. 139 | # 140 | # It's important to note that the ".gcno" files will already be 141 | # contained in the build directory, but sometimes the files will be 142 | # prefixed with ".tmp_". Thus, we have to be careful when copying 143 | # these into the build directory, such that: 144 | # 145 | # - If the ".gcno" files *do not* have the ".tmp_" prefix, the 146 | # original files (already in the build directory) will not be 147 | # replaced. 148 | # 149 | # - If the ".gcno" files *do* have the ".tmp_" prefix, we'll copy 150 | # the ".gcno" symlinks contained in the debugfs directory into 151 | # the build directory. These symlinks will then point to the 152 | # original files with the ".tmp_" prefix. 153 | # 154 | dirname="$PWD" 155 | cd "$GCOV_KERNEL$ZFS_BUILD" >/dev/null 156 | find . -name "*.gcda" -exec sh -c 'cp -v $0 '$ZFS_BUILD'/$0' {} \; 157 | find . 
-name "*.gcno" -exec sh -c 'cp -vdn $0 '$ZFS_BUILD'/$0' {} \; 158 | cd "$dirname" 159 | } 160 | 161 | # 162 | # This variable is used when we upload the Codecov report, and allows 163 | # the Codecov UI to link back to the buildbot build. 164 | # 165 | export CI_BUILD_URL=$(echo_build_url) 166 | 167 | # 168 | # We explicitly disable branch coverage (unless the default is 169 | # overridden via the TEST_* variable) because it causes almost all 170 | # ASSERT and VERIFY statements to result in partial hits, which is 171 | # distracting when viewing the code coverage via Codecov's UI. Until we 172 | # can devise a way to prevent these partial hits for ASSERT and VERIFY 173 | # statements, we've decided to simply turn off branch coverage all 174 | # together. 175 | # 176 | export CODE_COVERAGE_BRANCH_COVERAGE=${TEST_CODE_COVERAGE_BRANCH_COVERAGE:-0} 177 | 178 | set -x 179 | cd "${ZFS_BUILD}" 180 | 181 | upload_codecov_report_with_flag "user" 182 | 183 | # 184 | # Now that we've uploaded the coverage report for the user execution 185 | # (e.g. userspace commands, libraries, etc), we need to upload the 186 | # report for the kernel execution. We remove all the user execution 187 | # ".gcda" files and replace them with the kernel equivalents. This way, 188 | # we can simply use the "make" target to generate the coverage report 189 | # for the kernel files just like we did for the userspace files. 190 | # 191 | # Note, we cannot use the "code-coverage-clean" make target here, 192 | # because that would remove the ".gcno" files that we need when 193 | # generating the kernel coverage report. 194 | # 195 | find . 
-name '*.gcda' -delete 196 | copy_kernel_gcov_data_files 197 | 198 | upload_codecov_report_with_flag "kernel" 199 | 200 | exit 0 201 | -------------------------------------------------------------------------------- /master/patches/0016-Add-support-for-block-devices-to-EC2LatentBuildSlave.patch: -------------------------------------------------------------------------------- 1 | From ae680ca8b78c6fbc3fc7ed046f831bb54a922578 Mon Sep 17 00:00:00 2001 2 | From: Neal Gompa 3 | Date: Tue, 3 Apr 2018 17:39:52 -0400 4 | Subject: [PATCH 16/18] Add support for block devices to EC2LatentBuildSlave 5 | 6 | Partially adapted from 8b67f91b50d72979ff620413dc4169d277b519df in buildbot 0.9.x, 7 | originally authored by Ryan Sydnor . 8 | 9 | Signed-off-by: Neal Gompa 10 | --- 11 | master/buildbot/buildslave/ec2.py | 25 ++++++++++++-- 12 | master/buildbot/test/unit/test_buildslave_ec2.py | 42 ++++++++++++++++++++++++ 13 | master/docs/manual/cfg-buildslaves.rst | 23 +++++++++++++ 14 | 3 files changed, 87 insertions(+), 3 deletions(-) 15 | 16 | diff --git a/master/buildbot/buildslave/ec2.py b/master/buildbot/buildslave/ec2.py 17 | index dd111e283..bbf5d4b88 100644 18 | --- a/master/buildbot/buildslave/ec2.py 19 | +++ b/master/buildbot/buildslave/ec2.py 20 | @@ -29,6 +29,8 @@ import boto 21 | import boto.ec2 22 | import boto.exception 23 | 24 | +from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping 25 | + 26 | from twisted.internet import defer 27 | from twisted.internet import threads 28 | from twisted.python import log 29 | @@ -68,7 +70,7 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 30 | build_wait_timeout=60 * 10, properties={}, locks=None, 31 | spot_instance=False, max_spot_price=1.6, volumes=[], 32 | placement=None, price_multiplier=1.2, tags={}, 33 | - delete_vol_term=True, create_volumes=[]): 34 | + delete_vol_term=True, create_volumes=[], block_device_map=None): 35 | 36 | AbstractLatentBuildSlave.__init__( 37 | self, name, password, max_builds, 
notify_on_missing, 38 | @@ -231,6 +233,21 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 39 | self.security_group_ids = security_group_ids 40 | self.classic_security_groups = [self.security_name] if self.security_name else None 41 | self.tags = tags 42 | + self.block_device_map = self.create_block_device_mapping(block_device_map) 43 | + 44 | + def create_block_device_mapping(self, mapping_definitions): 45 | + if not mapping_definitions: 46 | + return None 47 | + 48 | + result = BlockDeviceMapping() 49 | + for device_name, device_properties in mapping_definitions.iteritems(): 50 | + modified_device_properties = dict(device_properties) 51 | + # Since latent slaves are ephemeral, not leaking volumes on termination 52 | + # is a much safer default. 53 | + if 'delete_on_termination' not in modified_device_properties: 54 | + modified_device_properties['delete_on_termination'] = True 55 | + result[device_name] = BlockDeviceType(**modified_device_properties) 56 | + return result 57 | 58 | def get_image(self): 59 | if self.image is not None: 60 | @@ -299,7 +316,8 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 61 | key_name=self.keypair_name, security_groups=self.classic_security_groups, 62 | instance_type=self.instance_type, user_data=self.user_data, 63 | placement=self.placement, subnet_id=self.subnet_id, 64 | - security_group_ids=self.security_group_ids) 65 | + security_group_ids=self.security_group_ids, 66 | + block_device_map=self.block_device_map) 67 | self.instance = reservation.instances[0] 68 | instance_id, image_id, start_time = self._wait_for_instance( 69 | reservation) 70 | @@ -457,7 +475,8 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 71 | user_data=self.user_data, 72 | placement=self.placement, 73 | subnet_id=self.subnet_id, 74 | - security_group_ids=self.security_group_ids) 75 | + security_group_ids=self.security_group_ids, 76 | + block_device_map=self.block_device_map) 77 | request = self._wait_for_request(reservations[0]) 78 | 
instance_id = request.instance_id 79 | reservations = self.conn.get_all_instances(instance_ids=[instance_id]) 80 | diff --git a/master/buildbot/test/unit/test_buildslave_ec2.py b/master/buildbot/test/unit/test_buildslave_ec2.py 81 | index b8874dd4f..09b073872 100644 82 | --- a/master/buildbot/test/unit/test_buildslave_ec2.py 83 | +++ b/master/buildbot/test/unit/test_buildslave_ec2.py 84 | @@ -150,6 +150,48 @@ class TestEC2LatentBuildSlave(unittest.TestCase): 85 | self.assertEqual(instances[0].id, instance_id) 86 | self.assertEqual(instances[0].tags, {}) 87 | 88 | + @mock_ec2 89 | + def test_start_instance_volumes(self): 90 | + c = self.botoSetup() 91 | + amis = c.get_all_images() 92 | + bs = ec2.EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large', 93 | + identifier='publickey', 94 | + secret_identifier='privatekey', 95 | + ami=amis[0].id, 96 | + block_device_map={ 97 | + "/dev/xvdb": { 98 | + "volume_type": "io1", 99 | + "iops": 10, 100 | + "size": 20, 101 | + }, 102 | + "/dev/xvdc": { 103 | + "volume_type": "gp2", 104 | + "size": 30, 105 | + "delete_on_termination": False 106 | + } 107 | + } 108 | + ) 109 | + 110 | + # moto does not currently map volumes properly. below ensures 111 | + # that my conversion code properly composes it, including 112 | + # delete_on_termination default. 
113 | + from boto.ec2.blockdevicemapping import BlockDeviceType 114 | + self.assertEqual(set(['/dev/xvdb', '/dev/xvdc']), set(bs.block_device_map.keys())) 115 | + 116 | + def assertBlockDeviceEqual(a, b): 117 | + self.assertEqual(a.volume_type, b.volume_type) 118 | + self.assertEqual(a.iops, b.iops) 119 | + self.assertEqual(a.size, b.size) 120 | + self.assertEqual(a.delete_on_termination, b.delete_on_termination) 121 | + 122 | + assertBlockDeviceEqual( 123 | + BlockDeviceType(volume_type='io1', iops=10, size=20, delete_on_termination=True), 124 | + bs.block_device_map['/dev/xvdb']) 125 | + 126 | + assertBlockDeviceEqual( 127 | + BlockDeviceType(volume_type='gp2', size=30, delete_on_termination=False), 128 | + bs.block_device_map['/dev/xvdc']) 129 | + 130 | @mock_ec2 131 | def test_start_instance_tags(self): 132 | c = self.botoSetup() 133 | diff --git a/master/docs/manual/cfg-buildslaves.rst b/master/docs/manual/cfg-buildslaves.rst 134 | index cfc7dbc3d..2ead58acc 100644 135 | --- a/master/docs/manual/cfg-buildslaves.rst 136 | +++ b/master/docs/manual/cfg-buildslaves.rst 137 | @@ -348,6 +348,29 @@ The ``missing_timeout`` and ``notify_on_missing`` specify how long to wait for a 138 | ``keypair_name`` and ``security_name`` allow you to specify different names for these AWS EC2 values. 139 | They both default to ``latent_buildbot_slave``. 140 | 141 | +If you want to attach new ephemeral volumes, use the the block_device_map attribute. 142 | +This follows the BlockDeviceMap configuration of boto almost exactly, essentially acting as a passthrough. 143 | +The only distinction is that the volumes default to deleting on termination to avoid leaking volume resources when slaves are terminated. 144 | +See boto documentation for further details. 
145 | + 146 | +:: 147 | + 148 | + from buildbot.plugins import buildslave 149 | + c['slaves'] = [ 150 | + buildslave.EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large', 151 | + ami='ami-12345', 152 | + block_device_map= { 153 | + "/dev/xvdb" : { 154 | + "volume_type": "io1", 155 | + "iops": 1000, 156 | + "size": 100 157 | + } 158 | + } 159 | + ) 160 | + ] 161 | + 162 | + 163 | + 164 | VPC Support 165 | ############## 166 | 167 | -- 168 | 2.14.3 169 | 170 | -------------------------------------------------------------------------------- /scripts/openzfs-tracking.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script will generate markdown describing which OpenZFS have 4 | # been applied to ZFS on Linux and which still must be ported. The 5 | # script must be run in a git repository with the following remotes. 6 | # 7 | # zfsonlinux https://github.com/zfsonlinux/zfs.git 8 | # openzfs https://github.com/openzfs/openzfs.git 9 | # 10 | # Initial Setup: 11 | # 12 | # mkdir openzfs-tracking 13 | # cd openzfs-tracking 14 | # git clone -o zfsonlinux https://github.com/zfsonlinux/zfs.git 15 | # cd zfs 16 | # git remote add openzfs https://github.com/openzfs/openzfs.git 17 | # 18 | # Exceptions file format: 19 | # ---|---|--- 20 | # |/!/-| 21 | # , where 22 | # ! pending commit 23 | # - isn't applicable to Linux 24 | # ZoL commit 25 | # 26 | ZFSONLINUX_BRANCH="zfsonlinux/master" 27 | ZFSONLINUX_GIT="https://github.com/zfsonlinux/zfs/commit" 28 | ZFSONLINUX_DIR="." 29 | 30 | OPENZFS_BRANCH="openzfs/master" 31 | OPENZFS_HASH_START="1af68be" 32 | OPENZFS_HASH_END="HEAD" 33 | OPENZFS_URL="https://www.illumos.org/issues" 34 | OPENZFS_GIT="https://github.com/openzfs/openzfs/commit" 35 | 36 | # Only consider commits which modify one of the follow paths. 
37 | OPENZFS_PATHS=" \ 38 | usr/src/uts/common/fs/zfs/sys usr/src/uts/common/fs/zfs usr/src/cmd/zdb \ 39 | usr/src/cmd/zfs usr/src/cmd/zhack usr/src/cmd/zinject usr/src/cmd/zpool \ 40 | usr/src/cmd/zstreamdump usr/src/cmd/ztest usr/src/lib/libzfs \ 41 | usr/src/lib/libzfs_core usr/src/lib/libzpool usr/src/man/man1m/zdb.1m \ 42 | usr/src/man/man1m/zfs.1m usr/src/man/man1m/zpool.1m \ 43 | usr/src/man/man1m/zstreamdump.1m usr/src/common/zfs \ 44 | usr/src/test/zfs-tests usr/src/tools/scripts/cstyle.pl \ 45 | usr/src/common/nvpair usr/src/common/avl" 46 | 47 | NUMBER_REGEX='^[0-9]+$' 48 | DATE=$(date) 49 | 50 | STATUS_APPLIED_COLOR="#80ff00" 51 | STATUS_EXCEPTION_COLOR="#80ff00" 52 | STATUS_MISSING_COLOR="#ff9999" 53 | STATUS_PR_COLOR="#ffee3a" 54 | STATUS_NONAPPLICABLE_COLOR="#DDDDDD" 55 | STATUS_PENDING_COLOR="#ffa500" 56 | 57 | STATUS_APPLIED="st_appl" 58 | STATUS_EXCEPTION="st_exc" 59 | STATUS_MISSING="st_mis" 60 | STATUS_PR="st_pr" 61 | STATUS_NONAPPLICABLE="st_na" 62 | STATUS_PENDING="st_pa" 63 | 64 | STATUS_APPLIED_TEXT="Applied" 65 | STATUS_EXCEPTION_TEXT="Applied" 66 | STATUS_MISSING_TEXT="No existing pull request" 67 | STATUS_PR_TEXT="Pull request" 68 | STATUS_NONAPPLICABLE_TEXT="Not applicable to Linux" 69 | STATUS_PENDING_TEXT="Pending" 70 | 71 | usage() { 72 | cat << EOF 73 | USAGE: 74 | $0 [-h] [-d directory] [-e exceptions] 75 | 76 | DESCRIPTION: 77 | Dynamically generate HTML for the OpenZFS Commit Tracking page 78 | using the commit logs from both the OpenZFS and ZFS on Linux git 79 | repositories. 
80 | 81 | OPTIONS: 82 | -h Show this message 83 | -d directory Git repo with openzfs and zfsonlinux remotes 84 | -e exceptions Exception file (using ZoL wiki if not specified) 85 | -c file.txt Write OpenZFS unmerged commits' hashes to file, 86 | if specified (for openzfs-merge.sh) 87 | 88 | EXAMPLE: 89 | 90 | $0 -d ~/openzfs-tracking/zfs \\ 91 | >~/zfs-buildbot/master/public_html/openzfs-tracking.html 92 | 93 | EOF 94 | } 95 | 96 | while getopts 'hd:c:e:' OPTION; do 97 | case $OPTION in 98 | h) 99 | usage 100 | exit 1 101 | ;; 102 | d) 103 | ZFSONLINUX_DIR=$OPTARG 104 | ;; 105 | c) 106 | HASHES_FILE=$OPTARG 107 | ;; 108 | e) 109 | ZFSONLINUX_EXCEPTIONS=$OPTARG 110 | ;; 111 | esac 112 | done 113 | 114 | cat << EOF 115 | 116 | 117 | 118 | OpenZFS Tracking 119 | 120 | 121 | 122 | 123 | 124 | 125 | 128 | 129 | 130 | 131 | 163 | 164 | 204 | 205 | 206 | 207 |

OpenZFS Commit Tracking

208 |
209 | This page is updated regularly and shows a list of OpenZFS commits and their status in regard to the ZFS on Linux master branch. See wiki for more information about OpenZFS patches. 210 |
211 |
212 | 213 | 214 | 215 | 216 | 217 | 218 | 219 | 220 | 221 | 222 | 223 | EOF 224 | 225 | pushd $ZFSONLINUX_DIR >/dev/null 226 | ZFSONLINUX_PRS=$(curl -s https://api.github.com/repos/zfsonlinux/zfs/pulls) 227 | 228 | # Get all exceptions and comments 229 | if [ -z ${ZFSONLINUX_EXCEPTIONS+x} ]; then 230 | ZFSONLINUX_EXCEPTIONS=$(curl -s https://raw.githubusercontent.com/wiki/zfsonlinux/zfs/OpenZFS-exceptions.md | awk '/---|---|---/{y=1;next}y') 231 | else 232 | ZFSONLINUX_EXCEPTIONS=$(cat "$ZFSONLINUX_EXCEPTIONS" | awk '/---|---|---/{y=1;next}y') 233 | fi 234 | git fetch --all >/dev/null 235 | git log $OPENZFS_HASH_START..$OPENZFS_HASH_END --oneline $OPENZFS_BRANCH \ 236 | -- $OPENZFS_PATHS | while read LINE1; 237 | do 238 | OPENZFS_HASH=$(echo $LINE1 | cut -f1 -d' ') 239 | OPENZFS_ISSUE=$(echo $LINE1 | cut -f2 -d' ') 240 | OPENZFS_DESC=$(echo $LINE1 | cut -f3- -d' ' | \ 241 | sed 's#Reviewed.*##' | sed 's#Approved.*##') 242 | ZFSONLINUX_STATUS="" 243 | 244 | # Skip this commit of non-standard form. 245 | if ! [[ $OPENZFS_ISSUE =~ $NUMBER_REGEX ]]; then 246 | continue 247 | fi 248 | 249 | # Match issue against any open pull requests. 250 | ZFSONLINUX_PR=$(echo $ZFSONLINUX_PRS | jq -r ".[] | select(.title | \ 251 | contains(\"OpenZFS $OPENZFS_ISSUE \")) | { html_url: .html_url }" | \ 252 | grep html_url | cut -f2- -d':' | tr -d ' "') 253 | ZFSONLINUX_REGEX="^(openzfs|illumos)+.*[ #]+$OPENZFS_ISSUE([^0-9]|$)[ ,]+*.*" 254 | 255 | 256 | # Commit exceptions reference this Linux commit for an OpenZFS issue. 257 | EXCEPTION=$(echo "$ZFSONLINUX_EXCEPTIONS" | grep -E "^$OPENZFS_ISSUE[^0-9]") 258 | if [ -n "$EXCEPTION" ]; then 259 | EXCEPTION_HASH=$(echo $EXCEPTION | cut -f2 -d'|' | tr -d ' ') 260 | EXCEPTION_COMMENT=$(echo $EXCEPTION | cut -d'|' -f3-) 261 | if [ "$EXCEPTION_HASH" == "-" ]; then 262 | ZFSONLINUX_HASH="-" 263 | ZFSONLINUX_STATUS=$STATUS_NONAPPLICABLE 264 | ZFSONLINUX_STATUS_TEXT=$STATUS_NONAPPLICABLE_TEXT 265 | elif [ "$EXCEPTION_HASH" == "!" 
]; then 266 | if [ -n "$ZFSONLINUX_PR" ]; then 267 | ZFSONLINUX_ISSUE=$(basename $ZFSONLINUX_PR) 268 | ZFSONLINUX_HASH="PR-$ZFSONLINUX_ISSUE" 269 | else 270 | ZFSONLINUX_HASH="!" 271 | fi 272 | ZFSONLINUX_STATUS=$STATUS_PENDING 273 | ZFSONLINUX_STATUS_TEXT=$STATUS_PENDING_TEXT 274 | elif [ -n "$EXCEPTION_HASH" ]; then 275 | ZFSONLINUX_HASH="$EXCEPTION_HASH" 276 | ZFSONLINUX_STATUS=$STATUS_EXCEPTION 277 | ZFSONLINUX_STATUS_TEXT=$STATUS_EXCEPTION_TEXT 278 | fi 279 | if [ -n "$EXCEPTION_COMMENT" ]; then 280 | OPENZFS_DESC="$OPENZFS_DESC
comment: $EXCEPTION_COMMENT" 281 | fi 282 | elif [ -n "$ZFSONLINUX_PR" ]; then 283 | ZFSONLINUX_ISSUE=$(basename $ZFSONLINUX_PR) 284 | ZFSONLINUX_HASH="PR-$ZFSONLINUX_ISSUE" 285 | ZFSONLINUX_STATUS=$STATUS_PR 286 | ZFSONLINUX_STATUS_TEXT=$STATUS_PR_TEXT 287 | else 288 | LINE2=$(git log --regexp-ignore-case --extended-regexp \ 289 | --no-merges --oneline \ 290 | --grep="$ZFSONLINUX_REGEX" $ZFSONLINUX_BRANCH) 291 | 292 | MATCH=$(echo $LINE2 | cut -f1 -d' ') 293 | if [ -n "$MATCH" ]; then 294 | ZFSONLINUX_HASH="$MATCH" 295 | ZFSONLINUX_STATUS=$STATUS_APPLIED 296 | ZFSONLINUX_STATUS_TEXT=$STATUS_APPLIED_TEXT 297 | else 298 | ZFSONLINUX_HASH="" 299 | ZFSONLINUX_STATUS=$STATUS_MISSING 300 | ZFSONLINUX_STATUS_TEXT=$STATUS_MISSING_TEXT 301 | if [ -n "$HASHES_FILE" ]; then 302 | echo $OPENZFS_HASH >> $HASHES_FILE 303 | fi 304 | fi 305 | fi 306 | 307 | cat << EOF 308 | 309 | 310 | 311 | 312 | 313 | 314 | 315 | EOF 316 | 317 | done 318 | 319 | popd >/dev/null 320 | 321 | cat << EOF 322 | 323 |
OpenZFS IssueOpenZFS CommitLinux CommitDescriptionStatus
$OPENZFS_ISSUE$OPENZFS_HASH$ZFSONLINUX_HASH$OPENZFS_DESC$ZFSONLINUX_STATUS_TEXT
324 |
Last Update: $DATE by openzfs-tracking.sh
325 |
326 | 327 | 328 | EOF 329 | -------------------------------------------------------------------------------- /master/github.py: -------------------------------------------------------------------------------- 1 | # -*- python -*- 2 | # ex: set syntax=python: 3 | 4 | import logging 5 | import urllib2 6 | import json 7 | import string 8 | import re 9 | 10 | from password import * 11 | from buildbot.status.web.hooks.github import GitHubEventHandler 12 | from dateutil.parser import parse as dateparse 13 | from twisted.python import log 14 | 15 | builders_common="arch," 16 | builders_linux="centos7,centos8,centos9,centosstream8,fedora37,fedora38,builtin," 17 | builders_freebsd="freebsd13,freebsd14" 18 | 19 | builders_push_master=builders_common+builders_linux+builders_freebsd+"coverage" 20 | builders_push_release=builders_common+builders_linux+builders_freebsd 21 | 22 | builders_pr_master=builders_common+builders_linux+builders_freebsd 23 | builders_pr_release=builders_common+builders_linux+builders_freebsd 24 | 25 | # Default builders for non-top PR commits 26 | builders_pr_minimum="arch" 27 | 28 | def query_url(url, token=None): 29 | log.msg("Making request to '%s'" % url) 30 | request = urllib2.Request(url) 31 | if token: 32 | request.add_header("Authorization", "token %s" % token) 33 | response = urllib2.urlopen(request) 34 | 35 | return json.loads(response.read()) 36 | 37 | # 38 | # Custom class to determine how to handle incoming Github changes. 
39 | # 40 | class CustomGitHubEventHandler(GitHubEventHandler): 41 | valid_props = [ 42 | ('^Build[-\s]linux:\s*(yes|no)\s*$', 'override-buildlinux'), 43 | ('^Build[-\s]zfs:\s*(yes|no)\s*$', 'override-buildzfs'), 44 | ('^Built[-\s]in:\s*(yes|no)\s*$', 'override-builtin'), 45 | ('^Check[-\s]lint:\s*(yes|no)\s*$', 'override-checklint'), 46 | ('^Configure[-|\s]zfs:(.*)$', 'override-configzfs'), 47 | ('^Perf[-|\s]zts:\s*(yes|no)\s*$', 'override-perfzts'), 48 | ('^Perf[-|\s]pts:\s*(yes|no)\s*$', 'override-perfpts'), 49 | ] 50 | 51 | def parse_comments(self, comments, default_category): 52 | category = default_category 53 | 54 | # Extract any overrides for builders for this commit 55 | # Requires-builders: build arch distro test perf none 56 | category_pattern = '^Requires-builders:\s*([ ,a-zA-Z0-9]+)' 57 | m = re.search(category_pattern, comments, re.I | re.M) 58 | if m is not None: 59 | category = m.group(1).lower(); 60 | 61 | # If Requires-builders contains 'none', then skip this commit 62 | none_pattern = '.*none.*' 63 | m = re.search(none_pattern, category, re.I | re.M) 64 | if m is not None: 65 | category = "" 66 | 67 | return category 68 | 69 | def handle_push_commit(self, payload, commit, branch): 70 | created_at = dateparse(commit['timestamp']) 71 | comments = commit['message'] 72 | 73 | # Assemble the list of modified files. 
74 | files = [] 75 | for kind in ('added', 'modified', 'removed'): 76 | files.extend(commit.get(kind, [])) 77 | 78 | # Extract if the commit message has property overrides 79 | props = { } 80 | for prop in CustomGitHubEventHandler.valid_props: 81 | step_pattern = prop[0] 82 | m = re.search(step_pattern, comments, re.I | re.M) 83 | if m is not None: 84 | prop_name = prop[1] 85 | props[prop_name] = json.dumps(m.group(1).lower()) 86 | 87 | match = re.match("master", branch) 88 | if match: 89 | category = self.parse_comments(comments, builders_push_master) 90 | else: 91 | # Extract if the commit message has property overrides 92 | # For 0.8 and earlier releases include the legacy builders. 93 | category = self.parse_comments(comments, builders_push_release) 94 | 95 | props['branch'] = branch 96 | 97 | # Enabled performance testing on pushes by default. 98 | props['perfpts'] = json.dumps("yes") 99 | props['perfzts'] = json.dumps("yes") 100 | 101 | change = { 102 | 'revision' : commit['id'], 103 | 'when_timestamp': created_at, 104 | 'branch': branch, 105 | 'revlink' : commit['url'], 106 | 'repository': payload['repository']['url'], 107 | 'project' : payload['repository']['full_name'], 108 | 'properties' : props, 109 | 'category': category, 110 | 'author': "%s <%s>" % (commit['author']['name'], 111 | commit['author']['email']), 112 | 'comments' : comments, 113 | 'files' : files, 114 | } 115 | 116 | if callable(self._codebase): 117 | change['codebase'] = self._codebase(payload) 118 | elif self._codebase is not None: 119 | change['codebase'] = self._codebase 120 | 121 | return change 122 | 123 | def handle_push(self, payload): 124 | changes = [] 125 | refname = payload['ref'] 126 | 127 | log.msg("Processing GitHub Push `%s'" % refname) 128 | 129 | # We only care about regular heads, i.e. 
branches 130 | match = re.match(r"^refs\/heads\/(.+)$", refname) 131 | if not match: 132 | log.msg("Ignoring refname `%s': Not a branch" % refname) 133 | return changes, 'git' 134 | 135 | branch = match.group(1) 136 | if payload.get('deleted'): 137 | log.msg("Branch `%s' deleted, ignoring" % branch) 138 | return changes, 'git' 139 | 140 | nr = 0 141 | for commit in payload['commits']: 142 | nr += 1 143 | 144 | if not commit.get('distinct', True): 145 | log.msg('Commit `%s` is a non-distinct commit, ignoring...' % 146 | (commit['id'],)) 147 | continue 148 | 149 | if nr > 10: 150 | log.msg('Commit `%s` exceeds push limit (%d > 5), ignoring...' % 151 | (commit['id'], nr)) 152 | continue 153 | 154 | change = self.handle_push_commit(payload, commit, branch) 155 | changes.append(change) 156 | 157 | log.msg("Received %d changes pushed from github" % len(changes)) 158 | 159 | return changes, 'git' 160 | 161 | def handle_pull_request_commit(self, payload, commit, nr, commits_nr, 162 | kernel_pr): 163 | 164 | pr_number = payload['number'] 165 | refname = 'refs/pull/%d/head' % (pr_number,) 166 | created_at = dateparse(payload['pull_request']['created_at']) 167 | branch = payload['pull_request']['base']['ref'] 168 | comments = commit['commit']['message'] + "\n\n" 169 | 170 | # Assemble the list of modified files. 171 | changed_files = [] 172 | for f in commit['files']: 173 | changed_files.append(f['filename']) 174 | 175 | # Extract if the commit message has property overrides 176 | props = { } 177 | for prop in CustomGitHubEventHandler.valid_props: 178 | step_pattern = prop[0] 179 | m = re.search(step_pattern, comments, re.I | re.M) 180 | if m is not None: 181 | prop_name = prop[1] 182 | props[prop_name] = json.dumps(m.group(1).lower()) 183 | 184 | # Annotate the head commit to allow special handling. 185 | if commit['sha'] == payload['pull_request']['head']['sha']: 186 | # For 0.8 and earlier releases include the legacy builders. 
187 | match = re.match("master", branch) 188 | if match: 189 | category = builders_pr_master 190 | else: 191 | category = builders_pr_release 192 | 193 | else: 194 | category = builders_pr_minimum 195 | 196 | # Extract if the commit message has property overrides 197 | category = self.parse_comments(comments, category) 198 | 199 | if kernel_pr: 200 | if re.search(kernel_pattern, comments, re.I | re.M) is None: 201 | comments = comments + kernel_pr + "\n" 202 | 203 | comments = comments + "Pull-request: #%d part %d/%d\n" % ( 204 | pr_number, nr, commits_nr) 205 | 206 | props['branch'] = json.dumps(branch) 207 | props['pr_number'] = json.dumps(pr_number) 208 | 209 | # Disabled performance testing on PRs by default. 210 | props['perfpts'] = json.dumps("no") 211 | props['perfzts'] = json.dumps("no") 212 | 213 | change = { 214 | 'revision' : commit['sha'], 215 | 'when_timestamp': created_at, 216 | 'branch': refname, 217 | 'revlink' : commit['html_url'], 218 | 'repository': payload['repository']['clone_url'], 219 | 'project' : payload['repository']['name'], 220 | 'properties' : props, 221 | 'category': category, 222 | 'author': "%s <%s>" % (commit['commit']['committer']['name'], 223 | commit['commit']['committer']['email']), 224 | 'comments' : comments, 225 | 'files' : changed_files, 226 | } 227 | 228 | if callable(self._codebase): 229 | change['codebase'] = self._codebase(payload) 230 | elif self._codebase is not None: 231 | change['codebase'] = self._codebase 232 | 233 | return change 234 | 235 | def handle_pull_request(self, payload): 236 | changes = [] 237 | pr_number = payload['number'] 238 | commits_nr = payload['pull_request']['commits'] 239 | 240 | log.msg('Processing GitHub PR #%d' % pr_number, logLevel=logging.DEBUG) 241 | 242 | action = payload.get('action') 243 | if action not in ('opened', 'reopened', 'synchronize'): 244 | log.msg("GitHub PR #%d %s, ignoring" % (pr_number, action)) 245 | return changes, 'git' 246 | 247 | # When receiving a large PR only test 
the top commit. 248 | if commits_nr > 5: 249 | commit_url = payload['pull_request']['base']['repo']['commits_url'][:-6] 250 | commit_url += "/" + payload['pull_request']['head']['sha'] 251 | commit = query_url(commit_url, token=github_token) 252 | change = self.handle_pull_request_commit(payload, commit, 253 | commits_nr, commits_nr, None) 254 | changes.append(change) 255 | # Compile all commits in the stack and test the top commit. 256 | else: 257 | commits_url = payload['pull_request']['commits_url'] 258 | commits = query_url(commits_url, token=github_token) 259 | 260 | kernel_pr = None 261 | kernel_pattern = '^Requires-kernel:\s*([a-zA-Z0-9_\-\:\/\+\.]+)' 262 | for commit in commits: 263 | comments = commit['commit']['message'] 264 | m = re.search(kernel_pattern, comments, re.I | re.M) 265 | if m is not None: 266 | kernel_pr = 'Requires-kernel: %s' % m.group(1) 267 | break 268 | 269 | nr = 0 270 | for commit in commits: 271 | nr += 1 272 | commit = query_url(commit['url'], token=github_token) 273 | change = self.handle_pull_request_commit(payload, commit, 274 | nr, commits_nr, kernel_pr) 275 | changes.append(change) 276 | 277 | log.msg("Received %d changes from GitHub Pull Request #%d" % ( 278 | len(changes), pr_number)) 279 | 280 | return changes, 'git' 281 | -------------------------------------------------------------------------------- /master/patches/0015-Add-VPC-support-to-EC2LatentBuildSlave.patch: -------------------------------------------------------------------------------- 1 | From b46f111bd8cf962cf691c12316eb8ebff4747c8f Mon Sep 17 00:00:00 2001 2 | From: Neal Gompa 3 | Date: Fri, 30 Mar 2018 17:23:54 -0400 4 | Subject: [PATCH 15/18] Add VPC support to EC2LatentBuildSlave 5 | 6 | Partially adapted from 8b67f91b50d72979ff620413dc4169d277b519df in buildbot 0.9.x, 7 | originally authored by Ryan Sydnor . 8 | 9 | Tests related to spot instances were not adapted, as there are no spot instance tests 10 | in buildbot 0.8.x to begin with. 
11 | 12 | Signed-off-by: Neal Gompa 13 | --- 14 | master/buildbot/buildslave/ec2.py | 57 ++++++++++++++---------- 15 | master/buildbot/test/unit/test_buildslave_ec2.py | 46 +++++++++++++++++++ 16 | master/docs/manual/cfg-buildslaves.rst | 21 +++++++++ 17 | 3 files changed, 101 insertions(+), 23 deletions(-) 18 | 19 | diff --git a/master/buildbot/buildslave/ec2.py b/master/buildbot/buildslave/ec2.py 20 | index 780c4ef05..dd111e283 100644 21 | --- a/master/buildbot/buildslave/ec2.py 22 | +++ b/master/buildbot/buildslave/ec2.py 23 | @@ -63,6 +63,7 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 24 | aws_id_file_path=None, user_data=None, region=None, 25 | keypair_name=None, 26 | security_name=None, 27 | + subnet_id=None, security_group_ids=None, 28 | max_builds=None, notify_on_missing=[], missing_timeout=60 * 20, 29 | build_wait_timeout=60 * 10, properties={}, locks=None, 30 | spot_instance=False, max_spot_price=1.6, volumes=[], 31 | @@ -72,6 +73,10 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 32 | AbstractLatentBuildSlave.__init__( 33 | self, name, password, max_builds, notify_on_missing, 34 | missing_timeout, build_wait_timeout, properties, locks) 35 | + if security_name and subnet_id: 36 | + raise ValueError( 37 | + 'security_name (EC2 classic security groups) is not supported ' 38 | + 'in a VPC. 
Use security_group_ids instead.') 39 | if not ((ami is not None) ^ 40 | (valid_ami_owners is not None or 41 | valid_ami_location_regex is not None)): 42 | @@ -98,7 +103,7 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 43 | if keypair_name is None: 44 | keypair_name = 'latent_buildbot_slave' 45 | log.msg('Using default keypair name, since none is set') 46 | - if security_name is None: 47 | + if security_name is None and not subnet_id: 48 | security_name = 'latent_buildbot_slave' 49 | log.msg('Using default keypair name, since none is set') 50 | if spot_instance and price_multiplier is None and max_spot_price is None: 51 | @@ -191,23 +196,24 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 52 | self.conn.create_key_pair(keypair_name) 53 | 54 | # create security group 55 | - try: 56 | - group = self.conn.get_all_security_groups(security_name)[0] 57 | - assert group 58 | - except boto.exception.EC2ResponseError, e: 59 | - if 'InvalidGroup.NotFound' in e.body: 60 | - self.security_group = self.conn.create_security_group( 61 | - security_name, 62 | - 'Authorization to access the buildbot instance.') 63 | - # Authorize the master as necessary 64 | - # TODO this is where we'd open the hole to do the reverse pb 65 | - # connect to the buildbot 66 | - # ip = urllib.urlopen( 67 | - # 'http://checkip.amazonaws.com').read().strip() 68 | - # self.security_group.authorize('tcp', 22, 22, '%s/32' % ip) 69 | - # self.security_group.authorize('tcp', 80, 80, '%s/32' % ip) 70 | - else: 71 | - raise 72 | + if security_name: 73 | + try: 74 | + group = self.conn.get_all_security_groups(security_name)[0] 75 | + assert group 76 | + except boto.exception.EC2ResponseError, e: 77 | + if 'InvalidGroup.NotFound' in e.body: 78 | + self.security_group = self.conn.create_security_group( 79 | + security_name, 80 | + 'Authorization to access the buildbot instance.') 81 | + # Authorize the master as necessary 82 | + # TODO this is where we'd open the hole to do the reverse pb 83 | + # 
connect to the buildbot 84 | + # ip = urllib.urlopen( 85 | + # 'http://checkip.amazonaws.com').read().strip() 86 | + # self.security_group.authorize('tcp', 22, 22, '%s/32' % ip) 87 | + # self.security_group.authorize('tcp', 80, 80, '%s/32' % ip) 88 | + else: 89 | + raise 90 | 91 | # get the image 92 | if self.ami is not None: 93 | @@ -221,6 +227,9 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 94 | if elastic_ip is not None: 95 | elastic_ip = self.conn.get_all_addresses([elastic_ip])[0] 96 | self.elastic_ip = elastic_ip 97 | + self.subnet_id = subnet_id 98 | + self.security_group_ids = security_group_ids 99 | + self.classic_security_groups = [self.security_name] if self.security_name else None 100 | self.tags = tags 101 | 102 | def get_image(self): 103 | @@ -287,9 +296,10 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 104 | def _start_instance(self): 105 | image = self.get_image() 106 | reservation = image.run( 107 | - key_name=self.keypair_name, security_groups=[self.security_name], 108 | + key_name=self.keypair_name, security_groups=self.classic_security_groups, 109 | instance_type=self.instance_type, user_data=self.user_data, 110 | - placement=self.placement) 111 | + placement=self.placement, subnet_id=self.subnet_id, 112 | + security_group_ids=self.security_group_ids) 113 | self.instance = reservation.instances[0] 114 | instance_id, image_id, start_time = self._wait_for_instance( 115 | reservation) 116 | @@ -442,11 +452,12 @@ class EC2LatentBuildSlave(AbstractLatentBuildSlave): 117 | (self.__class__.__name__, self.slavename, bid_price)) 118 | reservations = self.conn.request_spot_instances( 119 | bid_price, self.ami, key_name=self.keypair_name, 120 | - security_groups=[ 121 | - self.security_name], 122 | + security_groups=self.classic_security_groups, 123 | instance_type=self.instance_type, 124 | user_data=self.user_data, 125 | - placement=self.placement) 126 | + placement=self.placement, 127 | + subnet_id=self.subnet_id, 128 | + 
security_group_ids=self.security_group_ids) 129 | request = self._wait_for_request(reservations[0]) 130 | instance_id = request.instance_id 131 | reservations = self.conn.get_all_instances(instance_ids=[instance_id]) 132 | diff --git a/master/buildbot/test/unit/test_buildslave_ec2.py b/master/buildbot/test/unit/test_buildslave_ec2.py 133 | index f52fe7d61..b8874dd4f 100644 134 | --- a/master/buildbot/test/unit/test_buildslave_ec2.py 135 | +++ b/master/buildbot/test/unit/test_buildslave_ec2.py 136 | @@ -85,6 +85,52 @@ class TestEC2LatentBuildSlave(unittest.TestCase): 137 | ) 138 | self.assertEqual(bs.tags, tags) 139 | 140 | + @mock_ec2 141 | + def test_fail_mixing_classic_and_vpc_ec2_settings(self): 142 | + c = self.botoSetup() 143 | + amis = c.get_all_images() 144 | + 145 | + def create_slave(): 146 | + ec2.EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large', 147 | + keypair_name="test_key", 148 | + identifier='publickey', 149 | + secret_identifier='privatekey', 150 | + ami=amis[0].id, 151 | + security_name="classic", 152 | + subnet_id="sn-1234" 153 | + ) 154 | + 155 | + self.assertRaises(ValueError, create_slave) 156 | + 157 | + @mock_ec2 158 | + def test_start_vpc_instance(self): 159 | + c = self.botoSetup() 160 | + 161 | + vpc_conn = boto.connect_vpc() 162 | + vpc = vpc_conn.create_vpc("192.168.0.0/24") 163 | + subnet = vpc_conn.create_subnet(vpc.id, "192.168.0.0/24") 164 | + amis = c.get_all_images() 165 | + 166 | + sg = c.create_security_group("test_sg", "test_sg", vpc.id) 167 | + bs = ec2.EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large', 168 | + identifier='publickey', 169 | + secret_identifier='privatekey', 170 | + keypair_name="test_key", 171 | + security_group_ids=[sg.id], 172 | + subnet_id=subnet.id, 173 | + ami=amis[0].id 174 | + ) 175 | + 176 | + instance_id, _, _ = bs._start_instance() 177 | + instances = [i for i in c.get_only_instances() 178 | + if i.state != "terminated"] 179 | + 180 | + self.assertEqual(len(instances), 1) 181 | + 
self.assertEqual(instances[0].id, instance_id) 182 | + self.assertEqual(instances[0].subnet_id, subnet.id) 183 | + self.assertEqual(len(instances[0].groups), 1) 184 | + self.assertEqual(instances[0].groups[0].id, sg.id) 185 | + 186 | @mock_ec2 187 | def test_start_instance(self): 188 | c = self.botoSetup() 189 | diff --git a/master/docs/manual/cfg-buildslaves.rst b/master/docs/manual/cfg-buildslaves.rst 190 | index ef70c3fb2..cfc7dbc3d 100644 191 | --- a/master/docs/manual/cfg-buildslaves.rst 192 | +++ b/master/docs/manual/cfg-buildslaves.rst 193 | @@ -348,6 +348,27 @@ The ``missing_timeout`` and ``notify_on_missing`` specify how long to wait for a 194 | ``keypair_name`` and ``security_name`` allow you to specify different names for these AWS EC2 values. 195 | They both default to ``latent_buildbot_slave``. 196 | 197 | +VPC Support 198 | +############## 199 | + 200 | +If you are managing slaves within a VPC, your slave configuration must be modified from above. 201 | +You must specify the id of the subnet where you want your slave placed. 202 | +You must also specify security groups created within your VPC as opposed to classic EC2 security groups. 203 | +This can be done by passing the ids of the vpc security groups. 204 | +Note, when using a VPC, you can not specify classic EC2 security groups (as specified by security_name). 
205 | + 206 | +:: 207 | + 208 | + from buildbot.plugins import buildslave 209 | + c['slaves'] = [ 210 | + buildslave.EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large', 211 | + ami='ami-12345', 212 | + keypair_name='latent_buildbot_slave', 213 | + subnet_id='subnet-12345', 214 | + security_group_ids=['sg-12345','sg-67890'] 215 | + ) 216 | + ] 217 | + 218 | Spot instances 219 | ############## 220 | 221 | -- 222 | 2.14.3 223 | 224 | -------------------------------------------------------------------------------- /master/buildslaves.py: -------------------------------------------------------------------------------- 1 | # -*- python -*- 2 | # ex: set syntax=python: 3 | 4 | import string 5 | import random 6 | import re 7 | from password import * 8 | from buildbot.plugins import util 9 | from buildbot.buildslave import BuildSlave 10 | from buildbot.buildslave.ec2 import EC2LatentBuildSlave 11 | 12 | import socket 13 | hostname=socket.gethostname() 14 | my_ip=socket.gethostbyname(hostname) 15 | 16 | ### BUILDER CLASSES 17 | class ZFSBuilderConfig(util.BuilderConfig): 18 | @staticmethod 19 | def nextSlave(builder, slaves): 20 | availableSlave = None 21 | 22 | for slave in slaves: 23 | # if we found an idle slave, immediate use this one 24 | if slave.isIdle(): 25 | return slave 26 | 27 | # hold onto the first slave thats not spun up but free 28 | if availableSlave is None and slave.isAvailable(): 29 | availableSlave = slave 30 | 31 | # we got here because there was no idle slave 32 | if availableSlave is not None: 33 | return availableSlave 34 | 35 | # randomly choose among all our busy slaves 36 | return (random.choice(slaves) if slaves else None) 37 | 38 | # builders should prioritize a merge into master or the final commit 39 | # from a pull request before building other commits. This avoids 40 | # starving smaller pull requests from getting feedback. 
41 | @staticmethod 42 | def nextBuild(builder, requests): 43 | pattern = '^Pull-request:\s*#\d+\s*part\s*(?P<part>\d+)/(?P<total>\d+)$' 44 | 45 | # go thru each request's changes to prioritize them 46 | for request in requests: 47 | for change in request.source.changes: 48 | m = re.search(pattern, change.comments, re.I | re.M) 49 | 50 | # if we don't find the pattern, this was a merge to master 51 | if m is None: 52 | return request 53 | 54 | part = int(m.group('part')) 55 | total = int(m.group('total')) 56 | 57 | # if the part is the same as the total, then we have the last commit 58 | if part == total: 59 | return request 60 | 61 | # we didn't have a merge into master or a final commit on a pull request 62 | return requests[0] 63 | 64 | def __init__(self, mergeRequests=False, nextSlave=None, nextBuild=None, **kwargs): 65 | if nextSlave is None: 66 | nextSlave = ZFSBuilderConfig.nextSlave 67 | 68 | if nextBuild is None: 69 | nextBuild = ZFSBuilderConfig.nextBuild 70 | 71 | util.BuilderConfig.__init__(self, nextSlave=nextSlave, 72 | nextBuild=nextBuild, 73 | mergeRequests=mergeRequests, **kwargs) 74 | 75 | ### BUILD SLAVE CLASSES 76 | # Create large EC2 latent build slave 77 | class ZFSEC2Slave(EC2LatentBuildSlave): 78 | default_user_data = user_data = """#!/bin/sh -x 79 | # Make /dev/console the serial console instead of the video console 80 | # so we get our output in the text system log at boot. 81 | case "$(uname)" in 82 | FreeBSD) 83 | # On FreeBSD the first enabled console becomes /dev/console 84 | # ttyv0,ttyu0,gdb -> ttyu0,ttyv0,gdb 85 | conscontrol delete ttyu0 86 | conscontrol add ttyu0 87 | # While here, we also need to disable the automatic updates in release AMIs. 88 | sysrc firstboot_freebsd_update_enable=NO 89 | ;; 90 | *) 91 | ;; 92 | esac 93 | 94 | # Duplicate all output to a log file, syslog, and the console. 95 | { 96 | export PATH=%s:$PATH 97 | 98 | # Ensure wget is available for runurl 99 | if !
hash wget 2>/dev/null; then 100 | if hash apt-get 2>/dev/null; then 101 | apt-get --quiet --yes install wget 102 | elif hash dnf 2>/dev/null; then 103 | echo "keepcache=true" >>/etc/dnf/dnf.conf 104 | echo "deltarpm=true" >>/etc/dnf/dnf.conf 105 | echo "fastestmirror=true" >>/etc/dnf/dnf.conf 106 | dnf clean all 107 | dnf --quiet -y install wget 108 | elif hash pkg 2>/dev/null; then 109 | echo IGNORE_OSVERSION=yes >>/usr/local/etc/pkg.conf 110 | pkg install --quiet -y wget 111 | elif hash yum 2>/dev/null; then 112 | yum --quiet -y install wget 113 | else 114 | echo "Unknown package manager, cannot install wget" 115 | fi 116 | fi 117 | 118 | # Run the bootstrap script 119 | export BB_MASTER='%s' 120 | export BB_NAME='%s' 121 | export BB_PASSWORD='%s' 122 | export BB_MODE='%s' 123 | export BB_URL='%s' 124 | 125 | # Get the runurl utility. 126 | wget -qO/usr/bin/runurl $BB_URL/runurl 127 | chmod 755 /usr/bin/runurl 128 | 129 | runurl $BB_URL/bb-bootstrap.sh 130 | } 2>&1 | tee /var/log/user-data.log | logger -t user-data -s 2>/dev/console 131 | """ 132 | 133 | @staticmethod 134 | def pass_generator(size=24, chars=string.ascii_uppercase + string.digits): 135 | return ''.join(random.choice(chars) for _ in range(size)) 136 | 137 | def __init__(self, name, password=None, master='', url='', mode="BUILD", 138 | instance_type="c5d.large", identifier=ec2_default_access, 139 | secret_identifier=ec2_default_secret, 140 | keypair_name=ec2_default_keypair_name, security_name="", 141 | subnet_id='subnet-05816f35f14929ed5', security_group_ids=["sg-0bca95807c454e00f"], 142 | user_data=None, region="us-west-2", placement='a', max_builds=1, 143 | build_wait_timeout=60, spot_instance=False, max_spot_price=0.10, 144 | price_multiplier=None, missing_timeout=3600*1, 145 | block_device_map=None, get_image=None, **kwargs): 146 | 147 | self.name = name 148 | bin_path = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" 149 | 150 | tags = kwargs.get('tags') 151 | if not tags or 
tags is None: 152 | tags={ 153 | "ENV" : "DEV", 154 | "Name" : "ZFSBuilder", 155 | "ORG" : "COMP", 156 | "OWNER" : "behlendorf1", 157 | "PLATFORM" : self.name, 158 | "PROJECT" : "ZFS", 159 | } 160 | 161 | if master in (None, ''): 162 | master = my_ip + ":9989" 163 | 164 | if url in (None, ''): 165 | url = "https://raw.githubusercontent.com/openzfs/zfs-buildbot/master/scripts/" 166 | 167 | if password is None: 168 | password = ZFSEC2Slave.pass_generator() 169 | 170 | if user_data is None: 171 | user_data = ZFSEC2Slave.default_user_data % (bin_path, master, name, password, mode, url) 172 | 173 | if block_device_map is None: 174 | # io1 is 50 IOPS/GB, iops _must_ be specified for io1 only 175 | # Cf. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html 176 | boot_device_props = { "volume_type": "gp2", "size": 24 } 177 | 178 | # Reasonable default values for additional persistent disks, if desired 179 | persist_device_props = { "volume_type": "io1", 180 | "iops": 400, 181 | "size": 8 182 | } 183 | 184 | # The boot device name must exactly match the name in the 185 | # distribution provided AMI otherwise it will fail to boot. 186 | if "Amazon" in name or "Kernel.org" in name or "Debian" in name: 187 | boot_device = "/dev/xvda" 188 | else: 189 | boot_device = "/dev/sda1" 190 | 191 | block_device_map = { boot_device : boot_device_props, 192 | "/dev/sdb": { "ephemeral_name": "ephemeral0" }, 193 | "/dev/sdc": { "ephemeral_name": "ephemeral1" }, 194 | "/dev/sdd": { "ephemeral_name": "ephemeral2" }, 195 | "/dev/sde": { "ephemeral_name": "ephemeral3" }, 196 | "/dev/sdf": { "ephemeral_name": "ephemeral4" }, 197 | "/dev/sdg": { "ephemeral_name": "ephemeral5" }, 198 | } 199 | 200 | # get_image can be used to determine an AMI when the slave starts. 201 | if callable(get_image): 202 | # Trick EC2LatentBuildSlave input validation by providing a "valid" regex. 203 | # This won't actually be used because we override get_image(). 
204 | kwargs['valid_ami_location_regex'] = '' 205 | # If we just set `self.get_image = get_image` then self doesn't get passed. 206 | self.get_image = lambda: get_image(self) 207 | 208 | EC2LatentBuildSlave.__init__( 209 | self, name=name, password=password, instance_type=instance_type, 210 | identifier=identifier, secret_identifier=secret_identifier, region=region, 211 | user_data=user_data, keypair_name=keypair_name, security_name=security_name, 212 | subnet_id=subnet_id, security_group_ids=security_group_ids, 213 | max_builds=max_builds, spot_instance=spot_instance, tags=tags, 214 | max_spot_price=max_spot_price, price_multiplier=price_multiplier, 215 | build_wait_timeout=build_wait_timeout, missing_timeout=missing_timeout, 216 | placement=placement, block_device_map=block_device_map, **kwargs) 217 | 218 | class ZFSEC2StyleSlave(ZFSEC2Slave): 219 | def __init__(self, name, **kwargs): 220 | ZFSEC2Slave.__init__(self, name, mode="STYLE", 221 | instance_type="m5d.large", max_spot_price=0.10, placement='a', 222 | spot_instance=True, **kwargs) 223 | 224 | # Create an HVM EC2 large latent build slave 225 | class ZFSEC2BuildSlave(ZFSEC2Slave): 226 | def __init__(self, name, arch="amd64", **kwargs): 227 | instance_types = { 228 | "amd64": "c5d.large", 229 | "arm64": "c6g.large" 230 | } 231 | assert arch in instance_types 232 | ZFSEC2Slave.__init__(self, name, mode="BUILD", 233 | instance_type=instance_types.get(arch), max_spot_price=0.10, placement='a', 234 | spot_instance=True, **kwargs) 235 | 236 | # Create an HVM EC2 latent test slave 237 | class ZFSEC2TestSlave(ZFSEC2Slave): 238 | def __init__(self, name, **kwargs): 239 | ZFSEC2Slave.__init__(self, name, build_wait_timeout=1, mode="TEST", 240 | instance_type="m5d.large", max_spot_price=0.10, placement='a', 241 | spot_instance=True, **kwargs) 242 | 243 | # Create an HVM EC2 latent test slave 244 | # AMI does not support an Elastic Network Adapter (ENA) 245 | class ZFSEC2ENATestSlave(ZFSEC2Slave): 246 | def 
__init__(self, name, **kwargs): 247 | ZFSEC2Slave.__init__(self, name, build_wait_timeout=1, mode="TEST", 248 | instance_type="m3.large", max_spot_price=0.10, placement='a', 249 | spot_instance=True, **kwargs) 250 | 251 | # Create an HVM EC2 latent test slave 252 | class ZFSEC2CoverageSlave(ZFSEC2Slave): 253 | def __init__(self, name, **kwargs): 254 | ZFSEC2Slave.__init__(self, name, build_wait_timeout=1, mode="TEST", 255 | instance_type="m3.xlarge", max_spot_price=0.10, placement='a', 256 | spot_instance=True, **kwargs) 257 | 258 | # Create a d2.xlarge slave for performance testing because they have disks 259 | class ZFSEC2PerfTestSlave(ZFSEC2Slave): 260 | def __init__(self, name, **kwargs): 261 | ZFSEC2Slave.__init__(self, name, build_wait_timeout=1, mode="PERF", 262 | instance_type="d2.xlarge", max_spot_price=0.60, placement='a', 263 | spot_instance=True, **kwargs) 264 | -------------------------------------------------------------------------------- /scripts/bb-dependencies.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Check for a local cached configuration. 4 | if test -f /etc/buildslave; then 5 | . /etc/buildslave 6 | else 7 | echo "Missing configuration /etc/buildslave. Assuming dependencies are" 8 | echo "already satisfied and this is a persistent buildslave." 9 | exit 0 10 | fi 11 | 12 | # a function to wait for an apt-get upgrade to finish 13 | apt_get_install () { 14 | while true; do 15 | sudo -E apt-get --yes install "$@" 16 | 17 | # error code 11 indicates that a lock file couldn't be obtained 18 | # keep retrying until we don't see an error code of 11 19 | [ $? 
-ne 11 ] && break 20 | 21 | sleep 0.5 22 | done 23 | } 24 | 25 | # Temporary workaround for FreeBSD pkg db locking race 26 | pkg_install () { 27 | local pkg_pid=$(pgrep pkg 2>/dev/null) 28 | if [ -n "${pkg_pid}" ]; then 29 | pwait ${pkg_pid} 30 | fi 31 | sudo -E pkg install "${@}" 32 | } 33 | 34 | set -x 35 | 36 | case "$BB_NAME" in 37 | Amazon*) 38 | # Required development tools. 39 | sudo -E yum -y install gcc autoconf libtool gdb lcov bison flex 40 | 41 | # Required utilities. 42 | sudo -E yum -y install git rpm-build wget curl bc fio acl sysstat \ 43 | mdadm lsscsi parted attr dbench watchdog ksh nfs-utils samba \ 44 | rng-tools dkms php php-gd php-dom php-curl php-zip php-posix php-cli \ 45 | php-xml php-sqlite3 rsync 46 | 47 | if cat /etc/os-release | grep -Eq "Amazon Linux 2"; then 48 | sudo -E yum -y install \ 49 | python3 python3-devel python3-setuptools python3-cffi \ 50 | python3-packaging 51 | fi 52 | 53 | # Required development libraries 54 | sudo -E yum -y install kernel-devel-$(uname -r) \ 55 | zlib-devel libuuid-devel libblkid-devel libselinux-devel \ 56 | xfsprogs-devel libattr-devel libacl-devel libudev-devel \ 57 | device-mapper-devel openssl-devel libargon2-devel elfutils-libelf-devel \ 58 | libffi-devel libaio-devel libmount-devel pam-devel \ 59 | python-devel python-setuptools python-cffi libcurl-devel \ 60 | python-packaging ncompress 61 | ;; 62 | 63 | CentOS*) 64 | # Required repository packages 65 | if cat /etc/redhat-release | grep -Eq "release 6."; then 66 | sudo -E yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-6.noarch.rpm 67 | elif cat /etc/redhat-release | grep -Eq "release 7."; then 68 | sudo -E yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm 69 | elif cat /etc/redhat-release | grep -Eq "release 8"; then 70 | sudo -E yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm 71 | elif cat /etc/redhat-release | grep -Eq "release 9"; then 72 | 
sudo dnf config-manager --set-enabled crb 73 | sudo dnf -y install epel-release 74 | 75 | # Needed for kmod rpm 76 | sudo dnf -y install kernel-abi-stablelists 77 | else 78 | echo "No extra repo packages to install..." 79 | fi 80 | 81 | # To minimize EPEL leakage, disable by default... 82 | sudo -E sed -e "s/enabled=1/enabled=0/g" -i /etc/yum.repos.d/epel.repo 83 | 84 | # Required development tools. 85 | sudo -E yum -y --skip-broken install gcc make autoconf libtool gdb \ 86 | kernel-rpm-macros kernel-abi-whitelists 87 | 88 | # Required utilities. 89 | sudo -E yum -y --skip-broken install --enablerepo=epel git rpm-build \ 90 | wget curl bc fio acl sysstat mdadm lsscsi parted attr dbench watchdog \ 91 | ksh nfs-utils samba rng-tools dkms pamtester ncompress rsync jq 92 | 93 | # Required development libraries 94 | sudo -E yum -y --skip-broken install kernel-devel \ 95 | zlib-devel libuuid-devel libblkid-devel libselinux-devel \ 96 | xfsprogs-devel libattr-devel libacl-devel libudev-devel \ 97 | openssl-devel libargon2-devel libffi-devel pam-devel libaio-devel libcurl-devel 98 | 99 | # Packages that are version dependent and not always available 100 | if cat /etc/redhat-release | grep -Eq "release 7."; then 101 | sudo -E yum -y --skip-broken install --enablerepo=epel libasan \ 102 | python-devel python-setuptools python-cffi python-packaging \ 103 | python36 python36-devel python36-setuptools python36-cffi \ 104 | python36-packaging 105 | elif cat /etc/redhat-release | grep -Eq "release [8|9]"; then 106 | sudo -E yum -y --skip-broken install libasan libtirpc-devel \ 107 | python3-devel python3-setuptools python3-cffi 108 | # EL8 moved some dev tools into an entirely new repo. 109 | sudo -E yum -y --skip-broken install --enablerepo=powertools \ 110 | python3-packaging rpcgen 111 | fi 112 | 113 | ;; 114 | 115 | Debian*) 116 | export DEBIAN_FRONTEND=noninteractive 117 | 118 | # Required development tools. 
119 | sudo -E apt-get --yes install build-essential autoconf libtool \ 120 | libtool-bin gdb lcov 121 | 122 | # Required utilities. 123 | sudo -E apt-get --yes install git alien fakeroot wget curl bc fio acl \ 124 | sysstat lsscsi parted gdebi attr dbench watchdog ksh nfs-kernel-server \ 125 | samba rng-tools dkms rsync 126 | 127 | # Required development libraries 128 | sudo -E apt-get --yes install linux-headers-$(uname -r) \ 129 | zlib1g-dev uuid-dev libblkid-dev libselinux-dev \ 130 | xfslibs-dev libattr1-dev libacl1-dev libudev-dev libdevmapper-dev \ 131 | libssl-dev libargon2-dev libaio-dev libffi-dev libelf-dev libmount-dev \ 132 | libpam0g-dev pamtester python-dev python-setuptools python-cffi \ 133 | python-packaging python3 python3-dev python3-setuptools python3-cffi \ 134 | libcurl4-openssl-dev python3-packaging python-distlib python3-distlib 135 | 136 | # Testing support libraries 137 | sudo -E apt-get --yes install libasan4 138 | ;; 139 | 140 | Fedora*) 141 | # Always test with the latest packages on Fedora. 142 | sudo -E dnf -y upgrade 143 | 144 | # Required development tools. 145 | sudo -E dnf -y install gcc make autoconf libtool gdb lcov rpcgen 146 | 147 | # Required utilities. 
148 | sudo -E dnf -y install git rpm-build wget curl bc fio acl sysstat \ 149 | mdadm lsscsi parted attr dbench watchdog ksh nfs-utils samba \ 150 | rng-tools dkms ncompress rsync jq 151 | 152 | # Required development libraries 153 | sudo -E dnf -y install kernel-devel zlib-devel \ 154 | libuuid-devel libblkid-devel libselinux-devel \ 155 | xfsprogs-devel libattr-devel libacl-devel libudev-devel \ 156 | device-mapper-devel openssl-devel libargon2-devel libtirpc-devel libffi-devel \ 157 | libaio-devel libmount-devel pam-devel pamtester python-devel python-setuptools \ 158 | python-cffi python-packaging python3 python3-devel python3-setuptools \ 159 | python3-cffi libcurl-devel python3-packaging 160 | 161 | # Testing support libraries 162 | sudo -E dnf -y install libasan 163 | ;; 164 | 165 | FreeBSD*) 166 | # Temporary workaround for pkg db locking race 167 | pkg_pid=$(pgrep pkg 2>/dev/null) 168 | if [ -n "${pkg_pid}" ]; then 169 | pwait ${pkg_pid} 170 | fi 171 | # Always test with the latest packages on FreeBSD. 172 | sudo -E pkg upgrade -y --no-repo-update 173 | 174 | # Kernel source 175 | ( 176 | ABI=$(uname -p) 177 | VERSION=$(freebsd-version -r) 178 | sudo mkdir -p /usr/src 179 | sudo chown -R $(whoami) /usr/src 180 | cd /tmp 181 | fetch https://download.freebsd.org/ftp/snapshots/${ABI}/${VERSION}/src.txz || 182 | fetch https://download.freebsd.org/ftp/releases/${ABI}/${VERSION}/src.txz 183 | tar xpf src.txz -C / 184 | rm src.txz 185 | 186 | # Confirm we have the source code, if not, try git 187 | if [ ! 
-f /usr/src/sys/sys/param.h ]; then 188 | # Try to extract the git commit 189 | uname -v 190 | VSTR="$(uname -v | awk 'match($0,/[[:alnum:].-\/]+-n[[:digit:]]+-[[:alnum:]]+/) {print substr($0,RSTART,RLENGTH)}')" 191 | BRANCH="${VSTR%%-*}" 192 | HASH="${VSTR##*-}" 193 | if [ "${BRANCH}" = "main" ]; then 194 | git clone -q --single-branch --depth=1000 https://github.com/freebsd/freebsd-src /usr/src 195 | cd /usr/src 196 | git reset --hard $HASH 197 | elif [ "${BRANCH%%/*}" = "releng" ]; then 198 | git clone -q --single-branch --depth=200 -b ${BRANCH} https://github.com/freebsd/freebsd-src /usr/src 199 | cd /usr/src 200 | git reset --hard $HASH 201 | elif [ "${BRANCH%%/*}" = "stable" ]; then 202 | git clone -q --single-branch --depth=200 -b ${BRANCH} https://github.com/freebsd/freebsd-src /usr/src 203 | cd /usr/src 204 | git reset --hard $HASH 205 | else 206 | # Not sure what branch we are on, so try something likely to work 207 | git clone -q --single-branch --depth=1 -b releng/${VERSION%%-*} https://github.com/freebsd/freebsd-src /usr/src || 208 | git clone -q --single-branch --depth=1 -b stable/${VERSION%%.*} https://github.com/freebsd/freebsd-src /usr/src 209 | fi 210 | fi 211 | ) 212 | 213 | # Required libraries 214 | pkg_install -y --no-repo-update \ 215 | libargon2 216 | 217 | # Required development tools 218 | pkg_install -y --no-repo-update \ 219 | autoconf \ 220 | automake \ 221 | autotools \ 222 | bash \ 223 | gmake \ 224 | libtool 225 | 226 | # Essential testing utilities 227 | # No tests will run if these are missing. 228 | pkg_install -y --no-repo-update \ 229 | ksh93 \ 230 | python \ 231 | python3 232 | 233 | # Important testing utilities 234 | # Many tests will fail if these are missing. 235 | pkg_install -y --no-repo-update \ 236 | base64 \ 237 | fio 238 | 239 | # Testing support utilities 240 | # Only a few tests require these. 
241 | pkg_install -y --no-repo-update \ 242 | samba416 \ 243 | gdb \ 244 | pamtester \ 245 | lcov \ 246 | rsync \ 247 | jq 248 | 249 | # Python support libraries 250 | pkg_install -xy --no-repo-update \ 251 | '^py3[[:digit:]]+-cffi$' \ 252 | '^py3[[:digit:]]+-sysctl$' \ 253 | '^py3[[:digit:]]+-packaging$' 254 | 255 | : # Succeed even if the last set of packages failed to install. 256 | ;; 257 | 258 | Ubuntu*) 259 | # Required development tools. 260 | apt_get_install build-essential autoconf libtool gdb lcov bison flex 261 | 262 | # Required utilities. 263 | apt_get_install git alien fakeroot wget curl bc fio acl \ 264 | sysstat mdadm lsscsi parted gdebi attr dbench watchdog ksh \ 265 | nfs-kernel-server samba rng-tools xz-utils dkms rsync 266 | 267 | # Required development libraries 268 | apt_get_install linux-headers-$(uname -r) \ 269 | zlib1g-dev uuid-dev libblkid-dev libselinux-dev \ 270 | xfslibs-dev libattr1-dev libacl1-dev libudev-dev libdevmapper-dev \ 271 | libssl-dev libargon2-dev libffi-dev libaio-dev libelf-dev libmount-dev \ 272 | libpam0g-dev pamtester python-dev python-setuptools python-cffi \ 273 | python3 python3-dev python3-setuptools python3-cffi \ 274 | libcurl4-openssl-dev python-packaging python3-packaging \ 275 | python-distlib python3-distlib 276 | 277 | if test "$BB_MODE" = "STYLE"; then 278 | apt_get_install pax-utils shellcheck cppcheck mandoc 279 | sudo -E pip --quiet install flake8 280 | fi 281 | 282 | # Testing support libraries 283 | apt_get_install python3 284 | ;; 285 | 286 | *) 287 | echo "$BB_NAME unknown platform" 288 | ;; 289 | esac 290 | -------------------------------------------------------------------------------- /master/public_html/default.css: -------------------------------------------------------------------------------- 1 | body.interface { 2 | margin-left: 30px; 3 | margin-right: 30px; 4 | margin-top: 20px; 5 | margin-bottom: 50px; 6 | padding: 0; 7 | background: url(bg_gradient.jpg) repeat-x; 8 | font-family: Verdana, 
sans-serif; 9 | font-size: 10px; 10 | background-color: #fff; 11 | color: #333; 12 | } 13 | 14 | .auth { 15 | position:absolute; 16 | top:5px; 17 | right:40px; 18 | } 19 | 20 | .alert { 21 | color: #c30000; 22 | background-color: #f2dcdc; 23 | padding: 5px 5px 5px 25px; 24 | margin-bottom: 20px; 25 | border-top:1px solid #ccc; 26 | border-bottom:1px solid #ccc; 27 | border-color: #c30000; 28 | font-size: 20px; 29 | } 30 | a:link,a:visited,a:active { 31 | color: #444; 32 | } 33 | 34 | table { 35 | border-spacing: 1px 1px; 36 | } 37 | 38 | table td { 39 | padding: 3px 4px 3px 4px; 40 | text-align: center; 41 | } 42 | 43 | .Project { 44 | min-width: 6em; 45 | } 46 | 47 | .LastBuild,.Activity { 48 | padding: 0 0 0 4px; 49 | } 50 | 51 | .LastBuild,.Activity,.Builder,.BuildStep { 52 | min-width: 5em; 53 | } 54 | 55 | /* Chromium Specific styles */ 56 | div.BuildResultInfo { 57 | color: #444; 58 | } 59 | 60 | div.Announcement { 61 | margin-bottom: 1em; 62 | } 63 | 64 | div.Announcement>a:hover { 65 | color: black; 66 | } 67 | 68 | div.Announcement>div.Notice { 69 | background-color: #afdaff; 70 | padding: 0.5em; 71 | font-size: 16px; 72 | text-align: center; 73 | } 74 | 75 | div.Announcement>div.Open { 76 | border: 3px solid #8fdf5f; 77 | padding: 0.5em; 78 | font-size: 16px; 79 | text-align: center; 80 | } 81 | 82 | div.Announcement>div.Closed { 83 | border: 5px solid #e98080; 84 | padding: 0.5em; 85 | font-size: 24px; 86 | font-weight: bold; 87 | text-align: center; 88 | } 89 | 90 | td.Time { 91 | color: #000; 92 | border-bottom: 1px solid #aaa; 93 | background-color: #eee; 94 | } 95 | 96 | td.Activity,td.Change,td.Builder { 97 | color: #333333; 98 | background-color: #CCCCCC; 99 | } 100 | 101 | td.Change { 102 | border-radius: 5px; 103 | -webkit-border-radius: 5px; 104 | -moz-border-radius: 5px; 105 | } 106 | 107 | td.Event { 108 | color: #777; 109 | background-color: #ddd; 110 | border-radius: 5px; 111 | -webkit-border-radius: 5px; 112 | -moz-border-radius: 5px; 113 | 
} 114 | 115 | td.Activity { 116 | border-top-left-radius: 10px; 117 | -webkit-border-top-left-radius: 10px; 118 | -moz-border-radius-topleft: 10px; 119 | min-height: 20px; 120 | padding: 2px 0 2px 0; 121 | } 122 | 123 | td.idle,td.waiting,td.offline,td.building { 124 | border-top-left-radius: 0px; 125 | -webkit-border-top-left-radius: 0px; 126 | -moz-border-radius-topleft: 0px; 127 | } 128 | 129 | .LastBuild { 130 | border-top-left-radius: 5px; 131 | -webkit-border-top-left-radius: 5px; 132 | -moz-border-radius-topleft: 5px; 133 | border-top-right-radius: 5px; 134 | -webkit-border-top-right-radius: 5px; 135 | -moz-border-radius-topright: 5px; 136 | } 137 | 138 | /* Console view styles */ 139 | td.DevStatus > table { 140 | table-layout: fixed; 141 | } 142 | 143 | td.DevRev { 144 | padding: 4px 8px 4px 8px; 145 | color: #333333; 146 | border-top-left-radius: 5px; 147 | -webkit-border-top-left-radius: 5px; 148 | -moz-border-radius-topleft: 5px; 149 | background-color: #eee; 150 | width: 1%; 151 | } 152 | 153 | td.DevRevCollapse { 154 | border-bottom-left-radius: 5px; 155 | -webkit-border-bottom-left-radius: 5px; 156 | -moz-border-radius-bottomleft: 5px; 157 | } 158 | 159 | td.DevName { 160 | padding: 4px 8px 4px 8px; 161 | color: #333333; 162 | background-color: #eee; 163 | width: 1%; 164 | text-align: left; 165 | } 166 | 167 | td.DevStatus { 168 | padding: 4px 4px 4px 4px; 169 | color: #333333; 170 | background-color: #eee; 171 | } 172 | 173 | td.DevSlave { 174 | padding: 4px 4px 4px 4px; 175 | color: #333333; 176 | background-color: #eee; 177 | } 178 | 179 | td.first { 180 | border-top-left-radius: 5px; 181 | -webkit-border-top-left-radius: 5px; 182 | -moz-border-radius-topleft: 5px; 183 | } 184 | 185 | td.last { 186 | border-top-right-radius: 5px; 187 | -webkit-border-top-right-radius: 5px; 188 | -moz-border-radius-topright: 5px; 189 | } 190 | 191 | td.DevStatusCategory { 192 | border-radius: 5px; 193 | -webkit-border-radius: 5px; 194 | -moz-border-radius: 5px; 195 
| border-width: 1px; 196 | border-style: solid; 197 | } 198 | 199 | td.DevStatusCollapse { 200 | border-bottom-right-radius: 5px; 201 | -webkit-border-bottom-right-radius: 5px; 202 | -moz-border-radius-bottomright: 5px; 203 | } 204 | 205 | td.DevDetails { 206 | font-weight: normal; 207 | padding: 8px 8px 8px 8px; 208 | color: #333333; 209 | background-color: #eee; 210 | text-align: left; 211 | } 212 | 213 | td.DevDetails li a { 214 | padding-right: 5px; 215 | } 216 | 217 | td.DevComment { 218 | font-weight: normal; 219 | padding: 8px 8px 8px 8px; 220 | color: #333333; 221 | background-color: #eee; 222 | text-align: left; 223 | } 224 | 225 | td.DevBottom { 226 | border-bottom-right-radius: 5px; 227 | -webkit-border-bottom-right-radius: 5px; 228 | -moz-border-radius-bottomright: 5px; 229 | border-bottom-left-radius: 5px; 230 | -webkit-border-bottom-left-radius: 5px; 231 | -moz-border-radius-bottomleft: 5px; 232 | } 233 | 234 | td.Alt { 235 | background-color: #ddd; 236 | } 237 | 238 | .legend { 239 | border-radius: 5px !important; 240 | -webkit-border-radius: 5px !important; 241 | -moz-border-radius: 5px !important; 242 | width: 100px; 243 | max-width: 100px; 244 | text-align: center; 245 | padding: 2px 2px 2px 2px; 246 | height: 14px; 247 | white-space: nowrap; 248 | } 249 | 250 | .DevStatusBox { 251 | text-align: center; 252 | height: 20px; 253 | padding: 0 2px; 254 | line-height: 0; 255 | white-space: nowrap; 256 | } 257 | 258 | .DevStatusBox a { 259 | opacity: 0.85; 260 | border-width: 1px; 261 | border-style: solid; 262 | border-radius: 4px; 263 | -webkit-border-radius: 4px; 264 | -moz-border-radius: 4px; 265 | display: block; 266 | width: 90%; 267 | height: 20px; 268 | line-height: 20px; 269 | margin-left: auto; 270 | margin-right: auto; 271 | } 272 | 273 | .DevStatusBox a.notinbuilder { 274 | border-style: none; 275 | } 276 | 277 | .DevSlaveBox { 278 | text-align: center; 279 | height: 10px; 280 | padding: 0 2px; 281 | line-height: 0; 282 | white-space: 
nowrap; 283 | } 284 | 285 | .DevSlaveBox a { 286 | opacity: 0.85; 287 | border-width: 1px; 288 | border-style: solid; 289 | border-radius: 4px; 290 | -webkit-border-radius: 4px; 291 | -moz-border-radius: 4px; 292 | display: block; 293 | width: 90%; 294 | height: 10px; 295 | line-height: 20px; 296 | margin-left: auto; 297 | margin-right: auto; 298 | } 299 | 300 | a.noround { 301 | border-radius: 0px; 302 | -webkit-border-radius: 0px; 303 | -moz-border-radius: 0px; 304 | position: relative; 305 | margin-top: -8px; 306 | margin-bottom: -8px; 307 | height: 36px; 308 | border-top-width: 0; 309 | border-bottom-width: 0; 310 | } 311 | 312 | a.begin { 313 | border-top-width: 1px; 314 | position: relative; 315 | margin-top: 0px; 316 | margin-bottom: -7px; 317 | height: 27px; 318 | border-top-left-radius: 4px; 319 | -webkit-border-top-left-radius: 4px; 320 | -moz-border-radius-topleft: 4px; 321 | border-top-right-radius: 4px; 322 | -webkit-border-top-right-radius: 4px; 323 | -moz-border-radius-topright: 4px; 324 | } 325 | 326 | a.end { 327 | border-bottom-width: 1px; 328 | position: relative; 329 | margin-top: -7px; 330 | margin-bottom: 0px; 331 | height: 27px; 332 | border-bottom-left-radius: 4px; 333 | -webkit-border-bottom-left-radius: 4px; 334 | -moz-border-radius-bottomleft: 4px; 335 | border-bottom-right-radius: 4px; 336 | -webkit-border-bottom-right-radius: 4px; 337 | -moz-border-radius-bottomright: 4px; 338 | } 339 | 340 | .center_align { 341 | text-align: center; 342 | } 343 | 344 | .right_align { 345 | text-align: right; 346 | } 347 | 348 | .left_align { 349 | text-align: left; 350 | } 351 | 352 | div.BuildWaterfall { 353 | border-radius: 7px; 354 | -webkit-border-radius: 7px; 355 | -moz-border-radius: 7px; 356 | position: absolute; 357 | left: 0px; 358 | top: 0px; 359 | background-color: #FFFFFF; 360 | padding: 4px 4px 4px 4px; 361 | float: left; 362 | display: none; 363 | border-width: 1px; 364 | border-style: solid; 365 | } 366 | 367 | /* LastBuild, BuildStep 
states */ 368 | .success { 369 | color: #000; 370 | background-color: #8d4; 371 | border-color: #4F8530; 372 | } 373 | 374 | .failure { 375 | color: #000; 376 | background-color: #e88; 377 | border-color: #A77272; 378 | } 379 | 380 | .failure-again { 381 | color: #000; 382 | background-color: #eA9; 383 | border-color: #A77272; 384 | } 385 | 386 | .warnings { 387 | color: #FFFFFF; 388 | background-color: #fa3; 389 | border-color: #C29D46; 390 | } 391 | 392 | .skipped { 393 | color: #000; 394 | background: #AADDEE; 395 | border-color: #AADDEE; 396 | } 397 | 398 | .exception,.retry { 399 | color: #FFFFFF; 400 | background-color: #c6c; 401 | border-color: #ACA0B3; 402 | } 403 | 404 | .start { 405 | color: #000; 406 | background-color: #ccc; 407 | border-color: #ccc; 408 | } 409 | 410 | .running,.waiting,td.building { 411 | color: #000; 412 | background-color: #fd3; 413 | border-color: #C5C56D; 414 | } 415 | 416 | .paused { 417 | color: #FFFFFF; 418 | background-color: #8080FF; 419 | border-color: #dddddd; 420 | } 421 | 422 | .offline,td.offline { 423 | color: #FFFFFF; 424 | background-color: #777777; 425 | border-color: #dddddd; 426 | } 427 | 428 | 429 | .start { 430 | border-bottom-left-radius: 10px; 431 | -webkit-border-bottom-left-radius: 10px; 432 | -moz-border-radius-bottomleft: 10px; 433 | border-bottom-right-radius: 10px; 434 | -webkit-border-bottom-right-radius: 10px; 435 | -moz-border-radius-bottomright: 10px; 436 | } 437 | 438 | .notstarted { 439 | border-width: 1px; 440 | border-style: solid; 441 | border-color: #aaa; 442 | background-color: #fff; 443 | } 444 | 445 | .closed { 446 | background-color: #ff0000; 447 | } 448 | 449 | .closed .large { 450 | font-size: 1.5em; 451 | font-weight: bolder; 452 | } 453 | 454 | td.Project a:hover,td.start a:hover { 455 | color: #000; 456 | } 457 | 458 | .mini-box { 459 | text-align: center; 460 | height: 20px; 461 | padding: 0 2px; 462 | line-height: 0; 463 | white-space: nowrap; 464 | } 465 | 466 | .mini-box a { 467 | 
border-radius: 0; 468 | -webkit-border-radius: 0; 469 | -moz-border-radius: 0; 470 | display: block; 471 | width: 100%; 472 | height: 20px; 473 | line-height: 20px; 474 | margin-top: -30px; 475 | } 476 | 477 | .mini-closed { 478 | -box-sizing: border-box; 479 | -webkit-box-sizing: border-box; 480 | border: 4px solid red; 481 | } 482 | 483 | /* grid styles */ 484 | table.Grid { 485 | border-collapse: collapse; 486 | } 487 | 488 | table.Grid tr td { 489 | padding: 0.2em; 490 | margin: 0px; 491 | text-align: center; 492 | } 493 | 494 | table.Grid tr td.title { 495 | font-size: 90%; 496 | border-right: 1px gray solid; 497 | border-bottom: 1px gray solid; 498 | } 499 | 500 | table.Grid tr td.sourcestamp { 501 | font-size: 90%; 502 | } 503 | 504 | table.Grid tr td.builder { 505 | text-align: right; 506 | font-size: 90%; 507 | } 508 | 509 | table.Grid tr td.build { 510 | border: 1px gray solid; 511 | } 512 | 513 | /* column container */ 514 | div.column { 515 | margin: 0 2em 2em 0; 516 | float: left; 517 | } 518 | 519 | /* info tables */ 520 | table.info { 521 | border-spacing: 1px; 522 | } 523 | 524 | table.info td { 525 | padding: 0.1em 1em 0.1em 1em; 526 | text-align: center; 527 | } 528 | 529 | table.info th { 530 | padding: 0.2em 1.5em 0.2em 1.5em; 531 | text-align: center; 532 | } 533 | 534 | table.info td.left { 535 | text-align: left 536 | } 537 | 538 | table.info td .reason { 539 | display:block; 540 | font-weight: bold; 541 | } 542 | 543 | .alt { 544 | background-color: #f6f6f6; 545 | } 546 | 547 | li { 548 | padding: 0.1em 1em 0.1em 1em; 549 | } 550 | 551 | .result { 552 | padding: 0.3em 1em 0.3em 1em; 553 | } 554 | 555 | /* log view */ 556 | .log * { 557 | vlink: #800080; 558 | } 559 | 560 | span.stdout { 561 | color: black; 562 | } 563 | 564 | span.stderr { 565 | color: red; 566 | } 567 | 568 | span.header { 569 | color: blue; 570 | } 571 | span.ansi30 { 572 | color: black; 573 | } 574 | span.ansi31 { 575 | color: red; 576 | } 577 | span.ansi32 { 578 | color: 
green; 579 | } 580 | span.ansi33 { 581 | color: orange; 582 | } 583 | span.ansi34 { 584 | color: yellow; 585 | } 586 | span.ansi35 { 587 | color: purple; 588 | } 589 | span.ansi36 { 590 | color: blue; 591 | } 592 | span.ansi37 { 593 | color: white; 594 | } 595 | 596 | /* revision & email */ 597 | .revision .full { 598 | display: none; 599 | } 600 | 601 | .user .email { 602 | display: none; 603 | } 604 | 605 | pre { 606 | white-space: pre-wrap; 607 | } 608 | 609 | /* change comments (use regular colors here) */ 610 | pre.comments>a:link,pre.comments>a:visited { 611 | color: blue; 612 | } 613 | 614 | pre.comments>a:active { 615 | color: purple; 616 | } 617 | 618 | form.command_forcebuild { 619 | border-top: 1px solid black; 620 | padding: .5em; 621 | margin: .5em; 622 | } 623 | 624 | form.command_forcebuild > .row { 625 | border-top: 1px dotted gray; 626 | padding: .5em 0; 627 | } 628 | 629 | form.command_forcebuild .force-textarea > .label { 630 | display: block; 631 | } 632 | 633 | form.command_forcebuild .force-nested > .label { 634 | font-weight: bold; 635 | display: list-item; 636 | } 637 | 638 | form.command_forcebuild .force-any .force-text { 639 | display: inline; 640 | } 641 | -------------------------------------------------------------------------------- /scripts/known-issues.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script will generate HTML describing which failures have 4 | # been observed by the buildbot when running ZFS Test Suite. Its 5 | # purpose is to allow developers to quickly assess the prevalence 6 | # of any observed failures. 
7 | # 8 | 9 | OPENZFS_DIR="/home/buildbot/zfs-buildbot/master/*_TEST_" 10 | OPENZFS_MTIME=30 11 | OPENZFS_MMIN=$((OPENZFS_MTIME*24*60)) 12 | OPENZFS_PRS_INCLUDE="no" 13 | OPENZFS_ISSUES=$(curl -s https://api.github.com/search/issues?q=repo:openzfs/zfs+label:%22Test%20Suite%22) 14 | 15 | NUMBER_REGEX='^[0-9]+$' 16 | DATE=$(date) 17 | STATUS_LOW_CUTOFF=1 18 | STATUS_MED_CUTOFF=5 19 | 20 | STATUS_LOW="st_low" 21 | STATUS_MED="st_med" 22 | STATUS_HIGH="st_high" 23 | STATUS_PR="st_pr" 24 | STATUS_RESOLVED="st_resolved" 25 | 26 | STATUS_LOW_COLOR="#ffee3a" 27 | STATUS_MED_COLOR="#ffa500" 28 | STATUS_HIGH_COLOR="#ff9999" 29 | STATUS_PR_COLOR="#f8f8f8" 30 | STATUS_RESOLVED_COLOR="#5ff567" 31 | 32 | STATUS_LOW_TEXT="low" 33 | STATUS_MED_TEXT="medium" 34 | STATUS_HIGH_TEXT="high" 35 | STATUS_PR_TEXT="" 36 | 37 | usage() { 38 | cat << EOF 39 | USAGE: 40 | $0 [-h] [-d directory] [-e exceptions] [-m mtime] 41 | 42 | DESCRIPTION: 43 | Dynamically generate HTML for the Known Issue Tracking page 44 | using the ZFS Test Suite results from the buildbot automated 45 | testing. 
46 | 47 | OPTIONS: 48 | -h Show this message 49 | -d directory Directory containing the buildbot logs 50 | -e exceptions Exception file (using ZoL wiki if not specified) 51 | -m mtime Include test logs from the last N days 52 | -p Include PR failures in report 53 | 54 | EXAMPLE: 55 | 56 | $0 -d ~/zfs-buildbot/master/*_TEST_ -m 30 \\ 57 | >~/zfs-buildbot/master/public_html/known-issues.html 58 | 59 | EOF 60 | } 61 | 62 | while getopts 'hd:e:m:p' OPTION; do 63 | case $OPTION in 64 | h) 65 | usage 66 | exit 1 67 | ;; 68 | d) 69 | OPENZFS_DIR=$OPTARG 70 | ;; 71 | e) 72 | OPENZFS_EXCEPTIONS=$OPTARG 73 | ;; 74 | m) 75 | OPENZFS_MTIME=$OPTARG 76 | OPENZFS_MMIN=$((OPENZFS_MTIME*24*60)) 77 | ;; 78 | p) 79 | OPENZFS_PRS_INCLUDE="yes" 80 | ;; 81 | esac 82 | done 83 | 84 | cat << EOF 85 | 86 | 87 | 88 | OpenZFS Known Issue Tracking 89 | 90 | 91 | 92 | 93 | 94 | 95 | 98 | 99 | 100 | 101 | 102 | 145 | 146 | 191 | 192 | 193 | 194 |

OpenZFS Known Issue Tracking

195 |
196 |

This page is updated regularly and contains a list of all ZFS Test Suite 197 | issues observed during automated buildbot testing over the last 198 | $OPENZFS_MTIME days.

199 |

Refer to the Test Suite label in the issue tracker for a complete list of known issues.

200 |
201 |
202 | 203 | 204 | 205 | 206 | 207 | 208 | 209 | 210 | 211 | 212 | 213 | 214 | 215 | 216 | 217 | EOF 218 | 219 | # Unfortunately there's not enough information in the buildbot logs to 220 | # dynamically generate the encoded version of the builder name. 221 | # Therefore we're forced to implement this simple lookup function. 222 | function build_url() 223 | { 224 | local name=$1 225 | local nr=$2 226 | local url="https://build.openzfs.org/builders" 227 | 228 | case "$name" in 229 | CentOS_7_x86_64__TEST_) 230 | encoded_name="CentOS%207%20x86_64%20%28TEST%29" 231 | ;; 232 | CentOS_8_x86_64__TEST_) 233 | encoded_name="CentOS%208%20x86_64%20%28TEST%29" 234 | ;; 235 | CentOS_Stream_8_x86_64__TEST_) 236 | encoded_name="CentOS%20Stream%208%20x86_64%20%28TEST%29" 237 | ;; 238 | CentOS_9_x86_64__TEST_) 239 | encoded_name="CentOS%209%20x86_64%20%28TEST%29" 240 | ;; 241 | Debian_10_x86_64__TEST_) 242 | encoded_name="Debian%2010%20x86_64%20%28TEST%29" 243 | ;; 244 | Fedora_37_x86_64__TEST_) 245 | encoded_name="Fedora%2037%20x86_64%20%28TEST%29" 246 | ;; 247 | Fedora_38_x86_64__TEST_) 248 | encoded_name="Fedora%2038%20x86_64%20%28TEST%29" 249 | ;; 250 | Fedora_39_x86_64__TEST_) 251 | encoded_name="Fedora%2039%20x86_64%20%28TEST%29" 252 | ;; 253 | FreeBSD_stable_12_amd64__TEST_) 254 | encoded_name="FreeBSD%20stable%2F12%20amd64%20%28TEST%29" 255 | ;; 256 | FreeBSD_stable_13_amd64__TEST_) 257 | encoded_name="FreeBSD%20stable%2F13%20amd64%20%28TEST%29" 258 | ;; 259 | FreeBSD_stable_14_amd64__TEST_) 260 | encoded_name="FreeBSD%20stable%2F14%20amd64%20%28TEST%29" 261 | ;; 262 | *) 263 | encoded_named="unknown" 264 | ;; 265 | esac 266 | 267 | echo "$nr" 268 | } 269 | export -f build_url 270 | 271 | check() { 272 | local git_log="$1-log-git_zfs-stdio" 273 | local test_log="$1-log-shell_4-tests.bz2" 274 | local mode="$2" 275 | 276 | # Ignore incomplete builds 277 | [[ ! -e "$git_log" ]] && return 1 278 | [[ ! 
-e "$test_log" ]] && return 1 279 | 280 | nr=$(basename "$1" | cut -f1 -d' ') 281 | name=$(basename "$(dirname "$1")") 282 | test_url=$(build_url $name $nr) 283 | 284 | # Ignore the coverage builder 285 | [[ "$name" = "Ubuntu_18_04_x86_64_Coverage__TEST_" ]] && return 1 286 | 287 | # Annotate pull requests vs branch commits 288 | if grep -q "refs/pull" "$git_log"; then 289 | origin=$(grep -m1 "git fetch" "$git_log" | \ 290 | cut -f5 -d' ' | cut -d '/' -f3) 291 | else 292 | origin=$(grep -m1 "git clone --branch" "$git_log" | \ 293 | cut -f4 -d' ') 294 | fi 295 | 296 | # Strip and print the failed test cases 297 | bzgrep -e '\['"$mode"'\]' "$test_log" | \ 298 | awk -F"zfs-tests/" '{print $2}' | cut -d' ' -f1 | \ 299 | awk -v prefix="$test_url $origin " '{ print prefix $0; }' 300 | } 301 | export -f check 302 | 303 | # Get all exceptions and comments 304 | if [ -z ${OPENZFS_EXCEPTIONS+x} ]; then 305 | OPENZFS_EXCEPTIONS=$(curl -s https://raw.githubusercontent.com/wiki/openzfs/zfs/ZTS-exceptions.md | awk '/---|---|---/{y=1;next}y') 306 | else 307 | OPENZFS_EXCEPTIONS=$(cat "$OPENZFS_EXCEPTIONS" | awk '/---|---|---/{y=1;next}y') 308 | fi 309 | 310 | # List of all tests which have passed 311 | OPENZFS_PASSES=$(find $OPENZFS_DIR -type f -mmin -$OPENZFS_MMIN \ 312 | -regex ".*/[0-9]*" -exec bash -c 'check "$0" "PASS"' {} \; | \ 313 | cut -f3- -d' ' | sort | uniq -c | sort -nr) 314 | 315 | # List of all tests which have failed 316 | OPENZFS_FAILURES=$(find $OPENZFS_DIR -type f -mmin -$OPENZFS_MMIN \ 317 | -regex ".*/[0-9]*" -exec bash -c 'check "$0" "FAIL"' {} \;) 318 | 319 | echo "$OPENZFS_FAILURES" | cut -f3- -d' ' | sort | uniq -c | sort -nr | \ 320 | while read LINE1; do 321 | OPENZFS_ISSUE="" 322 | OPENZFS_NAME=$(echo $LINE1 | cut -f3 -d' ') 323 | OPENZFS_ORIGIN=$(echo $LINE1 | cut -f2 -d' ') 324 | OPENZFS_FAIL=$(echo $LINE1 | cut -f1 -d' ') 325 | OPENZFS_STATE="" 326 | OPENZFS_STATUS="" 327 | 328 | # Create links buildbot logs for all failed tests. 
329 | OPENZFS_BUILDS=$(echo "$OPENZFS_FAILURES" | \ 330 | grep "$OPENZFS_ORIGIN $OPENZFS_NAME" | cut -f1-2 -d' ') 331 | 332 | OPENZFS_PASS=$(echo "$OPENZFS_PASSES" | \ 333 | grep "$OPENZFS_ORIGIN $OPENZFS_NAME" | \ 334 | awk '{$1=$1;print}' | cut -f1 -d' ') 335 | 336 | [[ "$OPENZFS_FAIL" =~ $NUMBER_REGEX ]] || OPENZFS_FAIL=0 337 | [[ "$OPENZFS_PASS" =~ $NUMBER_REGEX ]] || OPENZFS_PASS=1 338 | 339 | OPENZFS_RATE=$(bc <<< "scale=2; ((100*$OPENZFS_FAIL) / \ 340 | ($OPENZFS_PASS + $OPENZFS_FAIL))" | \ 341 | awk '{printf "%.2f", $0}') 342 | 343 | # Ignore test results with few samples. 344 | if [ "$OPENZFS_PASS" -lt 10 ]; then 345 | continue 346 | fi 347 | 348 | # Test failure was from an open pull request or branch. 349 | if [[ $OPENZFS_ORIGIN =~ $NUMBER_REGEX ]]; then 350 | 351 | if [ "$OPENZFS_PRS_INCLUDE" = "no" ]; then 352 | continue 353 | fi 354 | 355 | pr="https://github.com/openzfs/zfs/pull/$OPENZFS_ORIGIN" 356 | OPENZFS_ISSUE="PR-$OPENZFS_ORIGIN" 357 | OPENZFS_STATUS=$STATUS_PR 358 | OPENZFS_STATUS_TEXT=$STATUS_PR_TEXT 359 | OPENZFS_ORIGIN="Pull Requests" 360 | else 361 | OPENZFS_ORIGIN="Branch: $OPENZFS_ORIGIN" 362 | 363 | # Match test case name against open issues. For an issue 364 | # to be matched it must be labeled "Test Suite" and contain 365 | # the base name of the failing test case in the title. 
366 | base=$(basename $OPENZFS_NAME) 367 | issue=$(echo "$OPENZFS_ISSUES" | jq ".items[] | \ 368 | select(.title | contains(\"$base\")) | \ 369 | {html_url, number, state}") 370 | url=$(echo "$issue"|grep html_url|cut -f2- -d':'|tr -d ' ",') 371 | number=$(echo "$issue"|grep number|cut -f2- -d':'|tr -d ' ",') 372 | state=$(echo "$issue"|grep state|cut -f2- -d':'|tr -d "\" ") 373 | OPENZFS_ISSUE="$number" 374 | 375 | if [[ ${OPENZFS_RATE%%.*} -le $STATUS_LOW_CUTOFF ]]; then 376 | OPENZFS_STATUS=$STATUS_LOW 377 | OPENZFS_STATUS_TEXT=$STATUS_LOW_TEXT 378 | elif [[ ${OPENZFS_RATE%%.*} -le $STATUS_MED_CUTOFF ]]; then 379 | OPENZFS_STATUS=$STATUS_MED 380 | OPENZFS_STATUS_TEXT=$STATUS_MED_TEXT 381 | else 382 | OPENZFS_STATUS=$STATUS_HIGH 383 | OPENZFS_STATUS_TEXT=$STATUS_HIGH_TEXT 384 | fi 385 | 386 | # Invalid test names should be ignored. 387 | if [[ ! "${OPENZFS_NAME}" =~ ^tests/functional/.* ]]; then 388 | continue 389 | fi 390 | 391 | # Match ZTS name in exceptions list. 392 | EXCEPTION=$(echo "$OPENZFS_EXCEPTIONS" | \ 393 | grep -E "^${OPENZFS_NAME##*tests/functional/}") 394 | if [ -n "$EXCEPTION" ]; then 395 | EXCEPTION_ISSUE=$(echo $EXCEPTION | cut -f2 -d'|' | tr -d ' ') 396 | EXCEPTION_STATE=$(echo $EXCEPTION | cut -d'|' -f3-) 397 | 398 | # '-' indicates the entry should be skipped, 399 | # '!' print the provided comment from the exception, 400 | # '' use state from references issue number. 401 | if [ "$EXCEPTION_ISSUE" == "-" ]; then 402 | continue; 403 | elif [ "$EXCEPTION_ISSUE" == "!" 
]; then 404 | OPENZFS_STATE="$EXCEPTION_STATE" 405 | else 406 | issue=$(echo "$OPENZFS_ISSUES" | \ 407 | jq ".items[] | \ 408 | select(.number == $EXCEPTION_ISSUE) | \ 409 | {html_url, number, state}") 410 | url=$(echo "$issue"|grep html_url|cut -f2- -d':'|tr -d ' ",') 411 | number=$(echo "$issue"|grep number|cut -f2- -d':'|tr -d ' ",') 412 | state=$(echo "$issue"|grep state|cut -f2- -d':'|tr -d "\" ") 413 | OPENZFS_ISSUE="$number" 414 | OPENZFS_STATE="${state}" 415 | fi 416 | else 417 | OPENZFS_STATE="${state}" 418 | fi 419 | fi 420 | 421 | # add styles for resolved issues 422 | if [ "$OPENZFS_STATE" == "closed" ]; then 423 | OPENZFS_STATUS=$STATUS_RESOLVED 424 | fi 425 | 426 | cat << EOF 427 | 428 | 429 | 430 | 431 | 432 | 433 | 434 | 435 | 436 | 437 | 438 | EOF 439 | 440 | done 441 | 442 | cat << EOF 443 | 444 |
IssueRatePassFailFailure ListTest NameStateOriginSeverity
$OPENZFS_ISSUE$OPENZFS_RATE%$OPENZFS_PASS$OPENZFS_FAIL$OPENZFS_BUILDS$OPENZFS_NAME$OPENZFS_STATE$OPENZFS_ORIGIN$OPENZFS_STATUS_TEXT
445 |
Last Update: $DATE by known-issues.sh
446 |
447 | 448 | 449 | EOF 450 | --------------------------------------------------------------------------------