403 |
445 | ├── README.md ├── buildjob.sh ├── buildserver.py ├── buildworkers.sh ├── createbasevm.sh ├── data └── builds.sqlite ├── default.yml ├── fetchvmfile.sh ├── getguestipv4.sh ├── guest_scripts ├── guest_00_checkout.sh ├── guest_00_env.sh ├── guest_00_install.sh ├── guest_01_compile.sh ├── guest_fix_netplan.sh ├── guest_run.sh └── guest_shared.sh ├── rm_vm.sh ├── shared.sh ├── shutdown_vm.sh ├── ssh_vm.sh └── www ├── .htaccess └── index.php /README.md: -------------------------------------------------------------------------------- 1 | Bitcoin Core CI 2 | ===================================== 3 | 4 | https://bitcoinbuilds.org 5 | 6 | Bitcoin Core CI is a set of scripts to provide a CI (continious integration) with web frontend and GitHub integration. 7 | 8 | ### How does it work 9 | Under the hood, it's using KVM/virsh to start a clean VM where the build happens. 10 | The build runs in GNU screen (in the VM) and redirect the console output to a NFS share. 11 | 12 | In the center, there is a daemon written in python (buildserver.py), running on the host, 13 | that checks the sqlite3 database for new work. 14 | 15 | The daemon decomposes build requests into jobs by looking at the yml configuration file. 16 | 17 | Jobs are tracked by parsing the log file in the NFS share produced by the VM's GNU screen. 18 | 19 | ### Status 20 | 21 | The code is still messy and buggy. 22 | Your help is wanted! -------------------------------------------------------------------------------- /buildjob.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -x 5 | 6 | . 
./shared.sh 7 | 8 | BASEVM=${1:-ubuntu1804_base} 9 | WORKER_NR=${2:-1} 10 | UUID=${3:-undef} 11 | VMLOGPATH="/mnt/shared/logs/builder_${WORKER_NR}_${UUID}.log" 12 | WORKER="${BASEVM}_${WORKER_NR}" 13 | 14 | # read the env variable script from stdin 15 | PIPE_IN="" 16 | if [ -p /dev/stdin ]; then 17 | while IFS= read line; do 18 | PIPE_IN="${PIPE_IN}\n${line}" 19 | done 20 | fi 21 | 22 | # we assume that the caller made sure the VM is not in use for another build 23 | # shutdown the VM if running (hard shutdown) 24 | vm_hard_shutdown $WORKER 25 | # revert the base snapshot 26 | virsh snapshot-revert --domain $WORKER --snapshotname base 27 | # start the VM 28 | virsh start $WORKER 29 | 30 | # get the IP (should be a fixed IP anyways, TODO: check if switching to a fix WORKER-NR->IP table makse sense) 31 | IP=$(get_vm_ip $WORKER) 32 | SSHHOST="ubuntu@${IP}" 33 | # check if we need to delete an entry from known host 34 | check_delete_known_host $IP 35 | # wait until SSH is available 36 | wait_for_ssh $SSHHOST 37 | 38 | # echo back the ENV script 39 | echo -e "$PIPE_IN" 40 | 41 | # send the ENV script to the host 42 | echo -e "$PIPE_IN" | ssh -oStrictHostKeyChecking=no $SSHHOST -T "cat > ~/env.sh" 43 | 44 | # copy all the guest scripts 45 | # TODO: if the yml file is comming from GIT, the guest scripts could as well 46 | scp -oStrictHostKeyChecking=no guest_scripts/* $SSHHOST:~/ 47 | 48 | # set uuid-file (for the ease of debug) 49 | ssh -oStrictHostKeyChecking=no $SSHHOST "echo $UUID | cat > uuid" 50 | 51 | # set hostname 52 | ssh -oStrictHostKeyChecking=no $SSHHOST "sudo hostnamectl set-hostname ${WORKER}" 53 | 54 | # make sure we have mounted the NFS share 55 | echo "Mounting NFS..." 56 | ssh -oStrictHostKeyChecking=no $SSHHOST "sudo mount /mnt/shared" 57 | echo "testing NFS..." 
58 | # TODO: switch to a REAL test ;) 59 | ssh -oStrictHostKeyChecking=no $SSHHOST "ls -la /mnt/shared/" 60 | echo "nfs okay" 61 | 62 | # start build by launching GNU screen and redirect the logfile to the NFS share 63 | echo "starting build" 64 | ssh -oStrictHostKeyChecking=no $SSHHOST "screen -L -Logfile ${VMLOGPATH} -d -m -S build -h 100000 bash -c /home/ubuntu/guest_run.sh" 65 | echo "build stared" 66 | -------------------------------------------------------------------------------- /buildserver.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python3 2 | 3 | import sqlite3 4 | import time 5 | import yaml 6 | import re 7 | import uuid 8 | import subprocess 9 | import os 10 | import threading 11 | from threading import Thread 12 | from enum import IntEnum 13 | 14 | SQLLITE_DB = "builds.sqlite" 15 | SCRIPT_DIR = os.path.dirname(__file__) 16 | DATA_DIR = os.path.join(SCRIPT_DIR, "data") 17 | LOGSDIR_WORK = "/home/jonasschnelli/vmshared/logs/" 18 | LOGSDIR_FINAL = os.path.join(SCRIPT_DIR, "logs") 19 | MAX_STALL_TIMEOUT = 300 20 | GLOBAL_ENV = "#!/bin/bash\n" 21 | RUNLOOP_SLEEP = 3 22 | 23 | class BuildStates(IntEnum): 24 | new = 0 25 | starting = 1 26 | started = 2 27 | failed = 3 28 | stalled = 4 29 | success = 5 30 | canceled = 6 31 | 32 | sql_con = sqlite3.connect(os.path.join(DATA_DIR, SQLLITE_DB)) 33 | cur = sql_con.cursor() 34 | sql_con.commit() 35 | sql_con.set_trace_callback(print) 36 | 37 | # create a job in the database 38 | def sql_create_job(conn, project): 39 | sql = ''' INSERT INTO jobs(to_build, uuid, name, starttime, endtime, baseimage, shellscript) 40 | VALUES(?,?,?,?,?,?,?) 
''' 41 | cur = conn.cursor() 42 | cur.execute(sql, project) 43 | conn.commit() 44 | return cur.lastrowid 45 | 46 | # create all jobs from a build by loading/processing the YAML 47 | def create_jobs_from_build(conn, row): 48 | global GLOBAL_ENV 49 | 50 | #read the build from DB 51 | cur = sql_con.cursor() 52 | cur.execute("UPDATE builds SET state=?, starttime=? WHERE rowid=?", [BuildStates.started, int(time.time()), row['rowid']]) 53 | sql_con.commit() 54 | 55 | #TODO: change static yml by checking out the GIT one and looking at the override link in the database 56 | yml_content = "" 57 | with open('default.yml', 'r') as content_file: 58 | yml_content = content_file.read() 59 | 60 | # add the git basics to the env script 61 | job_settings = GLOBAL_ENV 62 | job_settings += "GIT_REPOSITORY=\"" 63 | if row['repo'] is not None: job_settings += row['repo'] 64 | job_settings += "\"\n" 65 | 66 | job_settings += "GIT_BRANCH=\"" 67 | if row['branch'] is not None: job_settings += row['branch'] 68 | job_settings += "\"\n" 69 | 70 | job_settings += "GIT_COMMIT=\"" 71 | if row['commit'] is not None: job_settings += row['commit'] 72 | job_settings += "\"\n" 73 | 74 | try: 75 | # load YAML 76 | settings = yaml.safe_load(yml_content) 77 | 78 | # load global settings 79 | for env in settings['env']['global']: 80 | job_settings += "export "+env+"\n" 81 | 82 | # process jobs 83 | for job in settings['jobs']: 84 | # define a job uuid 85 | job_uuid = str(uuid.uuid4()) 86 | # process env vars 87 | variables = re.findall(r'([\w]+)=(\"([^\"]*)\"|[^ ]*)', job['env']) 88 | jobenv = job_settings 89 | jobenv += "JOB_UUID=\""+job_uuid+"\"\n" 90 | for var in variables: 91 | jobenv += var[0]+"="+var[1]+"\n" 92 | 93 | #TODO: move to debug function 94 | print("=="+job['name']) 95 | print(jobenv) 96 | print("-----------") 97 | # create db 98 | sql_create_job(conn, (row['rowid'], job_uuid, job['name'], 0, 0, row['image'], jobenv)) 99 | 100 | except yaml.YAMLError as exc: 101 | print(exc) 102 | 103 | 
# find new builds and create the jobs database entries 104 | def process_builds(conn): 105 | c = conn.cursor() 106 | c.row_factory = sqlite3.Row 107 | # query builds that have not yet been processed 108 | # TODO: check if starttime is the right field, process multiple builds (threaded) 109 | c.execute("SELECT rowid, * FROM builds WHERE starttime=0") 110 | row = c.fetchone() 111 | if row: 112 | print(row) 113 | create_jobs_from_build(sql_con, row) 114 | 115 | # check if vm is running (via shall script) 116 | # TODO: currently unused, we completely trust the database 117 | def is_vm_running(name): 118 | proc = subprocess.Popen([os.path.join(SCRIPT_DIR, "is_vm_running.sh"), name]) 119 | stdout = proc.communicate()[0] 120 | rc = proc.returncode 121 | if rc == 0: 122 | return True 123 | return False 124 | 125 | # get last edit time of a file 126 | def last_edit(filename): 127 | if os.path.isfile(filename) == False: 128 | return 0 129 | st=os.stat(filename) 130 | return st.st_mtime 131 | 132 | # check if build has stalled 133 | def build_has_stalled(starttime, uuid, logfile): 134 | if os.path.isfile(logfile) == False and (time.time() - starttime) > MAX_STALL_TIMEOUT: 135 | with open(logfile, "w") as myfile: 136 | myfile.write("Build has stalled, logfile not written for "+str(time.time() - starttime)+" time") 137 | return True 138 | 139 | if os.path.isfile(logfile) == False: 140 | return False 141 | 142 | lastline = "" 143 | try: 144 | lastline = subprocess.check_output(['tail', '-1', logfile]).decode() 145 | except UnicodeError as exc: 146 | pass 147 | if lastline.startswith("#BUILD#"+uuid+"#: "): 148 | return False 149 | 150 | last_edit_time = last_edit(logfile) 151 | if time.time() - last_edit_time > MAX_STALL_TIMEOUT: 152 | st=os.stat(logfile) 153 | with open(logfile, "a") as myfile: 154 | myfile.write("Build has stalled, no output for more then "+str(time.time() - last_edit_time)+" seconds, logfile:"+logfile+", time:"+str(st.st_mtime)) 155 | return True 156 | return 
False 157 | 158 | # get last lines of a file (returns empty string if file does not exists or decode fails) 159 | def last_log_lines(logfile): 160 | if os.path.isfile(logfile) == False: 161 | return "" 162 | lastlines = "" 163 | try: 164 | lastlines = subprocess.check_output(['tail', '-5', logfile]).decode() 165 | except UnicodeError as exc: 166 | pass 167 | return lastlines 168 | 169 | # get last lines of a file (returns empty string if file does not exists or decode fails) 170 | def get_log_times(logfile): 171 | if os.path.isfile(logfile) == False: 172 | return "" 173 | times = "" 174 | try: 175 | times = subprocess.check_output(['grep', '^###', logfile], stderr=subprocess.STDOUT).decode() 176 | except UnicodeError as exc: 177 | pass 178 | except subprocess.CalledProcessError as e: 179 | pass 180 | return times 181 | 182 | # check if build as been completed 183 | def build_is_completed(starttime, uuid, logfile): 184 | if build_has_stalled(starttime, uuid, logfile) == True: 185 | return BuildStates.stalled 186 | 187 | if os.path.isfile(logfile) == False: 188 | return BuildStates.new 189 | 190 | lastline = "" 191 | try: 192 | lastline = subprocess.check_output(['tail', '-1', logfile]).decode() 193 | except UnicodeError as exc: 194 | pass 195 | pattern = "#BUILD#"+uuid+"#: " 196 | if lastline.startswith(pattern): 197 | retcode = lastline[len(pattern):len(pattern)+1] 198 | if int(retcode) == 0: 199 | return BuildStates.success 200 | else: 201 | # any other non null return code equals fail 202 | return BuildStates.failed 203 | return BuildStates.started 204 | 205 | # check if a build has ended based on the state 206 | def build_ended(state): 207 | if state == BuildStates.failed or state == BuildStates.stalled or state == BuildStates.success or state == BuildStates.canceled: 208 | return True 209 | return False 210 | 211 | # find a idling worker 212 | def find_free_worker(conn, baseimage): 213 | # TODO: make worker amount configurable 214 | for i in range(1,7): 215 | c 
= conn.cursor() 216 | c.execute("SELECT rowid, * FROM jobs WHERE workernr=? AND starttime!=0 AND endtime=0", [i]) 217 | row = c.fetchone() 218 | if row: 219 | continue #worker in use 220 | return i 221 | return 0 # means no worker is free 222 | 223 | build_threads = [] # global list of threads (in order to process on completion) 224 | 225 | # thread class that handles starting a build via shell script 226 | class StartBuildThread(Thread): 227 | def __init__(self, baseimage, workernr, uuid, shellscript, jobid): 228 | ''' Constructor. ''' 229 | 230 | Thread.__init__(self) 231 | # keep data 232 | self.baseimage = baseimage 233 | self.workernr = workernr 234 | self.uuid = uuid 235 | self.shellscript = shellscript 236 | self.jobid = jobid 237 | self.returncode = -1 238 | 239 | def get_returncode(self): 240 | return self.returncode 241 | def get_jobid(self): 242 | return self.jobid 243 | def run(self): 244 | print("call subprocess") 245 | proc = subprocess.Popen([os.path.join(SCRIPT_DIR, "buildjob.sh"), self.baseimage, self.workernr, self.uuid], stdin=subprocess.PIPE) 246 | stdout = proc.communicate(input=self.shellscript)[0] 247 | print("comms done") 248 | self.returncode = proc.returncode 249 | print("Returncode: "+str(self.returncode)) 250 | 251 | # process jobs, update status, detect stalles and complete builds 252 | def process_jobs(conn): 253 | global LOGSDIR_WORK 254 | global MAX_STALL_TIMEOUT 255 | global LOGSDIR_FINAL 256 | global build_threads 257 | c = conn.cursor() 258 | c.row_factory = sqlite3.Row 259 | c.execute("SELECT rowid, * FROM jobs WHERE starttime=0") 260 | 261 | # fetch all new jobs 262 | while True: 263 | rows = c.fetchmany(1000) 264 | if not rows: break 265 | for r in rows: 266 | workernr = find_free_worker(sql_con, r['baseimage']) 267 | if workernr == 0: 268 | print("No free worker") 269 | continue 270 | 271 | # make sure we register that we are trying to start the jobs (state "starting") 272 | cur = sql_con.cursor() 273 | cur.execute("UPDATE 
jobs SET workernr=?, state=?, starttime=? WHERE rowid=?", [workernr, BuildStates.starting, int(time.time()), r['rowid']]) 274 | sql_con.commit() 275 | 276 | # start the job in thread 277 | thread = StartBuildThread(r['baseimage'], str(workernr), r['uuid'], r['shellscript'].encode(), r['rowid']) 278 | build_threads.append(thread) 279 | thread.start() 280 | 281 | # create a symlink for the work log file 282 | logfile = os.path.join(LOGSDIR_WORK, "builder_"+str(workernr)+"_"+r['uuid']+".log") 283 | log_final = os.path.join(LOGSDIR_FINAL, r['uuid']+".log") 284 | os.symlink(logfile, log_final) 285 | 286 | # check running builds 287 | c.execute("SELECT rowid, * FROM jobs WHERE starttime!=0 AND endtime=0") 288 | while True: 289 | rows = c.fetchmany(1000) 290 | if not rows: break 291 | for r in rows: 292 | logfile = os.path.join(LOGSDIR_WORK, "builder_"+str(r['workernr'])+"_"+r['uuid']+".log") 293 | 294 | # figure out what task is currently in work 295 | taskname = "unknown" 296 | tasktime = 0 297 | times = get_log_times(logfile) 298 | lasttimes = times.splitlines() 299 | if len(lasttimes) >= 1: 300 | matches = re.match(r'^###([^#]*)#([^#]*)#([0-9]*)', lasttimes[-1]) 301 | if matches: 302 | tasktime = int(time.time())-int(matches.groups()[2]) 303 | taskname = matches.groups()[1] 304 | if r['task'] != taskname: 305 | # update the job's current task if different 306 | cur = sql_con.cursor() 307 | cur.execute("UPDATE jobs SET task=?, alltasks=? 
WHERE rowid=?", [taskname, times, r['rowid']]) 308 | sql_con.commit() 309 | 310 | state = build_is_completed(r['starttime'], r['uuid'], logfile) 311 | if r['action'] == 1: 312 | # cancle build 313 | print("canceling job...") 314 | state = BuildStates.canceled 315 | if build_ended(state): 316 | print("ending build") 317 | job_endtime = last_edit(logfile) 318 | log_final = os.path.join(LOGSDIR_FINAL, r['uuid']+".log") 319 | if job_endtime == 0: 320 | # if we could not get a job end time, assume "now" is the time the build has ended 321 | job_endtime = time.time() 322 | else: 323 | # file exists, copy logfile 324 | os.remove(log_final) # remove symlink 325 | os.rename(logfile, log_final) 326 | if state == BuildStates.stalled: 327 | with open(log_final, "a") as myfile: 328 | myfile.write("Build has stalled, no output for more then "+str(MAX_STALL_TIMEOUT)+" seconds") 329 | 330 | #copy build results 331 | # TODO: use ~/out.tar.gz and make sure it won't stop the script if the file is not present 332 | try: 333 | subprocess.check_output([os.path.join(SCRIPT_DIR, "fetchvmfile.sh"), r['baseimage'], str(r['workernr']), "src/out.tar.gz", os.path.join(LOGSDIR_FINAL, r['uuid']+"-buildresult.tar.gz")], stderr=subprocess.STDOUT).decode() 334 | except UnicodeError as exc: 335 | print("unicode error") 336 | pass 337 | except subprocess.CalledProcessError as e: 338 | print("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output)) 339 | pass 340 | 341 | # shutdown the VM 342 | subprocess.check_output([os.path.join(SCRIPT_DIR, "shutdown_vm.sh"), r['baseimage']+"_"+str(r['workernr'])]) 343 | 344 | # update the job (has ended) 345 | cur = sql_con.cursor() 346 | times = get_log_times(log_final) 347 | cur.execute("UPDATE jobs SET state=?, endtime=?, task='', tasktime='0', alltasks=? 
WHERE rowid=?", [int(state), job_endtime, times, r['rowid']]) 348 | sql_con.commit() 349 | 350 | # calculate the job time 351 | buildtime = job_endtime - r['starttime'] 352 | print("build in # "+str(r['workernr'])+" finished with state "+str(state)+", buildtime: "+str(buildtime/60.0)+" mins") 353 | 354 | # check if the overall build is completed (if all jobs have completed) 355 | cur = sql_con.cursor() 356 | # fetch non-complete jobs 357 | cur.execute("SELECT rowid FROM jobs WHERE to_build =? AND endtime=0", [r['to_build']]) 358 | row = cur.fetchone() 359 | if row: 360 | print("not all jobs of build are done...") 361 | else: 362 | print("Build done") 363 | # check if all succeeded to store the correct state for the overall builds (either success or failed) 364 | cur = sql_con.cursor() 365 | cur.row_factory = sqlite3.Row 366 | cur.execute("SELECT rowid, * FROM jobs WHERE to_build = ?", [r['to_build']]) 367 | build_end_state = BuildStates.success 368 | while True: 369 | check_rows = cur.fetchmany(1000) 370 | if not check_rows: break 371 | for cr in check_rows: 372 | if cr['state'] != BuildStates.success: 373 | # if one job did not succeed, flag overall build as failed 374 | build_end_state = BuildStates.failed 375 | break 376 | 377 | #TODO: check if time.time() is acceptable for a build endtime or if we should use the last jobs endtime 378 | cur.execute("UPDATE builds SET state=?, endtime=? 
WHERE rowid=?", [int(build_end_state), time.time() , r['to_build']]) 379 | sql_con.commit() 380 | 381 | else: 382 | # build is still running 383 | buildtime = time.time() - r['starttime'] 384 | #print("build in # "+str(r['workernr'])+" running since: "+str(buildtime/60.0)+" mins") 385 | #print("===================\n"+last_log_lines(logfile)+"===================\n\n") 386 | 387 | # main runloop 388 | while True: 389 | process_builds(sql_con) 390 | process_jobs(sql_con) 391 | 392 | # check if threads do need completion 393 | print("check threads") 394 | # itterate over a copy of build_threads in order to allow to manipulate the original list 395 | for build_start_thread in list(build_threads): 396 | if (build_start_thread.is_alive()): 397 | print("Thread with jobid "+str(build_start_thread.get_jobid())+" is still running...") 398 | else: 399 | print("Thread with jobid "+str(build_start_thread.get_jobid())+" is done...") 400 | if build_start_thread.get_returncode() == 0: 401 | print("Updating the database") 402 | cur = sql_con.cursor() 403 | cur.execute("UPDATE jobs SET state=? WHERE rowid=?", [BuildStates.started, build_start_thread.get_jobid()]) 404 | sql_con.commit() 405 | build_threads.remove(build_start_thread) 406 | 407 | # sleep for a while 408 | time.sleep(RUNLOOP_SLEEP) 409 | 410 | 411 | -------------------------------------------------------------------------------- /buildworkers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -x 5 | 6 | . 
./shared.sh 7 | 8 | BASEVM=ubuntu1804_base 9 | vm_graceful_shutdown $BASEVM 10 | MAC_BASE="52:54:00:00:e6:0" 11 | for i in {1..8} 12 | do 13 | CLONENAME="${BASEVM}_$i" 14 | MACADDR=${MAC_BASE}$i 15 | vm_graceful_shutdown $CLONENAME 16 | vm_delete $CLONENAME 17 | virt-clone --original $BASEVM --name $CLONENAME --auto-clone -m $MACADDR 18 | virsh snapshot-create-as --domain $CLONENAME --name "base" 19 | done -------------------------------------------------------------------------------- /createbasevm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -x 5 | 6 | . ./shared.sh 7 | 8 | BASEIMG=ubuntu1804 9 | VMNAME=$BASEIMG"_base" 10 | USERNAME=ubuntu 11 | VMCPU=2 12 | VMRAM=4098 13 | VMDISK=12 14 | BASEPACKAGES="python3 python3-zmq build-essential \ 15 | libtool autotools-dev automake pkg-config \ 16 | bsdmainutils curl git ca-certificates ccache joe \ 17 | " 18 | #nsis g++-mingw-w64-x86-64 wine-binfmt wine64 \ 19 | #g++-multilib \ 20 | #xorg" 21 | 22 | if [ ! 
-d kvm-install-vm ]; then 23 | git clone https://github.com/jonasschnelli/kvm-install-vm 24 | fi 25 | 26 | kvm-install-vm/kvm-install-vm create -t $BASEIMG -u $USERNAME -d $VMDISK -c $VMCPU -m $VMRAM -v -g vnc $VMNAME || true 27 | 28 | IP=$(get_vm_ip $VMNAME) 29 | echo $IP 30 | 31 | #wait for sshd 32 | sleep 10 33 | 34 | #fix static MAC address issue that would prevent network from working in clones 35 | 36 | if [ $BASEIMG == "ubuntu1804" ]; then 37 | scp -oStrictHostKeyChecking=no guest_scripts/guest_fix_netplan.sh ubuntu@$IP:/tmp 38 | ssh -oStrictHostKeyChecking=no ubuntu@$IP '/tmp/guest_fix_netplan.sh' 39 | fi 40 | 41 | #set password for ubuntu user 42 | ssh -oStrictHostKeyChecking=no ubuntu@$IP 'echo -e "ubuntu\nubuntu" | sudo passwd ubuntu' 43 | 44 | ssh -oStrictHostKeyChecking=no ubuntu@$IP 'echo "APT::Acquire::Retries \"5\";" | sudo tee -a /etc/apt/apt.conf.d/80-retries' 45 | 46 | #install base stuff 47 | ssh -oStrictHostKeyChecking=no ubuntu@$IP "sudo apt-get -qq -y update && sudo apt-get -qq -y install ${BASEPACKAGES} && sudo mount -a" 48 | 49 | ssh -oStrictHostKeyChecking=no ubuntu@$IP 'echo systemd hold | sudo dpkg --set-selections && sudo apt-get -y -qq full-upgrade' 50 | -------------------------------------------------------------------------------- /data/builds.sqlite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jonasschnelli/bitcoin-core-ci/24b07ca317f4517192f93662e1bf8a66515083a0/data/builds.sqlite -------------------------------------------------------------------------------- /default.yml: -------------------------------------------------------------------------------- 1 | env: 2 | global: 3 | - MAKEJOBS=-j3 4 | - RUN_UNIT_TESTS=true 5 | - RUN_FUNCTIONAL_TESTS=true 6 | - RUN_FUZZ_TESTS=false 7 | - CCACHE_MAXSIZE=100M 8 | - CCACHE_COMPRESS=1 9 | jobs: 10 | - name: 'ARM' 11 | env: >- 12 | HOST=arm-linux-gnueabihf 13 | PACKAGES="python3 g++-arm-linux-gnueabihf" 14 | 
RUN_UNIT_TESTS=false 15 | RUN_FUNCTIONAL_TESTS=false 16 | GOAL="install" 17 | # -Wno-psabi is to disable ABI warnings: "note: parameter passing for argument of type ... changed in GCC 7.1" 18 | # This could be removed once the ABI change warning does not show up by default 19 | BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi" 20 | - name: 'Win64' 21 | env: >- 22 | HOST=x86_64-w64-mingw32 23 | PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64" 24 | RUN_FUNCTIONAL_TESTS=false 25 | GOAL="deploy" 26 | BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests" 27 | - name: 'Linux x86_64' 28 | env: >- 29 | HOST=x86_64-unknown-linux-gnu 30 | PACKAGES="python3-zmq xorg" 31 | GOAL="install" 32 | BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports" 33 | - name: 'Linux32' 34 | env: >- 35 | ADDARCH=i386 36 | HOST=i686-pc-linux-gnu 37 | PACKAGES="g++-multilib python3-zmq libfontconfig1:i386 libxcb1:i386" 38 | GOAL="install" 39 | BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --disable-bip70 --enable-glibc-back-compat --enable-reduce-exports LDFLAGS=-static-libstdc++" 40 | - name: 'macOS' 41 | env: >- 42 | HOST=x86_64-apple-darwin14 43 | PACKAGES="cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python3-dev python3-setuptools" 44 | GOAL="deploy" 45 | RUN_UNIT_TESTS=false 46 | RUN_FUNCTIONAL_TESTS=false 47 | OSX_SDK="10.11" 48 | BITCOIN_CONFIG="--enable-gui --enable-reduce-exports --enable-werror" 49 | -------------------------------------------------------------------------------- /fetchvmfile.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -x 5 | 6 | . 
./shared.sh 7 | 8 | BASEVM=${1:-ubuntu1804_base} 9 | WORKER_NR=${2:-1} 10 | SRC=${3:-result.tar.gz} 11 | DEST=${4:-result.tar.gz} 12 | 13 | WORKER="${BASEVM}_${WORKER_NR}" 14 | 15 | if [ $(vm_is_running ${WORKER}) == "no" ]; then 16 | output "vm not running" 17 | exit 0 18 | fi 19 | 20 | IP=$(get_vm_ip $WORKER) 21 | SSHHOST="ubuntu@${IP}" 22 | wait_for_ssh $SSHHOST 23 | scp -oStrictHostKeyChecking=no $SSHHOST:~/${SRC} ${DEST} 24 | -------------------------------------------------------------------------------- /getguestipv4.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -x 5 | 6 | . ./shared.sh 7 | 8 | 9 | NAME=$1 10 | BRIDGE=virbr0 11 | 12 | get_vm_ip $1 13 | # 14 | #MAC=`virsh dumpxml $NAME | grep "mac address" | awk -F\' '{ print $2}'` 15 | ##while true 16 | # do 17 | # IP=$(grep -B1 $MAC /var/lib/libvirt/dnsmasq/$BRIDGE.status | head \ 18 | # -n 1 | awk '{print $2}' | sed -e s/\"//g -e s/,//) 19 | # if [ "$IP" = "" ] 20 | # then 21 | # sleep 1 22 | # else 23 | # 24 | # break 25 | # fi 26 | # done 27 | #echo $IP -------------------------------------------------------------------------------- /guest_scripts/guest_00_checkout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | set -e 5 | 6 | . ./env.sh 7 | 8 | if [[ ! -z $GIT_REPOSITORY ]]; then 9 | git clone -q --depth=50 $GIT_REPOSITORY src 10 | cd src 11 | if [[ ! -z $GIT_BRANCH ]]; then 12 | git fetch -q origin +$GIT_BRANCH 13 | git checkout -qf FETCH_HEAD 14 | fi 15 | if [[ ! -z $GIT_COMMIT ]]; then 16 | git checkout -qf $GIT_COMMIT 17 | fi 18 | fi 19 | cd src 20 | GITHEAD=`git rev-parse HEAD` 21 | echo "###GITHEAD#${GITHEAD}" 22 | cd .. 
-------------------------------------------------------------------------------- /guest_scripts/guest_00_env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export MAKEJOBS=-j4 4 | export HOST=x86_64-unknown-linux-gnu 5 | export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-debug" 6 | export GOAL="install" 7 | -------------------------------------------------------------------------------- /guest_scripts/guest_00_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | 5 | . ./env.sh 6 | 7 | # try to apt get for a couple of times 8 | cnt=0 9 | if [ -n "$ADDARCH" ]; then 10 | sudo dpkg --add-architecture $ADDARCH 11 | while true; do 12 | sudo apt update -qq -y 13 | if [ $? -eq 0 ]; then 14 | break 15 | fi 16 | cnt=$((cnt+1)) 17 | if [ $cnt -eq 60 ]; then 18 | echo "apt failed after 30 retries... erroring" 19 | exit 1 20 | fi 21 | sleep 3 22 | done 23 | fi 24 | cnt=0 25 | while true; do 26 | sudo apt-get install -qq -y ${PACKAGES} 27 | if [ $? -eq 0 ]; then 28 | break 29 | fi 30 | cnt=$((cnt+1)) 31 | if [ $cnt -eq 60 ]; then 32 | echo "apt failed after 30 retries... erroring" 33 | exit 1 34 | fi 35 | sleep 3 36 | done -------------------------------------------------------------------------------- /guest_scripts/guest_01_compile.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | set -e 5 | 6 | . ./env.sh 7 | . ./guest_shared.sh 8 | 9 | ccache -s 10 | 11 | echo "compile..." 
12 | echo "" 13 | 14 | HOMEPATH="/home/ubuntu" 15 | BASEPATH="${HOMEPATH}/src" 16 | 17 | # switch compiler 18 | if [[ $HOST = *-mingw32 ]]; then 19 | sudo update-alternatives --set $HOST-g++ $(which $HOST-g++-posix) 20 | fi 21 | 22 | # restore depends cache 23 | timeblock_start "RESTORE_CACHE" 24 | cd $BASEPATH 25 | mkdir -p depends/built 26 | COPYOVER=no 27 | COPYOVER_CCACHE=no 28 | rm -rf ${HOMEPATH}/.ccache 29 | GIT_BRANCH_LC=`echo $GIT_BRANCH | tr '[:upper:]' '[:lower:]'` 30 | if [[ ! -z $GIT_BRANCH ]] && [ $GIT_BRANCH_LC != "master" ]; then 31 | GIT_BRANCH_MD5=`echo $GIT_BRANCH_LC | md5sum | cut -f1 -d" "` 32 | if [ -d "/mnt/shared/cache/built/${GIT_BRANCH_MD5}_${HOST}" ]; then 33 | cp -r "/mnt/shared/cache/built/${GIT_BRANCH_MD5}_${HOST}" depends/built/${HOST} 34 | COPYOVER=yes 35 | fi 36 | if [ -f "/mnt/shared/cache/${GIT_BRANCH_MD5}_ccache_${HOST}.tar" ]; then 37 | cp "/mnt/shared/cache/${GIT_BRANCH_MD5}_ccache_${HOST}.tar" ${HOMEPATH}/ccache.tar 38 | tar -xf ${HOMEPATH}/ccache.tar -C ${HOMEPATH}/ 39 | COPYOVER_CCACHE=yes 40 | fi 41 | fi 42 | 43 | # copy master cache depends if no branch cache has been found 44 | if [ $COPYOVER == "no" ] && [ -d /mnt/shared/cache/built/master_$HOST ]; then 45 | cp -r /mnt/shared/cache/built/master_$HOST depends/built/$HOST 46 | fi 47 | 48 | if [ $COPYOVER_CCACHE == "no" ] && [ -f /mnt/shared/cache/master_ccache_$HOST.tar ]; then 49 | cp -r /mnt/shared/cache/master_ccache_$HOST.tar ${HOMEPATH}/ccache.tar 50 | tar -xf ${HOMEPATH}/ccache.tar -C ${HOMEPATH}/ 51 | fi 52 | 53 | ccache -s 54 | timeblock_end "RESTORE_CACHE" 55 | 56 | OUTDIR="$BASEPATH/out" 57 | mkdir -p $OUTDIR 58 | 59 | time ./autogen.sh 60 | 61 | 62 | mkdir -p depends/SDKs depends/sdk-sources 63 | 64 | if [ -n "$OSX_SDK" ] && [ ! 
-f depends/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz ] && [ -f /mnt/shared/cache/MacOSX${OSX_SDK}.sdk.tar.gz ]; then 65 | cp /mnt/shared/cache/MacOSX${OSX_SDK}.sdk.tar.gz depends/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz 66 | fi 67 | if [ -n "$OSX_SDK" ] && [ -f depends/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz ]; then 68 | tar -C depends/SDKs -xf depends/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz 69 | fi 70 | 71 | 72 | timeblock_start "MAKE_DEPENDS" 73 | cd depends 74 | make HOST=$HOST $MAKEJOBS 75 | timeblock_end "MAKE_DEPENDS" 76 | 77 | timeblock_start "UPDATE_DEPENDENCY_CACHE" 78 | mkdir -p /mnt/shared/cache/built 79 | STORENAME=$GIT_BRANCH_LC 80 | if [[ ! -z $GIT_BRANCH ]] && [ $GIT_BRANCH_LC != "master" ]; then 81 | STORENAME=`echo $GIT_BRANCH_LC | md5sum | cut -f1 -d" "` 82 | fi 83 | cp -rf built/${HOST} /mnt/shared/cache/built/${STORENAME}_copy_${HOST} 84 | rm -rf /mnt/shared/cache/built/${STORENAME}_${HOST} 85 | mv /mnt/shared/cache/built/${STORENAME}_copy_${HOST} /mnt/shared/cache/built/${STORENAME}_${HOST} 86 | cd .. 
87 | timeblock_end "UPDATE_DEPENDENCY_CACHE" 88 | 89 | timeblock_start "CONFIGURE" 90 | time ./configure --disable-dependency-tracking --prefix=$BASEPATH/depends/$HOST --bindir=$OUTDIR/bin --libdir=$OUTDIR/lib $BITCOIN_CONFIG 91 | timeblock_end "CONFIGURE" 92 | 93 | timeblock_start "COMPILE_AND_INSTALL" 94 | time make $GOAL $MAKEJOBS 95 | ccache -s 96 | timeblock_end "COMPILE_AND_INSTALL" 97 | 98 | timeblock_start "UPDATE_CCACHE_CACHE" 99 | time tar -cf ${HOMEPATH}/ccache.tar -C ${HOMEPATH}/ .ccache 100 | time cp -rf ${HOMEPATH}/ccache.tar /mnt/shared/cache/${STORENAME}_ccache_copy_${HOST}.tar 101 | if [ -f "/mnt/shared/cache/${STORENAME}_ccache_${HOST}.tar" ]; then 102 | time mv /mnt/shared/cache/${STORENAME}_ccache_${HOST}.tar /mnt/shared/cache/DEL_${STORENAME}_ccache_${HOST}_${JOB_UUID} 103 | fi 104 | time mv /mnt/shared/cache/${STORENAME}_ccache_copy_${HOST}.tar /mnt/shared/cache/${STORENAME}_ccache_${HOST}.tar 105 | timeblock_end "UPDATE_CCACHE_CACHE" 106 | 107 | timeblock_start "RUN_TESTS" 108 | 109 | if [ "$RUN_UNIT_TESTS" = "true" ]; then 110 | time make $MAKEJOBS check VERBOSE=1 111 | fi 112 | 113 | if [ "$RUN_FUNCTIONAL_TESTS" = "true" ]; then 114 | time test/functional/test_runner.py --ci --combinedlogslen=4000 --quiet --failfast 115 | fi 116 | timeblock_end "RUN_TESTS" 117 | 118 | if [ -n "$OSX_SDK" ]; then 119 | make install 120 | if [ -f ${BASEPATH}/Bitcoin-Core.dmg ]; then 121 | cp -rf ${BASEPATH}/Bitcoin-Core.dmg ${OUTDIR} 122 | fi 123 | fi 124 | 125 | tar -czf ${BASEPATH}/out.tar.gz ${OUTDIR} 126 | -------------------------------------------------------------------------------- /guest_scripts/guest_fix_netplan.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | INTERFACE=`awk '/set-name: /{print $NF}' /etc/netplan/50-cloud-init.yaml` 3 | cat > /tmp/50-cloud-init.yaml <<- EOM 4 | # This file is generated from information provided by 5 | # the datasource. 
Changes to it will not persist across an instance. 6 | # To disable cloud-init's network configuration capabilities, write a file 7 | # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: 8 | # network: {config: disabled} 9 | network: 10 | ethernets: 11 | $INTERFACE: 12 | dhcp4: true 13 | dhcp-identifier: mac 14 | version: 2 15 | EOM 16 | 17 | sudo cp /tmp/50-cloud-init.yaml /etc/netplan/50-cloud-init.yaml 18 | -------------------------------------------------------------------------------- /guest_scripts/guest_run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | . ./guest_shared.sh 5 | 6 | timeblock_start GIT_CHECKOUT 7 | ./guest_00_checkout.sh 8 | timeblock_end GIT_CHECKOUT 9 | 10 | timeblock_start APT_INSTALL 11 | ./guest_00_install.sh 12 | timeblock_end APT_INSTALL 13 | 14 | ./guest_01_compile.sh 15 | EXIT_CODE=$? 16 | echo "Build finished with exit code ${EXIT_CODE}" 17 | echo "#BUILD#${JOB_UUID}#: ${EXIT_CODE}" 18 | -------------------------------------------------------------------------------- /guest_scripts/guest_shared.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set +x 3 | 4 | timeblock_start() { 5 | echo -e "\n###START#$1#$(date +%s)\n" 6 | } 7 | 8 | timeblock_end() { 9 | echo -e "\n###END#$1#$(date +%s)\n\n\n" 10 | } -------------------------------------------------------------------------------- /rm_vm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | set -e 5 | . 
./shared.sh
vm_delete $1

--------------------------------------------------------------------------------
/shared.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Shared helper library sourced by the host-side scripts
# (buildjob.sh, rm_vm.sh, shutdown_vm.sh, ssh_vm.sh).

# colored terminal output helpers
bold() { echo -e "\e[1m$@\e[0m" ; }
red() { echo -e "\e[31m$@\e[0m" ; }
green() { echo -e "\e[32m$@\e[0m" ; }
yellow() { echo -e "\e[33m$@\e[0m" ; }

ok() { green "${@:-OK}" ; }

output() { echo -e "- $@" ; }
outputn() { echo -en "- $@ ... " ; }

# Print "yes"/"no" depending on whether SSH host $1 accepts a connection.
function is_ssh_running ()
{
    ssh -oStrictHostKeyChecking=no -q $1 exit
    if [ $? -ne 0 ]; then
        echo "no"
    else
        echo "yes"
    fi
}

# Block until SSH on host $1 is reachable, or give up and exit 1.
# NOTE: this waits up to 120 iterations of (2s sleep + one SSH probe),
# i.e. well over 240 seconds in practice, not 120 seconds.
function wait_for_ssh ()
{
    counter=0
    timeout=120
    while ([ $(is_ssh_running $1) == "no" ] && [ "$counter" -lt "$timeout" ])
    do
        sleep 2
        counter=$((counter+1))
    done
    if [ "$counter" -eq "$timeout" ]; then
        # BUGFIX: corrected "timout" -> "timeout" in the error message
        output "ERROR: SSH check timeout... SSH is still down"
        exit 1
    fi
    output "SSH up"
}

# Print "yes" if a libvirt domain named exactly $1 exists (running or not).
function vm_exists ()
{
    tmp=$(virsh list --all | grep " $1 " | awk '{ print $2}')
    if [ "x$tmp" == "x$1" ]; then
        echo "yes"
    else
        echo "no"
    fi
}

# Print "yes" if libvirt reports domain $1 in state "running".
function vm_is_running ()
{
    tmp=$(virsh list --all | grep " $1 " | awk '{ print $3}')
    if ([ "x$tmp" == "x" ] || [ "x$tmp" != "xrunning" ]); then
        echo "no"
    else
        echo "yes"
    fi
}

# Delete domain $1: gracefully shut it down first if running (asking the
# shutdown helper to undefine it afterwards), otherwise undefine it and
# remove its storage directly.
function vm_delete ()
{
    if [ $(vm_is_running $1) == "yes" ]; then
        vm_graceful_shutdown $1 yes
    else
        if [ $(vm_exists $1) == "yes" ]; then
            virsh undefine --remove-all-storage --snapshots-metadata $1
            sleep 1
        fi
    fi
}

# Force-stop domain $1 (hard power-off via virsh destroy); if $2 == "yes",
# also undefine the domain and remove its storage.
function vm_hard_shutdown ()
{
    if [ x$(vm_is_running $1) == "xyes" ]; then
        output "vm is running, shutting down..."
75 | virsh destroy $1 76 | while [ x$(vm_is_running $1) == "xyes" ] 77 | do 78 | sleep 1 79 | done 80 | if ([ -z $2 ] && [ x$2 == "xyes" ]); then 81 | virsh undefine --remove-all-storage --snapshots-metadata $1 82 | sleep 1 83 | fi 84 | fi 85 | } 86 | 87 | function vm_graceful_shutdown () 88 | { 89 | if [ x$(vm_is_running $1) == "xyes" ]; then 90 | output "vm is running, shutting down..." 91 | virsh shutdown $1 92 | while [ x$(vm_is_running $1) == "xyes" ] 93 | do 94 | sleep 1 95 | done 96 | if ([ -z $2 ] && [ x$2 == "xyes" ]); then 97 | virsh undefine --remove-all-storage --snapshots-metadata $1 98 | sleep 1 99 | fi 100 | fi 101 | } 102 | 103 | function check_delete_known_host () 104 | { 105 | output "Checking for $1 in known_hosts file" 106 | grep -q $1 ${HOME}/.ssh/known_hosts \ 107 | && outputn "Found entry for $1. Removing" \ 108 | && (sed --in-place "/^$1/d" ~/.ssh/known_hosts && ok ) \ 109 | || output "No entries found for $1" 110 | } 111 | 112 | function get_vm_ip_old () 113 | { 114 | NAME=$1 115 | BRIDGE=virbr0 116 | MAC=`virsh dumpxml $NAME | grep "mac address" | awk -F\' '{ print $2}'` 117 | while true 118 | do 119 | IP=$(grep -B1 $MAC /var/lib/libvirt/dnsmasq/$BRIDGE.status | head \ 120 | -n 1 | awk '{print $2}' | sed -e s/\"//g -e s/,//) 121 | if [ "$IP" = "" ] 122 | then 123 | sleep 1 124 | else 125 | 126 | break 127 | fi 128 | done 129 | echo $IP 130 | } 131 | 132 | function get_vm_ip () 133 | { 134 | NAME=$1 135 | BRIDGE=virbr0 136 | MAC=`virsh dumpxml $NAME | grep "mac address" | awk -F\' '{ print $2}'` 137 | while true 138 | do 139 | IP=$(arp -an | grep $MAC | awk '{ print $2}' | sed 's/[\(\)]//g') 140 | if [ "$IP" = "" ] 141 | then 142 | sleep 1 143 | else 144 | 145 | break 146 | fi 147 | done 148 | echo $IP 149 | } 150 | -------------------------------------------------------------------------------- /shutdown_vm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | set -e 5 | . 
./shared.sh 6 | vm_graceful_shutdown $1 7 | -------------------------------------------------------------------------------- /ssh_vm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | set -e 5 | USER=ubuntu 6 | 7 | . ./shared.sh 8 | 9 | if [ $(vm_is_running $1) == "no" ]; then 10 | output "vm not running" 11 | exit 0 12 | fi 13 | 14 | IP=`./getguestipv4.sh $1` 15 | SSHHOST="${USER}@${IP}" 16 | wait_for_ssh $SSHHOST 17 | ssh -oStrictHostKeyChecking=no $SSHHOST 18 | -------------------------------------------------------------------------------- /www/.htaccess: -------------------------------------------------------------------------------- 1 | RewriteEngine On 2 | RewriteRule ^github$ github.php [L,QSA] 3 | -------------------------------------------------------------------------------- /www/index.php: -------------------------------------------------------------------------------- 1 | open('/home/jonasschnelli/discopop/data/builds.sqlite'); 24 | } 25 | } 26 | 27 | $db = new BuildsDB(); 28 | $db->busyTimeout(5000); 29 | if(!$db){ 30 | echo $db->lastErrorMsg(); 31 | exit(); 32 | } 33 | $showbuilds = true; // show builds by default 34 | 35 | // if build details are requested 36 | $jobs_results = false; 37 | if(isset($_REQUEST['build'])) { 38 | $statement = $db->prepare('SELECT rowid, * FROM jobs WHERE to_build=?'); 39 | $statement->bindValue(1, $_REQUEST['build'], SQLITE3_INTEGER); 40 | $jobs_results = $statement->execute(); 41 | if ($jobs_results != false) { 42 | $build_id = htmlentities(strip_tags($_REQUEST['build'])); 43 | } 44 | } 45 | 46 | // if job details are requested 47 | $job_row = false; 48 | if(isset($_REQUEST['job'])) { 49 | $statement = $db->prepare('SELECT rowid, * FROM jobs WHERE uuid=?'); 50 | $statement->bindValue(1, htmlentities(strip_tags($_REQUEST['job'])), SQLITE3_TEXT); 51 | $job_result = $statement->execute(); 52 | if ($job_result != false) { 53 | $job_id = 
htmlentities(strip_tags($_REQUEST['job'])); 54 | $job_row = $job_result->fetchArray(); 55 | $statement = $db->prepare('SELECT rowid, * FROM builds WHERE rowid=?'); 56 | $statement->bindValue(1, $job_row['to_build'], SQLITE3_INTEGER); 57 | $build_result = $statement->execute(); 58 | if ($build_result != false) { 59 | $build_row = $build_result->fetchArray(); 60 | } 61 | } 62 | $showbuilds = false; 63 | } 64 | 65 | if ($showbuilds) { 66 | $build_results = $db->query('SELECT rowid, * FROM builds ORDER BY rowid DESC LIMIT 0, 10'); 67 | } 68 | 69 | // manually add a build 70 | if(isset($_REQUEST['addpull'])) { 71 | $pullnr = $_REQUEST['addpull']; 72 | $stmt = $db->prepare('INSERT INTO builds (image,repo,branch,pullnr) VALUES(?,?,?,?)'); 73 | $stmt->bindValue(1, "ubuntu1804_base", SQLITE3_TEXT); 74 | $stmt->bindValue(2, "https://github.com/bitcoin/bitcoin", SQLITE3_TEXT); 75 | if (is_numeric($_REQUEST['addpull'])) { 76 | $stmt->bindValue(3, "refs/pull/".strval($pullnr)."/merge", SQLITE3_TEXT); 77 | $stmt->bindValue(4, strval($pullnr), SQLITE3_TEXT); 78 | $result = $stmt->execute(); 79 | header("Location: index.php"); 80 | exit(0); 81 | } 82 | else if ($_REQUEST['addpull']=="master") { 83 | $stmt->bindValue(3, "master", SQLITE3_TEXT); 84 | $result = $stmt->execute(); 85 | header("Location: index.php"); 86 | exit(0); 87 | } 88 | } 89 | 90 | function duration($start, $end) { 91 | if ($start == 0) { 92 | return "?"; 93 | } 94 | if ($end == 0) { 95 | return gmdate("H:i:s", time()-$start); 96 | } 97 | return gmdate("H:i:s", $end-$start); 98 | } 99 | 100 | function job_is_running($row) { 101 | if ($row['endtime'] == 0) { return true; } 102 | return false; 103 | } 104 | 105 | function state_to_text($state) { 106 | if ($state == 0) return "queued"; 107 | if ($state == 1) return "starting"; 108 | if ($state == 2) return "running"; 109 | if ($state == 3) return "failed"; 110 | if ($state == 4) return "stalled"; 111 | if ($state == 5) return "success"; 112 | if ($state == 6) return 
"canceled"; 113 | return "unknown"; 114 | } 115 | 116 | function color_from_state($state) { 117 | if ($state == 0) return "queued"; 118 | if ($state == 1) return ""; 119 | if ($state == 2) return "is-warning"; 120 | if ($state == 3) return "is-danger"; 121 | if ($state == 4) return "is-danger"; 122 | if ($state == 5) return "is-success"; 123 | if ($state == 6) return "is-dark"; 124 | return "unknown"; 125 | } 126 | 127 | function success_from_state($state) { 128 | if ($state == 0) return "unknown"; 129 | if ($state == 1) return "unknown"; 130 | if ($state == 2) return "unknown"; 131 | if ($state == 3) return "failed"; 132 | if ($state == 4) return "failed"; 133 | if ($state == 5) return "success"; 134 | if ($state == 6) return "failed"; 135 | return "unknown"; 136 | } 137 | 138 | function is_final($state) { 139 | if ($state >= 3) return true; 140 | return false; 141 | } 142 | 143 | function human_filesize($bytes, $decimals = 2) { 144 | $sz = 'BKMGTP'; 145 | $factor = floor((strlen($bytes) - 1) / 3); 146 | return sprintf("%.{$decimals}f", $bytes / pow(1024, $factor)) . @$sz[$factor]; 147 | } 148 | 149 | function print_githead_from_log($logfile) { 150 | if (!file_exists($logfile)) { echo "unknown"; } 151 | system("head -1000 ".$logfile." | grep -oP '^###GITHEAD#\K([A-Za-z0-9]*)'"); 152 | } 153 | 154 | $show_html_wrapper = !isset($_REQUEST['ajax']); 155 | ?> 156 | 157 | 158 |
159 | 160 | 161 | 162 | 163 | 164 | 165 |Build ID | 235 |State | 236 |Starttime | 237 |Repo | 238 |Branch | 239 |
---|---|---|---|---|
245 | | is-small"> | 246 |247 | | 248 | | 249 | |
Job # | 259 |State | 260 |Name | 261 |Duration | 262 |Current Task | 263 |
---|---|---|---|---|
271 | | ">Loading is-small"> | 272 |273 | | 274 | | 275 | |
493 | 498 |499 |