complete staging buildserver setup on jenkins.debian.net using nested KVM instances
See merge request !176
#
# apt_package_cache = True
+# The buildserver can use some local caches to speed up builds,
+# especially when the internet connection is slow and/or expensive.
+# If enabled, the buildserver setup will look for standard caches in
+# your HOME dir and copy them to the buildserver VM. Be aware: this
+# will reduce the isolation of the buildserver from your host machine,
+# so the buildserver will provide an environment only as trustworthy
+# as the host machine's environment.
+#
+# copy_caches_from_host = True
+
# To specify which Debian mirror the build server VM should use, by
# default it uses http.debian.net, which auto-detects which is the
# best mirror to use.
import tarfile
import traceback
import time
-import json
import requests
import tempfile
from configparser import ConfigParser
from . import net
from . import metadata
from . import scanner
+from . import vmtools
from .common import FDroidPopen, SdkToolsPopen
from .exception import FDroidException, BuildException, VCSException
pass
-def get_builder_vm_id():
- vd = os.path.join('builder', '.vagrant')
- if os.path.isdir(vd):
- # Vagrant 1.2 (and maybe 1.1?) it's a directory tree...
- with open(os.path.join(vd, 'machines', 'default',
- 'virtualbox', 'id')) as vf:
- id = vf.read()
- return id
- else:
- # Vagrant 1.0 - it's a json file...
- with open(os.path.join('builder', '.vagrant')) as vf:
- v = json.load(vf)
- return v['active']['default']
-
-
-def got_valid_builder_vm():
- """Returns True if we have a valid-looking builder vm
- """
- if not os.path.exists(os.path.join('builder', 'Vagrantfile')):
- return False
- vd = os.path.join('builder', '.vagrant')
- if not os.path.exists(vd):
- return False
- if not os.path.isdir(vd):
- # Vagrant 1.0 - if the directory is there, it's valid...
- return True
- # Vagrant 1.2 - the directory can exist, but the id can be missing...
- if not os.path.exists(os.path.join(vd, 'machines', 'default',
- 'virtualbox', 'id')):
- return False
- return True
-
-
-def vagrant(params, cwd=None, printout=False):
- """Run a vagrant command.
-
- :param: list of parameters to pass to vagrant
- :cwd: directory to run in, or None for current directory
- :returns: (ret, out) where ret is the return code, and out
- is the stdout (and stderr) from vagrant
- """
- p = FDroidPopen(['vagrant'] + params, cwd=cwd)
- return (p.returncode, p.output)
-
-
-def get_vagrant_sshinfo():
- """Get ssh connection info for a vagrant VM
-
- :returns: A dictionary containing 'hostname', 'port', 'user'
- and 'idfile'
- """
- if subprocess.call('vagrant ssh-config >sshconfig',
- cwd='builder', shell=True) != 0:
- raise BuildException("Error getting ssh config")
- vagranthost = 'default' # Host in ssh config file
- sshconfig = paramiko.SSHConfig()
- sshf = open(os.path.join('builder', 'sshconfig'), 'r')
- sshconfig.parse(sshf)
- sshf.close()
- sshconfig = sshconfig.lookup(vagranthost)
- idfile = sshconfig['identityfile']
- if isinstance(idfile, list):
- idfile = idfile[0]
- elif idfile.startswith('"') and idfile.endswith('"'):
- idfile = idfile[1:-1]
- return {'hostname': sshconfig['hostname'],
- 'port': int(sshconfig['port']),
- 'user': sshconfig['user'],
- 'idfile': idfile}
-
-
-def get_clean_vm(reset=False):
- """Get a clean VM ready to do a buildserver build.
-
- This might involve creating and starting a new virtual machine from
- scratch, or it might be as simple (unless overridden by the reset
- parameter) as re-using a snapshot created previously.
-
- A BuildException will be raised if anything goes wrong.
-
- :reset: True to force creating from scratch.
- :returns: A dictionary containing 'hostname', 'port', 'user'
- and 'idfile'
- """
- # Reset existing builder machine to a clean state if possible.
- vm_ok = False
- if not reset:
- logging.info("Checking for valid existing build server")
-
- if got_valid_builder_vm():
- logging.info("...VM is present")
- p = FDroidPopen(['VBoxManage', 'snapshot',
- get_builder_vm_id(), 'list',
- '--details'], cwd='builder')
- if 'fdroidclean' in p.output:
- logging.info("...snapshot exists - resetting build server to "
- "clean state")
- retcode, output = vagrant(['status'], cwd='builder')
-
- if 'running' in output:
- logging.info("...suspending")
- vagrant(['suspend'], cwd='builder')
- logging.info("...waiting a sec...")
- time.sleep(10)
- p = FDroidPopen(['VBoxManage', 'snapshot', get_builder_vm_id(),
- 'restore', 'fdroidclean'],
- cwd='builder')
-
- if p.returncode == 0:
- logging.info("...reset to snapshot - server is valid")
- retcode, output = vagrant(['up'], cwd='builder')
- if retcode != 0:
- raise BuildException("Failed to start build server")
- logging.info("...waiting a sec...")
- time.sleep(10)
- sshinfo = get_vagrant_sshinfo()
- vm_ok = True
- else:
- logging.info("...failed to reset to snapshot")
- else:
- logging.info("...snapshot doesn't exist - "
- "VBoxManage snapshot list:\n" + p.output)
-
- # If we can't use the existing machine for any reason, make a
- # new one from scratch.
- if not vm_ok:
- if os.path.exists('builder'):
- logging.info("Removing broken/incomplete/unwanted build server")
- vagrant(['destroy', '-f'], cwd='builder')
- shutil.rmtree('builder')
- os.mkdir('builder')
-
- p = subprocess.Popen(['vagrant', '--version'],
- universal_newlines=True,
- stdout=subprocess.PIPE)
- vver = p.communicate()[0].strip().split(' ')[1]
- if vver.split('.')[0] != '1' or int(vver.split('.')[1]) < 4:
- raise BuildException("Unsupported vagrant version {0}".format(vver))
-
- with open(os.path.join('builder', 'Vagrantfile'), 'w') as vf:
- vf.write('Vagrant.configure("2") do |config|\n')
- vf.write('config.vm.box = "buildserver"\n')
- vf.write('config.vm.synced_folder ".", "/vagrant", disabled: true\n')
- vf.write('end\n')
-
- logging.info("Starting new build server")
- retcode, _ = vagrant(['up'], cwd='builder')
- if retcode != 0:
- raise BuildException("Failed to start build server")
-
- # Open SSH connection to make sure it's working and ready...
- logging.info("Connecting to virtual machine...")
- sshinfo = get_vagrant_sshinfo()
- sshs = paramiko.SSHClient()
- sshs.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- sshs.connect(sshinfo['hostname'], username=sshinfo['user'],
- port=sshinfo['port'], timeout=300,
- look_for_keys=False,
- key_filename=sshinfo['idfile'])
- sshs.close()
-
- logging.info("Saving clean state of new build server")
- retcode, _ = vagrant(['suspend'], cwd='builder')
- if retcode != 0:
- raise BuildException("Failed to suspend build server")
- logging.info("...waiting a sec...")
- time.sleep(10)
- p = FDroidPopen(['VBoxManage', 'snapshot', get_builder_vm_id(),
- 'take', 'fdroidclean'],
- cwd='builder')
- if p.returncode != 0:
- raise BuildException("Failed to take snapshot")
- logging.info("...waiting a sec...")
- time.sleep(10)
- logging.info("Restarting new build server")
- retcode, _ = vagrant(['up'], cwd='builder')
- if retcode != 0:
- raise BuildException("Failed to start build server")
- logging.info("...waiting a sec...")
- time.sleep(10)
- # Make sure it worked...
- p = FDroidPopen(['VBoxManage', 'snapshot', get_builder_vm_id(),
- 'list', '--details'],
- cwd='builder')
- if 'fdroidclean' not in p.output:
- raise BuildException("Failed to take snapshot.")
-
- return sshinfo
-
-
-def release_vm():
- """Release the VM previously started with get_clean_vm().
-
- This should always be called.
- """
- logging.info("Suspending build server")
- subprocess.call(['vagrant', 'suspend'], cwd='builder')
-
-
# Note that 'force' here also implies test mode.
def build_server(app, build, vcs, build_dir, output_dir, log_dir, force):
"""Do a build on the builder vm.
else:
logging.getLogger("paramiko").setLevel(logging.WARN)
- sshinfo = get_clean_vm()
+ sshinfo = vmtools.get_clean_builder('builder')
try:
if not buildserverid:
ftp.close()
finally:
-
# Suspend the build server.
- release_vm()
+ vm = vmtools.get_build_vm('builder')
+ vm.suspend()
def force_gradle_build_tools(build_dir, build_tools):
this is the 'unsigned' directory.
:param repo_dir: The repo directory - used for checking if the build is
necessary.
- :paaram also_check_dir: An additional location for checking if the build
+ :param also_check_dir: An additional location for checking if the build
is necessary (usually the archive repo)
:param test: True if building in test mode, in which case the build will
always happen, even if the output already exists. In test mode, the
--- /dev/null
+#!/usr/bin/env python
+
+'''
+Python-Tail - Unix tail follow implementation in Python.
+
+python-tail can be used to monitor changes to a file.
+
+Example:
+ import tail
+
+ # Create a tail instance
+ t = tail.Tail('file-to-be-followed')
+
+    # Register a callback function to be called when a new line is found in the followed file.
+    # If no callback function is registered, new lines would be printed to standard out.
+ t.register_callback(callback_function)
+
+ # Follow the file with 5 seconds as sleep time between iterations.
+ # If sleep time is not provided 1 second is used as the default time.
+ t.follow(s=5) '''
+
+# Author - Kasun Herath <kasunh01 at gmail.com>
+# Source - https://github.com/kasun/python-tail
+
+# modified by Hans-Christoph Steiner <hans@eds.org> to add the
+# background thread and support reading multiple lines per read cycle
+
+import os
+import sys
+import time
+import threading
+
+
+class Tail(object):
+ ''' Represents a tail command. '''
+ def __init__(self, tailed_file):
+ ''' Initiate a Tail instance.
+ Check for file validity, assigns callback function to standard out.
+
+ Arguments:
+ tailed_file - File to be followed. '''
+
+ self.check_file_validity(tailed_file)
+ self.tailed_file = tailed_file
+ self.callback = sys.stdout.write
+ self.t_stop = threading.Event()
+
+ def start(self, s=1):
+ '''Start tailing a file in a background thread.
+
+ Arguments:
+        s - Number of seconds to wait between each iteration; Defaults to 1.
+ '''
+
+ t = threading.Thread(target=self.follow, args=(s,))
+ t.start()
+
+ def stop(self):
+ '''Stop a background tail.
+ '''
+ self.t_stop.set()
+
+ def follow(self, s=1):
+ ''' Do a tail follow. If a callback function is registered it is called with every new line.
+ Else printed to standard out.
+
+ Arguments:
+ s - Number of seconds to wait between each iteration; Defaults to 1. '''
+
+ with open(self.tailed_file, encoding='utf8') as file_:
+ # Go to the end of file
+ file_.seek(0, 2)
+ while not self.t_stop.is_set():
+ curr_position = file_.tell()
+ lines = file_.readlines()
+ if len(lines) == 0:
+ file_.seek(curr_position)
+ else:
+ for line in lines:
+ self.callback(line)
+ time.sleep(s)
+
+ def register_callback(self, func):
+ ''' Overrides default callback function to provided function. '''
+ self.callback = func
+
+ def check_file_validity(self, file_):
+        ''' Check whether the given file exists, is readable and is a file '''
+ if not os.access(file_, os.F_OK):
+ raise TailError("File '%s' does not exist" % (file_))
+ if not os.access(file_, os.R_OK):
+ raise TailError("File '%s' not readable" % (file_))
+ if os.path.isdir(file_):
+ raise TailError("File '%s' is a directory" % (file_))
+
+
+class TailError(Exception):
+
+ def __init__(self, msg):
+ self.message = msg
+
+ def __str__(self):
+ return self.message
--- /dev/null
+#!/usr/bin/env python3
+#
+# vmtools.py - part of the FDroid server tools
+# Copyright (C) 2017 Michael Poehn <michael.poehn@fsfe.org>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from os import remove as rmfile
+from os.path import isdir, isfile, join as joinpath, basename, abspath, expanduser
+import os
+import math
+import json
+import tarfile
+import time
+import shutil
+import subprocess
+import textwrap
+from .common import FDroidException
+from logging import getLogger
+
+logger = getLogger('fdroidserver-vmtools')
+
+
+def get_clean_builder(serverdir, reset=False):
+ if not os.path.isdir(serverdir):
+ if os.path.islink(serverdir):
+ os.unlink(serverdir)
+        logger.info("buildserver path does not exist, creating %s", serverdir)
+ os.makedirs(serverdir)
+ vagrantfile = os.path.join(serverdir, 'Vagrantfile')
+ if not os.path.isfile(vagrantfile):
+        with open(vagrantfile, 'w') as f:
+ f.write(textwrap.dedent("""\
+ # generated file, do not change.
+
+ Vagrant.configure("2") do |config|
+ config.vm.box = "buildserver"
+ config.vm.synced_folder ".", "/vagrant", disabled: true
+ end
+ """))
+ vm = get_build_vm(serverdir)
+ if reset:
+ logger.info('resetting buildserver by request')
+ elif not vm.vagrant_uuid_okay():
+        logger.info('resetting buildserver, because vagrant vm is not okay.')
+ reset = True
+ elif not vm.snapshot_exists('fdroidclean'):
+ logger.info("resetting buildserver, because snapshot 'fdroidclean' is not present.")
+ reset = True
+
+ if reset:
+ vm.destroy()
+ vm.up()
+ vm.suspend()
+
+ if reset:
+ logger.info('buildserver recreated: taking a clean snapshot')
+ vm.snapshot_create('fdroidclean')
+ else:
+        logger.info('buildserver ok: reverting to clean snapshot')
+ vm.snapshot_revert('fdroidclean')
+ vm.up()
+
+ try:
+ sshinfo = vm.sshinfo()
+ except FDroidBuildVmException:
+ # workaround because libvirt sometimes likes to forget
+        # about ssh connection info even though the vm is running
+ vm.halt()
+ vm.up()
+ sshinfo = vm.sshinfo()
+
+ return sshinfo
+
+
+def _check_call(cmd, shell=False, cwd=None):
+ logger.debug(' '.join(cmd))
+ return subprocess.check_call(cmd, shell=shell, cwd=cwd)
+
+
+def _check_output(cmd, shell=False, cwd=None):
+ logger.debug(' '.join(cmd))
+ return subprocess.check_output(cmd, shell=shell, cwd=cwd)
+
+
+def get_build_vm(srvdir, provider=None):
+ """Factory function for getting FDroidBuildVm instances.
+
+ This function tries to figure out what hypervisor should be used
+ and creates an object for controlling a build VM.
+
+ :param srvdir: path to a directory which contains a Vagrantfile
+    :param provider: optionally this parameter allows specifying a
+        specific vagrant provider.
+ :returns: FDroidBuildVm instance.
+ """
+ abssrvdir = abspath(srvdir)
+
+ # use supplied provider
+ if provider:
+ if provider == 'libvirt':
+ logger.debug('build vm provider \'libvirt\' selected')
+ return LibvirtBuildVm(abssrvdir)
+ elif provider == 'virtualbox':
+ logger.debug('build vm provider \'virtualbox\' selected')
+ return VirtualboxBuildVm(abssrvdir)
+ else:
+ logger.warn('build vm provider not supported: \'%s\'', provider)
+
+ # try guessing provider from installed software
+ try:
+ kvm_installed = 0 == _check_call(['which', 'kvm'])
+ except subprocess.CalledProcessError:
+ kvm_installed = False
+ try:
+ kvm_installed |= 0 == _check_call(['which', 'qemu'])
+ except subprocess.CalledProcessError:
+ pass
+ try:
+ vbox_installed = 0 == _check_call(['which', 'VBoxHeadless'])
+ except subprocess.CalledProcessError:
+ vbox_installed = False
+ if kvm_installed and vbox_installed:
+ logger.debug('both kvm and vbox are installed.')
+ elif kvm_installed:
+ logger.debug('libvirt is the sole installed and supported vagrant provider, selecting \'libvirt\'')
+ return LibvirtBuildVm(abssrvdir)
+ elif vbox_installed:
+ logger.debug('virtualbox is the sole installed and supported vagrant provider, selecting \'virtualbox\'')
+ return VirtualboxBuildVm(abssrvdir)
+ else:
+ logger.debug('could not confirm that either virtualbox or kvm/libvirt are installed')
+
+ # try guessing provider from .../srvdir/.vagrant internals
+ has_libvirt_machine = isdir(joinpath(abssrvdir, '.vagrant',
+ 'machines', 'default', 'libvirt'))
+    has_vbox_machine = isdir(joinpath(abssrvdir, '.vagrant',
+                                      'machines', 'default', 'virtualbox'))
+ if has_libvirt_machine and has_vbox_machine:
+ logger.info('build vm provider lookup found virtualbox and libvirt, defaulting to \'virtualbox\'')
+ return VirtualboxBuildVm(abssrvdir)
+ elif has_libvirt_machine:
+ logger.debug('build vm provider lookup found \'libvirt\'')
+ return LibvirtBuildVm(abssrvdir)
+ elif has_vbox_machine:
+ logger.debug('build vm provider lookup found \'virtualbox\'')
+ return VirtualboxBuildVm(abssrvdir)
+
+ logger.info('build vm provider lookup could not determine provider, defaulting to \'virtualbox\'')
+ return VirtualboxBuildVm(abssrvdir)
+
+
+class FDroidBuildVmException(FDroidException):
+ pass
+
+
+class FDroidBuildVm():
+    """Abstract base class for working with FDroid's build-servers.
+
+ Use the factory method `fdroidserver.vmtools.get_build_vm()` for
+ getting correct instances of this class.
+
+    This is intended to be a hypervisor independent, fault tolerant
+ wrapper around the vagrant functions we use.
+ """
+
+ def __init__(self, srvdir):
+ """Create new server class.
+ """
+ self.srvdir = srvdir
+ self.srvname = basename(srvdir) + '_default'
+ self.vgrntfile = joinpath(srvdir, 'Vagrantfile')
+ self.srvuuid = self._vagrant_fetch_uuid()
+ if not isdir(srvdir):
+ raise FDroidBuildVmException("Can not init vagrant, directory %s not present" % (srvdir))
+ if not isfile(self.vgrntfile):
+ raise FDroidBuildVmException("Can not init vagrant, '%s' not present" % (self.vgrntfile))
+ import vagrant
+ self.vgrnt = vagrant.Vagrant(root=srvdir, out_cm=vagrant.stdout_cm, err_cm=vagrant.stdout_cm)
+
+ def up(self, provision=True):
+ try:
+ self.vgrnt.up(provision=provision)
+ logger.info('...waiting a sec...')
+ time.sleep(10)
+ self.srvuuid = self._vagrant_fetch_uuid()
+ except subprocess.CalledProcessError as e:
+ raise FDroidBuildVmException("could not bring up vm '%s'" % self.srvname) from e
+
+ def suspend(self):
+ logger.info('suspending buildserver')
+ try:
+ self.vgrnt.suspend()
+ logger.info('...waiting a sec...')
+ time.sleep(10)
+ except subprocess.CalledProcessError as e:
+ raise FDroidBuildVmException("could not suspend vm '%s'" % self.srvname) from e
+
+ def halt(self):
+ self.vgrnt.halt(force=True)
+
+ def destroy(self):
+ """Remove every trace of this VM from the system.
+
+ This includes deleting:
+ * hypervisor specific definitions
+        * vagrant state information (eg. `.vagrant` folder)
+ * images related to this vm
+ """
+ logger.info("destroying vm '%s'", self.srvname)
+ try:
+ self.vgrnt.destroy()
+ logger.debug('vagrant destroy completed')
+ except subprocess.CalledProcessError as e:
+ logger.exception('vagrant destroy failed: %s', e)
+ vgrntdir = joinpath(self.srvdir, '.vagrant')
+ try:
+ shutil.rmtree(vgrntdir)
+ logger.debug('deleted vagrant dir: %s', vgrntdir)
+ except Exception as e:
+ logger.debug("could not delete vagrant dir: %s, %s", vgrntdir, e)
+ try:
+ _check_call(['vagrant', 'global-status', '--prune'])
+ except subprocess.CalledProcessError as e:
+ logger.debug('pruning global vagrant status failed: %s', e)
+
+ def package(self, output=None):
+ self.vgrnt.package(output=output)
+
+ def vagrant_uuid_okay(self):
+        '''Having a uuid means that vagrant up has run successfully.'''
+ if self.srvuuid is None:
+ return False
+ return True
+
+ def _vagrant_file_name(self, name):
+ return name.replace('/', '-VAGRANTSLASH-')
+
+ def _vagrant_fetch_uuid(self):
+ if isfile(joinpath(self.srvdir, '.vagrant')):
+ # Vagrant 1.0 - it's a json file...
+ with open(joinpath(self.srvdir, '.vagrant')) as f:
+ id = json.load(f)['active']['default']
+ logger.debug('vm uuid: %s', id)
+ return id
+ elif isfile(joinpath(self.srvdir, '.vagrant', 'machines',
+ 'default', self.provider, 'id')):
+ # Vagrant 1.2 (and maybe 1.1?) it's a directory tree...
+ with open(joinpath(self.srvdir, '.vagrant', 'machines',
+ 'default', self.provider, 'id')) as f:
+ id = f.read()
+ logger.debug('vm uuid: %s', id)
+ return id
+ else:
+ logger.debug('vm uuid is None')
+ return None
+
+ def box_add(self, boxname, boxfile, force=True):
+ """Add vagrant box to vagrant.
+
+ :param boxname: name assigned to local deployment of box
+ :param boxfile: path to box file
+ :param force: overwrite existing box image (default: True)
+ """
+ boxfile = abspath(boxfile)
+ if not isfile(boxfile):
+            raise FDroidBuildVmException("supplied boxfile '%s' does not exist" % boxfile)
+ self.vgrnt.box_add(boxname, abspath(boxfile), force=force)
+
+ def box_remove(self, boxname):
+ try:
+ _check_call(['vagrant', 'box', 'remove', '--all', '--force', boxname])
+ except subprocess.CalledProcessError as e:
+ logger.debug('tried removing box %s, but is did not exist: %s', boxname, e)
+ boxpath = joinpath(expanduser('~'), '.vagrant',
+ self._vagrant_file_name(boxname))
+ if isdir(boxpath):
+ logger.info("attempting to remove box '%s' by deleting: %s",
+ boxname, boxpath)
+ shutil.rmtree(boxpath)
+
+ def sshinfo(self):
+ """Get ssh connection info for a vagrant VM
+
+ :returns: A dictionary containing 'hostname', 'port', 'user'
+ and 'idfile'
+ """
+ import paramiko
+ try:
+ _check_call(['vagrant ssh-config > sshconfig'],
+ cwd=self.srvdir, shell=True)
+ vagranthost = 'default' # Host in ssh config file
+ sshconfig = paramiko.SSHConfig()
+ with open(joinpath(self.srvdir, 'sshconfig'), 'r') as f:
+ sshconfig.parse(f)
+ sshconfig = sshconfig.lookup(vagranthost)
+ idfile = sshconfig['identityfile']
+ if isinstance(idfile, list):
+ idfile = idfile[0]
+ elif idfile.startswith('"') and idfile.endswith('"'):
+ idfile = idfile[1:-1]
+ return {'hostname': sshconfig['hostname'],
+ 'port': int(sshconfig['port']),
+ 'user': sshconfig['user'],
+ 'idfile': idfile}
+ except subprocess.CalledProcessError as e:
+ raise FDroidBuildVmException("Error getting ssh config") from e
+
+ def snapshot_create(self, snapshot_name):
+ raise NotImplementedError('not implemented, please use a sub-type instance')
+
+ def snapshot_list(self):
+ raise NotImplementedError('not implemented, please use a sub-type instance')
+
+ def snapshot_exists(self, snapshot_name):
+ raise NotImplementedError('not implemented, please use a sub-type instance')
+
+ def snapshot_revert(self, snapshot_name):
+ raise NotImplementedError('not implemented, please use a sub-type instance')
+
+
+class LibvirtBuildVm(FDroidBuildVm):
+ def __init__(self, srvdir):
+ self.provider = 'libvirt'
+ super().__init__(srvdir)
+ import libvirt
+
+ try:
+ self.conn = libvirt.open('qemu:///system')
+ except libvirt.libvirtError as e:
+ raise FDroidBuildVmException('could not connect to libvirtd: %s' % (e))
+
+ def destroy(self):
+
+ super().destroy()
+
+ # resorting to virsh instead of libvirt python bindings, because
+ # this is way more easy and therefore fault tolerant.
+ # (eg. lookupByName only works on running VMs)
+ try:
+ _check_call(('virsh', '-c', 'qemu:///system', 'destroy', self.srvname))
+ logger.info("...waiting a sec...")
+ time.sleep(10)
+ except subprocess.CalledProcessError as e:
+ logger.info("could not force libvirt domain '%s' off: %s", self.srvname, e)
+ try:
+ # libvirt python bindings do not support all flags required
+ # for undefining domains correctly.
+ _check_call(('virsh', '-c', 'qemu:///system', 'undefine', self.srvname, '--nvram', '--managed-save', '--remove-all-storage', '--snapshots-metadata'))
+ logger.info("...waiting a sec...")
+ time.sleep(10)
+ except subprocess.CalledProcessError as e:
+ logger.info("could not undefine libvirt domain '%s': %s", self.srvname, e)
+
+ def package(self, output=None, keep_box_file=False):
+ if not output:
+ output = "buildserver.box"
+ logger.debug('no output name set for packaging \'%s\',' +
+ 'defaulting to %s', self.srvname, output)
+ storagePool = self.conn.storagePoolLookupByName('default')
+ if storagePool:
+
+ if isfile('metadata.json'):
+ rmfile('metadata.json')
+ if isfile('Vagrantfile'):
+ rmfile('Vagrantfile')
+ if isfile('box.img'):
+ rmfile('box.img')
+
+ logger.debug('preparing box.img for box %s', output)
+ vol = storagePool.storageVolLookupByName(self.srvname + '.img')
+ imagepath = vol.path()
+ # TODO use a libvirt storage pool to ensure the img file is readable
+ _check_call(['sudo', '/bin/chmod', '-R', 'a+rX', '/var/lib/libvirt/images'])
+ shutil.copy2(imagepath, 'box.img')
+ _check_call(['qemu-img', 'rebase', '-p', '-b', '', 'box.img'])
+ img_info_raw = _check_output(['qemu-img', 'info', '--output=json', 'box.img'])
+ img_info = json.loads(img_info_raw.decode('utf-8'))
+ metadata = {"provider": "libvirt",
+ "format": img_info['format'],
+ "virtual_size": math.ceil(img_info['virtual-size'] / (1024. ** 3)),
+ }
+
+ logger.debug('preparing metadata.json for box %s', output)
+ with open('metadata.json', 'w') as fp:
+ fp.write(json.dumps(metadata))
+ logger.debug('preparing Vagrantfile for box %s', output)
+ vagrantfile = textwrap.dedent("""\
+ Vagrant.configure("2") do |config|
+ config.ssh.username = "vagrant"
+ config.ssh.password = "vagrant"
+
+ config.vm.provider :libvirt do |libvirt|
+
+ libvirt.driver = "kvm"
+ libvirt.host = ""
+ libvirt.connect_via_ssh = false
+ libvirt.storage_pool_name = "default"
+
+ end
+ end""")
+ with open('Vagrantfile', 'w') as fp:
+ fp.write(vagrantfile)
+ with tarfile.open(output, 'w:gz') as tar:
+ logger.debug('adding metadata.json to box %s ...', output)
+ tar.add('metadata.json')
+ logger.debug('adding Vagrantfile to box %s ...', output)
+ tar.add('Vagrantfile')
+ logger.debug('adding box.img to box %s ...', output)
+ tar.add('box.img')
+
+ if not keep_box_file:
+ logger.debug('box packaging complete, removing temporary files.')
+ rmfile('metadata.json')
+ rmfile('Vagrantfile')
+ rmfile('box.img')
+
+ else:
+ logger.warn('could not connect to storage-pool \'default\',' +
+ 'skipping packaging buildserver box')
+
+ def box_add(self, boxname, boxfile, force=True):
+ boximg = '%s_vagrant_box_image_0.img' % (boxname)
+ if force:
+ try:
+ _check_call(['virsh', '-c', 'qemu:///system', 'vol-delete', '--pool', 'default', boximg])
+                logger.debug("removed old box image '%s' from libvirt storage pool", boximg)
+ except subprocess.CalledProcessError as e:
+                logger.debug("tried removing old box image '%s', file was not present in first place", boximg, exc_info=e)
+ super().box_add(boxname, boxfile, force)
+
+ def box_remove(self, boxname):
+ super().box_remove(boxname)
+ try:
+ _check_call(['virsh', '-c', 'qemu:///system', 'vol-delete', '--pool', 'default', '%s_vagrant_box_image_0.img' % (boxname)])
+ except subprocess.CalledProcessError as e:
+            logger.debug("tried removing '%s', file was not present in first place", boxname, exc_info=e)
+
+ def snapshot_create(self, snapshot_name):
+ logger.info("creating snapshot '%s' for vm '%s'", snapshot_name, self.srvname)
+ try:
+ _check_call(['virsh', '-c', 'qemu:///system', 'snapshot-create-as', self.srvname, snapshot_name])
+ logger.info('...waiting a sec...')
+ time.sleep(10)
+ except subprocess.CalledProcessError as e:
+            raise FDroidBuildVmException("could not create snapshot '%s' "
+                                         "of libvirt vm '%s'"
+                                         % (snapshot_name, self.srvname)) from e
+
+ def snapshot_list(self):
+ import libvirt
+ try:
+ dom = self.conn.lookupByName(self.srvname)
+ return dom.listAllSnapshots()
+ except libvirt.libvirtError as e:
+ raise FDroidBuildVmException('could not list snapshots for domain \'%s\'' % self.srvname) from e
+
+ def snapshot_exists(self, snapshot_name):
+ import libvirt
+ try:
+ dom = self.conn.lookupByName(self.srvname)
+ return dom.snapshotLookupByName(snapshot_name) is not None
+ except libvirt.libvirtError:
+ return False
+
+ def snapshot_revert(self, snapshot_name):
+ logger.info("reverting vm '%s' to snapshot '%s'", self.srvname, snapshot_name)
+ import libvirt
+ try:
+ dom = self.conn.lookupByName(self.srvname)
+ snap = dom.snapshotLookupByName(snapshot_name)
+ dom.revertToSnapshot(snap)
+ logger.info('...waiting a sec...')
+ time.sleep(10)
+ except libvirt.libvirtError as e:
+ raise FDroidBuildVmException('could not revert domain \'%s\' to snapshot \'%s\''
+ % (self.srvname, snapshot_name)) from e
+
+
+class VirtualboxBuildVm(FDroidBuildVm):
+
+ def __init__(self, srvdir):
+ self.provider = 'virtualbox'
+ super().__init__(srvdir)
+
+ def snapshot_create(self, snapshot_name):
+ logger.info("creating snapshot '%s' for vm '%s'", snapshot_name, self.srvname)
+ try:
+            _check_call(['VBoxManage', 'snapshot', self.srvuuid, 'take', snapshot_name], cwd=self.srvdir)
+ logger.info('...waiting a sec...')
+ time.sleep(10)
+ except subprocess.CalledProcessError as e:
+            raise FDroidBuildVmException('could not create snapshot '
+                                         'of virtualbox vm %s'
+                                         % self.srvname) from e
+
+ def snapshot_list(self):
+ try:
+ o = _check_output(['VBoxManage', 'snapshot',
+ self.srvuuid, 'list',
+ '--details'], cwd=self.srvdir)
+ return o
+ except subprocess.CalledProcessError as e:
+ raise FDroidBuildVmException("could not list snapshots "
+ "of virtualbox vm '%s'"
+ % (self.srvname)) from e
+
+ def snapshot_exists(self, snapshot_name):
+ try:
+ return str(snapshot_name) in str(self.snapshot_list())
+ except FDroidBuildVmException:
+ return False
+
+ def snapshot_revert(self, snapshot_name):
+ logger.info("reverting vm '%s' to snapshot '%s'",
+ self.srvname, snapshot_name)
+ try:
+            _check_call(['VBoxManage', 'snapshot', self.srvuuid,
+                         'restore', snapshot_name], cwd=self.srvdir)
+        except subprocess.CalledProcessError as e:
+            raise FDroidBuildVmException("could not load snapshot "
+                                         "'%s' for vm '%s'"
+                                         % (snapshot_name, self.srvname)) from e
exit
fi
+# jenkins.debian.net slaves do not export WORKSPACE
+if [ -z "$WORKSPACE" ]; then
+ export WORKSPACE=`pwd`
+fi
+
set -e
set -x
# this is a local repo on the Guardian Project Jenkins server
cd tests
-./complete-ci-tests /var/www/fdroid
+#./complete-ci-tests /var/www/fdroid
+
+
+# report info about virtualization
+(dmesg | grep -i -e hypervisor -e qemu -e kvm) || true
+(lspci | grep -i -e virtio -e virtualbox -e qemu -e kvm) || true
+lsmod
+if systemd-detect-virt -q ; then
+ echo "Virtualization is used:" `systemd-detect-virt`
+else
+ echo "No virtualization is used."
+fi
+sudo /bin/chmod -R a+rX /var/lib/libvirt/images
+ls -ld /var/lib/libvirt/images
+ls -l /var/lib/libvirt/images || echo no access
+ls -lR ~/.vagrant.d/ || echo no access
+virsh --connect qemu:///system list --all || echo cannot virsh list
+cat /etc/issue
+
+/sbin/ifconfig || true
+hostname || true
+
+# point to the Vagrant/VirtualBox configs created by reproducible_setup_fdroid_build_environment.sh
+# these variables are actually set in fdroidserver/jenkins-build-makebuildserver
+export SETUP_WORKSPACE=$(dirname $WORKSPACE)/fdroid/fdroidserver
+export XDG_CONFIG_HOME=$SETUP_WORKSPACE
+export VBOX_USER_HOME=$SETUP_WORKSPACE/VirtualBox
+export VAGRANT_HOME=$SETUP_WORKSPACE/vagrant.d
+
+# let's see what is actually there:
+find $SETUP_WORKSPACE | grep -v fdroiddata/metadata/ | cut -b43-9999
+
+# the way we handle jenkins slaves doesn't copy the workspace to the slaves
+# so we need to "manually" clone the git repo here…
+cd $WORKSPACE
+
+# set up Android SDK to use the Debian packages in stretch
+export ANDROID_HOME=/usr/lib/android-sdk
+
+# ignore username/password prompt for non-existant repos
+git config --global url."https://fakeusername:fakepassword@github.com".insteadOf https://github.com
+git config --global url."https://fakeusername:fakepassword@gitlab.com".insteadOf https://gitlab.com
+git config --global url."https://fakeusername:fakepassword@bitbucket.org".insteadOf https://bitbucket.org
+
+# now build the whole archive
+cd $WORKSPACE
+
+# this can be handled in the jenkins job, or here:
+if [ -e fdroiddata ]; then
+ cd fdroiddata
+ git remote update -p
+ git checkout master
+ git reset --hard origin/master
+else
+ git clone https://gitlab.com/fdroid/fdroiddata.git fdroiddata
+ cd fdroiddata
+fi
+
+echo "build_server_always = True" > config.py
+$WORKSPACE/fdroid build --verbose --latest --no-tarball --all
+
+vagrant global-status
+cd builder
+vagrant status
set +e
echo "$(date -u) - cleanup in progress..."
ps auxww | grep -e VBox -e qemu
+ virsh --connect qemu:///system list --all
+ ls -hl /var/lib/libvirt/images
cd $WORKSPACE/buildserver
vagrant halt
sleep 5
echo "debian_mirror = 'https://deb.debian.org/debian/'" > $WORKSPACE/makebuildserver.config.py
echo "boot_timeout = 1200" >> $WORKSPACE/makebuildserver.config.py
echo "apt_package_cache = True" >> $WORKSPACE/makebuildserver.config.py
-./makebuildserver --verbose --clean
+echo "copy_caches_from_host = True" >> $WORKSPACE/makebuildserver.config.py
+./makebuildserver -vv --clean
+
+if [ -z "`vagrant box list | egrep '^buildserver\s+\((libvirt|virtualbox), [0-9]+\)$'`" ]; then
+ vagrant box list
+ echo "ERROR: buildserver box does not exist!"
+ exit 1
+fi
# this can be handled in the jenkins job, or here:
if [ -e fdroiddata ]; then
. ~/.android/bashrc
else
echo "ANDROID_HOME must be set!"
- exit
+ exit 1
fi
fi
import os
import pathlib
+import re
import requests
import stat
import sys
+import shutil
import subprocess
-import time
+import vagrant
import hashlib
import yaml
+import json
+import logging
from clint.textui import progress
from optparse import OptionParser
+import fdroidserver.tail
+import fdroidserver.vmtools
-if not os.path.exists('makebuildserver') and not os.path.exists('buildserver'):
- print('This must be run as ./makebuildserver in fdroidserver.git!')
- sys.exit(1)
-
-
-def vagrant(params, cwd=None, printout=False):
- """Run vagrant.
-
- :param: list of parameters to pass to vagrant
- :cwd: directory to run in, or None for current directory
- :printout: True to print output in realtime, False to just
- return it
- :returns: (ret, out) where ret is the return code, and out
- is the stdout (and stderr) from vagrant
- """
- p = subprocess.Popen(['vagrant'] + params, cwd=cwd,
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
- universal_newlines=True)
- out = ''
- if printout:
- while True:
- line = p.stdout.readline()
- if len(line) == 0:
- break
- print(line.rstrip())
- out += line
- p.wait()
- else:
- out = p.communicate()[0]
- return (p.returncode, out)
-
-
-boxfile = 'buildserver.box'
-serverdir = 'buildserver'
-
parser = OptionParser()
-parser.add_option("-v", "--verbose", action="store_true", default=False,
+parser.add_option('-v', '--verbose', action="count", dest='verbosity', default=1,
help="Spew out even more information than normal")
+parser.add_option('-q', action='store_const', const=0, dest='verbosity')
parser.add_option("-c", "--clean", action="store_true", default=False,
help="Build from scratch, rather than attempting to update the existing server")
+parser.add_option('--skip-cache-update', action="store_true", default=False,
+ help="""Skip downloading and checking cache."""
+ """This assumes that the cache is already downloaded completely.""")
+parser.add_option('--keep-box-file', action="store_true", default=False,
+ help="""Box file will not be deleted after adding it to box storage"""
+ """ (KVM-only).""")
options, args = parser.parse_args()
+logger = logging.getLogger('fdroidserver-makebuildserver')
+if options.verbosity >= 2:
+ logging.basicConfig(format='%(message)s', level=logging.DEBUG)
+ logger.setLevel(logging.DEBUG)
+elif options.verbosity == 1:
+ logging.basicConfig(format='%(message)s', level=logging.INFO)
+ logger.setLevel(logging.INFO)
+elif options.verbosity <= 0:
+ logging.basicConfig(format='%(message)s', level=logging.WARNING)
+ logger.setLevel(logging.WARNING)
+
+
+if not os.path.exists('makebuildserver') and not os.path.exists('buildserver'):
+ logger.critical('This must be run as ./makebuildserver in fdroidserver.git!')
+ sys.exit(1)
+
+tail = None
+
# set up default config
cachedir = os.path.join(os.getenv('HOME'), '.cache', 'fdroidserver')
+logger.debug('cachedir set to: %s', cachedir)
+
config = {
'basebox': 'jessie64',
'baseboxurl': [
],
'debian_mirror': 'http://http.debian.net/debian/',
'apt_package_cache': False,
+ 'copy_caches_from_host': False,
'boot_timeout': 600,
'cachedir': cachedir,
'cpus': 1,
except subprocess.CalledProcessError as e:
virt = 'none'
if virt == 'qemu' or virt == 'kvm' or virt == 'bochs':
- print('Running in a VM guest, defaulting to QEMU/KVM via libvirt')
+ logger.info('Running in a VM guest, defaulting to QEMU/KVM via libvirt')
config['vm_provider'] = 'libvirt'
elif virt != 'none':
- print('Running in an unsupported VM guest (' + virt + ')!')
+ logger.info('Running in an unsupported VM guest (%s)!', virt)
+ logger.debug('detected virt: %s', virt)
# load config file, if present
if os.path.exists('makebuildserver.config.py'):
exec(compile(open('makebs.config.py').read(), 'makebs.config.py', 'exec'), config)
if '__builtins__' in config:
del(config['__builtins__']) # added by compile/exec
-
-if os.path.exists(boxfile):
- os.remove(boxfile)
-
-if options.clean:
- vagrant(['destroy', '-f'], cwd=serverdir, printout=options.verbose)
- if config['vm_provider'] == 'libvirt':
- subprocess.call(['virsh', 'undefine', 'buildserver_default'])
- subprocess.call(['virsh', 'vol-delete', '/var/lib/libvirt/images/buildserver_default.img'])
+logger.debug("makebuildserver.config.py parsed -> %s", json.dumps(config, indent=4, sort_keys=True))
# Update cached files.
cachedir = config['cachedir']
if not os.path.exists(cachedir):
os.makedirs(cachedir, 0o755)
+ logger.debug('created cachedir %s because it did not exists.', cachedir)
if config['vm_provider'] == 'libvirt':
tmp = cachedir
while tmp != '/':
mode = os.stat(tmp).st_mode
if not (stat.S_IXUSR & mode and stat.S_IXGRP & mode and stat.S_IXOTH & mode):
- print('ERROR:', tmp, 'will not be accessible to the VM! To fix, run:')
- print(' chmod a+X', tmp)
+ logger.critical('ERROR: %s will not be accessible to the VM! To fix, run:', tmp)
+ logger.critical(' chmod a+X %s', tmp)
sys.exit(1)
tmp = os.path.dirname(tmp)
+ logger.debug('cache dir %s is accessible for libvirt vm.', cachedir)
if config['apt_package_cache']:
config['aptcachedir'] = cachedir + '/apt/archives'
+ logger.debug('aptcachedir is set to %s', config['aptcachedir'])
+ aptcachelock = os.path.join(config['aptcachedir'], 'lock')
+ if os.path.isfile(aptcachelock):
+ logger.info('apt cache dir is locked, removing lock')
+ os.remove(aptcachelock)
+ aptcachepartial = os.path.join(config['aptcachedir'], 'partial')
+ if os.path.isdir(aptcachepartial):
+ logger.info('removing partial downloads from apt cache dir')
+ shutil.rmtree(aptcachepartial)
cachefiles = [
('https://dl.google.com/android/repository/tools_r25.2.3-linux.zip',
return s.hexdigest()
-for srcurl, shasum in cachefiles:
- filename = os.path.basename(srcurl)
- local_filename = os.path.join(cachedir, filename)
-
- if os.path.exists(local_filename):
- local_length = os.path.getsize(local_filename)
+def run_via_vagrant_ssh(v, cmdlist):
+ if (isinstance(cmdlist, str) or isinstance(cmdlist, bytes)):
+ cmd = cmdlist
else:
- local_length = -1
+ cmd = ' '.join(cmdlist)
+ v._run_vagrant_command(['ssh', '-c', cmd])
- resume_header = {}
- download = True
- try:
- r = requests.head(srcurl, allow_redirects=True, timeout=60)
- if r.status_code == 200:
- content_length = int(r.headers.get('content-length'))
+def update_cache(cachedir, cachefiles):
+ for srcurl, shasum in cachefiles:
+ filename = os.path.basename(srcurl)
+ local_filename = os.path.join(cachedir, filename)
+
+ if os.path.exists(local_filename):
+ local_length = os.path.getsize(local_filename)
else:
+ local_length = -1
+
+ resume_header = {}
+ download = True
+
+ try:
+ r = requests.head(srcurl, allow_redirects=True, timeout=60)
+ if r.status_code == 200:
+ content_length = int(r.headers.get('content-length'))
+ else:
+ content_length = local_length # skip the download
+ except requests.exceptions.RequestException as e:
content_length = local_length # skip the download
- except requests.exceptions.RequestException as e:
- content_length = local_length # skip the download
- print(e)
-
- if local_length == content_length:
- download = False
- elif local_length > content_length:
- print('deleting corrupt file from cache: ' + local_filename)
- os.remove(local_filename)
- print("Downloading " + filename + " to cache")
- elif local_length > -1 and local_length < content_length:
- print("Resuming download of " + local_filename)
- resume_header = {'Range': 'bytes=%d-%d' % (local_length, content_length)}
+ logger.warn('%s', e)
+
+ if local_length == content_length:
+ download = False
+ elif local_length > content_length:
+ logger.info('deleting corrupt file from cache: %s', local_filename)
+ os.remove(local_filename)
+ logger.info("Downloading %s to cache", filename)
+ elif local_length > -1 and local_length < content_length:
+ logger.info("Resuming download of %s", local_filename)
+ resume_header = {'Range': 'bytes=%d-%d' % (local_length, content_length)}
+ else:
+ logger.info("Downloading %s to cache", filename)
+
+ if download:
+ r = requests.get(srcurl, headers=resume_header,
+ stream=True, verify=False, allow_redirects=True)
+ content_length = int(r.headers.get('content-length'))
+ with open(local_filename, 'ab') as f:
+ for chunk in progress.bar(r.iter_content(chunk_size=65536),
+ expected_size=(content_length / 65536) + 1):
+ if chunk: # filter out keep-alive new chunks
+ f.write(chunk)
+
+ v = sha256_for_file(local_filename)
+ if v == shasum:
+ logger.info("\t...shasum verified for %s", local_filename)
+ else:
+ logger.critical("Invalid shasum of '%s' detected for %s", v, local_filename)
+ os.remove(local_filename)
+ sys.exit(1)
+
+
+def debug_log_vagrant_vm(vm_dir, config):
+ if options.verbosity >= 3:
+ _vagrant_dir = os.path.join(vm_dir, '.vagrant')
+ logger.debug('check %s dir exists? -> %r', _vagrant_dir, os.path.isdir(_vagrant_dir))
+ logger.debug('> vagrant status')
+ subprocess.call(['vagrant', 'status'], cwd=vm_dir)
+ logger.debug('> vagrant box list')
+ subprocess.call(['vagrant', 'box', 'list'])
+ if config['vm_provider'] == 'libvirt':
+ logger.debug('> virsh -c qmeu:///system list --all')
+ subprocess.call(['virsh', '-c', 'qemu:///system', 'list', '--all'])
+ domain = 'buildserver_default'
+ logger.debug('> virsh -c qemu:///system snapshot-list %s', domain)
+ subprocess.call(['virsh', '-c', 'qemu:///system', 'snapshot-list', domain])
+
+
+def main():
+ global cachedir, cachefiles, config, tail
+
+ if options.skip_cache_update:
+ logger.info('skipping cache update and verification...')
else:
- print("Downloading " + filename + " to cache")
-
- if download:
- r = requests.get(srcurl, headers=resume_header,
- stream=True, verify=False, allow_redirects=True)
- content_length = int(r.headers.get('content-length'))
- with open(local_filename, 'ab') as f:
- for chunk in progress.bar(r.iter_content(chunk_size=65536),
- expected_size=(content_length / 65536) + 1):
- if chunk: # filter out keep-alive new chunks
- f.write(chunk)
-
- v = sha256_for_file(local_filename)
- if v == shasum:
- print("\t...shasum verified for " + local_filename)
+ update_cache(cachedir, cachefiles)
+
+ local_qt_filename = os.path.join(cachedir, 'qt-opensource-linux-x64-android-5.7.0.run')
+ logger.info("Setting executable bit for %s", local_qt_filename)
+ os.chmod(local_qt_filename, 0o755)
+
+    # Use VirtualBox software virtualization when hardware virtualization
+    # is not available, e.g. when this is itself running inside a VM, as
+    # on http://jenkins.debian.net.  Valid values are 'on' or 'off'.
+ if sys.platform.startswith('darwin'):
+ # all < 10 year old Macs work, and OSX servers as VM host are very
+ # rare, but this could also be auto-detected if someone codes it
+ config['hwvirtex'] = 'on'
+ logger.info('platform is darwnin -> hwvirtex = \'on\'')
+ elif os.path.exists('/proc/cpuinfo'):
+ with open('/proc/cpuinfo') as f:
+ contents = f.read()
+ if 'vmx' in contents or 'svm' in contents:
+ config['hwvirtex'] = 'on'
+ logger.info('found \'vmx\' or \'svm\' in /proc/cpuinfo -> hwvirtex = \'on\'')
+
+ serverdir = os.path.join(os.getcwd(), 'buildserver')
+ logfilename = os.path.join(serverdir, 'up.log')
+ if not os.path.exists(logfilename):
+ open(logfilename, 'a').close() # create blank file
+ log_cm = vagrant.make_file_cm(logfilename)
+ v = vagrant.Vagrant(root=serverdir, out_cm=log_cm, err_cm=log_cm)
+
+ if options.verbosity >= 2:
+ tail = fdroidserver.tail.Tail(logfilename)
+ tail.start()
+
+ vm = fdroidserver.vmtools.get_build_vm(serverdir, provider=config['vm_provider'])
+ if options.clean:
+ vm.destroy()
+
+ # Check against the existing Vagrantfile.yaml, and if they differ, we
+ # need to create a new box:
+ vf = os.path.join(serverdir, 'Vagrantfile.yaml')
+ writevf = True
+ if os.path.exists(vf):
+ logger.info('Halting %s', serverdir)
+ v.halt()
+ with open(vf, 'r', encoding='utf-8') as f:
+ oldconfig = yaml.load(f)
+ if config != oldconfig:
+ logger.info("Server configuration has changed, rebuild from scratch is required")
+ vm.destroy()
+ else:
+ logger.info("Re-provisioning existing server")
+ writevf = False
else:
- print("Invalid shasum of '" + v + "' detected for " + local_filename)
- os.remove(local_filename)
+ logger.info("No existing server - building from scratch")
+ if writevf:
+ with open(vf, 'w', encoding='utf-8') as f:
+ yaml.dump(config, f)
+
+ if config['vm_provider'] == 'libvirt':
+ found_basebox = False
+ needs_mutate = False
+ for box in v.box_list():
+ if box.name == config['basebox']:
+ found_basebox = True
+ if box.provider != 'libvirt':
+ needs_mutate = True
+ continue
+ if not found_basebox:
+ if isinstance(config['baseboxurl'], str):
+ baseboxurl = config['baseboxurl']
+ else:
+ baseboxurl = config['baseboxurl'][0]
+ logger.info('Adding %s from %s', config['basebox'], baseboxurl)
+ v.box_add(config['basebox'], baseboxurl)
+ needs_mutate = True
+ if needs_mutate:
+ logger.info('Converting %s to libvirt format', config['basebox'])
+ v._call_vagrant_command(['mutate', config['basebox'], 'libvirt'])
+ logger.info('Removing virtualbox format copy of %s', config['basebox'])
+ v.box_remove(config['basebox'], 'virtualbox')
+
+ logger.info("Configuring build server VM")
+ debug_log_vagrant_vm(serverdir, config)
+ try:
+ v.up(provision=True)
+ except fdroidserver.vmtools.FDroidBuildVmException as e:
+ debug_log_vagrant_vm(serverdir, config)
+ logger.exception('could not bring buildserver vm up. %s', e)
sys.exit(1)
-local_qt_filename = os.path.join(cachedir, 'qt-opensource-linux-x64-android-5.7.0.run')
-print("Setting executable bit for " + local_qt_filename)
-os.chmod(local_qt_filename, 0o755)
-
-# use VirtualBox software virtualization if hardware is not available,
-# like if this is being run in kvm or some other VM platform, like
-# http://jenkins.debian.net, the values are 'on' or 'off'
-if sys.platform.startswith('darwin'):
- # all < 10 year old Macs work, and OSX servers as VM host are very
- # rare, but this could also be auto-detected if someone codes it
- config['hwvirtex'] = 'on'
-elif os.path.exists('/proc/cpuinfo'):
- with open('/proc/cpuinfo') as f:
- contents = f.read()
- if 'vmx' in contents or 'svm' in contents:
- config['hwvirtex'] = 'on'
+ if config['copy_caches_from_host']:
+ ssh_config = v.ssh_config()
+ user = re.search(r'User ([^ \n]+)', ssh_config).group(1)
+ hostname = re.search(r'HostName ([^ \n]+)', ssh_config).group(1)
+ port = re.search(r'Port ([0-9]+)', ssh_config).group(1)
+ key = re.search(r'IdentityFile ([^ \n]+)', ssh_config).group(1)
+
+ for d in ('.m2', '.gradle/caches', '.gradle/wrapper', '.pip_download_cache'):
+ fullpath = os.path.join(os.getenv('HOME'), d)
+ if os.path.isdir(fullpath):
+ # TODO newer versions of vagrant provide `vagrant rsync`
+ run_via_vagrant_ssh(v, ['cd ~ && test -d', d, '|| mkdir -p', d])
+ subprocess.call(['rsync', '-axv', '--progress', '--delete', '-e',
+ 'ssh -i {0} -p {1} -oIdentitiesOnly=yes'.format(key, port),
+ fullpath + '/',
+ user + '@' + hostname + ':~/' + d + '/'])
+
+ # this file changes every time but should not be cached
+ run_via_vagrant_ssh(v, ['rm', '-f', '~/.gradle/caches/modules-2/modules-2.lock'])
+ run_via_vagrant_ssh(v, ['rm', '-fr', '~/.gradle/caches/*/plugin-resolution/'])
+
+ p = subprocess.Popen(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE,
+ universal_newlines=True)
+ buildserverid = p.communicate()[0].strip()
+ logger.info("Writing buildserver ID ...ID is %s", buildserverid)
+ run_via_vagrant_ssh(v, 'sh -c "echo %s >/home/vagrant/buildserverid"' % buildserverid)
-# Check against the existing Vagrantfile.yaml, and if they differ, we
-# need to create a new box:
-vf = os.path.join(serverdir, 'Vagrantfile.yaml')
-writevf = True
-if os.path.exists(vf):
- print('Halting', serverdir)
- vagrant(['halt'], serverdir)
- with open(vf, 'r', encoding='utf-8') as f:
- oldconfig = yaml.load(f)
- if config != oldconfig:
- print("Server configuration has changed, rebuild from scratch is required")
- vagrant(['destroy', '-f'], serverdir)
- else:
- print("Re-provisioning existing server")
- writevf = False
-else:
- print("No existing server - building from scratch")
-if writevf:
- with open(vf, 'w', encoding='utf-8') as f:
- yaml.dump(config, f)
+ logger.info("Stopping build server VM")
+ v.halt()
-if config['vm_provider'] == 'libvirt':
- returncode, out = vagrant(['box', 'list'], serverdir, printout=options.verbose)
- found_basebox = False
- needs_mutate = False
- for line in out.splitlines():
- if line.startswith(config['basebox']):
- found_basebox = True
- if line.split('(')[1].split(',')[0] != 'libvirt':
- needs_mutate = True
- continue
- if not found_basebox:
- if isinstance(config['baseboxurl'], str):
- baseboxurl = config['baseboxurl']
- else:
- baseboxurl = config['baseboxurl'][0]
- print('Adding', config['basebox'], 'from', baseboxurl)
- vagrant(['box', 'add', '--name', config['basebox'], baseboxurl],
- serverdir, printout=options.verbose)
- needs_mutate = True
- if needs_mutate:
- print('Converting', config['basebox'], 'to libvirt format')
- vagrant(['mutate', config['basebox'], 'libvirt'],
- serverdir, printout=options.verbose)
- print('Removing virtualbox format copy of', config['basebox'])
- vagrant(['box', 'remove', '--provider', 'virtualbox', config['basebox']],
- serverdir, printout=options.verbose)
-
-print("Configuring build server VM")
-returncode, out = vagrant(['up', '--provision'], serverdir, printout=True)
-with open(os.path.join(serverdir, 'up.log'), 'w') as log:
- log.write(out)
-if returncode != 0:
- print("Failed to configure server")
- sys.exit(1)
+ logger.info("Packaging")
+ boxfile = os.path.join(os.getcwd(), 'buildserver.box')
+ if os.path.exists(boxfile):
+ os.remove(boxfile)
+
+ vm.package(output=boxfile)
+
+ logger.info("Adding box")
+ vm.box_add('buildserver', boxfile, force=True)
-print("Writing buildserver ID")
-p = subprocess.Popen(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE,
- universal_newlines=True)
-buildserverid = p.communicate()[0].strip()
-print("...ID is " + buildserverid)
-subprocess.call(
- ['vagrant', 'ssh', '-c', 'sh -c "echo {0} >/home/vagrant/buildserverid"'
- .format(buildserverid)],
- cwd=serverdir)
-
-print("Stopping build server VM")
-vagrant(['halt'], serverdir)
-
-print("Waiting for build server VM to be finished")
-ready = False
-while not ready:
- time.sleep(2)
- returncode, out = vagrant(['status'], serverdir)
- if returncode != 0:
- print("Error while checking status")
+ if 'buildserver' not in subprocess.check_output(['vagrant', 'box', 'list']).decode('utf-8'):
+ logger.critical('could not add box \'%s\' as \'buildserver\', terminating', boxfile)
sys.exit(1)
- for line in out.splitlines():
- if line.startswith("default"):
- if line.find("poweroff") != -1 or line.find("shutoff") != 1:
- ready = True
- else:
- print("Status: " + line)
-print("Packaging")
-vagrant(['package', '--output', os.path.join('..', boxfile)], serverdir,
- printout=options.verbose)
-print("Adding box")
-vagrant(['box', 'add', 'buildserver', boxfile, '-f'],
- printout=options.verbose)
+ if not options.keep_box_file:
+ logger.debug('box added to vagrant, ' +
+ 'removing generated box file \'%s\'',
+ boxfile)
+ os.remove(boxfile)
+
-os.remove(boxfile)
+if __name__ == '__main__':
+ try:
+ main()
+ finally:
+ if tail is not None:
+ tail.stop()
'apache-libcloud >= 0.14.1',
'pyasn1',
'pyasn1-modules',
+ 'python-vagrant',
'PyYAML',
'requests < 2.11',
'docker-py == 1.9.0',
--- /dev/null
+#!/usr/bin/env python3
+
+import os
+import sys
+import logging
+import textwrap
+import tempfile
+import inspect
+from argparse import ArgumentParser
+
+localmodule = os.path.realpath(
+ os.path.join(os.path.dirname(inspect.getfile(inspect.currentframe())), '..', '..'))
+print('localmodule: ' + localmodule)
+if localmodule not in sys.path:
+ sys.path.insert(0, localmodule)
+
+from fdroidserver.vmtools import get_build_vm
+
+
+def main(args):
+
+ if args.provider != None:
+ if args.provider not in ('libvirt', 'virtualbox'):
+ logging.critical('provider: %s not supported.', args.provider)
+ sys.exit(1)
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+
+ # define a simple vagrant vm 'x'
+ x_dir = os.path.join(tmpdir, 'x')
+ os.makedirs(x_dir)
+ with open(os.path.join(x_dir, 'Vagrantfile'), 'w') as f:
+ f.write(textwrap.dedent("""\
+ Vagrant.configure("2") do |config|
+ config.vm.box = "debian/jessie64"
+ config.vm.synced_folder ".", "/vagrant", disabled: true
+ config.ssh.insert_key = false
+ end
+ """))
+ # define another simple vagrant vm 'y' which uses 'x' as a base box
+ y_dir = os.path.join(tmpdir, 'y')
+ os.makedirs(y_dir)
+ with open(os.path.join(y_dir, 'Vagrantfile'), 'w') as f:
+ f.write(textwrap.dedent("""\
+ Vagrant.configure("2") do |config|
+ config.vm.box = "x"
+ config.vm.synced_folder ".", "/vagrant", disabled: true
+ end
+ """))
+
+ # vagrant file for packaging 'x' box
+ vgrntf=textwrap.dedent("""\
+ Vagrant.configure("2") do |config|
+
+ config.vm.synced_folder ".", "/vagrant", type: "nfs", nfs_version: "4", nfs_udp: false
+
+ config.vm.provider :libvirt do |libvirt|
+ libvirt.driver = "kvm"
+ libvirt.connect_via_ssh = false
+ libvirt.username = "root"
+ libvirt.storage_pool_name = "default"
+ end
+ end
+ """)
+
+ # create a box: x
+ if not args.skip_create_x:
+ x = get_build_vm(x_dir, provider=args.provider)
+ x.destroy()
+ x.up(provision=True)
+ x.halt()
+ x.package(output='x.box', vagrantfile=vgrntf, keep_box_file=False)
+ x.box_remove('x')
+ x.box_add('x', 'x.box')
+
+ # use previously created box to spin up a new vm
+ if not args.skip_create_y:
+ y = get_build_vm(y_dir, provider=args.provider)
+ y.destroy()
+ y.up()
+
+ # create and restore a snapshot
+ if not args.skip_snapshot_y:
+ y = get_build_vm(y_dir, provider=args.provider)
+
+ if y.snapshot_exists('clean'):
+ y.destroy()
+ y.up()
+
+ y.suspend()
+ y.snapshot_create('clean')
+ y.up()
+
+ logging.info('snapshot \'clean\' exsists: %r', y.snapshot_exists('clean'))
+
+ # test if snapshot exists
+ se = y.snapshot_exists('clean')
+ logging.info('snapshot \'clean\' available: %r', se)
+
+ # revert snapshot
+ y.suspend()
+ logging.info('asdf %s', y.snapshot_revert('clean'))
+ y.resume()
+
+ # cleanup
+ if not args.skip_clean:
+ x = get_build_vm(x_dir, provider=args.provider)
+ y = get_build_vm(y_dir, provider=args.provider)
+ y.destroy()
+ x.destroy()
+ x.box_remove('x')
+
+if __name__ == '__main__':
+ logging.basicConfig(format='%(message)s', level=logging.DEBUG)
+
+ parser = ArgumentParser(description="""\
+This is intended for manually testing vmtools.py
+
+NOTE: Should this test-run fail it might leave traces of vagrant VMs or boxes
+ on your system. Those vagrant VMs are named 'x' and 'y'.
+ """)
+ parser.add_argument('--provider', help="Force this script use supplied "
+ "provider instead using our auto provider lookup. "
+ "Supported values: 'libvirt', 'virtualbox'")
+ parser.add_argument('--skip-create-x', action="store_true", default=False,
+ help="Skips: Creating 'x' vm, packaging it into a "
+ "a box and adding it to vagrant.")
+ parser.add_argument('--skip-create-y', action="store_true", default=False,
+ help="Skips: Creating 'y' vm. Depends on having "
+ "box 'x' added to vagrant.")
+ parser.add_argument('--skip-snapshot-y', action="store_true", default=False,
+ help="Skips: Taking a snapshot and restoring a "
+ "a snapshot of 'y' vm. Requires 'y' mv to be "
+ "present.")
+ parser.add_argument('--skip-clean', action="store_true", default=False,
+ help="Skips: Cleaning up mv images and vagrant "
+ "metadata on the system.")
+ args = parser.parse_args()
+
+ main(args)