### Create, upgrade, and maintain (native and cross-) chroots
###
### (c) 2018 Mark Wooding
###

###----- Licensing notice ---------------------------------------------------
###
### This file is part of the distorted.org.uk chroot maintenance tools.
###
### distorted-chroot is free software: you can redistribute it and/or
### modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation; either version 2 of the
### License, or (at your option) any later version.
###
### distorted-chroot is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
### General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with distorted-chroot.  If not, write to the Free Software
### Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
### USA.

import contextlib as CTX
import errno as E
import fcntl as FC
import os as OS
import re as RX
import select as SEL
import stat as ST
import sys as SYS
import time as T
from cStringIO import StringIO
import traceback as TB

import jobclient as JC

QUIS = OS.path.basename(SYS.argv[0])
TODAY = T.strftime("%Y-%m-%d")
NOW = T.time()

###--------------------------------------------------------------------------
### Utilities.

RC = 0

def moan(msg):
  """Print MSG to stderr as a warning."""
  if not OPT.silent: OS.write(2, "%s: %s\n" % (QUIS, msg))

def error(msg):
  """Print MSG to stderr, and remember to exit nonzero."""
  global RC
  moan(msg)
  RC = 2

class ExpectedError (Exception):
  """A fatal error which shouldn't print a backtrace."""

@CTX.contextmanager
def toplevel_handler():
  """Catch `ExpectedError's and report Unixish error messages."""
  try: yield None
  except ExpectedError, err: moan(err); SYS.exit(2)

def spew(msg):
  """Print MSG to stderr as a debug trace."""
  if OPT.debug: OS.write(2, ";; %s\n" % msg)

82 """Unique objects with no internal structure."""
83 def __init__(me, label): me._label = label
84 def __str__(me): return '#<%s %s>' % (me.__class__.__name__, me._label)
85 def __repr__(me): return '#<%s %s>' % (me.__class__.__name__, me._label)
87 class Struct (object):
88 def __init__(me, **kw): me.__dict__.update(kw)
class Cleanup (object):
  """
  A context manager for stacking other context managers.

  By itself, it does nothing.  Attach other context managers with `enter' or
  loose cleanup functions with `add'.  On exit, contexts are left and
  cleanups performed in reverse order.
  """
  def __init__(me):
    me._cleanups = []
  def __enter__(me):
    return me
  def __exit__(me, exty, exval, extb):
    trap = False
    for c in reversed(me._cleanups):
      if c(exty, exval, extb): trap = True
    return trap
  def enter(me, ctx):
    ret = ctx.__enter__()
    me._cleanups.append(ctx.__exit__)
    return ret
  def add(me, func):
    me._cleanups.append(lambda exty, exval, extb: func())

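## Example (editor's sketch, never called): `Cleanup' collects context
## managers and cleanup functions acquired piecemeal, and unwinds them in
## reverse order on exit.  The lock and mount helpers used here are the real
## ones defined later in this file.
def _demo_cleanup(dist, arch):
  with Cleanup() as clean:
    clean.enter(lockfile(chroot_src_lockfile(dist, arch)))
    mnt = clean.enter(mount_chroot_src(dist, arch))
    clean.add(lambda: spew("leaving `%s'" % mnt))
    ## ... work with MNT; everything is undone, in reverse order, on exit.
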
115 """Return the time T (default now) as a string."""
116 return T.strftime("%Y-%m-%dT%H:%M:%SZ", T.gmtime(t))
118 R_ZULU = RX.compile(r"^(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)Z$")
120 """Convert the time string Z back to a Unix time."""
122 if not m: raise ValueError("bad time spec `%s'" % z)
123 yr, mo, dy, hr, mi, se = map(int, m.groups())
124 return T.mktime((yr, mo, dy, hr, mi, se, 0, 0, 0))
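## For instance (illustrative): `zulu(0)' formats the Unix epoch as
## `1970-01-01T00:00:00Z', and `unzulu' parses such a string back into a
## timestamp.  (Note the asymmetry: `zulu' formats in UTC, while `unzulu'
## uses `mktime', which interprets the fields in local time.)
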
###--------------------------------------------------------------------------
### Simple select(2) utilities.

class BaseSelector (object):
  """
  A base class for hooking into `select_loop'.

  See `select_loop' for details of the protocol.
  """
  def preselect(me, rfds, wfds): pass
  def postselect_read(me, fd): pass
  def postselect_write(me, fd): pass

class WriteLinesSelector (BaseSelector):
  """Write whole lines to an output file descriptor."""

  def __init__(me, fd, nextfn = None, *args, **kw):
    """
    Initialize the WriteLinesSelector to write to the file descriptor FD.

    The FD is marked non-blocking.

    The lines are produced by the NEXTFN, which is called without arguments.
    It can affect the output in three ways:

      * It can return a string (or almost any other kind of object, which
        will be converted into a string by `str'), which will be written to
        the descriptor followed by a newline.  Lines are written in the order
        in which they are produced.

      * It can return `None', which indicates that there are no more items to
        be written for the moment.  The function will be called again from
        time to time, to see if it has changed its mind.  This is the right
        thing to do in order to stall output temporarily.

      * It can raise `StopIteration', which indicates that there will never
        be any more items.  The file descriptor will be closed.

    Subclasses can override this behaviour by defining a method `_next' and
    passing `None' as the NEXTFN.
    """
    super(WriteLinesSelector, me).__init__(*args, **kw)
    me._fd = fd
    set_nonblocking(fd)
    if nextfn is not None: me._next = nextfn

    ## The output state is as follows.
    ##
    ##   * `_buf' contains a number of output items, already formatted, and
    ##     ready for output in a single batch.  It might be empty.
    ##
    ##   * `_pos' is the current output position in `_buf'.
    ##
    ##   * `_more' is set unless the `_next' function has raised
    ##     `StopIteration': it indicates that we should close the descriptor
    ##     once all of the remaining data in the buffer has been sent.
    me._buf = ""
    me._pos = 0
    me._more = True

187 """Refill `_buf' by calling `_next'."""
188 sio = StringIO(); n = 0
190 try: item = me._next()
191 except StopIteration: me._more = False; break
192 if item is None: break
194 sio.write(item); sio.write("\n"); n += len(item) + 1
195 me._buf = sio.getvalue(); me._pos = 0
  def preselect(me, rfds, wfds):
    if me._fd == -1: return
    if me._buf == "" and me._more: me._refill()
    if me._buf != "" or not me._more: wfds.append(me._fd)

  def postselect_write(me, fd):
    if fd != me._fd: return
    while True:
      if me._pos >= len(me._buf):
        if me._more: me._refill()
        if not me._more: OS.close(me._fd); me._fd = -1; break
        if not me._buf: break
      try: n = OS.write(me._fd, me._buf[me._pos:])
      except OSError, err:
        if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: break
        elif err.errno == E.EPIPE: OS.close(me._fd); me._fd = -1; break
        else: raise
      me._pos += n

class ReadLinesSelector (BaseSelector):
  """Report whole lines from an input file descriptor as they arrive."""

  def __init__(me, fd, linefn = None, *args, **kw):
    """
    Initialize the ReadLinesSelector to read from the file descriptor FD.

    The FD is marked non-blocking.

    For each whole line, and the final partial line (if any), the selector
    calls LINEFN with the line as an argument (without the terminating
    newline).

    Subclasses can override this behaviour by defining a method `_line' and
    passing `None' as the LINEFN.
    """
    super(ReadLinesSelector, me).__init__(*args, **kw)
    me._fd = fd
    me._buf = ""
    set_nonblocking(fd)
    if linefn is not None: me._line = linefn

  def preselect(me, rfds, wfds):
    if me._fd != -1: rfds.append(me._fd)

  def postselect_read(me, fd):
    if fd != me._fd: return
    while True:
      try: buf = OS.read(me._fd, 4096)
      except OSError, err:
        if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: break
        else: raise
      if buf == "":
        OS.close(me._fd); me._fd = -1
        if me._buf: me._line(me._buf)
        break
      buf = me._buf + buf
      i = 0
      while True:
        try: j = buf.index("\n", i)
        except ValueError: break
        me._line(buf[i:j])
        i = j + 1
      me._buf = buf[i:]

def select_loop(selectors):
  """
  Multiplex I/O between the various SELECTORS.

  A `selector' SEL is an object which implements the selector protocol, which
  consists of three methods.

    * SEL.preselect(RFDS, WFDS) -- add any file descriptors which the
      selector is interested in reading from to the list RFDS, and add file
      descriptors it's interested in writing to to the list WFDS.

    * SEL.postselect_read(FD) -- informs the selector that FD is ready for
      reading.

    * SEL.postselect_write(FD) -- informs the selector that FD is ready for
      writing.

  The `select_loop' function loops as follows.

    * It calls the `preselect' method on each SELECTOR to determine what I/O
      events it thinks are interesting.

    * It waits for some interesting event to happen.

    * It calls the `postselect_read' and/or `postselect_write' methods on all
      of the selectors for each file descriptor which is ready.

  The loop ends when no selector is interested in any events.  This is simple
  but rather inefficient.
  """
  while True:
    rfds, wfds = [], []
    for sel in selectors: sel.preselect(rfds, wfds)
    if not rfds and not wfds: break
    rfds, wfds, _ = SEL.select(rfds, wfds, [])
    for fd in rfds:
      for sel in selectors: sel.postselect_read(fd)
    for fd in wfds:
      for sel in selectors: sel.postselect_write(fd)

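## Example (editor's sketch, never called): pump a fixed list of lines
## through a pipe using the selector protocol.  `WriteLinesSelector' feeds
## the pipe's write end; `ReadLinesSelector' reports lines arriving at the
## read end; `select_loop' runs until both descriptors have been closed.
def _demo_select_loop():
  rfd, wfd = OS.pipe()
  items = iter(["hello", "world"])
  select_loop([WriteLinesSelector(wfd, lambda: next(items)),
               ReadLinesSelector(rfd, lambda line: spew("got `%s'" % line))])
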
###--------------------------------------------------------------------------
### Running subprocesses.

def wait_outcome(st):
  """
  Given a status ST from `waitpid' (or similar), return a human-readable
  outcome.
  """
  if OS.WIFSIGNALED(st): return "killed by signal %d" % OS.WTERMSIG(st)
  elif OS.WIFEXITED(st):
    rc = OS.WEXITSTATUS(st)
    if rc: return "failed: rc = %d" % rc
    else: return "completed successfully"
  else: return "died with incomprehensible status 0x%04x" % st

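## For example (illustrative): `wait_outcome(0)' returns `completed
## successfully'; a child killed by SIGTERM gives `killed by signal 15'; and
## a child which called `exit(1)' shows up as `failed: rc = 1'.
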
class SubprocessFailure (Exception):
  """An exception indicating that a subprocess failed."""
  def __init__(me, what, st):
    me.what = what
    me.st = st
    if OS.WIFEXITED(st): me.rc, me.sig = OS.WEXITSTATUS(st), None
    elif OS.WIFSIGNALED(st): me.rc, me.sig = None, OS.WTERMSIG(st)
    else: me.rc, me.sig = None, None
  def __str__(me):
    return "subprocess `%s' %s" % (me.what, wait_outcome(me.st))

INHERIT = Tag('INHERIT')
PIPE = Tag('PIPE')
DISCARD = Tag('DISCARD')

@CTX.contextmanager
def subprocess(command,
               stdin = INHERIT, stdout = INHERIT, stderr = INHERIT,
               cwd = INHERIT, jobserver = DISCARD):
  """
  Hairy context manager for running subprocesses.

  The COMMAND is a list of arguments; COMMAND[0] names the program to be
  invoked.  (There's currently no way to run a program with an unusual
  name.)

  The keyword arguments `stdin', `stdout', and `stderr' explain what to do
  with the standard file descriptors.

    * `INHERIT' means that they should be left alone: the child will use a
      copy of the parent's descriptor.  This is the default.

    * `DISCARD' means that the descriptor should be re-opened onto
      `/dev/null' (for reading or writing as appropriate).

    * `PIPE' means that the descriptor should be re-opened as (the read or
      write end, as appropriate, of) a pipe, and the other end returned to
      the caller.

  Similarly, the JOBSERVER may be `INHERIT' to pass the jobserver descriptors
  and environment variable down to the child, or `DISCARD' to close it.  The
  default is `DISCARD'.

  The CWD may be `INHERIT' to run the child with the same working directory
  as the parent, or a pathname to change to an explicitly given working
  directory.

  The context body is passed three values, which are file descriptors for
  the other pipe ends for stdin, stdout, and stderr respectively, or -1 if
  there is no pipe.

  The context owns the pipe descriptors, and is expected to close them
  itself.  (Timing of closure is significant, particularly for `stdin'.)
  """
  r_in, w_in = -1, -1
  r_out, w_out = -1, -1
  r_err, w_err = -1, -1
  spew("running subprocess `%s'" % " ".join(command))

  ## Clean up as necessary...
  try:

    ## Set up stdin.
    if stdin is PIPE: r_in, w_in = OS.pipe()
    elif stdin is DISCARD: r_in = OS.open("/dev/null", OS.O_RDONLY)
    elif stdin is not INHERIT:
      raise ValueError("bad `stdin' value `%r'" % stdin)

    ## Set up stdout.
    if stdout is PIPE: r_out, w_out = OS.pipe()
    elif stdout is DISCARD: w_out = OS.open("/dev/null", OS.O_WRONLY)
    elif stdout is not INHERIT:
      raise ValueError("bad `stdout' value `%r'" % stdout)

    ## Set up stderr.
    if stderr is PIPE: r_err, w_err = OS.pipe()
    elif stderr is DISCARD: w_err = OS.open("/dev/null", OS.O_WRONLY)
    elif stderr is not INHERIT:
      raise ValueError("bad `stderr' value `%r'" % stderr)

    ## Start up the child.
    kid = OS.fork()

    if kid == 0:
      ## Child: fix up the descriptors, working directory, and jobserver,
      ## and then run the command.
      if r_in != -1: OS.dup2(r_in, 0); OS.close(r_in)
      if w_in != -1: OS.close(w_in)

      if w_out != -1: OS.dup2(w_out, 1); OS.close(w_out)
      if r_out != -1: OS.close(r_out)

      if w_err != -1: OS.dup2(w_err, 2); OS.close(w_err)
      if r_err != -1: OS.close(r_err)

      if cwd is not INHERIT: OS.chdir(cwd)

      ## Fix up the jobserver.
      if jobserver is DISCARD: SCHED.close_jobserver()

      try: OS.execvp(command[0], command)
      except OSError, err:
        moan("failed to run `%s': %s" % (command[0], err.strerror))
      OS._exit(127)

    ## Close the other ends of the pipes.
    if r_in != -1: OS.close(r_in); r_in = -1
    if w_out != -1: OS.close(w_out); w_out = -1
    if w_err != -1: OS.close(w_err); w_err = -1

    ## Return control to the context body.  Remember not to close its pipes.
    yield w_in, r_out, r_err
    w_in = r_out = r_err = -1

    ## Collect the child process's exit status.
    _, st = OS.waitpid(kid, 0)
    spew("subprocess `%s' %s" % (" ".join(command), wait_outcome(st)))
    if st: raise SubprocessFailure(" ".join(command), st)

  finally:

    ## Close any left-over file descriptors.
    for fd in [r_in, w_in, r_out, w_out, r_err, w_err]:
      if fd != -1: OS.close(fd)

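## Example (editor's sketch, never called): run a command with its output
## captured through a pipe.  The body owns the returned descriptors and must
## close them; `SubprocessFailure' is raised if the command fails.
def _demo_subprocess():
  with subprocess(["ls", "/"], stdout = PIPE) as (_, fd_out, _):
    f = OS.fdopen(fd_out)
    for line in f: spew("ls: %s" % line.rstrip("\n"))
    f.close()
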
def set_nonblocking(fd):
  """Mark the descriptor FD as non-blocking."""
  FC.fcntl(fd, FC.F_SETFL, FC.fcntl(fd, FC.F_GETFL) | OS.O_NONBLOCK)

class DribbleOut (BaseSelector):
  """A simple selector to feed a string to a descriptor, in pieces."""
  def __init__(me, fd, string, *args, **kw):
    super(DribbleOut, me).__init__(*args, **kw)
    me._fd = fd
    me._string = string
    set_nonblocking(me._fd)
    me.result = None
  def preselect(me, rfds, wfds):
    if me._fd != -1: wfds.append(me._fd)
  def postselect_write(me, fd):
    if fd != me._fd: return
    try: n = OS.write(me._fd, me._string)
    except OSError, err:
      if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: return
      elif err.errno == E.EPIPE: OS.close(me._fd); me._fd = -1; return
      else: raise
    if n == len(me._string): OS.close(me._fd); me._fd = -1
    else: me._string = me._string[n:]

class DribbleIn (BaseSelector):
  """A simple selector to collect all the input as a big string."""
  def __init__(me, fd, *args, **kw):
    super(DribbleIn, me).__init__(*args, **kw)
    me._fd = fd
    me._buf = StringIO()
    set_nonblocking(me._fd)
  def preselect(me, rfds, wfds):
    if me._fd != -1: rfds.append(me._fd)
  def postselect_read(me, fd):
    if fd != me._fd: return
    while True:
      try: buf = OS.read(me._fd, 4096)
      except OSError, err:
        if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: break
        else: raise
      if buf == "": OS.close(me._fd); me._fd = -1; break
      else: me._buf.write(buf)
  @property
  def result(me): return me._buf.getvalue()

RETURN = Tag('RETURN')
def run_program(command,
                stdin = INHERIT, stdout = INHERIT, stderr = INHERIT,
                *args, **kwargs):
  """
  A simplifying wrapper around `subprocess'.

  The COMMAND is a list of arguments; COMMAND[0] names the program to be
  invoked, as for `subprocess'.

  The keyword arguments `stdin', `stdout', and `stderr' explain what to do
  with the standard file descriptors.

    * `INHERIT' means that they should be left alone: the child will use a
      copy of the parent's descriptor.

    * `DISCARD' means that the descriptor should be re-opened onto
      `/dev/null' (for reading or writing as appropriate).

    * `RETURN', for an output descriptor, means that all of the output
      produced on that descriptor should be collected and returned as a
      string.

    * A string, for stdin, means that the string should be provided on the
      child's standard input.

  (The value `PIPE' is not permitted here.)

  Other arguments are passed on to `subprocess'.

  If no descriptors are marked `RETURN', then the function returns `None'; if
  exactly one descriptor is so marked, then the function returns that
  descriptor's output as a string; otherwise, it returns a tuple of strings
  for each such descriptor, in the usual order.
  """
  kw = dict(); kw.update(kwargs)
  selfn = []

  if isinstance(stdin, basestring):
    kw['stdin'] = PIPE; selfn.append(lambda fds: DribbleOut(fds[0], stdin))
  elif stdin is INHERIT or stdin is DISCARD:
    kw['stdin'] = stdin
  else:
    raise ValueError("bad `stdin' value `%r'" % stdin)

  if stdout is RETURN:
    kw['stdout'] = PIPE; selfn.append(lambda fds: DribbleIn(fds[1]))
  elif stdout is INHERIT or stdout is DISCARD:
    kw['stdout'] = stdout
  else:
    raise ValueError("bad `stdout' value `%r'" % stdout)

  if stderr is RETURN:
    kw['stderr'] = PIPE; selfn.append(lambda fds: DribbleIn(fds[2]))
  elif stderr is INHERIT or stderr is DISCARD:
    kw['stderr'] = stderr
  else:
    raise ValueError("bad `stderr' value `%r'" % stderr)

  with subprocess(command, *args, **kw) as fds:
    sel = [fn(fds) for fn in selfn]
    select_loop(sel)

  rr = []
  for s in sel:
    r = s.result
    if r is not None: rr.append(r)
  if len(rr) == 0: return None
  elif len(rr) == 1: return rr[0]
  else: return tuple(rr)

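## Example (editor's sketch, never called): feed a string to a filter and
## collect its output.  One `RETURN' descriptor means a single string comes
## back; marking both stdout and stderr `RETURN' would yield a pair instead.
def _demo_run_program():
  out = run_program(["tr", "a-z", "A-Z"], stdin = "hello\n", stdout = RETURN)
  assert out == "HELLO\n"
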
###--------------------------------------------------------------------------
### Other system-ish utilities.

@CTX.contextmanager
def safewrite(path):
  """
  Context manager for writing to a file.

  A new file, named `PATH.new', is opened for writing, and the file object
  provided to the context body.  If the body completes normally, the file is
  closed and renamed to PATH.  If the body raises an exception, the file is
  still closed, but not renamed into place.
  """
  new = path + ".new"
  with open(new, "w") as f: yield f
  OS.rename(new, path)

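## Example (editor's sketch, never called): the new content only becomes
## visible under the final name if the body completes; an exception leaves
## any existing PATH untouched (though `PATH.new' is left behind).
def _demo_safewrite(path):
  with safewrite(path) as f:
    f.write("fully-formed content\n")
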
@CTX.contextmanager
def safewrite_root(path, mode = None, uid = None, gid = None):
  """
  Context manager for writing to a file with root privileges.

  This is as for `safewrite', but the file is opened and written as root.
  """
  new = path + ".new"
  with subprocess(C.ROOTLY + ["tee", new],
                  stdin = PIPE, stdout = DISCARD) as (fd_in, _, _):
    pipe = OS.fdopen(fd_in, 'w')
    try: yield pipe
    finally: pipe.close()
  if mode is not None: run_program(C.ROOTLY + ["chmod", mode, new])
  if uid is not None:
    run_program(C.ROOTLY + ["chown",
                            uid + (gid is not None and ":" + gid or ""),
                            new])
  elif gid is not None:
    run_program(C.ROOTLY + ["chgrp", gid, new])
  run_program(C.ROOTLY + ["mv", new, path])

def mountpoint_p(dir):
  """Return true if DIR is a mountpoint."""

  ## A mountpoint can be distinguished because it is a directory whose device
  ## number differs from its parent.
  try: st1 = OS.stat(dir)
  except OSError, err:
    if err.errno == E.ENOENT: return False
    else: raise
  if not ST.S_ISDIR(st1.st_mode): return False
  st0 = OS.stat(OS.path.join(dir, ".."))
  return st0.st_dev != st1.st_dev

def mkdir_p(dir, mode = 0777):
  """
  Make a directory DIR, and any parents, as necessary.

  Unlike `OS.makedirs', this doesn't fail if DIR already exists.
  """
  if dir.startswith("/"): d = "/"; dir = dir[1:]
  else: d = ""
  for p in dir.split("/"):
    d = OS.path.join(d, p)
    try: OS.mkdir(d, mode)
    except OSError, err:
      if err.errno == E.EEXIST: pass
      else: raise

def umount(fs):
  """
  Unmount the filesystem FS.

  The FS may be the block device holding the filesystem, or (more usually)
  the mount point.
  """

  ## Sometimes random things can prevent unmounting.  Be persistent.
  for i in xrange(5):
    try: run_program(C.ROOTLY + ["umount", fs], stderr = DISCARD)
    except SubprocessFailure, err:
      if err.rc == 32: pass
      else: raise
    else: return
    T.sleep(0.2)
  run_program(C.ROOTLY + ["umount", fs], stderr = DISCARD)

@CTX.contextmanager
def lockfile(lock, exclp = True, waitp = True):
  """
  Acquire an exclusive lock on a named file LOCK while executing the body.

  If WAITP is true, wait until the lock is available; if false, then fail
  immediately if the lock can't be acquired.
  """
  fd = -1
  flag = 0
  if exclp: flag |= FC.LOCK_EX
  else: flag |= FC.LOCK_SH
  if not waitp: flag |= FC.LOCK_NB
  spew("acquiring %s lock on `%s'" %
       (exclp and "exclusive" or "shared", lock))
  try:
    while True:

      ## Open the file and take note of which file it is.
      fd = OS.open(lock, OS.O_RDWR | OS.O_CREAT, 0666)
      st0 = OS.fstat(fd)

      ## Acquire the lock, waiting if necessary.
      FC.lockf(fd, flag)

      ## Check that the lock file is still the same one.  It's permissible
      ## for the lock holder to release the lock by unlinking or renaming the
      ## lock file, in which case there might be a different lockfile there
      ## now which we need to acquire instead.
      ##
      ## It's tempting to `optimize' this code by opening a new file
      ## descriptor here so as to elide the additional call to fstat(2)
      ## above.  But this doesn't work: if we successfully acquire the lock,
      ## we then have two file descriptors open on the lock file, so we have
      ## to close one -- but, under the daft fcntl(2) rules, even closing
      ## `nfd' will release the lock immediately.
      try: st1 = OS.stat(lock)
      except OSError, err:
        if err.errno == E.ENOENT: pass
        else: raise
      else:
        if st0.st_dev == st1.st_dev and st0.st_ino == st1.st_ino: break
      OS.close(fd)

    ## We have the lock, so away we go.
    spew("lock `%s' acquired" % lock)
    yield None
    spew("lock `%s' released" % lock)

  finally:
    if fd != -1: OS.close(fd)

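## Example (editor's sketch, never called; the pathname is made up): mutual
## exclusion around a critical section.  With `waitp' false, a busy lock
## makes `lockf' raise an exception instead of blocking.
def _demo_lockfile():
  with lockfile("/tmp/demo.lock", exclp = True, waitp = True):
    spew("critical section")
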
def block_device_p(dev):
  """Return true if DEV names a block device."""
  try: st = OS.stat(dev)
  except OSError, err:
    if err.errno == E.ENOENT: return False
    else: raise
  else: return ST.S_ISBLK(st.st_mode)

###--------------------------------------------------------------------------
### Running parallel jobs.

## Return codes from `check'
READY = Tag('READY')
FAILED = Tag('FAILED')
DONE = Tag('DONE')
SLEEP = Tag('SLEEP')

class BaseJob (object):
  """
  Base class for jobs.

  Subclasses must implement `run' and `_mkname', and probably ought to extend
  `prepare' and `check'.
  """

  ## A magic token to prevent sneaky uninterned jobs.
  _MAGIC = Tag('MAGIC')

  ## A map from job names to objects.
  _MAP = {}

  ## Number of tail lines of the log to print on failure.
  LOGLINES = 20

  def __init__(me, _token, *args, **kw):
    """
    Initialize a job.

    Jobs are interned!  Don't construct instances (of subclasses) directly:
    use the `ensure' class method.
    """
    assert _token is me._MAGIC
    super(BaseJob, me).__init__(*args, **kw)

    ## Dependencies on other jobs.
    me._deps = None
    me._waiting = set()

    ## Attributes maintained by the JobServer.
    me._known = False
    me._token = None
    me._st = None
    me._logkid = -1
    me._logfile = None
    me.done = False
    me.started = False
    me.win = None

  def prepare(me):
    """
    Establish any prerequisite jobs.

    Delaying this allows command-line settings to override those chosen by
    dependent jobs.
    """
    pass

  @classmethod
  def ensure(cls, *args, **kw):
    """
    Return the unique job with the given parameters.

    If a matching job already exists, then return it.  Otherwise, create the
    new job, register it in the table, and notify the scheduler about it.
    """
    me = cls(_token = cls._MAGIC, *args, **kw)
    try:
      job = cls._MAP[me.name]
    except KeyError:
      cls._MAP[me.name] = me
      SCHED.add(me)
      return me
    else:
      return job

  @property
  def name(me):
    """Return the job's name, as calculated by `_mkname'."""
    try: name = me._name
    except AttributeError: name = me._name = me._mkname()
    return name

  ## Subclass responsibilities.

  def _mkname(me):
    """
    Return the job's name.

    By default, this is an unhelpful string which is distinct for every job.
    Subclasses should normally override this method to return a name as an
    injective function of the job parameters.
    """
    return "%s.%x" % (me.__class__.__name__, id(me))

  def check(me):
    """
    Return whether the job is ready to run.

    Returns a pair STATE, REASON.  The REASON is a human-readable string
    explaining what's going on, or `None' if it's not worth explaining.  The
    STATE is one of the following.

      * `READY' -- the job can be run at any time.

      * `FAILED' -- the job can't be started.  Usually, this means that some
        prerequisite job failed, there was some error in the job's
        parameters, or the environment is unsuitable for the job to run.

      * `DONE' -- the job has nothing to do.  Usually, this means that the
        thing the job acts on is already up-to-date.  It's bad form to do
        even minor work in `check'.

      * `SLEEP' -- the job can't be run right now.  It has arranged to be
        retried if conditions change.  (Spurious wakeups are permitted and
        must be handled correctly.)

    The default behaviour checks the set of dependencies, as built by the
    `await' method, and returns `SLEEP' or `FAILED' as appropriate, or
    `READY' if all the prerequisite jobs have completed successfully.
    """
    for job in me._deps:
      if not job.done:
        return SLEEP, "waiting for job `%s'" % job.name
      elif not job.win and not OPT.ignerr:
        return FAILED, "dependent on failed job `%s'" % job.name
    return READY, None

  ## Subclass utilities.

  def await(me, job):
    """Make sure that JOB completes before allowing this job to start."""
    me._deps.add(job)
    job._waiting.add(me)

  def _logtail(me):
    """
    Dump the last `LOGLINES' lines of the logfile.

    This is called if the job fails and was being run quietly, to provide the
    user with some context for the failure.
    """

    ## Gather blocks from the end of the log until we have enough lines.
    with open(me._logfile, 'r') as f:
      nlines = 0; bufs = []; bufsz = 4096
      f.seek(0, 2); off = f.tell()
      spew("start: off = %d" % off)
      while nlines <= me.LOGLINES and off > 0:
        off = max(0, off - bufsz)
        f.seek(off, 0)
        spew("try at off = %d" % off)
        buf = f.read(bufsz); bufs.append(buf)
        nlines += buf.count("\n")
        spew("now lines = %d" % nlines)
      buf = ''.join(reversed(bufs))

    ## We probably overshot.  Skip the extra lines from the start.
    i = 0
    while nlines > me.LOGLINES: i = buf.index("\n", i) + 1; nlines -= 1

    ## If we ended up trimming the log, print an ellipsis.
    if off > 0 or i > 0: print "%-*s * [...]" % (TAGWD, me.name)

    ## Print the log tail.
    lines = buf[i:].split("\n")
    if lines and lines[-1] == '': lines.pop()
    for line in lines: print "%-*s %s" % (TAGWD, me.name, line)

class BaseJobToken (object):
  """
  A job token is the authorization for a job to be run.

  Subclasses must implement `recycle' to allow some other job to use the
  token.
  """
  pass

class TrivialJobToken (BaseJobToken):
  """
  A trivial reusable token, for when issuing jobs in parallel without limit.

  There only needs to be one of these.
  """
  def recycle(me):
    spew("no token needed; nothing to recycle")
TRIVIAL_TOKEN = TrivialJobToken()

class JobServerToken (BaseJobToken):
  """A job token storing a byte from the jobserver pipe."""
  def __init__(me, char, pipefd, *args, **kw):
    super(JobServerToken, me).__init__(*args, **kw)
    me._char = char
    me._fd = pipefd
  def recycle(me):
    spew("returning token to jobserver pipe")
    OS.write(me._fd, me._char)

class PrivateJobToken (BaseJobToken):
  """
  The private job token belonging to a scheduler.

  When running under a GNU Make jobserver, there is a token for each byte in
  the pipe, and an additional one which represents the slot we're actually
  running in.  This class represents that additional token.
  """
  def __init__(me, sched, *args, **kw):
    super(PrivateJobToken, me).__init__(*args, **kw)
    me._sched = sched
  def recycle(me):
    assert me._sched._privtoken is None
    spew("recycling private token")
    me._sched._privtoken = me

class JobScheduler (object):
  """
  The main machinery for running and ordering jobs.

  This handles all of the details of job scheduling.
  """

  def __init__(me, rfd = -1, wfd = -1, npar = 1):
    """
    Initialize a scheduler.

      * RFD and WFD are the read and write ends of the jobserver pipe, as
        determined from the `MAKEFLAGS' environment variable, or -1.

      * NPAR is the maximum number of jobs to run in parallel, or `True' if
        there is no maximum (i.e., we're in `forkbomb' mode).
    """

    ## Set the parallelism state.  The `_rfd' and `_wfd' are the read and
    ## write ends of the jobserver pipe, or -1 if there is no jobserver.
    ## `_par' is true if we're meant to run jobs in parallel.  The case _par
    ## and _rfd = -1 means unconstrained parallelism.
    ##
    ## The jobserver pipe contains a byte for each shared job slot.  A
    ## scheduler reads a byte from the pipe for each job it wants to run
    ## (nearly -- see `_privtoken' below), and puts the byte back when the
    ## job finishes.  The GNU Make jobserver protocol specification insists
    ## that we preserve the value of the byte in the pipe (though doesn't
    ## currently make any use of this flexibility), so we record it in a
    ## `JobToken' object's `_char' attribute.
    me._par = rfd != -1 or npar is True or npar != 1
    spew("par is %r" % me._par)
    if rfd == -1 and npar > 1:
      rfd, wfd = OS.pipe()
      OS.write(wfd, (npar - 1)*'+')
      OS.environ["MAKEFLAGS"] = \
        (" -j --jobserver-auth=%(rfd)d,%(wfd)d " +
         "--jobserver-fds=%(rfd)d,%(wfd)d") % dict(rfd = rfd, wfd = wfd)
    me._rfd = rfd; me._wfd = wfd

    ## The scheduler state.  A job starts in the `_check' list.  Each
    ## iteration of the scheduler loop will inspect the jobs here and see
    ## whether it's ready to run: if not, it gets put in the `_sleep' list,
    ## where it will languish until something moves it back; if it is ready,
    ## it gets moved to the `_ready' list to wait for a token from the
    ## jobserver.  At that point the job can be started, and it moves to the
    ## `_kidmap', which associates a process-id with each running job.
    ## Finally, jobs which have completed are simply forgotten.  The `_njobs'
    ## counter keeps track of how many jobs are outstanding, so that we can
    ## stop when there are none left.
    me._check = set()
    me._sleep = set()
    me._ready = set()
    me._kidmap = {}
    me._logkidmap = {}
    me._njobs = 0

    ## As well as the jobserver pipe, we implicitly have one extra job slot,
    ## which is the one we took when we were started by our parent.  The
    ## right to do processing in this slot is represented by the `private
    ## token' here, distinguished from tokens from the jobserver pipe by
    ## having `None' as its `_char' value.
    me._privtoken = PrivateJobToken(me)

994 """Notice a new job and arrange for it to (try to) run."""
995 if job._known: return
996 spew("adding new job `%s'" % job.name)
1001 def close_jobserver(me):
1003 Close the jobserver file descriptors.
1005 This should be called within child processes to prevent them from messing
1008 if me._rfd != -1: OS.close(me._rfd); me._rfd = -1
1009 if me._wfd != -1: OS.close(me._wfd); me._wfd = -1
1010 try: del OS.environ["MAKEFLAGS"]
1011 except KeyError: pass
1014 """Zap all jobs which aren't yet running."""
1015 for jobset in [me._sleep, me._check, me._ready]:
  def _retire(me, job, win, outcome):
    """
    Declare that a job has stopped, and deal with the consequences.

    JOB is the completed job, which should not be on any of the job queues.
    WIN is true if the job succeeded, and false otherwise.  OUTCOME is a
    human-readable string explaining how the job came to its end, or `None'
    if no message should be reported.
    """
    global RC
    me._njobs -= 1

    ## Return the job's token to the pool.
    if job._token is not None: job._token.recycle()
    job._token = None

    ## Update and maybe report the job's status.
    job.done = True
    job.win = win
    if outcome is not None and not OPT.silent:
      if OPT.quiet and not job.win and job._logfile: job._logtail()
      if not job.win or not OPT.quiet:
        print "%-*s %c (%s)" % \
          (TAGWD, job.name, job.win and '|' or '*', outcome)

    ## If the job failed, and we care, arrange to exit nonzero.
    if not win and not OPT.ignerr: RC = 2

    ## If the job failed, and we're supposed to give up after the first
    ## error, then zap all of the waiting jobs.
    if not job.win and not OPT.keepon and not OPT.ignerr: me._killall()

    ## If this job has dependents then wake them up and see whether they're
    ## ready to run.
    for j in job._waiting:
      try: me._sleep.remove(j)
      except KeyError: pass
      else:
        spew("waking dependent job `%s'" % j.name)
        me._check.add(j)

  def _reap(me, kid, st):
    """
    Deal with the child with process-id KID having exited with status ST.
    """

    ## Figure out what kind of child this is.  Note that it has finished.
    try: job = me._kidmap[kid]
    except KeyError:
      try: job = me._logkidmap[kid]
      except KeyError:
        spew("unknown child %d exits with status 0x%04x" % (kid, st))
        return
      else:
        ## It's a logging child.
        del me._logkidmap[kid]
        job._logkid = DONE
        spew("logging process for job `%s' exits with status 0x%04x" %
             (job.name, st))
    else:
      ## It's the main job process.
      del me._kidmap[kid]
      job._st = st
      spew("main process for job `%s' exits with status 0x%04x" %
           (job.name, st))

    ## If either of the job's associated processes is still running then we
    ## should stop now and give the other one a chance.
    if job._st is None or job._logkid is not DONE:
      spew("deferring retirement for job `%s'" % job.name)
      return
    spew("completing deferred retirement for job `%s'" % job.name)

    ## Update and (maybe) report the job status.
    if job._st == 0: win = True; outcome = None
    else: win = False; outcome = wait_outcome(job._st)
    me._retire(job, win, outcome)

1103 """Reap all finished child processes."""
1105 try: kid, st = OS.waitpid(-1, OS.WNOHANG)
1106 except OSError, err:
1107 if err.errno == E.ECHILD: break
  def run_job(me, job):
    """Start running the JOB."""
    job.started = True
    if OPT.dryrun: return None, None

    ## Make pipes to collect the job's output and error reports.
    r_out, w_out = OS.pipe()
    r_err, w_err = OS.pipe()

    ## Find a log file to write.  Avoid races over the log names; but this
    ## means that the log descriptor needs to be handled somewhat carefully.
    logdir = OS.path.join(C.STATE, "log"); mkdir_p(logdir)
    logseq = 1
    while True:
      logfile = OS.path.join(logdir, "%s-%s#%d" % (job.name, TODAY, logseq))
      try:
        logfd = OS.open(logfile, OS.O_WRONLY | OS.O_CREAT | OS.O_EXCL, 0666)
      except OSError, err:
        if err.errno == E.EEXIST: logseq += 1; continue
        else: raise
      else:
        break
    job._logfile = logfile

    ## Make sure there's no pending output, or we might get two copies.  (I
    ## don't know how to flush all output streams in Python, but this is good
    ## enough for our purposes.)
    SYS.stdout.flush()

    ## Set up the logging child first.  If we can't, take down the whole job.
    try: job._logkid = OS.fork()
    except OSError, err: OS.close(logfd); return None, err

    if job._logkid == 0:

      ## The main logging loop.

      ## Close the jobserver descriptors, and the write ends of the pipes.
      me.close_jobserver()
      OS.close(w_out); OS.close(w_err)

      ## Capture the job's stdout and stderr and wait for everything to
      ## happen.
      def log_lines(fd, marker):
        def fn(line):
          OS.write(1, "%-*s %s %s\n" % (TAGWD, job.name, marker, line))
          OS.write(logfd, "%s %s\n" % (marker, line))
        return ReadLinesSelector(fd, fn)
      select_loop([log_lines(r_out, "|"), log_lines(r_err, "*")])

      ## We're done.  (Closing the descriptors here would be like polishing
      ## the floors before the building is demolished.)
      OS._exit(0)

    ## Back in the main process: record the logging child.  At this point we
    ## no longer need the logfile descriptor.
    me._logkidmap[job._logkid] = job
    OS.close(logfd)

    ## Start the main job process.
    try: kid = OS.fork()
    except OSError, err: return None, err
    if kid == 0:

      ## Close the read ends of the pipes, and move the write ends to the
      ## right places.  (This will go wrong if we were started without enough
      ## descriptors.  Fingers crossed.)
      OS.dup2(w_out, 1); OS.dup2(w_err, 2)
      OS.close(r_out); OS.close(w_out)
      OS.close(r_err); OS.close(w_err)
      spew("running job `%s' as pid %d" % (job.name, OS.getpid()))

      ## Run the job, catching nonlocal flow.
      try:
        job.run()
      except ExpectedError, err:
        moan(err)
        OS._exit(2)
      except Exception, err:
        TB.print_exc(file = SYS.stderr)
        OS._exit(3)
      except BaseException, err:
        moan("caught unexpected exception: %r" % err)
        OS._exit(112)
      spew("job `%s' ran to completion" % job.name)

      ## Clean up old logs.
      match = []
      pat = RX.compile(r"^%s-(\d{4})-(\d{2})-(\d{2})\#(\d+)$" %
                       RX.escape(job.name))
      for f in OS.listdir(logdir):
        m = pat.match(f)
        if m: match.append((f, int(m.group(1)), int(m.group(2)),
                            int(m.group(3)), int(m.group(4))))
      match.sort(key = lambda (_, y, m, d, q): (y, m, d, q))
      if len(match) > LOGKEEP:
        for (f, _, _, _, _) in match[:-LOGKEEP]:
          try: OS.unlink(OS.path.join(logdir, f))
          except OSError, err:
            if err.errno == E.ENOENT: pass
            else: raise

      ## All done.
      OS._exit(0)

    ## Back in the main process: close both the pipes and return the child
    ## process id.
    OS.close(r_out); OS.close(w_out)
    OS.close(r_err); OS.close(w_err)
    if OPT.quiet: print "%-*s | (started)" % (TAGWD, job.name)
    return kid, None

1227 """Run the scheduler."""
1229 spew("JobScheduler starts")
1232 ## The main scheduler loop. We go through three main phases:
1234 ## * Inspect the jobs in the `check' list to see whether they can
1235 ## run. After this, the `check' list will be empty.
1237 ## * If there are running jobs, check to see whether any of them have
1238 ## stopped, and deal with the results. Also, if there are jobs
1239 ## ready to start and a job token has become available, then
1240 ## retrieve the token. (Doing these at the same time is the tricky
1243 ## * If there is a job ready to run, and we retrieved a token, then
1244 ## start running the job.
1246 ## Check the pending jobs to see if they can make progress: run each
1247 ## job's `check' method and move it to the appropriate queue. (It's OK
1248 ## if `check' methods add more jobs to the list, as long as things
1249 ## settle down eventually.)
      while True:
        try: job = me._check.pop()
        except KeyError: break
        if job._deps is None:
          job._deps = set()
          job.prepare()
        state, reason = job.check()
        tail = reason is not None and ": %s" % reason or ""
        if state is READY:
          spew("job `%s' ready to run%s" % (job.name, tail))
          me._ready.add(job)
        elif state is FAILED:
          spew("job `%s' refused to run%s" % (job.name, tail))
          me._retire(job, False, "refused to run%s" % tail)
        elif state is DONE:
          spew("job `%s' has nothing to do%s" % (job.name, tail))
          me._retire(job, True, reason)
        elif state is SLEEP:
          spew("job `%s' can't run yet%s" % (job.name, tail))
          me._sleep.add(job)
        else:
          raise ValueError("unexpected job check from `%s': %r, %r" %
                           (job.name, state, reason))

      ## If there are no jobs left, then we're done.
      if not me._njobs:
        spew("all jobs completed")
        break

      ## Make sure we can make progress.  There are no jobs on the check list
      ## any more, because we just cleared it.  We assume that jobs which are
      ## ready to run will eventually receive a token.  So we only end up in
      ## trouble if there are jobs asleep, but none running or ready to run.
      ##spew("#jobs = %d" % me._njobs)
      ##spew("sleeping: %s" % ", ".join([j.name for j in me._sleep]))
      ##spew("ready: %s" % ", ".join([j.name for j in me._ready]))
      ##spew("running: %s" %
      ##     ", ".join([j.name for j in me._kidmap.itervalues()]))
      assert not me._sleep or me._kidmap or me._logkidmap or me._ready

      ## Wait for something to happen.
      if not me._ready or (not me._par and me._privtoken is None):
        ## If we have no jobs ready to run, then we must wait for an existing
        ## child to exit.  Hopefully, a sleeping job will be able to make
        ## progress after this.
        ##
        ## Alternatively, if we're not supposed to be running jobs in
        ## parallel and we don't have the private token, then we have no
        ## choice but to wait for the running job to complete.
        ##
        ## There's no check here for `ECHILD'.  We really shouldn't be here
        ## if there are no children to wait for.  (The check list must be
        ## empty because we just drained it.  If the ready list is empty,
        ## then all of the jobs must be running or sleeping; but the
        ## assertion above means that either there are no jobs at all, in
        ## which case we should have stopped, or at least one is running, in
        ## which case it's safe to wait for it.  The other case is that we're
        ## running jobs sequentially, and one is currently running, so
        ## there's nothing for it but to wait for it -- and hope that it will
        ## wake up one of the sleeping jobs.  The remaining possibility is
        ## that we've miscounted somewhere, which will cause a crash.)
        if not me._ready:
          spew("no new jobs ready: waiting for outstanding jobs to complete")
        else:
          spew("job running without parallelism: waiting for it to finish")
        kid, st = OS.waitpid(-1, 0)
        me._reap(kid, st)
        me._reapkids()
        continue

      ## We have jobs ready to run, so try to acquire a token.
      if me._rfd == -1 and me._par:
        ## We're running with unlimited parallelism, so we don't need a token
        ## to run a job.
        spew("running new job without token")
        token = TRIVIAL_TOKEN
      elif me._privtoken is not None:
        ## Our private token is available, so we can use that to start
        ## a new job.
        spew("private token available: assigning to new job")
        token = me._privtoken
        me._privtoken = None
      else:
        ## We have to read from the jobserver pipe.  Unfortunately, we're not
        ## allowed to set the pipe nonblocking, because make is also using it
        ## and will get into a serious mess.  And we must deal with `SIGCHLD'
        ## arriving at any moment.  We use the same approach as GNU Make.  We
        ## start by making a copy of the jobserver descriptor: it's this
        ## descriptor we actually try to read from.  We set a signal handler
        ## to close this descriptor if a child exits.  And we try one last
        ## time to reap any children which have exited just before we try
        ## reading the jobserver pipe.  This way we're covered:
        ##
        ##   * If a child exits during the main loop, before we establish the
        ##     descriptor copy then we'll notice when we try reaping
        ##     children.
        ##
        ##   * If a child exits between the last-chance reap and the read,
        ##     the signal handler will close the descriptor and the `read'
        ##     call will fail with `EBADF'.
        ##
        ##   * If a child exits while we're inside the `read' system call,
        ##     then the syscall will fail with `EINTR'.
        ##
        ## The only problem is that we can't do this from Python, because
        ## Python signal handlers are delayed.  This is what the `jobclient'
        ## module is for.
        ##
        ## The `jobclient' function is called as
        ##
        ##      TOKEN, PID, STATUS = JC.jobclient(RFD)
        ##
        ## It returns a tuple of three values: TOKEN, PID, STATUS.  If TOKEN
        ## is not `None', then reading the pipe succeeded; if TOKEN is empty,
        ## then the pipe returned EOF, so we should abort; otherwise, TOKEN
        ## is a singleton string holding the token character.  If PID is not
        ## `None', then PID is the process id of a child which exited, and
        ## STATUS is its exit status.
        spew("waiting for token from jobserver")
        tokch, kid, st = JC.jobclient(me._rfd)

        if kid is not None:
          me._reap(kid, st)
          me._reapkids()
        if tokch is None:
          spew("no token; trying again")
          continue
        elif tokch == '':
          error("jobserver pipe closed; giving up")
          me._killall()
          continue
        spew("received token from jobserver")
        token = JobServerToken(tokch, me._wfd)

      ## We have a token, so we should start up the job.
      job = me._ready.pop()
      job._token = token
      spew("start new job `%s'" % job.name)
      kid, err = me.run_job(job)
      if err is not None:
        me._retire(job, False, "failed to fork: %s" % err)
        continue
      if kid is None: me._retire(job, True, "dry run")
      else: me._kidmap[kid] = job

    ## We ran out of work to do.
    spew("JobScheduler done")

###--------------------------------------------------------------------------
### Configuration.

R_CONFIG = RX.compile(r"^([a-zA-Z0-9_]+)='(.*)'$")

class Config (object):

  def _conv_str(s): return s
  def _conv_list(s): return s.split()
  def _conv_set(s): return set(s.split())

  _CONVERT = {
    "ROOTLY": _conv_list,
    "MYARCH": _conv_set,
    "NATIVE_ARCHS": _conv_set,
    "FOREIGN_ARCHS": _conv_set,
    "FOREIGN_GNUARCHS": _conv_list,
    "ALL_ARCHS": _conv_set,
    "NATIVE_CHROOTS": _conv_set,
    "FOREIGN_CHROOTS": _conv_set,
    "ALL_CHROOTS": _conv_set,
    "BASE_PACKAGES": _conv_list,
    "EXTRA_PACKAGES": _conv_list,
    "CROSS_PACKAGES": _conv_list,
    "CROSS_PATHS": _conv_list,
    "APTCONF": _conv_list,
    "LOCALPKGS": _conv_list,
    "SCHROOT_COPYFILES": _conv_list,
    "SCHROOT_NSSDATABASES": _conv_list
  }

  _CONV_MAP = {
    "*_APTCONFSRC": ("APTCONFSRC", _conv_str),
    "*_DEPS": ("PKGDEPS", _conv_list),
    "*_QEMUHOST": ("QEMUHOST", _conv_str),
    "*_QEMUARCH": ("QEMUARCH", _conv_str),
    "*_ALIASES": ("DISTALIAS", _conv_str)
  }

  _conv_str = staticmethod(_conv_str)
  _conv_list = staticmethod(_conv_list)
  _conv_set = staticmethod(_conv_set)

1443 """; raw = open('state/config.sh').read(); _ignore = """ @@@config@@@
1446 for line in raw.split("\n"):
1448 if not line or line.startswith('#'): continue
1449 m = R_CONFIG.match(line)
1450 if not m: raise ExpectedError("bad config line `%s'" % line)
1451 k, v = m.group(1), m.group(2).replace("'\\''", "'")
1453 try: conv = me._CONVERT[k]
1457 try: i = k.index("_", i + 1)
1458 except ValueError: conv = me._conv_str; break
1459 try: map, conv = me._CONV_MAP["*" + k[i:]]
1460 except KeyError: pass
1462 d = me._conf.setdefault(map, dict())
1464 if k.startswith("_"): k = k[1:]
1468 def __getattr__(me, attr):
1469 try: return me._conf[attr]
1470 except KeyError, err: raise AttributeError(err.args[0])
1472 with toplevel_handler(): C = Config()
###--------------------------------------------------------------------------
### Chroot maintenance utilities.

CREATE = Tag("CREATE")
FORCE = Tag("FORCE")

DEBCONF_TWEAKS = """
        DEBIAN_FRONTEND=noninteractive; export DEBIAN_FRONTEND
        DEBIAN_PRIORITY=critical; export DEBIAN_PRIORITY
        DEBCONF_NONINTERACTIVE_SEEN=true; export DEBCONF_NONINTERACTIVE_SEEN
"""

def check_fresh(fresh, update):
  """
  Compare a refresh mode FRESH against an UPDATE time.

  Return a (STATUS, REASON) pair, suitable for returning from a job `check'
  method.

  The FRESH argument may be one of the following:

    * `CREATE' is satisfied if the thing exists at all: it returns `READY' if
      the thing doesn't yet exist (UPDATE is `None'), or `DONE' otherwise.

    * `FORCE' is never satisfied: it always returns `READY'.

    * an integer N is satisfied if the UPDATE time is at most N seconds
      earlier than the present: it returns `READY' if the UPDATE is too old,
      or `DONE' otherwise.
  """
  if update is None: return READY, "must create"
  elif fresh is FORCE: return READY, "update forced"
  elif fresh is CREATE: return DONE, "already created"
  elif NOW - unzulu(update) > fresh: return READY, "too stale: updating"
  else: return DONE, "already sufficiently up-to-date"

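## For example (illustrative): `check_fresh(CREATE, None)' returns `(READY,
## "must create")', while `check_fresh(CREATE, zulu())' returns `(DONE,
## "already created")'; and `check_fresh(86400, zulu())' is `DONE' until the
## update stamp is more than a day old.
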
def lockfile_path(file):
  """
  Return the full path for a lockfile named FILE.

  Create the lock directory if necessary.
  """
  lockdir = OS.path.join(C.STATE, "lock"); mkdir_p(lockdir)
  return OS.path.join(lockdir, file)

def chroot_src_lockfile(dist, arch):
  """
  Return the lockfile for the source-chroot for DIST on ARCH.

  It is not allowed to acquire a source-chroot lock while holding any other
  locks.
  """
  return lockfile_path("source.%s-%s" % (dist, arch))

def chroot_src_lv(dist, arch):
  """
  Return the logical volume name for the source-chroot for DIST on ARCH.
  """
  return "%s%s-%s" % (C.LVPREFIX, dist, arch)

def chroot_src_blkdev(dist, arch):
  """
  Return the block-device name for the source-chroot for DIST on ARCH.
  """
  return OS.path.join("/dev", C.VG, chroot_src_lv(dist, arch))

def chroot_src_mntpt(dist, arch):
  """
  Return the mountpoint path for setting up the source-chroot for DIST on
  ARCH.

  Note that this is not the mountpoint that schroot(1) uses.
  """
  mnt = OS.path.join(C.STATE, "mnt", "%s-%s" % (dist, arch))
  mkdir_p(mnt)
  return mnt

def chroot_session_mntpt(session):
  """Return the mountpoint for an schroot session."""
  return OS.path.join("/schroot", session)

def crosstools_lockfile(dist, arch):
  """
  Return the lockfile for the cross-build tools for DIST, hosted by ARCH.

  When locking multiple cross-build tools, you must acquire the locks in
  lexicographically ascending order.
  """
  return lockfile_path("cross-tools.%s-%s" % (dist, arch))

def switch_prefix(string, map):
  """
  Replace the prefix of a STRING, according to the given MAP.

  MAP is a sequence of (OLD, NEW) pairs.  For each such pair in turn, test
  whether STRING starts with OLD: if so, return STRING, but with the prefix
  OLD replaced by NEW.  If no OLD prefix matches, then raise a `ValueError'.
  """
  for old, new in map:
    if string.startswith(old): return new + string[len(old):]
  raise ValueError("expected `%s' to start with one of %s" %
                   (string, ", ".join(["`%s'" % old for old, new in map])))

def host_to_chroot(path):
  """
  Convert a host path under `C.LOCAL' to the corresponding chroot path under
  `/usr/local.schroot'.
  """
  return switch_prefix(path, [(C.LOCAL + "/", "/usr/local.schroot/")])

def chroot_to_host(path):
  """
  Convert a chroot path under `/usr/local.schroot' to the corresponding
  host path under `C.LOCAL'.
  """
  return switch_prefix(path, [("/usr/local.schroot/", C.LOCAL + "/")])

def split_dist_arch(spec):
  """Split a SPEC of the form `DIST-ARCH' into the pair (DIST, ARCH)."""
  dash = spec.index("-")
  return spec[:dash], spec[dash + 1:]

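## For example (illustrative): `split_dist_arch("buster-arm64")' returns
## `("buster", "arm64")': everything up to the first hyphen names the
## distribution.
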
def elf_binary_p(arch, path):
  """Return whether PATH is an ELF binary for ARCH."""
  if not OS.path.isfile(path): return False
  with open(path, 'rb') as f: magic = f.read(20)
  if magic[0:4] != "\x7fELF": return False
  if magic[8:16] != 8*"\0": return False
  if arch == "i386":
    ## 32-bit, little-endian, ELF version 1; machine type EM_386.
    if magic[4:7] != "\x01\x01\x01": return False
    if magic[18:20] != "\x03\x00": return False
  elif arch == "amd64":
    ## 64-bit, little-endian, ELF version 1; machine type EM_X86_64.
    if magic[4:7] != "\x02\x01\x01": return False
    if magic[18:20] != "\x3e\x00": return False
  else:
    raise ValueError("unsupported donor architecture `%s'" % arch)
  return True

def progress(msg):
  """
  Print a progress message MSG.

  This is intended to be called within a job's `run' method, so it doesn't
  check `OPT.quiet' or `OPT.silent'.
  """
  OS.write(1, ";; %s\n" % msg)

class NoSuchChroot (Exception):
  """
  Exception indicating that a chroot does not exist.

  Specifically, it means that it doesn't even have a logical volume.
  """
  def __init__(me, dist, arch):
    me.dist, me.arch = dist, arch
  def __str__(me):
    return "chroot for `%s' on `%s' not found" % (me.dist, me.arch)

@CTX.contextmanager
def mount_chroot_src(dist, arch):
  """
  Context manager for mounting the source-chroot for DIST on ARCH.

  The context manager automatically unmounts the filesystem again when the
  body exits.  You must hold the appropriate source-chroot lock before
  calling this routine.
  """
  dev = chroot_src_blkdev(dist, arch)
  if not block_device_p(dev): raise NoSuchChroot(dist, arch)
  mnt = chroot_src_mntpt(dist, arch)
  try:
    run_program(C.ROOTLY + ["mount", dev, mnt])
    yield mnt
  finally:
    umount(mnt)

@CTX.contextmanager
def chroot_session(dist, arch, sourcep = False):
  """
  Context manager for running an schroot(1) session.

  Returns the (ugly, automatically generated) session name to the context
  body.  By default, a snapshot session is started: set SOURCEP true to start
  a source-chroot session.  You must hold the appropriate source-chroot lock
  before starting a source-chroot session.

  The context manager automatically closes the session again when the body
  exits.
  """
  chroot = chroot_src_lv(dist, arch)
  if sourcep: chroot = "source:" + chroot
  session = run_program(["schroot", "-uroot", "-b", "-c", chroot],
                        stdout = RETURN).rstrip("\n")
  try:
    root = OS.path.join(chroot_session_mntpt(session), "fs")
    yield session, root
  finally:
    run_program(["schroot", "-e", "-c", session])

def run_root(command, **kw):
  """Run a COMMAND as root.  Arguments are as for `run_program'."""
  return run_program(C.ROOTLY + command, **kw)

def run_schroot_session(session, command, rootp = False, **kw):
  """
  Run a COMMAND within an schroot(1) session.

  Arguments are as for `run_program'.
  """
  if rootp:
    return run_program(["schroot", "-uroot", "-r",
                        "-c", session, "--"] + command, **kw)
  else:
    return run_program(["schroot", "-r",
                        "-c", session, "--"] + command, **kw)

def run_schroot_source(dist, arch, command, **kw):
  """
  Run a COMMAND through schroot(1), in the source-chroot for DIST on ARCH.

  Arguments are as for `run_program'.  You must hold the appropriate source-
  chroot lock before calling this routine.
  """
  return run_program(["schroot", "-uroot",
                      "-c", "source:%s" % chroot_src_lv(dist, arch),
                      "--"] + command, **kw)

###--------------------------------------------------------------------------
### Metadata.

class MetadataClass (type):
  """
  Metaclass for metadata classes.

  Notice a `VARS' attribute in the class dictionary, and augment it with a
  `_VARSET' attribute, constructed as a set containing the same items.  (We
  need them both: the set satisfies fast lookups, while the original sequence
  remembers the ordering.)
  """
  def __new__(me, name, supers, dict):
    try: vars = dict['VARS']
    except KeyError: pass
    else: dict['_VARSET'] = set(vars)
    return super(MetadataClass, me).__new__(me, name, supers, dict)

class BaseMetadata (object):
  """
  Base class for metadata objects.

  Metadata bundles are simple collections of key/value pairs.  Keys should
  usually be Python identifiers because they're used to name attributes.
  Values are strings, but shouldn't have leading or trailing whitespace, and
  can't contain newlines.

  Metadata bundles are written to files.  The format is simple enough: empty
  lines and lines starting with `#' are ignored; otherwise, the line must
  have the form

        KEY = VALUE

  where KEY does not contain `='; spaces around the `=' are optional, and
  spaces around the KEY and VALUE are stripped.  The order of keys is
  unimportant; keys are always written in a standard order on output.
  """
  __metaclass__ = MetadataClass

  def __init__(me, **kw):
    """Initialize a metadata bundle from keyword arguments."""
    for k, v in kw.iteritems():
      setattr(me, k, v)
    for v in me.VARS:
      try: getattr(me, v)
      except AttributeError: setattr(me, v, None)

  def __setattr__(me, attr, value):
    """
    Try to set an attribute.

    Only attribute names listed in the `VARS' class attribute are permitted.
    """
    if attr not in me._VARSET: raise AttributeError, attr
    super(BaseMetadata, me).__setattr__(attr, value)

  @classmethod
  def read(cls, path):
    """Return a new metadata bundle read from a named PATH."""
    map = {}
    with open(path) as f:
      for line in f:
        line = line.strip()
        if line == "" or line.startswith("#"): continue
        k, v = line.split("=", 1)
        map[k.strip()] = v.strip()
    return cls(**map)

  def _write(me, file):
    """
    Write the metadata bundle to the FILE (a file-like object).

    This is intended for use by subclasses which want to override the default
    I/O behaviour of the main `write' method.
    """
    file.write("### -*-conf-*-\n")
    for k in me.VARS:
      try: v = getattr(me, k)
      except AttributeError: pass
      else:
        if v is not None: file.write("%s = %s\n" % (k, v))

  def write(me, path):
    """
    Write the metadata bundle to a given PATH.

    The file is replaced atomically.
    """
    with safewrite(path) as f: me._write(f)

  def __repr__(me):
    return "#<%s: %s>" % (me.__class__.__name__,
                          ", ".join("%s=%r" % (k, getattr(me, k, None))
                                    for k in me.VARS))

class ChrootMetadata (BaseMetadata):
  VARS = ['dist', 'arch', 'update']

  @classmethod
  def read(cls, dist, arch):
    try:
      with lockfile(chroot_src_lockfile(dist, arch), exclp = False):
        with mount_chroot_src(dist, arch) as mnt:
          return super(ChrootMetadata, cls).read(OS.path.join(mnt, "META"))
    except IOError, err:
      if err.errno == E.ENOENT: pass
      else: raise
    except NoSuchChroot: pass
    return cls(dist = dist, arch = arch)

  def write(me):
    with mount_chroot_src(me.dist, me.arch) as mnt:
      with safewrite_root(OS.path.join(mnt, "META")) as f:
        me._write(f)

class CrossToolsMetadata (BaseMetadata):
  VARS = ['dist', 'arch', 'update']

  @classmethod
  def read(cls, dist, arch):
    try:
      return super(CrossToolsMetadata, cls)\
        .read(OS.path.join(C.LOCAL, "cross", "%s-%s" % (dist, arch), "META"))
    except IOError, err:
      if err.errno == E.ENOENT: pass
      else: raise
    return cls(dist = dist, arch = arch)

  def write(me, dir = None):
    if dir is None:
      dir = OS.path.join(C.LOCAL, "cross", "%s-%s" % (me.dist, me.arch))
    with safewrite_root(OS.path.join(dir, "META")) as f:
      me._write(f)

###--------------------------------------------------------------------------
### Constructing a chroot.

R_DIVERT = RX.compile(r"^diversion of (.*) to .* by install-cross-tools$")

class ChrootJob (BaseJob):
  """
  Create or update a chroot.
  """

  SPECS = C.ALL_CHROOTS

  def __init__(me, spec, fresh = CREATE, *args, **kw):
    super(ChrootJob, me).__init__(*args, **kw)
    me._dist, me._arch = split_dist_arch(spec)
    me._fresh = fresh
    me._meta = ChrootMetadata.read(me._dist, me._arch)
    me._tools_chroot = me._qemu_chroot = None

  def _mkname(me): return "chroot.%s-%s" % (me._dist, me._arch)

  def prepare(me):
    if me._arch in C.FOREIGN_ARCHS:
      me._tools_chroot = CrossToolsJob.ensure\
        ("%s-%s" % (me._dist, C.TOOLSARCH), FRESH)
      me._qemu_chroot = CrossToolsJob.ensure\
        ("%s-%s" % (me._dist, C.QEMUHOST[me._arch]), FRESH)
      me.await(me._tools_chroot)
      me.await(me._qemu_chroot)

  def check(me):
    status, reason = super(ChrootJob, me).check()
    if status is not READY: return status, reason
    if (me._tools_chroot is not None and me._tools_chroot.started) or \
       (me._qemu_chroot is not None and me._qemu_chroot.started):
      return READY, "prerequisites run"
    return check_fresh(me._fresh, me._meta.update)

  def _install_cross_tools(me):
    """
    Install or refresh cross-tools in the source-chroot.

    This function assumes that the source-chroot lock is already held.

    Note that there isn't a job class corresponding to this function.  It's
    done automatically as part of source-chroot setup and update for foreign
    architectures.
    """
    with Cleanup() as clean:

      dist, arch = me._dist, me._arch

      mymulti = run_program(["dpkg-architecture", "-a", C.TOOLSARCH,
                             "-qDEB_HOST_MULTIARCH"],
                            stdout = RETURN).rstrip("\n")
      gnuarch = run_program(["dpkg-architecture", "-A", arch,
                             "-qDEB_TARGET_GNU_TYPE"],
                            stdout = RETURN).rstrip("\n")

      crossdir = OS.path.join(C.LOCAL, "cross",
                              "%s-%s" % (dist, C.TOOLSARCH))

      qarch, qhost = C.QEMUARCH[arch], C.QEMUHOST[arch]
      qemudir = OS.path.join(C.LOCAL, "cross",
                             "%s-%s" % (dist, qhost), "QEMU")

      ## Acquire lockfiles in a canonical order to prevent deadlocks.
      donors = [C.TOOLSARCH]
      if qarch != C.TOOLSARCH: donors.append(qarch)
      donors.sort()
      for a in donors:
        clean.enter(lockfile(crosstools_lockfile(dist, a), exclp = False))

      session, root = clean.enter(chroot_session(dist, arch, sourcep = True))

      ## Search the cross-tools tree for tools, to decide what to do with
      ## each file.  Make lists:
      ##
      ##   * `want_div' is simply a set of all files in the chroot which need
      ##     dpkg diversions to prevent foreign versions of the tools from
      ##     clobbering our native versions.
      ##
      ##   * `want_link' is a dictionary mapping paths which need symbolic
      ##     links into the cross-tools trees to their link destinations.
      progress("scan cross-tools tree")
      want_div = set()
      want_link = {}
      cross_prefix = crossdir + "/"
      qemu_prefix = qemudir + "/"
      toolchain_prefix = OS.path.join(crossdir, "TOOLCHAIN", gnuarch) + "/"
      def examine(path):
        dest = switch_prefix(path, [(qemu_prefix, "/usr/bin/"),
                                    (toolchain_prefix, "/usr/bin/"),
                                    (cross_prefix, "/")])
        if OS.path.islink(path): src = OS.readlink(path)
        else: src = host_to_chroot(path)
        want_link[dest] = src
        if not OS.path.isdir(path): want_div.add(dest)
      examine(OS.path.join(qemudir, "qemu-%s-static" % qarch))
      examine(OS.path.join(crossdir, "lib", mymulti))
      examine(OS.path.join(crossdir, "usr/lib", mymulti))
      examine(OS.path.join(crossdir, "usr/lib/gcc-cross"))

      def visit(_, dir, files):
        for f in files:
          if f == "META" or f == "QEMU" or f == "TOOLCHAIN" or \
             (dir.endswith("/lib") and (f == mymulti or f == "gcc-cross")):
            continue
          path = OS.path.join(dir, f)
          if OS.path.islink(path) or not OS.path.isdir(path): examine(path)
      OS.path.walk(crossdir, visit, None)
      OS.path.walk(OS.path.join(crossdir, "TOOLCHAIN", gnuarch),
                   visit, None)


      ## Build the set `have_div' of paths which already have diversions.
      progress("scan chroot")
      have_div = set()
      with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
                       "dpkg-divert", "--list"],
                      stdout = PIPE) as (_, fd_out, _):
        f = OS.fdopen(fd_out)
        for line in f:
          m = R_DIVERT.match(line.rstrip("\n"))
          if m: have_div.add(m.group(1))
        f.close()

      ## Build a dictionary `have_link' of symbolic links into the cross-
      ## tools trees. Also, be sure to collect all of the relative symbolic
      ## links which are in the cross-tools tree.
      have_link = dict()
      with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
                       "sh", "-e", "-c", """
        find / -xdev -lname "/usr/local.schroot/cross/*" -printf "%p %l\\n"
      """], stdout = PIPE) as (_, fd_out, _):
        f = OS.fdopen(fd_out)
        for line in f:
          dest, src = line.split()
          have_link[dest] = src
        f.close()
      for path in want_link.iterkeys():
        real = root + path
        if not OS.path.islink(real): continue
        have_link[path] = OS.readlink(real)

      ## Add diversions for the paths which need one, but don't have one.
      ## There's a hack here because the `--no-rename' option was required in
      ## the same version in which it was introduced, so there's no single
      ## incantation that will work across the boundary.
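      ## (Illustrative example: `dpkg-divert --divert /usr/bin/tar.ARCH
      ## --add /usr/bin/tar' tells dpkg that the foreign package's
      ## `/usr/bin/tar' should be installed as `/usr/bin/tar.ARCH', leaving
      ## our symlink to the fast native tool undisturbed.)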
      progress("add missing diversions")
      with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
                       "sh", "-e", "-c", """
        a=%(arch)s
        if dpkg-divert >/dev/null 2>&1 --no-rename --help
        then no_rename=--no-rename
        else no_rename=
        fi
        while read path; do
          dpkg-divert --package "install-cross-tools" $no_rename \
            --divert "$path.$a" --add "$path"
        done
      """ % dict(arch = arch)], stdin = PIPE) as (fd_in, _, _):
        f = OS.fdopen(fd_in, 'w')
        for path in want_div:
          if path not in have_div: f.write(path + "\n")
        f.close()

      ## Go through each diverted tool, and, if it hasn't been moved aside,
      ## then /link/ it across now. If we rename it, then the chroot will
      ## stop working -- which is why we didn't allow `dpkg-divert' to do the
      ## rename. We can tell a tool that's already been handled, because
      ## it's a symlink into one of the cross trees.
      progress("preserve existing foreign files")
      chroot_cross_prefix = host_to_chroot(crossdir) + "/"
      chroot_qemu_prefix = host_to_chroot(qemudir) + "/"
      for path in want_div:
        real = root + path; div = real + "." + arch; cross = crossdir + path
        if OS.path.exists(div): continue
        if not OS.path.exists(real): continue
        if OS.path.islink(real):
          realdest = OS.readlink(real)
          if realdest.startswith(chroot_cross_prefix) or \
             realdest.startswith(chroot_qemu_prefix):
            continue
          if OS.path.islink(cross) and realdest == OS.readlink(cross):
            continue
        progress("preserve existing foreign file `%s'" % path)
        run_root(["ln", real, div])

      ## Update all of the symbolic links which are currently wrong: add
      ## links which are missing, delete ones which are obsolete, and update
      ## ones which have the wrong target.
      progress("update symlinks")
      for path, src in want_link.iteritems():
        real = root + path; new = real + ".new"
        try: old_src = have_link[path]
        except KeyError: pass
        else:
          if src == old_src: continue
        progress("link `%s' -> `%s'" % (path, src))
        dir = OS.path.dirname(real)
        if not OS.path.isdir(dir): run_root(["mkdir", "-p", dir])
        if OS.path.exists(new): run_root(["rm", "-f", new])
        run_root(["ln", "-s", src, new])
        run_root(["mv", new, real])
      for path in have_link.iterkeys():
        if path in want_link: continue
        progress("remove obsolete link `%s' -> `%s'" %
                 (path, have_link[path]))
        real = root + path
        run_root(["rm", "-f", real])

      ## Remove diversions from paths which don't need them any more. Here
      ## it's safe to rename, because either the tool isn't there, in which
      ## case it obviously wasn't important, or it is, and `dpkg-divert' will
      ## atomically replace our link with the foreign version.
      progress("remove obsolete diversions")
      with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
                       "sh", "-e", "-c", """
        a=%(arch)s
        while read path; do
          dpkg-divert --package "install-cross-tools" --rename \
            --divert "$path.$a" --remove "$path"
        done
      """ % dict(arch = arch)], stdin = PIPE) as (fd_in, _, _):
        f = OS.fdopen(fd_in, 'w')
        for path in have_div:
          if path not in want_div: f.write(path + "\n")
        f.close()

  def _make_chroot(me):
    """
    Create the source-chroot, recording its metadata.

    This will recreate a source-chroot from scratch, destroying the existing
    logical volume if necessary.
    """
    with Cleanup() as clean:

      dist, arch = me._dist, me._arch
      clean.enter(lockfile(chroot_src_lockfile(dist, arch)))

      mnt = chroot_src_mntpt(dist, arch)
      dev = chroot_src_blkdev(dist, arch)
      lv = chroot_src_lv(dist, arch)
      newlv = lv + ".new"

      ## Clean up any leftover debris.
      if mountpoint_p(mnt): umount(mnt)
      if block_device_p(dev):
        run_root(["lvremove", "-f", "%s/%s" % (C.VG, lv)])

      ## Create the logical volume and filesystem. It's important that the
      ## logical volume not have its official name until after it contains a
      ## mountable filesystem.
      progress("create filesystem")
      run_root(["lvcreate", "--yes", C.LVSZ, "-n", newlv, C.VG])
      run_root(["mkfs", "-j", "-L%s-%s" % (dist, arch),
                OS.path.join("/dev", C.VG, newlv)])
      run_root(["lvrename", C.VG, newlv, lv])

      ## Start installing the chroot.
      with mount_chroot_src(dist, arch) as mnt:

        ## Set the basic structure.
        run_root(["mkdir", "-m755", OS.path.join(mnt, "fs")])
        run_root(["chmod", "750", mnt])

        ## Install the base system.
        progress("install base system")
        run_root(["eatmydata", "debootstrap"] +
                 (arch in C.FOREIGN_ARCHS and ["--foreign"] or []) +
                 ["--arch=" + arch, "--variant=minbase",
                  "--include=" + ",".join(C.BASE_PACKAGES),
                  dist, OS.path.join(mnt, "fs"), C.DEBMIRROR])

        ## If this is a cross-installation, then install the necessary `qemu'
        ## and complete the installation.
        if arch in C.FOREIGN_ARCHS:
          qemu = OS.path.join("cross", "%s-%s" % (dist, C.QEMUHOST[arch]),
                              "QEMU", "qemu-%s-static" % C.QEMUARCH[arch])
          run_root(["install", OS.path.join(C.LOCAL, qemu),
                    OS.path.join(mnt, "fs/usr/bin")])
          run_root(["chroot", OS.path.join(mnt, "fs"),
                    "/debootstrap/debootstrap", "--second-stage"])
          run_root(["ln", "-sf",
                    OS.path.join("/usr/local.schroot", qemu),
                    OS.path.join(mnt, "fs/usr/bin")])

        ## Set up `/usr/local'.
        progress("install `/usr/local' symlink")
        run_root(["rm", "-rf", OS.path.join(mnt, "fs/usr/local")])
        run_root(["ln", "-s",
                  OS.path.join("local.schroot", arch),
                  OS.path.join(mnt, "fs/usr/local")])
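        ## (So, inside the chroot, `/usr/local' resolves to
        ## `/usr/local.schroot/ARCH' -- presumably a bind mount of the
        ## host's shared local tree -- which is how the cross-tools and the
        ## locally-built packages below become visible to the chroot.)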

        ## Install the `apt' configuration.
        progress("configure package manager")
        run_root(["rm", "-f", OS.path.join(mnt, "fs/etc/apt/sources.list")])
        for c in C.APTCONF:
          run_root(["ln", "-s",
                    OS.path.join("/usr/local.schroot/etc/apt/apt.conf.d", c),
                    OS.path.join(mnt, "fs/etc/apt/apt.conf.d")])
        run_root(["ln", "-s",
                  "/usr/local.schroot/etc/apt/sources.%s" % dist,
                  OS.path.join(mnt, "fs/etc/apt/sources.list")])

        with safewrite_root\
            (OS.path.join(mnt, "fs/etc/apt/apt.conf.d/20arch")) as f:
          f.write("""\
### -*-apt-conf-*-

APT { Architecture "%s"; };
""" % arch)

        ## Set up the locale and time zone from the host system.
        progress("configure locales and timezone")
        run_root(["cp", "/etc/locale.gen", "/etc/timezone",
                  OS.path.join(mnt, "fs/etc")])
        with open("/etc/timezone") as f: tz = f.readline().strip()
        run_root(["ln", "-sf",
                  OS.path.join("/usr/share/zoneinfo", tz),
                  OS.path.join(mnt, "fs/etc/localtime")])
        run_root(["cp", "/etc/default/locale",
                  OS.path.join(mnt, "fs/etc/default")])

        ## Fix `/etc/mtab'.
        progress("set `/etc/mtab'")
        run_root(["ln", "-sf", "/proc/mounts",
                  OS.path.join(mnt, "fs/etc/mtab")])

        ## Prevent daemons from starting within the chroot.
        progress("inhibit daemon startup")
        with safewrite_root(OS.path.join(mnt, "fs/usr/sbin/policy-rc.d"),
                            mode = "755") as f:
          f.write("""\
#! /bin/sh
echo >&2 "policy-rc.d: Services disabled by policy."
exit 101
""")

        ## Hack the dynamic linker to prefer libraries in `/usr' over
        ## `/usr/local'. This prevents `dpkg-shlibdeps' from becoming
        ## confused.
        progress("configure dynamic linker")
        with safewrite_root\
            (OS.path.join(mnt, "fs/etc/ld.so.conf.d/libc.conf")) as f:
          f.write("# libc default configuration\n")
        with safewrite_root\
            (OS.path.join(mnt, "fs/etc/ld.so.conf.d/zzz-local.conf")) as f:
          f.write("""\
### Local hack to make /usr/local/ late.
/usr/local/lib
""")

      ## If this is a foreign architecture then we need to set it up.
      if arch in C.FOREIGN_ARCHS:

        ## Keep the chroot's native Qemu out of our way: otherwise we'll stop
        ## being able to run programs in the chroot. There's a hack here
        ## because the `--no-rename' option was required in the same version
        ## in which it was introduced, so there's no single incantation that
        ## will work across the boundary.
        progress("divert emulator")
        run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c", """
          if dpkg-divert >/dev/null 2>&1 --no-rename --help
          then no_rename=--no-rename
          else no_rename=
          fi
          dpkg-divert --package install-cross-tools $no_rename \
            --divert /usr/bin/%(qemu)s.%(arch)s --add /usr/bin/%(qemu)s
        """ % dict(arch = arch, qemu = "qemu-%s-static" % C.QEMUARCH[arch])])

        ## Install faster native tools.
        me._install_cross_tools()

      ## Finishing touches.
      progress("finishing touches")
      run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c",
                                      DEBCONF_TWEAKS + """
        apt-get update
        apt-get -y install "$@"
        apt-get -y autoremove
        apt-get clean
      """, "."] + C.EXTRA_PACKAGES, stdin = DISCARD)

      ## Mark the chroot as done.
      me._meta.update = zulu()
      me._meta.write()

  def _update_chroot(me):
    """Refresh the source-chroot, updating its metadata."""
    with Cleanup() as clean:
      dist, arch = me._dist, me._arch
      clean.enter(lockfile(chroot_src_lockfile(dist, arch)))
      run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c",
                                      DEBCONF_TWEAKS + """
        apt-get update
        apt-get -y dist-upgrade
        apt-get -y autoremove
        apt-get clean
      """], stdin = DISCARD)
      if arch in C.FOREIGN_ARCHS: me._install_cross_tools()
      me._meta.update = zulu(); me._meta.write()

  def run(me):
    if me._meta.update is not None: me._update_chroot()
    else: me._make_chroot()

###--------------------------------------------------------------------------
### Extracting the cross tools.

class CrossToolsJob (BaseJob):
  """Extract cross-tools from a donor chroot."""

  SPECS = C.NATIVE_CHROOTS

  def __init__(me, spec, fresh = CREATE, *args, **kw):
    super(CrossToolsJob, me).__init__(*args, **kw)
    me._dist, me._arch = split_dist_arch(spec)
    me._meta = CrossToolsMetadata.read(me._dist, me._arch)
    me._fresh = fresh
    me._chroot = None

  def _mkname(me): return "cross-tools.%s-%s" % (me._dist, me._arch)

  def prepare(me):
    st, r = check_fresh(me._fresh, me._meta.update)
    if st is DONE: return
    me._chroot = ChrootJob.ensure("%s-%s" % (me._dist, me._arch), FRESH)
    me.await(me._chroot)

  def check(me):
    status, reason = super(CrossToolsJob, me).check()
    if status is not READY: return status, reason
    if me._chroot is not None and me._chroot.started:
      return READY, "prerequisites run"
    return check_fresh(me._fresh, me._meta.update)

  def run(me):
    with Cleanup() as clean:

      dist, arch = me._dist, me._arch

      mymulti = run_program(["dpkg-architecture", "-a" + arch,
                             "-qDEB_HOST_MULTIARCH"],
                            stdout = RETURN).rstrip("\n")
      crossarchs = [run_program(["dpkg-architecture", "-A" + a,
                                 "-qDEB_TARGET_GNU_TYPE"],
                                stdout = RETURN).rstrip("\n")
                    for a in C.FOREIGN_ARCHS]

      crossdir = OS.path.join(C.LOCAL, "cross", "%s-%s" % (dist, arch))
      crossold = crossdir + ".old"; crossnew = crossdir + ".new"
      usrbin = OS.path.join(crossnew, "usr/bin")

      clean.enter(lockfile(crosstools_lockfile(dist, arch)))
      run_program(["rm", "-rf", crossnew])

      ## Open a session to the donor chroot.
      progress("establish snapshot")
      session, root = clean.enter(chroot_session(dist, arch))

      ## Make sure the donor tree is up-to-date, and install the extra
      ## packages we need.
      progress("install tools packages")
      run_schroot_session(session, ["eatmydata", "sh", "-e", "-c",
                                    DEBCONF_TWEAKS + """
        apt-get update
        apt-get -y upgrade
        apt-get -y install "$@"
      """, "."] + C.CROSS_PACKAGES, rootp = True, stdin = DISCARD)

      ## Walk a PATH within the donor tree, copying links and their targets
      ## into the new cross-tools tree so that the links will resolve.
      def chase(path):
        dest = ""

        ## Work through the remaining components of the PATH.
        while path != "":
          try: sl = path.index("/")
          except ValueError: step = path; path = ""
          else: step, path = path[:sl], path[sl + 1:]

          ## Split off and analyse the first component.
          if step == "" or step == ".":
            ## A redundant `/' or `./'. Skip it.
            pass
          elif step == "..":
            ## A `../'. Strip off the trailing component of DEST.
            dest = dest[:dest.rindex("/")]
          else:
            ## Something else. Transfer the component name to DEST.
            dest += "/" + step

          ## If DEST refers to something in the cross-tools tree then we're
          ## done.
          crossdest = crossnew + dest
          try: st = OS.lstat(crossdest)
          except OSError, err:
            if err.errno == E.ENOENT:
              ## No. We need to copy something from the donor tree so that
              ## the link will work.
              st = OS.lstat(root + dest)
              if ST.S_ISDIR(st.st_mode):
                OS.mkdir(crossdest)
              else:
                progress("copy `%s'" % dest)
                run_program(["rsync", "-aHR",
                             "%s/.%s" % (root, dest),
                             crossnew])
            else:
              raise

          ## If DEST refers to a symbolic link, then prepend the link target
          ## to PATH so that we can be sure the link will work.
          if ST.S_ISLNK(st.st_mode):
            link = OS.readlink(crossdest)
            if link.startswith("/"): dest = ""; link = link[1:]
            else:
              try: dest = dest[:dest.rindex("/")]
              except ValueError: dest = ""
            if path == "": path = link
            else: path = "%s/%s" % (path, link)
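      ## (Worked example, with hypothetical names: chasing `/usr/bin/gcc-12'
      ## when that path is a symlink to `../lib/gcc/TARGET/12/gcc' first
      ## copies the link itself, then restarts the walk with the target path
      ## appended, copying each missing directory or file along the way
      ## until everything the link needs exists under CROSSNEW.)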

      ## Work through the shopping list, copying the things it names into the
      ## cross-tools tree.
      progress("copy shopping list")
      for pat in C.CROSS_PATHS:
        pat = pat.replace("MULTI", mymulti)
        any = False
        for rootpath in GLOB.iglob(root + pat):
          any = True
          path = rootpath[len(root):]
          progress("copy `%s'" % path)
          run_program(["rsync", "-aHR", "%s/.%s" % (root, path), crossnew])
        if not any:
          raise RuntimeError("no matches for cross-tool pattern `%s'" % pat)

      ## Scan the new tree: chase down symbolic links, copying extra stuff
      ## that we'll need; and examine ELF binaries to make sure we get the
      ## necessary shared libraries.
      scan = []
      def visit(_, dir, files):
        for f in files:
          path = OS.path.join(dir, f)
          inside = switch_prefix(path, [(crossnew + "/", "/")])
          if OS.path.islink(path): chase(inside)
          if elf_binary_p(arch, path): scan.append(inside)
      OS.path.walk(crossnew, visit, None)

      ## Work through the ELF binaries in `scan', determining which shared
      ## libraries they'll need.
      ##
      ## The rune running in the chroot session reads ELF binary names on
      ## stdin, one per line, and runs `ldd' on them to discover the binary's
      ## needed libraries and resolve them into pathnames. Each pathname is
      ## printed to stdout as a line `+PATHNAME', followed by a final line
      ## consisting only of `-' as a terminator. This is necessary so that
      ## we can tell when we've finished, because newly discovered libraries
      ## need to be fed back to discover their recursive dependencies. (This
      ## is why the `WriteLinesSelector' interface is quite so hairy.)
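      ## (For instance -- illustrative output only -- `ldd /usr/bin/tar'
      ## might print lines such as
      ##         linux-vdso.so.1 (0x00007ffd...)
      ##         libacl.so.1 => /lib/MULTI/libacl.so.1 (0x00007f...)
      ## from which the rune below would emit `+/lib/MULTI/libacl.so.1' and
      ## then the terminating `-'.)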
      with subprocess(["schroot", "-r", "-c", session, "--",
                       "sh", "-e", "-c", """
        while read path; do
          ldd "$path" | while read a b c d; do
            case "$a:$b:$c:$d" in
              not:a:dynamic:executable) ;;
              statically:linked::) ;;
              /*) echo "+$a" ;;
              *:=\\>:/*) echo "+$c" ;;
              *:\\(*\\)::) ;;
              *) echo >&2 "failed to find shared library \\`$a'"; exit 2 ;;
            esac
          done
          echo "-"
        done
      """], stdin = PIPE, stdout = PIPE) as (fd_in, fd_out, _):

        ## Keep track of the number of binaries we've reported to the `ldd'
        ## process for which we haven't yet seen all of their dependencies.
        ## (This is wrapped in a `Struct' because of Python's daft scoping
        ## rules.)
        v = Struct(n = 0)

        def line_in():
          ## Provide a line of input to the `ldd' rune. If there's nothing
          ## left to do, raise `StopIteration' to signal this.

          ## See if there's something to scan.
          try:
            path = scan.pop()
          except IndexError:

            ## There's nothing currently waiting to be scanned.
            if v.n:
              ## There are still outstanding replies, so stall.
              return None
            else:
              ## There are no outstanding replies left, and we have nothing
              ## more to scan, so we must be finished.
              raise StopIteration

          ## The `scan' list isn't empty, so return an item from that, and
          ## remember that there's one more thing we expect to see answers
          ## about.
          v.n += 1; return path

        def line_out(line):
          ## We've received a line from the `ldd' process.

          if line == "-":
            ## It's finished processing one of our binaries. Note this.
            ## Maybe it's time to stop.
            v.n -= 1
            return

          ## Strip the leading marker (which is just there so that the
          ## terminating `-' is unambiguous).
          assert line.startswith("+")
          lib = line[1:]

          ## If we already have this binary then we'll already have submitted
          ## it.
          path = crossnew + lib
          try: OS.lstat(path)
          except OSError, err:
            if err.errno == E.ENOENT: pass
            else: raise
          else: return

          ## Copy it into the tools tree, together with any symbolic links
          ## along the way.
          chase(lib)

          ## If this is an ELF binary (and it ought to be!) then submit it
          ## for further scanning.
          if elf_binary_p(arch, path):
            scan.append(switch_prefix(path, [(crossnew + "/", "/")]))

        ## And run this entire contraption. When this is done, we should
        ## have all of the library dependencies for all of our binaries.
        select_loop([WriteLinesSelector(fd_in, line_in),
                     ReadLinesSelector(fd_out, line_out)])

      ## Set up the cross-compiler and emulator. Start by moving the cross
      ## compilers and emulator into their specific places, so they don't end
      ## up cluttering chroots for non-matching architectures.
      progress("establish TOOLCHAIN and QEMU")
      OS.mkdir(OS.path.join(crossnew, "TOOLCHAIN"))
      qemudir = OS.path.join(crossnew, "QEMU")
      OS.mkdir(qemudir)
      for gnu in C.FOREIGN_GNUARCHS:
        OS.mkdir(OS.path.join(crossnew, "TOOLCHAIN", gnu))
      for f in OS.listdir(usrbin):
        for gnu in C.FOREIGN_GNUARCHS:
          gnuprefix = gnu + "-"
          if f.startswith(gnuprefix):
            tooldir = OS.path.join(crossnew, "TOOLCHAIN", gnu)
            OS.rename(OS.path.join(usrbin, f), OS.path.join(tooldir, f))
            OS.symlink(f, OS.path.join(tooldir, f[len(gnuprefix):]))
            break
        else:
          if f.startswith("qemu-") and f.endswith("-static"):
            OS.rename(OS.path.join(usrbin, f), OS.path.join(qemudir, f))
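      ## (For example -- names purely illustrative -- a donor file
      ## `usr/bin/aarch64-linux-gnu-gcc' ends up as
      ## `TOOLCHAIN/aarch64-linux-gnu/aarch64-linux-gnu-gcc' with an
      ## unprefixed `gcc' symlink beside it, while `qemu-aarch64-static'
      ## moves into `QEMU/'.)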

      ## The GNU cross compilers try to find their additional pieces via a
      ## relative path, which isn't going to end well. Add a symbolic link
      ## at the right place to where the things are actually going to live.
      toollib = OS.path.join(crossnew, "TOOLCHAIN", "lib")
      OS.mkdir(toollib)
      OS.symlink("../../usr/lib/gcc-cross",
                 OS.path.join(toollib, "gcc-cross"))

      ## We're done. Replace the old cross-tools with our new one.
      me._meta.update = zulu()
      me._meta.write(crossnew)
      if OS.path.exists(crossdir): run_program(["mv", crossdir, crossold])
      OS.rename(crossnew, crossdir)
      run_program(["rm", "-rf", crossold])

###--------------------------------------------------------------------------
### Building and installing local packages.

def pkg_metadata_lockfile(pkg):
  return lockfile_path("pkg-meta.%s" % pkg)

def pkg_srcdir_lockfile(pkg, ver):
  return lockfile_path("pkg-source.%s-%s" % (pkg, ver))

def pkg_srcdir(pkg, ver):
  return OS.path.join(C.LOCAL, "src", "%s-%s" % (pkg, ver))

def pkg_builddir(pkg, ver, arch):
  return OS.path.join(pkg_srcdir(pkg, ver), "build.%s" % arch)
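
## (So, for a hypothetical package `zlib' at version 1.3 built for amd64,
## the source unpacks into `C.LOCAL/src/zlib-1.3' and the build happens in
## `C.LOCAL/src/zlib-1.3/build.amd64'.)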

class PackageMetadata (BaseMetadata):
  VARS = ["pkg"] + list(C.ALL_ARCHS)

  @classmethod
  def read(cls, pkg):
    try:
      return super(PackageMetadata, cls)\
        .read(OS.path.join(C.LOCAL, "src", "META.%s" % pkg))
    except IOError, err:
      if err.errno == E.ENOENT: pass
      else: raise
    return cls(pkg = pkg)

  def write(me):
    super(PackageMetadata, me)\
      .write(OS.path.join(C.LOCAL, "src", "META.%s" % me.pkg))

class PackageSourceJob (BaseJob):

  SPECS = C.LOCALPKGS

  def __init__(me, pkg, fresh = CREATE, *args, **kw):
    super(PackageSourceJob, me).__init__(*args, **kw)
    me._pkg = pkg
    tar = None; ver = None
    r = RX.compile(r"^%s-(\d.*)\.tar\.(?:Z|z|gz|bz2|xz|lzma)$" %
                   RX.escape(pkg))
    for f in OS.listdir("pkg"):
      m = r.match(f)
      if m is None: pass
      elif tar is not None:
        raise ExpectedError("multiple source tarballs of package `%s'" % pkg)
      else: tar, ver = f, m.group(1)
    if tar is None:
      raise ExpectedError("no source tarball of package `%s'" % pkg)
    me.version = ver
    me.tarball = OS.path.join("pkg", tar)

  def _mkname(me): return "pkg-source.%s" % me._pkg

  def check(me):
    status, reason = super(PackageSourceJob, me).check()
    if status is not READY: return status, reason
    if OS.path.isdir(pkg_srcdir(me._pkg, me.version)):
      return DONE, "already unpacked"
    else:
      return READY, "no source tree"

  def run(me):
    with Cleanup() as clean:
      pkg, ver, tar = me._pkg, me.version, me.tarball
      srcdir = pkg_srcdir(pkg, ver)
      newdir = srcdir + ".new"

      progress("unpack `%s'" % me.tarball)
      clean.enter(lockfile(pkg_srcdir_lockfile(pkg, ver)))
      run_program(["rm", "-rf", newdir])
      OS.mkdir(newdir)
      run_program(["tar", "xf", OS.path.join(OS.getcwd(), me.tarball)],
                  cwd = newdir)
      things = OS.listdir(newdir)
      if len(things) == 1:
        OS.rename(OS.path.join(newdir, things[0]), srcdir)
        OS.rmdir(newdir)
      else:
        OS.rename(newdir, srcdir)
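      ## (That is: a tarball which unpacks into a single top-level directory
      ## -- the usual case, e.g. `zlib-1.3/...' -- has that directory renamed
      ## into place, while an archive which unpacks several top-level entries
      ## has the `.new' directory itself renamed instead. Names here are
      ## illustrative only.)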

class PackageBuildJob (BaseJob):

  SPECS = ["%s:%s" % (pkg, arch)
           for pkg in C.LOCALPKGS
           for arch in C.ALL_ARCHS]

  def __init__(me, spec, fresh = CREATE, *args, **kw):
    super(PackageBuildJob, me).__init__(*args, **kw)
    colon = spec.index(":")
    me._pkg, me._arch = spec[:colon], spec[colon + 1:]

  def _mkname(me): return "pkg-build.%s:%s" % (me._pkg, me._arch)

  def prepare(me):
    me.await(ChrootJob.ensure("%s-%s" % (C.PRIMARY_DIST, me._arch), CREATE))
    me._meta = PackageMetadata.read(me._pkg)
    me._src = PackageSourceJob.ensure(me._pkg, FRESH); me.await(me._src)
    me._prereq = [PackageBuildJob.ensure("%s:%s" % (prereq, me._arch), FRESH)
                  for prereq in C.PKGDEPS[me._pkg]]
    for j in me._prereq: me.await(j)

  def check(me):
    status, reason = super(PackageBuildJob, me).check()
    if status is not READY: return status, reason
    if me._src.started: return READY, "fresh source directory"
    for j in me._prereq:
      if j.started:
        return READY, "dependency `%s' freshly installed" % j._pkg
    if getattr(me._meta, me._arch) == me._src.version:
      return DONE, "already installed"
    return READY, "not yet installed"

  def run(me):
    with Cleanup() as clean:
      pkg, ver, arch = me._pkg, me._src.version, me._arch

      session, _ = clean.enter(chroot_session(C.PRIMARY_DIST, arch))
      builddir = OS.path.join(pkg_srcdir(pkg, ver), "build.%s" % arch)
      chroot_builddir = host_to_chroot(builddir)
      run_program(["rm", "-rf", builddir])
      OS.mkdir(builddir)

      progress("prepare %s chroot" % arch)
      run_schroot_session(session,
                          ["eatmydata", "apt-get", "update"],
                          rootp = True, stdin = DISCARD)
      run_schroot_session(session,
                          ["eatmydata", "apt-get", "-y", "upgrade"],
                          rootp = True, stdin = DISCARD)
      run_schroot_session(session,
                          ["eatmydata", "apt-get", "-y",
                           "install", "pkg-config"],
                          rootp = True, stdin = DISCARD)
      run_schroot_session(session,
                          ["mount", "-oremount,rw", "/usr/local.schroot"],
                          rootp = True, stdin = DISCARD)

      progress("configure `%s' %s for %s" % (pkg, ver, arch))
      run_schroot_session(session, ["sh", "-e", "-c", """
        cd "$1" &&
        ../configure PKG_CONFIG_PATH=/usr/local/lib/pkgconfig.hidden
      """, ".", chroot_builddir])

      progress("compile `%s' %s for %s" % (pkg, ver, arch))
      run_schroot_session(session, ["sh", "-e", "-c", """
        cd "$1" && make -j4 && make -j4 check
      """, ".", chroot_builddir])

      existing = getattr(me._meta, arch, None)
      if existing is not None and existing != ver:
        progress("uninstall existing `%s' %s for %s" % (pkg, existing, arch))
        run_schroot_session(session, ["sh", "-e", "-c", """
          cd "$1" && make uninstall
        """, ".", OS.path.join(pkg_srcdir(pkg, existing),
                               "build.%s" % arch)],
                            rootp = True)

      progress("install `%s' %s for %s" % (pkg, ver, arch))
      run_schroot_session(session, ["sh", "-e", "-c", """
        cd "$1" && make install
        mkdir -p /usr/local/lib/pkgconfig.hidden
        mv /usr/local/lib/pkgconfig/*.pc /usr/local/lib/pkgconfig.hidden || :
      """, ".", chroot_builddir], rootp = True)

      clean.enter(lockfile(pkg_metadata_lockfile(pkg)))
      me._meta = PackageMetadata.read(pkg)
      setattr(me._meta, arch, ver); me._meta.write()

      with lockfile(chroot_src_lockfile(C.PRIMARY_DIST, arch)):
        run_schroot_source(C.PRIMARY_DIST, arch, ["ldconfig"])

###--------------------------------------------------------------------------
### Process the configuration and options.

OPTIONS = OP.OptionParser\
  (usage = "chroot-maint [-diknqs] [-fFRESH] [-jN] JOB[.SPEC,...] ...")
for short, long, props in [
        ("-d", "--debug", {
          'dest': 'debug', 'default': False, 'action': 'store_true',
          'help': "print lots of debugging drivel" }),
        ("-f", "--fresh", {
          'dest': 'fresh', 'metavar': 'FRESH', 'default': "create",
          'help': "how fresh (`create', `force', or `N[s|m|h|d|w]')" }),
        ("-i", "--ignore-errors", {
          'dest': 'ignerr', 'default': False, 'action': 'store_true',
          'help': "ignore all errors encountered while processing" }),
        ("-j", "--jobs", {
          'dest': 'njobs', 'metavar': 'N', 'default': 1, 'type': 'int',
          'help': 'run up to N jobs in parallel' }),
        ("-J", "--forkbomb", {
          'dest': 'njobs', 'action': 'store_true',
          'help': 'run as many jobs in parallel as possible' }),
        ("-k", "--keep-going", {
          'dest': 'keepon', 'default': False, 'action': 'store_true',
          'help': "keep going even if independent jobs fail" }),
        ("-n", "--dry-run", {
          'dest': 'dryrun', 'default': False, 'action': 'store_true',
          'help': "don't actually do anything" }),
        ("-q", "--quiet", {
          'dest': 'quiet', 'default': False, 'action': 'store_true',
          'help': "don't print the output from successful jobs" }),
        ("-s", "--silent", {
          'dest': 'silent', 'default': False, 'action': 'store_true',
          'help': "don't print progress messages" })]:
  OPTIONS.add_option(short, long, **props)

###--------------------------------------------------------------------------
### Main program.

R_JOBSERV = RX.compile(r'^--jobserver-(?:fds|auth)=(\d+),(\d+)$')
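## (GNU make's jobserver hands the read and write ends of its token pipe to
## sub-makes via MAKEFLAGS, e.g. `--jobserver-auth=3,4'; older versions
## spelled it `--jobserver-fds'. The regex accepts either form.)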

JOBMAP = { "chroot": ChrootJob,
           "cross-tools": CrossToolsJob,
           "pkg-source": PackageSourceJob,
           "pkg-build": PackageBuildJob }

R_FRESH = RX.compile(r"^(?:create|force|(\d+)(|[smhdw]))$")

def parse_fresh(spec):
  m = R_FRESH.match(spec)
  if not m: raise ExpectedError("bad freshness `%s'" % spec)
  if spec == "create": fresh = CREATE
  elif spec == "force": fresh = FORCE
  else:
    n, u = int(m.group(1)), m.group(2)
    if u == "" or u == "s": fresh = n
    elif u == "m": fresh = 60*n
    elif u == "h": fresh = 3600*n
    elif u == "d": fresh = 86400*n
    elif u == "w": fresh = 604800*n
  return fresh

with toplevel_handler():
  OPT, args = OPTIONS.parse_args()

  rfd, wfd = -1, -1
  njobs = OPT.njobs
  try: mkflags = OS.environ['MAKEFLAGS']
  except KeyError: pass
  else:
    ff = mkflags.split()
    for f in ff:
      m = R_JOBSERV.match(f)
      if m: rfd, wfd = int(m.group(1)), int(m.group(2))
      elif f == '-j': njobs = None
      elif not f.startswith('-'):
        for ch in f:
          if ch == 'i': OPT.ignerr = True
          elif ch == 'k': OPT.keepon = True
          elif ch == 'n': OPT.dryrun = True
          elif ch == 's': OPT.silent = True
  if OPT.njobs < 1:
    raise ExpectedError("running no more than %d jobs is silly" % OPT.njobs)

  FRESH = parse_fresh(OPT.fresh)

  SCHED = JobScheduler(rfd, wfd, njobs)
  OS.environ["http_proxy"] = C.PROXY

  jobs = []
  if not args: OPTIONS.print_usage(SYS.stderr); SYS.exit(2)
  for arg in args:
    try: sl = arg.index("/")
    except ValueError: fresh = FRESH
    else: arg, fresh = arg[:sl], parse_fresh(arg[sl + 1:])
    try: dot = arg.index(".")
    except ValueError: jty, pats = arg, "*"
    else: jty, pats = arg[:dot], arg[dot + 1:]
    try: jcls = JOBMAP[jty]
    except KeyError: raise ExpectedError("unknown job type `%s'" % jty)
    specs = []
    for pat in pats.split(","):
      any = False
      for s in jcls.SPECS:
        if FM.fnmatch(s, pat): specs.append(s); any = True
      if not any: raise ExpectedError("no match for `%s'" % pat)
    for s in specs:
      jobs.append(jcls.ensure(s, fresh))

  SCHED.run()

###----- That's all, folks --------------------------------------------------