### Create, upgrade, and maintain (native and cross-) chroots
###
### (c) 2018 Mark Wooding
###

###----- Licensing notice ---------------------------------------------------
###
### This file is part of the distorted.org.uk chroot maintenance tools.
###
### distorted-chroot is free software: you can redistribute it and/or
### modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation; either version 2 of the
### License, or (at your option) any later version.
###
### distorted-chroot is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
### General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with distorted-chroot.  If not, write to the Free Software
### Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
### USA.

import contextlib as CTX
import errno as E
import fcntl as FC
import os as OS
import re as RX
import select as SEL
import stat as ST
from cStringIO import StringIO
import sys as SYS
import time as T
import traceback as TB

import jobclient as JC

QUIS = OS.path.basename(SYS.argv[0])
TODAY = T.strftime("%Y-%m-%d")
NOW = T.time()

###--------------------------------------------------------------------------
### Utilities.

## Exit status: set nonzero when an error is reported.
RC = 0

def moan(msg):
  """Print MSG to stderr as a warning."""
  if not OPT.silent: OS.write(2, "%s: %s\n" % (QUIS, msg))

def error(msg):
  """Print MSG to stderr, and remember to exit nonzero."""
  global RC
  moan(msg)
  RC = 2

class ExpectedError (Exception):
  """A fatal error which shouldn't print a backtrace."""
  pass

@CTX.contextmanager
def toplevel_handler():
  """Catch `ExpectedError's and report Unixish error messages."""
  try: yield None
  except ExpectedError, err: moan(err); SYS.exit(2)

def spew(msg):
  """Print MSG to stderr as a debug trace."""
  if OPT.debug: OS.write(2, ";; %s\n" % msg)

class Tag (object):
  """Unique objects with no internal structure."""
  def __init__(me, label): me._label = label
  def __str__(me): return '#<%s %s>' % (me.__class__.__name__, me._label)
  def __repr__(me): return '#<%s %s>' % (me.__class__.__name__, me._label)

class Struct (object):
  def __init__(me, **kw): me.__dict__.update(kw)

class Cleanup (object):
  """
  A context manager for stacking other context managers.

  By itself, it does nothing.  Attach other context managers with `enter'
  or loose cleanup functions with `add'.  On exit, contexts are left and
  cleanups performed in reverse order.
  """

  def __init__(me):
    me._cleanups = []

  def __enter__(me):
    return me

  def __exit__(me, exty, exval, extb):
    trap = False
    for c in reversed(me._cleanups):
      if c(exty, exval, extb): trap = True
    return trap

  def enter(me, ctx):
    v = ctx.__enter__()
    me._cleanups.append(ctx.__exit__)
    return v

  def add(me, func):
    me._cleanups.append(lambda exty, exval, extb: func())
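
## For example (an illustrative sketch, not from the original), `Cleanup'
## collects scoped teardown actions which run in reverse order on exit:
##
##         with Cleanup() as clean:
##           f = clean.enter(open("/etc/hostname"))
##           clean.add(lambda: spew("leaving scope"))
##           ## ... use F; on exit, the lambda runs first, then F closes.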
115 """Return the time T (default now) as a string."""
116 return T.strftime("%Y-%m-%dT%H:%M:%SZ", T.gmtime(t))
118 R_ZULU = RX.compile(r"^(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)Z$")
120 """Convert the time string Z back to a Unix time."""
122 if not m: raise ValueError("bad time spec `%s'" % z)
123 yr, mo, dy, hr, mi, se = map(int, m.groups())
124 return T.mktime((yr, mo, dy, hr, mi, se, 0, 0, 0))
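
## For instance (illustrative): `zulu(0)' is "1970-01-01T00:00:00Z".  Note
## that `unzulu' uses `mktime', which interprets the tuple in local time,
## so the round trip is only exact when the timezone is UTC.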

###--------------------------------------------------------------------------
### Simple select(2) utilities.

class BaseSelector (object):
  """
  A base class for hooking into `select_loop'.

  See `select_loop' for details of the protocol.
  """
  def preselect(me, rfds, wfds): pass
  def postselect_read(me, fd): pass
  def postselect_write(me, fd): pass

class WriteLinesSelector (BaseSelector):
  """Write whole lines to an output file descriptor."""

  def __init__(me, fd, nextfn = None, *args, **kw):
    """
    Initialize the WriteLinesSelector to write to the file descriptor FD.

    The FD is marked non-blocking.

    The lines are produced by the NEXTFN, which is called without
    arguments.  It can affect the output in three ways:

      * It can return a string (or almost any other kind of object, which
        will be converted into a string by `str'), which will be written
        to the descriptor followed by a newline.  Lines are written in the
        order in which they are produced.

      * It can return `None', which indicates that there are no more items
        to be written for the moment.  The function will be called again
        from time to time, to see if it has changed its mind.  This is the
        right thing to do in order to stall output temporarily.

      * It can raise `StopIteration', which indicates that there will
        never be any more items.  The file descriptor will be closed.

    Subclasses can override this behaviour by defining a method `_next'
    and passing `None' as the NEXTFN.
    """
    super(WriteLinesSelector, me).__init__(*args, **kw)
    set_nonblocking(fd)
    me._fd = fd
    if nextfn is not None: me._next = nextfn

    ## Selector state.
    ##
    ## * `_buf' contains a number of output items, already formatted, and
    ##   ready for output in a single batch.  It might be empty.
    ##
    ## * `_pos' is the current output position in `_buf'.
    ##
    ## * `_more' is set unless the `_next' function has raised
    ##   `StopIteration': it indicates that we should close the descriptor
    ##   once all of the remaining data in the buffer has been sent.
    me._buf = ""
    me._pos = 0
    me._more = True

  def _refill(me):
    """Refill `_buf' by calling `_next'."""
    sio = StringIO(); n = 0
    while n < 4096:
      try: item = me._next()
      except StopIteration: me._more = False; break
      if item is None: break
      item = str(item)
      sio.write(item); sio.write("\n"); n += len(item) + 1
    me._buf = sio.getvalue(); me._pos = 0

  def preselect(me, rfds, wfds):
    if me._fd == -1: return
    if me._buf == "" and me._more: me._refill()
    if me._buf != "" or not me._more: wfds.append(me._fd)

  def postselect_write(me, fd):
    if fd != me._fd: return
    while True:
      if me._pos >= len(me._buf):
        if me._more: me._refill()
        if me._pos >= len(me._buf):
          ## Nothing more to write just now; close the descriptor if the
          ## source is exhausted.
          if not me._more: OS.close(me._fd); me._fd = -1
          break
      try: n = OS.write(me._fd, me._buf[me._pos:])
      except OSError, err:
        if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: break
        elif err.errno == E.EPIPE: OS.close(me._fd); me._fd = -1; break
        else: raise
      me._pos += n

class ReadLinesSelector (BaseSelector):
  """Report whole lines from an input file descriptor as they arrive."""

  def __init__(me, fd, linefn = None, *args, **kw):
    """
    Initialize the ReadLinesSelector to read from the file descriptor FD.

    The FD is marked non-blocking.

    For each whole line, and the final partial line (if any), the selector
    calls LINEFN with the line as an argument (without the terminating
    newline, if any).

    Subclasses can override this behaviour by defining a method `_line'
    and passing `None' as the LINEFN.
    """
    super(ReadLinesSelector, me).__init__(*args, **kw)
    set_nonblocking(fd)
    me._fd = fd
    me._buf = ""
    if linefn is not None: me._line = linefn

  def preselect(me, rfds, wfds):
    if me._fd != -1: rfds.append(me._fd)

  def postselect_read(me, fd):
    if fd != me._fd: return
    while True:
      try: buf = OS.read(me._fd, 4096)
      except OSError, err:
        if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: break
        else: raise
      if buf == "":
        OS.close(me._fd); me._fd = -1
        if me._buf: me._line(me._buf)
        break
      buf = me._buf + buf
      i = 0
      while True:
        try: j = buf.index("\n", i)
        except ValueError: break
        me._line(buf[i:j])
        i = j + 1
      me._buf = buf[i:]

def select_loop(selectors):
  """
  Multiplex I/O between the various SELECTORS.

  A `selector' SEL is an object which implements the selector protocol,
  which consists of three methods.

    * SEL.preselect(RFDS, WFDS) -- add any file descriptors which the
      selector is interested in reading from to the list RFDS, and add
      file descriptors it's interested in writing to to the list WFDS.

    * SEL.postselect_read(FD) -- informs the selector that FD is ready for
      reading.

    * SEL.postselect_write(FD) -- informs the selector that FD is ready
      for writing.

  The `select_loop' function loops as follows.

    * It calls the `preselect' method on each SELECTOR to determine what
      I/O events it thinks are interesting.

    * It waits for some interesting event to happen.

    * It calls the `postselect_read' and/or `postselect_write' methods on
      all of the selectors for each file descriptor which is ready.

  The loop ends when no selector is interested in any events.  This is
  simple but rather inefficient.
  """
  while True:
    rfds, wfds = [], []
    for sel in selectors: sel.preselect(rfds, wfds)
    if not rfds and not wfds: break
    rfds, wfds, _ = SEL.select(rfds, wfds, [])
    for fd in rfds:
      for sel in selectors: sel.postselect_read(fd)
    for fd in wfds:
      for sel in selectors: sel.postselect_write(fd)
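
## A small illustrative example (not from the original): shuttle three
## lines through a pipe and collect them again.
##
##         r, w = OS.pipe()
##         items = iter(["one", "two", "three"])
##         got = []
##         select_loop([WriteLinesSelector(w, lambda: items.next()),
##                      ReadLinesSelector(r, got.append)])
##         assert got == ["one", "two", "three"]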

###--------------------------------------------------------------------------
### Running subprocesses.

def wait_outcome(st):
  """
  Given a status ST from `waitpid' (or similar), return a human-readable
  outcome.
  """
  if OS.WIFSIGNALED(st): return "killed by signal %d" % OS.WTERMSIG(st)
  elif OS.WIFEXITED(st):
    rc = OS.WEXITSTATUS(st)
    if rc: return "failed: rc = %d" % rc
    else: return "completed successfully"
  else: return "died with incomprehensible status 0x%04x" % st

class SubprocessFailure (Exception):
  """An exception indicating that a subprocess failed."""
  def __init__(me, what, st):
    me.what = what
    me.st = st
    if OS.WIFEXITED(st): me.rc, me.sig = OS.WEXITSTATUS(st), None
    elif OS.WIFSIGNALED(st): me.rc, me.sig = None, OS.WTERMSIG(st)
    else: me.rc, me.sig = None, None
  def __str__(me):
    return "subprocess `%s' %s" % (me.what, wait_outcome(me.st))

INHERIT = Tag('INHERIT')
PIPE = Tag('PIPE')
DISCARD = Tag('DISCARD')

@CTX.contextmanager
def subprocess(command,
               stdin = INHERIT, stdout = INHERIT, stderr = INHERIT,
               cwd = INHERIT, jobserver = DISCARD):
  """
  Hairy context manager for running subprocesses.

  The COMMAND is a list of arguments; COMMAND[0] names the program to be
  invoked.  (There's currently no way to run a program with an unusual
  name.)

  The keyword arguments `stdin', `stdout', and `stderr' explain what to do
  with the standard file descriptors.

    * `INHERIT' means that they should be left alone: the child will use a
      copy of the parent's descriptor.  This is the default.

    * `DISCARD' means that the descriptor should be re-opened onto
      `/dev/null' (for reading or writing as appropriate).

    * `PIPE' means that the descriptor should be re-opened as (the read or
      write end, as appropriate, of) a pipe, and the other end returned to
      the context body.

  Similarly, the JOBSERVER may be `INHERIT' to pass the jobserver
  descriptors and environment variable down to the child, or `DISCARD' to
  close it.  The default is `DISCARD'.

  The CWD may be `INHERIT' to run the child with the same working
  directory as the parent, or a pathname to change to an explicitly given
  working directory.

  The context body is passed three values, which are file descriptors for
  the other pipe ends for stdin, stdout, and stderr respectively, or -1 if
  there is no pipe.

  The context owns the pipe descriptors, and is expected to close them
  itself.  (Timing of closure is significant, particularly for `stdin'.)
  """

  ## Set up.
  r_in, w_in = -1, -1
  r_out, w_out = -1, -1
  r_err, w_err = -1, -1
  spew("running subprocess `%s'" % " ".join(command))

  ## Clean up as necessary...
  try:

    ## Set up stdin.
    if stdin is PIPE: r_in, w_in = OS.pipe()
    elif stdin is DISCARD: r_in = OS.open("/dev/null", OS.O_RDONLY)
    elif stdin is not INHERIT:
      raise ValueError("bad `stdin' value `%r'" % stdin)

    ## Set up stdout.
    if stdout is PIPE: r_out, w_out = OS.pipe()
    elif stdout is DISCARD: w_out = OS.open("/dev/null", OS.O_WRONLY)
    elif stdout is not INHERIT:
      raise ValueError("bad `stdout' value `%r'" % stdout)

    ## Set up stderr.
    if stderr is PIPE: r_err, w_err = OS.pipe()
    elif stderr is DISCARD: w_err = OS.open("/dev/null", OS.O_WRONLY)
    elif stderr is not INHERIT:
      raise ValueError("bad `stderr' value `%r'" % stderr)

    ## Start up the child.
    kid = OS.fork()
    if kid == 0:

      ## Fix up stdin.
      if r_in != -1: OS.dup2(r_in, 0); OS.close(r_in)
      if w_in != -1: OS.close(w_in)

      ## Fix up stdout.
      if w_out != -1: OS.dup2(w_out, 1); OS.close(w_out)
      if r_out != -1: OS.close(r_out)

      ## Fix up stderr.
      if w_err != -1: OS.dup2(w_err, 2); OS.close(w_err)
      if r_err != -1: OS.close(r_err)

      ## Change the working directory.
      if cwd is not INHERIT: OS.chdir(cwd)

      ## Fix up the jobserver.
      if jobserver is DISCARD: SCHED.close_jobserver()

      ## Run the program.
      try: OS.execvp(command[0], command)
      except OSError, err:
        moan("failed to run `%s': %s" % (command[0], err.strerror))
        OS._exit(127)

    ## Close the other ends of the pipes.
    if r_in != -1: OS.close(r_in); r_in = -1
    if w_out != -1: OS.close(w_out); w_out = -1
    if w_err != -1: OS.close(w_err); w_err = -1

    ## Return control to the context body.  Remember not to close its
    ## pipes.
    yield w_in, r_out, r_err
    w_in = r_out = r_err = -1

    ## Collect the child process's exit status.
    _, st = OS.waitpid(kid, 0)
    spew("subprocess `%s' %s" % (" ".join(command), wait_outcome(st)))
    if st: raise SubprocessFailure(" ".join(command), st)

  ## Tidy up.
  finally:

    ## Close any left-over file descriptors.
    for fd in [r_in, w_in, r_out, w_out, r_err, w_err]:
      if fd != -1: OS.close(fd)
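
## A minimal usage sketch (illustrative, not from the original): capture a
## program's output through a pipe.  The context body owns the descriptor
## and must close it.
##
##         with subprocess(["ls", "/"], stdout = PIPE) as (_, fd_out, _):
##           f = OS.fdopen(fd_out)
##           listing = f.read()
##           f.close()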

def set_nonblocking(fd):
  """Mark the descriptor FD as non-blocking."""
  FC.fcntl(fd, FC.F_SETFL, FC.fcntl(fd, FC.F_GETFL) | OS.O_NONBLOCK)

class DribbleOut (BaseSelector):
  """A simple selector to feed a string to a descriptor, in pieces."""
  def __init__(me, fd, string, *args, **kw):
    super(DribbleOut, me).__init__(*args, **kw)
    me._fd = fd
    me._string = string
    set_nonblocking(me._fd)
  def preselect(me, rfds, wfds):
    if me._fd != -1: wfds.append(me._fd)
  def postselect_write(me, fd):
    if fd != me._fd: return
    try: n = OS.write(me._fd, me._string)
    except OSError, err:
      if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: return
      elif err.errno == E.EPIPE: OS.close(me._fd); me._fd = -1; return
      else: raise
    if n == len(me._string): OS.close(me._fd); me._fd = -1
    else: me._string = me._string[n:]
  def result(me): return None

class DribbleIn (BaseSelector):
  """A simple selector to collect all the input as a big string."""
  def __init__(me, fd, *args, **kw):
    super(DribbleIn, me).__init__(*args, **kw)
    me._fd = fd
    me._buf = StringIO()
    set_nonblocking(me._fd)
  def preselect(me, rfds, wfds):
    if me._fd != -1: rfds.append(me._fd)
  def postselect_read(me, fd):
    if fd != me._fd: return
    while True:
      try: buf = OS.read(me._fd, 4096)
      except OSError, err:
        if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: break
        else: raise
      if buf == "": OS.close(me._fd); me._fd = -1; break
      else: me._buf.write(buf)
  def result(me): return me._buf.getvalue()

RETURN = Tag('RETURN')
def run_program(command,
                stdin = INHERIT, stdout = INHERIT, stderr = INHERIT,
                *args, **kwargs):
  """
  A simplifying wrapper around `subprocess'.

  The COMMAND is a list of arguments; COMMAND[0] names the program to be
  invoked, as for `subprocess'.

  The keyword arguments `stdin', `stdout', and `stderr' explain what to do
  with the standard file descriptors.

    * `INHERIT' means that they should be left alone: the child will use a
      copy of the parent's descriptor.

    * `DISCARD' means that the descriptor should be re-opened onto
      `/dev/null' (for reading or writing as appropriate).

    * `RETURN', for an output descriptor, means that all of the output
      produced on that descriptor should be collected and returned as a
      string.

    * A string, for stdin, means that the string should be provided on the
      child's standard input.

  (The value `PIPE' is not permitted here.)

  Other arguments are passed on to `subprocess'.

  If no descriptors are marked `RETURN', then the function returns `None';
  if exactly one descriptor is so marked, then the function returns that
  descriptor's output as a string; otherwise, it returns a tuple of
  strings for each such descriptor, in the usual order.
  """
  kw = dict(); kw.update(kwargs)
  selfn = []

  if isinstance(stdin, basestring):
    kw['stdin'] = PIPE; selfn.append(lambda fds: DribbleOut(fds[0], stdin))
  elif stdin is INHERIT or stdin is DISCARD:
    kw['stdin'] = stdin
  else:
    raise ValueError("bad `stdin' value `%r'" % stdin)

  if stdout is RETURN:
    kw['stdout'] = PIPE; selfn.append(lambda fds: DribbleIn(fds[1]))
  elif stdout is INHERIT or stdout is DISCARD:
    kw['stdout'] = stdout
  else:
    raise ValueError("bad `stdout' value `%r'" % stdout)

  if stderr is RETURN:
    kw['stderr'] = PIPE; selfn.append(lambda fds: DribbleIn(fds[2]))
  elif stderr is INHERIT or stderr is DISCARD:
    kw['stderr'] = stderr
  else:
    raise ValueError("bad `stderr' value `%r'" % stderr)

  with subprocess(command, *args, **kw) as fds:
    sel = [fn(fds) for fn in selfn]
    select_loop(sel)
  rr = []
  for s in sel:
    r = s.result()
    if r is not None: rr.append(r)
  if len(rr) == 0: return None
  if len(rr) == 1: return rr[0]
  else: return tuple(rr)
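
## For instance (illustrative), the common capture-output case becomes a
## single call:
##
##         kernel = run_program(["uname", "-r"], stdout = RETURN).rstrip("\n")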

###--------------------------------------------------------------------------
### Other system-ish utilities.

@CTX.contextmanager
def safewrite(path):
  """
  Context manager for writing to a file.

  A new file, named `PATH.new', is opened for writing, and the file object
  provided to the context body.  If the body completes normally, the file
  is closed and renamed to PATH.  If the body raises an exception, the
  file is still closed, but not renamed into place.
  """
  new = path + ".new"
  with open(new, "w") as f: yield f
  OS.rename(new, path)
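
## Illustrative usage: readers of PATH never see a half-written file.
##
##         with safewrite("settings.conf") as f:
##           f.write("key = value\n")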

@CTX.contextmanager
def safewrite_root(path, mode = None, uid = None, gid = None):
  """
  Context manager for writing to a file with root privileges.

  This is as for `safewrite', but the file is opened and written as root.
  """
  new = path + ".new"
  with subprocess(C.ROOTLY + ["tee", new],
                  stdin = PIPE, stdout = DISCARD) as (fd_in, _, _):
    pipe = OS.fdopen(fd_in, 'w')
    try: yield pipe
    finally: pipe.close()
  if mode is not None: run_program(C.ROOTLY + ["chmod", mode, new])
  if uid is not None:
    run_program(C.ROOTLY + ["chown",
                            uid + (gid is not None and ":" + gid or ""),
                            new])
  elif gid is not None:
    run_program(C.ROOTLY + ["chgrp", gid, new])
  run_program(C.ROOTLY + ["mv", new, path])

def mountpoint_p(dir):
  """Return true if DIR is a mountpoint."""

  ## A mountpoint can be distinguished because it is a directory whose
  ## device number differs from its parent.
  try: st1 = OS.stat(dir)
  except OSError, err:
    if err.errno == E.ENOENT: return False
    else: raise
  if not ST.S_ISDIR(st1.st_mode): return False
  st0 = OS.stat(OS.path.join(dir, ".."))
  return st0.st_dev != st1.st_dev

def mkdir_p(dir, mode = 0777):
  """
  Make a directory DIR, and any parents, as necessary.

  Unlike `OS.makedirs', this doesn't fail if DIR already exists.
  """
  if dir.startswith("/"): d = "/"; dir = dir[1:]
  else: d = ""
  for p in dir.split("/"):
    d = OS.path.join(d, p)
    if not p: continue
    try: OS.mkdir(d, mode)
    except OSError, err:
      if err.errno == E.EEXIST: pass
      else: raise

def umount(fs):
  """
  Unmount the filesystem FS.

  The FS may be the block device holding the filesystem, or (more usually)
  the mountpoint.
  """

  ## Sometimes random things can prevent unmounting.  Be persistent.
  for i in xrange(5):
    try: run_program(C.ROOTLY + ["umount", fs], stderr = DISCARD)
    except SubprocessFailure, err:
      if err.rc == 32: pass
      else: raise
    else: return
    T.sleep(0.2)
  run_program(C.ROOTLY + ["umount", fs], stderr = DISCARD)

@CTX.contextmanager
def lockfile(lock, exclp = True, waitp = True):
  """
  Acquire an exclusive lock on a named file LOCK while executing the body.

  If WAITP is true, wait until the lock is available; if false, then fail
  immediately if the lock can't be acquired.
  """
  fd = -1
  flag = 0
  if exclp: flag |= FC.LOCK_EX
  else: flag |= FC.LOCK_SH
  if not waitp: flag |= FC.LOCK_NB
  spew("acquiring %s lock on `%s'" %
       (exclp and "exclusive" or "shared", lock))
  try:
    while True:

      ## Open the file and take note of which file it is.
      fd = OS.open(lock, OS.O_RDWR | OS.O_CREAT, 0666)
      st0 = OS.fstat(fd)

      ## Acquire the lock, waiting if necessary.
      FC.lockf(fd, flag)

      ## Check that the lock file is still the same one.  It's permissible
      ## for the lock holder to release the lock by unlinking or renaming
      ## the lock file, in which case there might be a different lockfile
      ## there now which we need to acquire instead.
      ##
      ## It's tempting to `optimize' this code by opening a new file
      ## descriptor here so as to elide the additional call to fstat(2)
      ## above.  But this doesn't work: if we successfully acquire the
      ## lock, we then have two file descriptors open on the lock file, so
      ## we have to close one -- but, under the daft fcntl(2) rules, even
      ## closing `nfd' will release the lock immediately.
      try:
        st1 = OS.stat(lock)
      except OSError, err:
        if err.errno == E.ENOENT: pass
        else: raise
      else:
        if st0.st_dev == st1.st_dev and st0.st_ino == st1.st_ino: break
      OS.close(fd); fd = -1

    ## We have the lock, so away we go.
    spew("lock `%s' acquired" % lock)
    yield None
    spew("lock `%s' released" % lock)

  finally:
    if fd != -1: OS.close(fd)
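
## Illustrative usage: serialize a critical section on a named lock file.
##
##         with lockfile("/tmp/example.lock"):
##           pass  ## ... critical section ...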

def block_device_p(dev):
  """Return true if DEV names a block device."""
  try: st = OS.stat(dev)
  except OSError, err:
    if err.errno == E.ENOENT: return False
    else: raise
  else: return ST.S_ISBLK(st.st_mode)

###--------------------------------------------------------------------------
### Running parallel jobs.

## Return codes from `check'.
READY = Tag('READY')
DONE = Tag('DONE')
FAILED = Tag('FAILED')
SLEEP = Tag('SLEEP')

## Width of the job-name tag column in progress output, and the number of
## old log files to keep for each job.  (Assumed defaults.)
TAGWD = 29
LOGKEEP = 20

class BaseJob (object):
  """
  Base class for jobs.

  Subclasses must implement `run' and `_mkname', and probably ought to
  extend `prepare' and `check'.
  """

  ## A magic token to prevent sneaky uninterned jobs.
  _MAGIC = Tag('MAGIC')

  ## A map from job names to objects.
  _MAP = {}

  ## Number of tail lines of the log to print on failure.
  LOGLINES = 20

  def __init__(me, _token, *args, **kw):
    """
    Initialize a job.

    Jobs are interned!  Don't construct instances (of subclasses)
    directly: use the `ensure' class method.
    """
    assert _token is me._MAGIC
    super(BaseJob, me).__init__(*args, **kw)

    ## Dependencies on other jobs.
    me._deps = None
    me._waiting = set()

    ## Attributes maintained by the JobServer.
    me._known = False
    me._st = None
    me._logkid = None
    me._logfile = None
    me._token = None
    me.done = False
    me.started = False
    me.win = None

  def prepare(me):
    """
    Establish any prerequisite jobs.

    Delaying this allows command-line settings to override those chosen by
    dependent jobs.
    """
    pass

  @classmethod
  def ensure(cls, *args, **kw):
    """
    Return the unique job with the given parameters.

    If a matching job already exists, then return it.  Otherwise, create
    the new job, register it in the table, and notify the scheduler about
    it.
    """
    me = cls(_token = cls._MAGIC, *args, **kw)
    try:
      job = cls._MAP[me.name]
    except KeyError:
      cls._MAP[me.name] = me
      SCHED.add(me)
      job = me
    return job

  @property
  def name(me):
    """Return the job's name, as calculated by `_mkname'."""
    try: name = me._name
    except AttributeError: name = me._name = me._mkname()
    return name

  ## Subclass responsibilities.

  def _mkname(me):
    """
    Return the job's name.

    By default, this is an unhelpful string which is distinct for every
    job.  Subclasses should normally override this method to return a name
    as an injective function of the job parameters.
    """
    return "%s.%x" % (me.__class__.__name__, id(me))

  def check(me):
    """
    Return whether the job is ready to run.

    Returns a pair STATE, REASON.  The REASON is a human-readable string
    explaining what's going on, or `None' if it's not worth explaining.
    The STATE is one of the following.

      * `READY' -- the job can be run at any time.

      * `FAILED' -- the job can't be started.  Usually, this means that
        some prerequisite job failed, there was some error in the job's
        parameters, or the environment is unsuitable for the job to run.

      * `DONE' -- the job has nothing to do.  Usually, this means that the
        thing the job acts on is already up-to-date.  It's bad form to do
        even minor work in `check'.

      * `SLEEP' -- the job can't be run right now.  It has arranged to be
        retried if conditions change.  (Spurious wakeups are permitted and
        must be handled correctly.)

    The default behaviour checks the set of dependencies, as built by the
    `await' method, and returns `SLEEP' or `FAILED' as appropriate, or
    `READY' if all the prerequisite jobs have completed successfully.
    """
    for job in me._deps:
      if not job.done:
        job._waiting.add(me)
        return SLEEP, "waiting for job `%s'" % job.name
      elif not job.win and not OPT.ignerr:
        return FAILED, "dependent on failed job `%s'" % job.name
    return READY, None

  ## Subclass utilities.

  def await(me, job):
    """Make sure that JOB completes before allowing this job to start."""
    me._deps.add(job)

  def _logtail(me):
    """
    Dump the last `LOGLINES' lines of the logfile.

    This is called if the job fails and was being run quietly, to provide
    the user with some context for the failure.
    """

    ## Gather blocks from the end of the log until we have enough lines.
    with open(me._logfile, 'r') as f:
      nlines = 0; bufs = []; bufsz = 4096
      f.seek(0, 2); off = f.tell()
      spew("start: off = %d" % off)
      while nlines <= me.LOGLINES and off > 0:
        oldoff = off
        off = max(0, off - bufsz)
        f.seek(off, 0)
        spew("try at off = %d" % off)
        buf = f.read(oldoff - off); bufs.append(buf)
        nlines += buf.count("\n")
        spew("now lines = %d" % nlines)
      buf = ''.join(reversed(bufs))

    ## We probably overshot.  Skip the extra lines from the start.
    i = 0
    while nlines > me.LOGLINES: i = buf.index("\n", i) + 1; nlines -= 1

    ## If we ended up trimming the log, print an ellipsis.
    if off > 0 or i > 0: print "%-*s * [...]" % (TAGWD, me.name)

    ## Print the log tail.
    lines = buf[i:].split("\n")
    if lines and lines[-1] == '': lines.pop()
    for line in lines: print "%-*s %s" % (TAGWD, me.name, line)
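
## An illustrative sketch (not part of the original): a minimal job which
## touches a file, showing the `_mkname'/`check'/`run' protocol.
##
##         class TouchJob (BaseJob):
##           def __init__(me, path, *args, **kw):
##             super(TouchJob, me).__init__(*args, **kw)
##             me._path = path
##           def _mkname(me): return "touch.%s" % me._path
##           def check(me):
##             st, r = super(TouchJob, me).check()
##             if st is not READY: return st, r
##             if OS.path.exists(me._path): return DONE, "already exists"
##             return READY, None
##           def run(me): open(me._path, 'w').close()
##         TouchJob.ensure("/tmp/example-stamp")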

class BaseJobToken (object):
  """
  A job token is the authorization for a job to be run.

  Subclasses must implement `recycle' to allow some other job to use the
  token.
  """
  pass

class TrivialJobToken (BaseJobToken):
  """
  A trivial reusable token, for when issuing jobs in parallel without
  limit.

  There only needs to be one of these.
  """
  def recycle(me):
    spew("no token needed; nothing to recycle")
TRIVIAL_TOKEN = TrivialJobToken()

class JobServerToken (BaseJobToken):
  """A job token storing a byte from the jobserver pipe."""
  def __init__(me, char, pipefd, *args, **kw):
    super(JobServerToken, me).__init__(*args, **kw)
    me._char = char
    me._fd = pipefd
  def recycle(me):
    spew("returning token to jobserver pipe")
    OS.write(me._fd, me._char)

class PrivateJobToken (BaseJobToken):
  """
  The private job token belonging to a scheduler.

  When running under a GNU Make jobserver, there is a token for each byte
  in the pipe, and an additional one which represents the slot we're
  actually running in.  This class represents that additional token.
  """
  def __init__(me, sched, *args, **kw):
    super(PrivateJobToken, me).__init__(*args, **kw)
    me._sched = sched
    me._char = None
  def recycle(me):
    assert me._sched._privtoken is None
    spew("recycling private token")
    me._sched._privtoken = me

class JobScheduler (object):
  """
  The main machinery for running and ordering jobs.

  This handles all of the details of job scheduling.
  """

  def __init__(me, rfd = -1, wfd = -1, npar = 1):
    """
    Initialize a scheduler.

      * RFD and WFD are the read and write ends of the jobserver pipe, as
        determined from the `MAKEFLAGS' environment variable, or -1.

      * NPAR is the maximum number of jobs to run in parallel, or `True'
        if there is no maximum (i.e., we're in `forkbomb' mode).
    """

    ## Set the parallelism state.  The `_rfd' and `_wfd' are the read and
    ## write ends of the jobserver pipe, or -1 if there is no jobserver.
    ## `_par' is true if we're meant to run jobs in parallel.  The case
    ## `_par' with `_rfd = -1' means unconstrained parallelism.
    ##
    ## The jobserver pipe contains a byte for each shared job slot.  A
    ## scheduler reads a byte from the pipe for each job it wants to run
    ## (nearly -- see `_privtoken' below), and puts the byte back when the
    ## job finishes.  The GNU Make jobserver protocol specification
    ## insists that we preserve the value of the byte in the pipe (though
    ## it doesn't currently make any use of this flexibility), so we
    ## record it in a `JobToken' object's `_char' attribute.
    me._par = rfd != -1 or npar is True or npar != 1
    spew("par is %r" % me._par)
    if rfd == -1 and npar > 1:
      rfd, wfd = OS.pipe()
      OS.write(wfd, (npar - 1)*'+')
      OS.environ["MAKEFLAGS"] = \
        (" -j --jobserver-auth=%(rfd)d,%(wfd)d " +
         "--jobserver-fds=%(rfd)d,%(wfd)d") % dict(rfd = rfd, wfd = wfd)
    me._rfd = rfd; me._wfd = wfd
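
    ## For example (an illustrative note, not from the original): with
    ## `npar = 4' and no inherited jobserver, three `+' tokens go into the
    ## fresh pipe, and children see something like
    ##
    ##         MAKEFLAGS=" -j --jobserver-auth=5,6 --jobserver-fds=5,6"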

    ## The scheduler state.  A job starts in the `_check' list.  Each
    ## iteration of the scheduler loop will inspect the jobs here and see
    ## whether it's ready to run: if not, it gets put in the `_sleep'
    ## list, where it will languish until something moves it back; if it
    ## is ready, it gets moved to the `_ready' list to wait for a token
    ## from the jobserver.  At that point the job can be started, and it
    ## moves to the `_kidmap', which associates a process-id with each
    ## running job.  Finally, jobs which have completed are simply
    ## forgotten.  The `_njobs' counter keeps track of how many jobs are
    ## outstanding, so that we can stop when there are none left.
    me._check = set()
    me._sleep = set()
    me._ready = set()
    me._kidmap = {}
    me._logkidmap = {}
    me._njobs = 0

    ## As well as the jobserver pipe, we implicitly have one extra job
    ## slot, which is the one we took when we were started by our parent.
    ## The right to do processing in this slot is represented by the
    ## `private token' here, distinguished from tokens from the jobserver
    ## pipe by having `None' as its `_char' value.
    me._privtoken = PrivateJobToken(me)

  def add(me, job):
    """Notice a new job and arrange for it to (try to) run."""
    if job._known: return
    spew("adding new job `%s'" % job.name)
    job._known = True
    me._check.add(job)
    me._njobs += 1

  def close_jobserver(me):
    """
    Close the jobserver file descriptors.

    This should be called within child processes to prevent them from
    messing with the jobserver.
    """
    if me._rfd != -1: OS.close(me._rfd); me._rfd = -1
    if me._wfd != -1: OS.close(me._wfd); me._wfd = -1
    try: del OS.environ["MAKEFLAGS"]
    except KeyError: pass

  def _killall(me):
    """Zap all jobs which aren't yet running."""
    for jobset in [me._sleep, me._check, me._ready]:
      while jobset:
        job = jobset.pop()
        me._retire(job, False, "killed by scheduler")

  def _retire(me, job, win, outcome):
    """
    Declare that a job has stopped, and deal with the consequences.

    JOB is the completed job, which should not be on any of the job
    queues.  WIN is true if the job succeeded, and false otherwise.
    OUTCOME is a human-readable string explaining how the job came to its
    end, or `None' if no message should be reported.
    """
    global RC

    me._njobs -= 1

    ## Return the job's token to the pool.
    if job._token is not None: job._token.recycle()
    job._token = None

    ## Update and maybe report the job's status.
    job.done = True
    job.win = win
    if outcome is not None and not OPT.silent:
      if OPT.quiet and not job.win and job._logfile: job._logtail()
      if not job.win or not OPT.quiet:
        print "%-*s %c (%s)" % \
          (TAGWD, job.name, job.win and '|' or '*', outcome)

    ## If the job failed, and we care, arrange to exit nonzero.
    if not win and not OPT.ignerr: RC = 2

    ## If the job failed, and we're supposed to give up after the first
    ## error, then zap all of the waiting jobs.
    if not job.win and not OPT.keepon and not OPT.ignerr: me._killall()

    ## If this job has dependents then wake them up and see whether
    ## they're ready to run.
    for j in job._waiting:
      try: me._sleep.remove(j)
      except KeyError: pass
      else:
        spew("waking dependent job `%s'" % j.name)
        me._check.add(j)

  def _reap(me, kid, st):
    """
    Deal with the child with process-id KID having exited with status ST.
    """

    ## Figure out what kind of child this is.  Note that it has finished.
    try: job = me._kidmap[kid]
    except KeyError:
      try: job = me._logkidmap[kid]
      except KeyError:
        spew("unknown child %d exits with status 0x%04x" % (kid, st))
        return
      else:
        ## It's a logging child.
        del me._logkidmap[kid]
        job._logkid = DONE
        spew("logging process for job `%s' exits with status 0x%04x" %
             (job.name, st))
    else:
      ## It's the main job process.
      del me._kidmap[kid]
      job._st = st
      spew("main process for job `%s' exits with status 0x%04x" %
           (job.name, st))

    ## If either of the job's associated processes is still running then
    ## we should stop now and give the other one a chance.
    if job._st is None or job._logkid is not DONE:
      spew("deferring retirement for job `%s'" % job.name)
      return
    spew("completing deferred retirement for job `%s'" % job.name)

    ## Update and (maybe) report the job status.
    if job._st == 0: win = True; outcome = None
    else: win = False; outcome = wait_outcome(job._st)

    ## Retire the job.
    me._retire(job, win, outcome)

  def _reapkids(me):
    """Reap all finished child processes."""
    while True:
      try: kid, st = OS.waitpid(-1, OS.WNOHANG)
      except OSError, err:
        if err.errno == E.ECHILD: break
        else: raise
      if kid == 0: break
      me._reap(kid, st)

  def run_job(me, job):
    """Start running the JOB."""
    job.started = True
    if OPT.dryrun: return None, None

    ## Make pipes to collect the job's output and error reports.
    r_out, w_out = OS.pipe()
    r_err, w_err = OS.pipe()

    ## Find a log file to write.  Avoid races over the log names; but this
    ## means that the log descriptor needs to be handled somewhat
    ## carefully.
    logdir = OS.path.join(C.STATE, "log"); mkdir_p(logdir)
    logseq = 1
    while True:
      logfile = OS.path.join(logdir, "%s-%s#%d" % (job.name, TODAY, logseq))
      try:
        logfd = OS.open(logfile, OS.O_WRONLY | OS.O_CREAT | OS.O_EXCL, 0666)
      except OSError, err:
        if err.errno == E.EEXIST: logseq += 1; continue
        else: raise
      else:
        break
    job._logfile = logfile

    ## Make sure there's no pending output, or we might get two copies.
    ## (I don't know how to flush all output streams in Python, but this
    ## is good enough for our purposes.)
    SYS.stdout.flush()

    ## Set up the logging child first.  If we can't, take down the whole
    ## job.
    try: job._logkid = OS.fork()
    except OSError, err: OS.close(logfd); return None, err
    if not job._logkid:

      ## The main logging loop.

      ## Close the jobserver descriptors, and the write ends of the pipes.
      me.close_jobserver()
      OS.close(w_out); OS.close(w_err)

      ## Capture the job's stdout and stderr and wait for everything to
      ## happen.
      def log_lines(fd, marker):
        def fn(line):
          OS.write(1, "%-*s %s %s\n" % (TAGWD, job.name, marker, line))
          OS.write(logfd, "%s %s\n" % (marker, line))
        return ReadLinesSelector(fd, fn)
      select_loop([log_lines(r_out, "|"), log_lines(r_err, "*")])

      ## We're done.  (Closing the descriptors here would be like
      ## polishing the floors before the building is demolished.)
      OS._exit(0)

    ## Back in the main process: record the logging child.  At this point
    ## we no longer need the logfile descriptor.
    me._logkidmap[job._logkid] = job
    OS.close(logfd)

    ## Start the main job process.
    try: kid = OS.fork()
    except OSError, err: return None, err
    if not kid:

      ## Close the read ends of the pipes, and move the write ends to the
      ## right places.  (This will go wrong if we were started without
      ## enough descriptors.  Fingers crossed.)
      OS.dup2(w_out, 1); OS.dup2(w_err, 2)
      OS.close(r_out); OS.close(w_out)
      OS.close(r_err); OS.close(w_err)
      spew("running job `%s' as pid %d" % (job.name, OS.getpid()))

      ## Run the job, catching nonlocal flow.
      try:
        job.run()
      except ExpectedError, err:
        moan(str(err))
        OS._exit(2)
      except Exception, err:
        TB.print_exc(file = SYS.stderr)
        OS._exit(3)
      except BaseException, err:
        moan("caught unexpected exception: %r" % err)
        OS._exit(3)
      else:
        spew("job `%s' ran to completion" % job.name)

      ## Clean up old logs.
      match = []
      pat = RX.compile(r"^%s-(\d{4})-(\d{2})-(\d{2})\#(\d+)$" %
                       RX.escape(job.name))
      for f in OS.listdir(logdir):
        m = pat.match(f)
        if m: match.append((f, int(m.group(1)), int(m.group(2)),
                            int(m.group(3)), int(m.group(4))))
      match.sort(key = lambda (_, y, m, d, q): (y, m, d, q))
      if len(match) > LOGKEEP:
        for (f, _, _, _, _) in match[:-LOGKEEP]:
          try: OS.unlink(OS.path.join(logdir, f))
          except OSError, err:
            if err.errno == E.ENOENT: pass
            else: raise

      ## All done.
      OS._exit(0)

    ## Back in the main process: close both the pipes and return the
    ## child process.
    OS.close(r_out); OS.close(w_out)
    OS.close(r_err); OS.close(w_err)
    if OPT.quiet: print "%-*s | (started)" % (TAGWD, job.name)
    return kid, None
1227 """Run the scheduler."""
1229 spew("JobScheduler starts")
1232 ## The main scheduler loop. We go through three main phases:
1234 ## * Inspect the jobs in the `check' list to see whether they can
1235 ## run. After this, the `check' list will be empty.
1237 ## * If there are running jobs, check to see whether any of them have
1238 ## stopped, and deal with the results. Also, if there are jobs
1239 ## ready to start and a job token has become available, then
1240 ## retrieve the token. (Doing these at the same time is the tricky
1243 ## * If there is a job ready to run, and we retrieved a token, then
1244 ## start running the job.
1246 ## Check the pending jobs to see if they can make progress: run each
1247 ## job's `check' method and move it to the appropriate queue. (It's OK
1248 ## if `check' methods add more jobs to the list, as long as things
1249 ## settle down eventually.)
1251 try: job = me._check.pop()
1252 except KeyError: break
1253 if job._deps is None:
1256 state, reason = job.check()
1257 tail = reason is not None and ": %s" % reason or ""
1259 spew("job `%s' ready to run%s" % (job.name, tail))
1261 elif state is FAILED:
1262 spew("job `%s' refused to run%s" % (job.name, tail))
1263 me._retire(job, False, "refused to run%s" % tail)
1265 spew("job `%s' has nothing to do%s" % (job.name, tail))
1266 me._retire(job, True, reason)
1267 elif state is SLEEP:
1268 spew("job `%s' can't run yet%s" % (job.name, tail))
1271 raise ValueError("unexpected job check from `%s': %r, %r" %
1272 (job.name, state, reason))
1274 ## If there are no jobs left, then we're done.
1276 spew("all jobs completed")
1279 ## Make sure we can make progress. There are no jobs on the check list
1280 ## any more, because we just cleared it. We assume that jobs which are
1281 ## ready to run will eventually receive a token. So we only end up in
1282 ## trouble if there are jobs asleep, but none running or ready to run.
1283 ##spew("#jobs = %d" % me._njobs)
1284 ##spew("sleeping: %s" % ", ".join([j.name for j in me._sleep]))
1285 ##spew("ready: %s" % ", ".join([j.name for j in me._ready]))
1286 ##spew("running: %s" % ", ".join([j.name for j in me._kidmap.itervalues()]))
1287 assert not me._sleep or me._kidmap or me._logkidmap or me._ready
1289 ## Wait for something to happen.
1290 if not me._ready or (not me._par and me._privtoken is None):
1291 ## If we have no jobs ready to run, then we must wait for an existing
1292 ## child to exit. Hopefully, a sleeping job will be able to make
1293 ## progress after this.
1295 ## Alternatively, if we're not supposed to be running jobs in
1296 ## parallel and we don't have the private token, then we have no
1297 ## choice but to wait for the running job to complete.
1299 ## There's no check here for `ECHILD'. We really shouldn't be here
1300 ## if there are no children to wait for. (The check list must be
1301 ## empty because we just drained it. If the ready list is empty,
1302 ## then all of the jobs must be running or sleeping; but the
1303 ## assertion above means that either there are no jobs at all, in
1304 ## which case we should have stopped, or at least one is running, in
1305 ## which case it's safe to wait for it. The other case is that we're
1306 ## running jobs sequentially, and one is currently running, so
1307 ## there's nothing for it but to wait for it -- and hope that it will
1308 ## wake up one of the sleeping jobs. The remaining possibility is
1309 ## that we've miscounted somewhere, which will cause a crash.)
1311 spew("no new jobs ready: waiting for outstanding jobs to complete")
1313 spew("job running without parallelism: waiting for it to finish")
1314 kid, st = OS.waitpid(-1, 0)
1319 ## We have jobs ready to run, so try to acquire a token.
1320 if me._rfd == -1 and me._par:
1321 ## We're running with unlimited parallelism, so we don't need a token
1323 spew("running new job without token")
1324 token = TRIVIAL_TOKEN
1326 ## Our private token is available, so we can use that to start
1328 spew("private token available: assigning to new job")
1329 token = me._privtoken
1330 me._privtoken = None
1332 ## We have to read from the jobserver pipe. Unfortunately, we're not
1333 ## allowed to set the pipe nonblocking, because make is also using it
1334 ## and will get into a serious mess. And we must deal with `SIGCHLD'
1335 ## arriving at any moment. We use the same approach as GNU Make. We
1336 ## start by making a copy of the jobserver descriptor: it's this
1337 ## descriptor we actually try to read from. We set a signal handler
1338 ## to close this descriptor if a child exits. And we try one last
1339 ## time to reap any children which have exited just before we try
1340 ## reading the jobserver pipe. This way we're covered:
1342 ## * If a child exits during the main loop, before we establish the
1343 ## descriptor copy then we'll notice when we try reaping
1346 ## * If a child exits between the last-chance reap and the read,
1347 ## the signal handler will close the descriptor and the `read'
1348 ## call will fail with `EBADF'.
1350 ## * If a child exits while we're inside the `read' system call,
1351 ## then the syscall will fail with `EINTR'.
1353 ## The only problem is that we can't do this from Python, because
1354 ## Python signal handlers are delayed. This is what the `jobclient'
1357 ## The `jobclient' function is called as
1361 ## It returns a tuple of three values: TOKEN, PID, STATUS. If TOKEN
1362 ## is not `None', then reading the pipe succeeded; if TOKEN is empty,
1363 ## then the pipe returned EOF, so we should abort; otherwise, TOKEN
1364 ## is a singleton string holding the token character. If PID is not
1365 ## `None', then PID is the process id of a child which exited, and
1366 ## STATUS is its exit status.
1367 spew("waiting for token from jobserver")
1368 tokch, kid, st = JC.jobclient(me._rfd)
1374 spew("no token; trying again")
1377 error("jobserver pipe closed; giving up")
1380 spew("received token from jobserver")
1381 token = JobServerToken(tokch, me._wfd)
1383 ## We have a token, so we should start up the job.
1384 job = me._ready.pop()
1386 spew("start new job `%s'" % job.name)
1387 kid, err = me.run_job(job)
1389 me._retire(job, False, "failed to fork: %s" % err)
1391 if kid is None: me._retire(job, True, "dry run")
1392 else: me._kidmap[kid] = job
1394 ## We ran out of work to do.
1395 spew("JobScheduler done")
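
## An illustrative driver sketch (the script's real option parsing and
## scheduler setup live elsewhere):
##
##         SCHED = JobScheduler(npar = 4)
##         for spec in C.ALL_CHROOTS: ChrootJob.ensure(spec, CREATE)
##         SCHED.run()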

###--------------------------------------------------------------------------
### Configuration.

R_CONFIG = RX.compile(r"^([a-zA-Z0-9_]+)='(.*)'$")

class Config (object):

  def _conv_str(s): return s
  def _conv_list(s): return s.split()
  def _conv_set(s): return set(s.split())

  _CONVERT = {
    "ROOTLY": _conv_list,
    "MYARCH": _conv_set,
    "NATIVE_ARCHS": _conv_set,
    "FOREIGN_ARCHS": _conv_set,
    "FOREIGN_GNUARCHS": _conv_list,
    "ALL_ARCHS": _conv_set,
    "NATIVE_CHROOTS": _conv_set,
    "FOREIGN_CHROOTS": _conv_set,
    "ALL_CHROOTS": _conv_set,
    "BASE_PACKAGES": _conv_list,
    "EXTRA_PACKAGES": _conv_list,
    "CROSS_PACKAGES": _conv_list,
    "CROSS_PATHS": _conv_list,
    "APTCONF": _conv_list,
    "LOCALPKGS": _conv_list,
    "SCHROOT_COPYFILES": _conv_list,
    "SCHROOT_NSSDATABASES": _conv_list
  }

  _CONV_MAP = {
    "*_APTCONFSRC": ("APTCONFSRC", _conv_str),
    "*_DEPS": ("PKGDEPS", _conv_list),
    "*_QEMUHOST": ("QEMUHOST", _conv_str),
    "*_QEMUARCH": ("QEMUARCH", _conv_str),
    "*_QEMUDIST": ("QEMUDIST", _conv_str),
    "*_ALIASES": ("DISTALIAS", _conv_str)
  }

  _conv_str = staticmethod(_conv_str)
  _conv_list = staticmethod(_conv_list)
  _conv_set = staticmethod(_conv_set)

  def __init__(me):

    ## The raw configuration text.  When the script is built, the line
    ## bearing the `@@@config@@@' marker is replaced by the configuration
    ## text itself; until then, fall back to reading `state/config.sh' at
    ## run time.
    raw = """
"""; raw = open('state/config.sh').read(); _ignore = """ @@@config@@@
"""

    ## Parse the configuration and convert the values.
    me._conf = {}
    for line in raw.split("\n"):
      line = line.strip()
      if not line or line.startswith('#'): continue
      m = R_CONFIG.match(line)
      if not m: raise ExpectedError("bad config line `%s'" % line)
      k, v = m.group(1), m.group(2).replace("'\\''", "'")
      map = None
      try: conv = me._CONVERT[k]
      except KeyError:
        i = 0
        while True:
          try: i = k.index("_", i + 1)
          except ValueError: conv = me._conv_str; break
          try: map, conv = me._CONV_MAP["*" + k[i:]]
          except KeyError: pass
          else: break
      if map is None:
        me._conf[k] = conv(v)
      else:
        d = me._conf.setdefault(map, dict())
        k = k[:i]
        if k.startswith("_"): k = k[1:]
        d[k] = conv(v)

  def __getattr__(me, attr):
    try: return me._conf[attr]
    except KeyError, err: raise AttributeError(err.args[0])

with toplevel_handler(): C = Config()

###--------------------------------------------------------------------------
### Chroot maintenance utilities.

CREATE = Tag("CREATE")
FORCE = Tag("FORCE")

DEBCONF_TWEAKS = """
        DEBIAN_FRONTEND=noninteractive; export DEBIAN_FRONTEND
        DEBIAN_PRIORITY=critical; export DEBIAN_PRIORITY
        DEBCONF_NONINTERACTIVE_SEEN=true; export DEBCONF_NONINTERACTIVE_SEEN
"""

def check_fresh(fresh, update):
  """
  Compare a refresh mode FRESH against an UPDATE time.

  Return a (STATUS, REASON) pair, suitable for returning from a job's
  `check' method.

  The FRESH argument may be one of the following:

    * `CREATE' is satisfied if the thing exists at all: it returns `READY'
      if the thing doesn't yet exist (UPDATE is `None'), or `DONE'
      otherwise.

    * `FORCE' is never satisfied: it always returns `READY'.

    * an integer N is satisfied if the UPDATE time is at most N seconds
      earlier than the present: it returns `READY' if the UPDATE is too
      old, or `DONE' otherwise.
  """
  if update is None: return READY, "must create"
  elif fresh is FORCE: return READY, "update forced"
  elif fresh is CREATE: return DONE, "already created"
  elif NOW - unzulu(update) > fresh: return READY, "too stale: updating"
  else: return DONE, "already sufficiently up-to-date"
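
## For instance (illustrative): `check_fresh(86400, meta.update)' reports
## `READY' when the recorded update stamp is more than a day old, and
## `DONE' otherwise.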

def lockfile_path(file):
  """
  Return the full path for a lockfile named FILE.

  Create the lock directory if necessary.
  """
  lockdir = OS.path.join(C.STATE, "lock"); mkdir_p(lockdir)
  return OS.path.join(lockdir, file)

def chroot_src_lockfile(dist, arch):
  """
  Return the lockfile for the source-chroot for DIST on ARCH.

  It is not allowed to acquire a source-chroot lock while holding any
  other locks.
  """
  return lockfile_path("source.%s-%s" % (dist, arch))

def chroot_src_lv(dist, arch):
  """
  Return the logical volume name for the source-chroot for DIST on ARCH.
  """
  return "%s%s-%s" % (C.LVPREFIX, dist, arch)

def chroot_src_blkdev(dist, arch):
  """
  Return the block-device name for the source-chroot for DIST on ARCH.
  """
  return OS.path.join("/dev", C.VG, chroot_src_lv(dist, arch))

def chroot_src_mntpt(dist, arch):
  """
  Return the mountpoint path for setting up the source-chroot for DIST on
  ARCH.

  Note that this is not the mountpoint that schroot(1) uses.
  """
  mnt = OS.path.join(C.STATE, "mnt", "%s-%s" % (dist, arch))
  mkdir_p(mnt)
  return mnt

def chroot_session_mntpt(session):
  """Return the mountpoint for an schroot session."""
  return OS.path.join("/schroot", session)

def crosstools_lockfile(dist, arch):
  """
  Return the lockfile for the cross-build tools for DIST, hosted by ARCH.

  When locking multiple cross-build tools, you must acquire the locks in
  lexicographically ascending order.
  """
  return lockfile_path("cross-tools.%s-%s" % (dist, arch))

def switch_prefix(string, map):
  """
  Replace the prefix of a STRING, according to the given MAP.

  MAP is a sequence of (OLD, NEW) pairs.  For each such pair in turn, test
  whether STRING starts with OLD: if so, return STRING, but with the
  prefix OLD replaced by NEW.  If no OLD prefix matches, then raise a
  `ValueError'.
  """
  for old, new in map:
    if string.startswith(old): return new + string[len(old):]
  raise ValueError("expected `%s' to start with one of %s" %
                   (string, ", ".join(["`%s'" % old for old, new in map])))

def host_to_chroot(path):
  """
  Convert a host path under `C.LOCAL' to the corresponding chroot path
  under `/usr/local.schroot'.
  """
  return switch_prefix(path, [(C.LOCAL + "/", "/usr/local.schroot/")])

def chroot_to_host(path):
  """
  Convert a chroot path under `/usr/local.schroot' to the corresponding
  host path under `C.LOCAL'.
  """
  return switch_prefix(path, [("/usr/local.schroot/", C.LOCAL + "/")])
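
## Illustrative example: if `C.LOCAL' is "/usr/local", then
## `host_to_chroot("/usr/local/cross/foo")' yields
## "/usr/local.schroot/cross/foo", and `chroot_to_host' inverts this.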

def split_dist_arch(spec):
  """Split a SPEC of the form `DIST-ARCH' into the pair (DIST, ARCH)."""
  dash = spec.index("-")
  return spec[:dash], spec[dash + 1:]

def elf_binary_p(arch, path):
  """Return whether PATH is an ELF binary for ARCH."""
  if not OS.path.isfile(path): return False
  with open(path, 'rb') as f: magic = f.read(20)
  if magic[0:4] != "\x7fELF": return False
  if magic[8:16] != 8*"\0": return False
  if arch == "i386":
    if magic[4:7] != "\x01\x01\x01": return False
    if magic[18:20] != "\x03\x00": return False
  elif arch == "amd64":
    if magic[4:7] != "\x02\x01\x01": return False
    if magic[18:20] != "\x3e\x00": return False
  else:
    raise ValueError("unsupported donor architecture `%s'" % arch)
  return True
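
## (Illustrative note: "\x01\x01\x01" above is ELFCLASS32, ELFDATA2LSB,
## EV_CURRENT, with little-endian machine 0x0003 = EM_386; "\x02\x01\x01"
## is ELFCLASS64, ELFDATA2LSB, EV_CURRENT, with 0x003e = EM_X86_64.)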

def progress(msg):
  """
  Print a progress message MSG.

  This is intended to be called within a job's `run' method, so it doesn't
  check `OPT.quiet' or `OPT.silent'.
  """
  OS.write(1, ";; %s\n" % msg)

class NoSuchChroot (Exception):
  """
  Exception indicating that a chroot does not exist.

  Specifically, it means that it doesn't even have a logical volume.
  """
  def __init__(me, dist, arch):
    me.dist, me.arch = dist, arch
  def __str__(me):
    return "chroot for `%s' on `%s' not found" % (me.dist, me.arch)

@CTX.contextmanager
def mount_chroot_src(dist, arch):
  """
  Context manager for mounting the source-chroot for DIST on ARCH.

  The context manager automatically unmounts the filesystem again when the
  body exits.  You must hold the appropriate source-chroot lock before
  calling this routine.
  """
  dev = chroot_src_blkdev(dist, arch)
  if not block_device_p(dev): raise NoSuchChroot(dist, arch)
  mnt = chroot_src_mntpt(dist, arch)
  try:
    run_program(C.ROOTLY + ["mount", dev, mnt])
    yield mnt
  finally:
    umount(mnt)

@CTX.contextmanager
def chroot_session(dist, arch, sourcep = False):
  """
  Context manager for running an schroot(1) session.

  Returns the (ugly, automatically generated) session name to the context
  body.  By default, a snapshot session is started: set SOURCEP true to
  start a source-chroot session.  You must hold the appropriate
  source-chroot lock before starting a source-chroot session.

  The context manager automatically closes the session again when the body
  exits.
  """
  chroot = chroot_src_lv(dist, arch)
  if sourcep: chroot = "source:" + chroot
  session = run_program(["schroot", "-uroot", "-b", "-c", chroot],
                        stdout = RETURN).rstrip("\n")
  try:
    root = OS.path.join(chroot_session_mntpt(session), "fs")
    yield session, root
  finally:
    run_program(["schroot", "-e", "-c", session])

def run_root(command, **kw):
  """Run a COMMAND as root.  Arguments are as for `run_program'."""
  return run_program(C.ROOTLY + command, **kw)

def run_schroot_session(session, command, rootp = False, **kw):
  """
  Run a COMMAND within an schroot(1) session.

  Arguments are as for `run_program'.
  """
  if rootp:
    return run_program(["schroot", "-uroot", "-r",
                        "-c", session, "--"] + command, **kw)
  else:
    return run_program(["schroot", "-r",
                        "-c", session, "--"] + command, **kw)

def run_schroot_source(dist, arch, command, **kw):
  """
  Run a COMMAND through schroot(1), in the source-chroot for DIST on ARCH.

  Arguments are as for `run_program'.  You must hold the appropriate
  source-chroot lock before calling this routine.
  """
  return run_program(["schroot", "-uroot",
                      "-c", "source:%s" % chroot_src_lv(dist, arch),
                      "--"] + command, **kw)

###--------------------------------------------------------------------------
### Metadata.

class MetadataClass (type):
  """
  Metaclass for metadata classes.

  Notice a `VARS' attribute in the class dictionary, and augment it with a
  `_VARSET' attribute, constructed as a set containing the same items.
  (We need them both: the set satisfies fast lookups, while the original
  sequence remembers the ordering.)
  """
  def __new__(me, name, supers, dict):
    try: vars = dict['VARS']
    except KeyError: pass
    else: dict['_VARSET'] = set(vars)
    return super(MetadataClass, me).__new__(me, name, supers, dict)

class BaseMetadata (object):
  """
  Base class for metadata objects.

  Metadata bundles are simple collections of key/value pairs.  Keys should
  usually be Python identifiers because they're used to name attributes.
  Values are strings, but shouldn't have leading or trailing whitespace,
  and can't contain newlines.

  Metadata bundles are written to files.  The format is simple enough:
  empty lines and lines starting with `#' are ignored; otherwise, the line
  must have the form

        KEY = VALUE

  where KEY does not contain `='; spaces around the `=' are optional, and
  spaces around the KEY and VALUE are stripped.  The order of keys is
  unimportant; keys are always written in a standard order on output.
  """
  __metaclass__ = MetadataClass

  def __init__(me, **kw):
    """Initialize a metadata bundle from keyword arguments."""
    for k, v in kw.iteritems():
      setattr(me, k, v)
    for v in me.VARS:
      try: getattr(me, v)
      except AttributeError: setattr(me, v, None)

  def __setattr__(me, attr, value):
    """
    Try to set an attribute.

    Only attribute names listed in the `VARS' class attribute are
    permitted.
    """
    if attr not in me._VARSET: raise AttributeError, attr
    super(BaseMetadata, me).__setattr__(attr, value)

  @classmethod
  def read(cls, path):
    """Return a new metadata bundle read from a named PATH."""
    map = {}
    with open(path) as f:
      for line in f:
        line = line.strip()
        if line == "" or line.startswith("#"): continue
        k, v = line.split("=", 1)
        map[k.strip()] = v.strip()
    return cls(**map)

  def _write(me, file):
    """
    Write the metadata bundle to the FILE (a file-like object).

    This is intended for use by subclasses which want to override the
    default I/O behaviour of the main `write' method.
    """
    file.write("### -*-conf-*-\n")
    for k in me.VARS:
      try: v = getattr(me, k)
      except AttributeError: pass
      else:
        if v is not None: file.write("%s = %s\n" % (k, v))

  def write(me, path):
    """
    Write the metadata bundle to a given PATH.

    The file is replaced atomically.
    """
    with safewrite(path) as f: me._write(f)

  def __repr__(me):
    return "#<%s: %s>" % (me.__class__.__name__,
                          ", ".join("%s=%r" % (k, getattr(me, k, None))
                                    for k in me.VARS))

class ChrootMetadata (BaseMetadata):
  VARS = ['dist', 'arch', 'update']

  @classmethod
  def read(cls, dist, arch):
    try:
      with lockfile(chroot_src_lockfile(dist, arch), exclp = False):
        with mount_chroot_src(dist, arch) as mnt:
          return super(ChrootMetadata, cls).read(OS.path.join(mnt, "META"))
    except IOError, err:
      if err.errno == E.ENOENT: pass
      else: raise
    except NoSuchChroot: pass
    return cls(dist = dist, arch = arch)

  def write(me):
    with mount_chroot_src(me.dist, me.arch) as mnt:
      with safewrite_root(OS.path.join(mnt, "META")) as f:
        me._write(f)

class CrossToolsMetadata (BaseMetadata):
  VARS = ['dist', 'arch', 'update']

  @classmethod
  def read(cls, dist, arch):
    try:
      return super(CrossToolsMetadata, cls)\
        .read(OS.path.join(C.LOCAL, "cross", "%s-%s" % (dist, arch), "META"))
    except IOError, err:
      if err.errno == E.ENOENT: pass
      else: raise
    return cls(dist = dist, arch = arch)

  def write(me, dir = None):
    if dir is None:
      dir = OS.path.join(C.LOCAL, "cross", "%s-%s" % (me.dist, me.arch))
    with safewrite_root(OS.path.join(dir, "META")) as f:
      me._write(f)

###--------------------------------------------------------------------------
### Constructing a chroot.

R_DIVERT = RX.compile(r"^diversion of (.*) to .* by install-cross-tools$")

class ChrootJob (BaseJob):
  """
  Create or update a chroot.
  """

  SPECS = C.ALL_CHROOTS

  def __init__(me, spec, fresh = CREATE, *args, **kw):
    super(ChrootJob, me).__init__(*args, **kw)
    me._dist, me._arch = split_dist_arch(spec)
    me._fresh = fresh
    me._meta = ChrootMetadata.read(me._dist, me._arch)
    me._tools_chroot = me._qemu_chroot = None

  def _mkname(me): return "chroot.%s-%s" % (me._dist, me._arch)

  def prepare(me):
    if me._arch in C.FOREIGN_ARCHS:
      me._tools_chroot = CrossToolsJob.ensure\
        ("%s-%s" % (me._dist, C.TOOLSARCH), FRESH)
      me._qemu_chroot = CrossToolsJob.ensure\
        ("%s-%s" % (C.QEMUDIST.get(me._dist, me._dist),
                    C.QEMUHOST[me._arch]), FRESH)
      me.await(me._tools_chroot)
      me.await(me._qemu_chroot)

  def check(me):
    status, reason = super(ChrootJob, me).check()
    if status is not READY: return status, reason
    if (me._tools_chroot is not None and me._tools_chroot.started) or \
       (me._qemu_chroot is not None and me._qemu_chroot.started):
      return READY, "prerequisites run"
    return check_fresh(me._fresh, me._meta.update)
1875 def _install_cross_tools(me):
1877 Install or refresh cross-tools in the source-chroot.
1879 This function version assumes that the source-chroot lock is already
1882 Note that there isn't a job class corresponding to this function. It's
1883 done automatically as part of source-chroot setup and update for foreign
1886 with Cleanup() as clean:
1888 dist, arch = me._dist, me._arch
1890 mymulti = run_program(["dpkg-architecture", "-a", C.TOOLSARCH,
1891 "-qDEB_HOST_MULTIARCH"],
1892 stdout = RETURN).rstrip("\n")
1893 gnuarch = run_program(["dpkg-architecture", "-A", arch,
1894 "-qDEB_TARGET_GNU_TYPE"],
1895 stdout = RETURN).rstrip("\n")
1897 crossdir = OS.path.join(C.LOCAL, "cross",
1898 "%s-%s" % (dist, C.TOOLSARCH))
1900 qarch, qhost, qdist = \
1901 C.QEMUARCH[arch], C.QEMUHOST[arch], C.QEMUDIST.get(dist, dist)
1902 qemudir = OS.path.join(C.LOCAL, "cross",
1903 "%s-%s" % (qdist, qhost), "QEMU")
1905 ## Acquire lockfiles in a canonical order to prevent deadlocks.
1906 donors = [C.TOOLSARCH]
1907 if qarch != C.TOOLSARCH: donors.append(qarch)
1910 clean.enter(lockfile(crosstools_lockfile(dist, a), exclp = False))

      session, root = clean.enter(chroot_session(dist, arch, sourcep = True))

      ## Search the cross-tools tree for tools, to decide what to do with
      ## each file.  Make lists:
      ##
      ##   * `want_div' is simply a set of all files in the chroot which
      ##     need dpkg diversions to prevent foreign versions of the tools
      ##     from clobbering our native versions.
      ##
      ##   * `want_link' is a dictionary mapping paths which need symbolic
      ##     links into the cross-tools trees to their link destinations.
      progress("scan cross-tools tree")
      want_div = set()
      want_link = dict()
      cross_prefix = crossdir + "/"
      qemu_prefix = qemudir + "/"
      toolchain_prefix = OS.path.join(crossdir, "TOOLCHAIN", gnuarch) + "/"
      def examine(path):
        dest = switch_prefix(path, [(qemu_prefix, "/usr/bin/"),
                                    (toolchain_prefix, "/usr/bin/"),
                                    (cross_prefix, "/")])
        if OS.path.islink(path): src = OS.readlink(path)
        else: src = host_to_chroot(path)
        want_link[dest] = src
        if not OS.path.isdir(path): want_div.add(dest)
      examine(OS.path.join(qemudir, "qemu-%s-static" % qarch))
      examine(OS.path.join(crossdir, "lib", mymulti))
      examine(OS.path.join(crossdir, "usr/lib", mymulti))
      examine(OS.path.join(crossdir, "usr/lib/gcc-cross"))
      def visit(_, dir, files):
        for f in files:
          if f == "META" or f == "QEMU" or f == "TOOLCHAIN" or \
             (dir.endswith("/lib") and (f == mymulti or f == "gcc-cross")):
            continue
          path = OS.path.join(dir, f)
          if OS.path.islink(path) or not OS.path.isdir(path): examine(path)
      OS.path.walk(crossdir, visit, None)
      OS.path.walk(OS.path.join(crossdir, "TOOLCHAIN", gnuarch),
                   visit, None)
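
      ## To make the `examine' bookkeeping above concrete, here's a hedged
      ## sketch of the behaviour the code assumes of `switch_prefix'
      ## (defined elsewhere in this script): the first matching prefix is
      ## rewritten, so QEMU and toolchain binaries get published under
      ## `/usr/bin'.  The helper below is illustrative only and is never
      ## called.
      def _switch_prefix_sketch(path, mapping):
        for old, new in mapping:
          if path.startswith(old): return new + path[len(old):]
        return path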

      ## Build the set `have_div' of paths which already have diversions.
      progress("scan chroot")
      have_div = set()
      with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
                       "dpkg-divert", "--list"],
                      stdout = PIPE) as (_, fd_out, _):
        f = OS.fdopen(fd_out)
        for line in f:
          m = R_DIVERT.match(line.rstrip("\n"))
          if m: have_div.add(m.group(1))
        f.close()

      ## Build a dictionary `have_link' of symbolic links into the cross-
      ## tools trees.  Also, be sure to collect all of the relative
      ## symbolic links which are in the cross-tools tree.
      have_link = dict()
      with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
                       "sh", "-e", "-c", """
        find / -xdev -lname "/usr/local.schroot/cross/*" -printf "%p %l\n"
      """], stdout = PIPE) as (_, fd_out, _):
        f = OS.fdopen(fd_out)
        for line in f:
          dest, src = line.split()
          have_link[dest] = src
        f.close()
      for path in want_link.iterkeys():
        real = root + path
        if not OS.path.islink(real): continue
        have_link[path] = OS.readlink(real)

      ## Add diversions for the paths which need one, but don't have one.
      ## There's a hack here because the `--no-rename' option was required
      ## in the same version in which it was introduced, so there's no
      ## single incantation that will work across the boundary.
      progress("add missing diversions")
      with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
                       "sh", "-e", "-c", """
        a="%(arch)s"

        if dpkg-divert >/dev/null 2>&1 --no-rename --help
        then no_rename=--no-rename
        else no_rename=
        fi

        while read path; do
          dpkg-divert --package "install-cross-tools" $no_rename \
            --divert "$path.$a" --add "$path"
        done
      """ % dict(arch = arch)], stdin = PIPE) as (fd_in, _, _):
        f = OS.fdopen(fd_in, 'w')
        for path in want_div:
          if path not in have_div: f.write(path + "\n")
        f.close()

      ## Go through each diverted tool, and, if it hasn't been moved
      ## aside, then /link/ it across now.  If we rename it, then the
      ## chroot will stop working -- which is why we didn't allow
      ## `dpkg-divert' to do the rename.  We can tell a tool that hasn't
      ## been moved, because it's a symlink into one of the cross trees.
      progress("preserve existing foreign files")
      chroot_cross_prefix = host_to_chroot(crossdir) + "/"
      chroot_qemu_prefix = host_to_chroot(qemudir) + "/"
      for path in want_div:
        real = root + path; div = real + "." + arch; cross = crossdir + path
        if OS.path.exists(div): continue
        if not OS.path.exists(real): continue
        if OS.path.islink(real):
          realdest = OS.readlink(real)
          if realdest.startswith(chroot_cross_prefix) or \
             realdest.startswith(chroot_qemu_prefix):
            continue
          if OS.path.islink(cross) and realdest == OS.readlink(cross):
            continue
        progress("preserve existing foreign file `%s'" % path)
        run_root(["ln", real, div])

      ## Update all of the symbolic links which are currently wrong: add
      ## links which are missing, delete ones which are obsolete, and
      ## update ones which have the wrong target.
      progress("update symlinks")
      for path, src in want_link.iteritems():
        real = root + path; new = real + ".new"
        try: old_src = have_link[path]
        except KeyError: pass
        else:
          if src == old_src: continue
        progress("link `%s' -> `%s'" % (path, src))
        dir = OS.path.dirname(real)
        if not OS.path.isdir(dir): run_root(["mkdir", "-p", dir])
        if OS.path.exists(new): run_root(["rm", "-f", new])
        run_root(["ln", "-s", src, new])
        run_root(["mv", new, real])
      for path in have_link.iterkeys():
        if path in want_link: continue
        real = root + path
        progress("remove obsolete link `%s' -> `%s'" %
                 (path, OS.readlink(real)))
        run_root(["rm", "-f", real])
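
      ## The `ln -s' to a `.new' name followed by `mv' above is the usual
      ## atomic-replacement idiom: rename(2) swaps the link into place in
      ## a single step, so the chroot never sees a missing tool.  A hedged
      ## native-Python sketch of the same idiom (illustrative only, never
      ## called; the real updates must happen as root, hence `run_root'):
      def _atomic_symlink_sketch(src, real):
        new = real + ".new"
        if OS.path.lexists(new): OS.unlink(new)
        OS.symlink(src, new)
        OS.rename(new, real)            # atomically replaces REAL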

      ## Remove diversions from paths which don't need them any more.
      ## Here it's safe to rename, because either the tool isn't there, in
      ## which case it obviously wasn't important, or it is, and
      ## `dpkg-divert' will atomically replace our link with the foreign
      ## version.
      progress("remove obsolete diversions")
      with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
                       "sh", "-e", "-c", """
        a="%(arch)s"

        while read path; do
          dpkg-divert --package "install-cross-tools" --rename \
            --divert "$path.$a" --remove "$path"
        done
      """ % dict(arch = arch)], stdin = PIPE) as (fd_in, _, _):
        f = OS.fdopen(fd_in, 'w')
        for path in have_div:
          if path not in want_div: f.write(path + "\n")
        f.close()

  def _make_chroot(me):
    """
    Create the source-chroot.

    This will recreate a source-chroot from scratch, destroying the
    existing logical volume if necessary.
    """
    with Cleanup() as clean:

      dist, arch = me._dist, me._arch
      clean.enter(lockfile(chroot_src_lockfile(dist, arch)))

      mnt = chroot_src_mntpt(dist, arch)
      dev = chroot_src_blkdev(dist, arch)
      lv = chroot_src_lv(dist, arch)
      newlv = lv + ".new"

      ## Clean up any leftover debris.
      if mountpoint_p(mnt): umount(mnt)
      if block_device_p(dev):
        run_root(["lvremove", "-f", "%s/%s" % (C.VG, lv)])

      ## Create the logical volume and filesystem.  It's important that
      ## the logical volume not have its official name until after it
      ## contains a mountable filesystem.
      progress("create filesystem")
      run_root(["lvcreate", "--yes", C.LVSZ, "-n", newlv, C.VG])
      run_root(["mkfs", "-j", "-L%s-%s" % (dist, arch),
                OS.path.join("/dev", C.VG, newlv)])
      run_root(["lvrename", C.VG, newlv, lv])

      ## Start installing the chroot.
      with mount_chroot_src(dist, arch) as mnt:

        ## Set up the basic structure.
        run_root(["mkdir", "-m755", OS.path.join(mnt, "fs")])
        run_root(["chmod", "750", mnt])

        ## Install the base system.
        progress("install base system")
        run_root(["eatmydata", "debootstrap", "--no-merged-usr"] +
                 (arch in C.FOREIGN_ARCHS and ["--foreign"] or []) +
                 ["--arch=" + arch, "--variant=minbase",
                  "--include=" + ",".join(C.BASE_PACKAGES),
                  dist, OS.path.join(mnt, "fs"), C.DEBMIRROR])

        ## If this is a cross-installation, then install the necessary
        ## `qemu' emulator and complete the installation.  (The emulator
        ## must live inside the chroot so that the kernel's binfmt_misc
        ## machinery can find it when foreign binaries run during the
        ## second stage.)
        if arch in C.FOREIGN_ARCHS:
          qemu = OS.path.join("cross", "%s-%s" % (dist, C.QEMUHOST[arch]),
                              "QEMU", "qemu-%s-static" % C.QEMUARCH[arch])
          run_root(["install", OS.path.join(C.LOCAL, qemu),
                    OS.path.join(mnt, "fs/usr/bin")])
          run_root(["chroot", OS.path.join(mnt, "fs"),
                    "/debootstrap/debootstrap", "--second-stage"])
          run_root(["ln", "-sf",
                    OS.path.join("/usr/local.schroot", qemu),
                    OS.path.join(mnt, "fs/usr/bin")])

        ## Set up `/usr/local'.
        progress("install `/usr/local' symlink")
        run_root(["rm", "-rf", OS.path.join(mnt, "fs/usr/local")])
        run_root(["ln", "-s",
                  OS.path.join("local.schroot", arch),
                  OS.path.join(mnt, "fs/usr/local")])

        ## Install the `apt' configuration.
        progress("configure package manager")
        run_root(["rm", "-f", OS.path.join(mnt, "fs/etc/apt/sources.list")])
        for c in C.APTCONF:
          run_root(["ln", "-s",
                    OS.path.join("/usr/local.schroot/etc/apt/apt.conf.d", c),
                    OS.path.join(mnt, "fs/etc/apt/apt.conf.d")])
        run_root(["ln", "-s",
                  "/usr/local.schroot/etc/apt/sources.%s" % dist,
                  OS.path.join(mnt, "fs/etc/apt/sources.list")])
        with safewrite_root\
              (OS.path.join(mnt, "fs/etc/apt/apt.conf.d/20arch")) as f:
          f.write("""\
### -*-conf-*-

APT {
  Architecture "%s";
};
""" % arch)

        ## Set up the locale and time zone from the host system.
        progress("configure locales and timezone")
        run_root(["cp", "/etc/locale.gen", "/etc/timezone",
                  OS.path.join(mnt, "fs/etc")])
        with open("/etc/timezone") as f: tz = f.readline().strip()
        run_root(["ln", "-sf",
                  OS.path.join("/usr/share/zoneinfo", tz),
                  OS.path.join(mnt, "fs/etc/localtime")])
        run_root(["cp", "/etc/default/locale",
                  OS.path.join(mnt, "fs/etc/default")])

        ## Point `/etc/mtab' at the kernel's mount table.
        progress("set `/etc/mtab'")
        run_root(["ln", "-sf", "/proc/mounts",
                  OS.path.join(mnt, "fs/etc/mtab")])

        ## Prevent daemons from starting within the chroot.
        progress("inhibit daemon startup")
        with safewrite_root(OS.path.join(mnt, "fs/usr/sbin/policy-rc.d"),
                            mode = "755") as f:
          f.write("""\
#!/bin/sh -e
echo >&2 "policy-rc.d: Services disabled by policy."
exit 101
""")

        ## Hack the dynamic linker to prefer libraries in `/usr' over
        ## `/usr/local'.  This prevents `dpkg-shlibdeps' from becoming
        ## confused.
        progress("configure dynamic linker")
        with safewrite_root\
              (OS.path.join(mnt, "fs/etc/ld.so.conf.d/libc.conf")) as f:
          f.write("# libc default configuration\n")
        with safewrite_root\
              (OS.path.join(mnt, "fs/etc/ld.so.conf.d/zzz-local.conf")) as f:
          f.write("""\
### Local hack to make /usr/local/ late.
/usr/local/lib
""")

      ## If this is a foreign architecture then we need to set it up.
      if arch in C.FOREIGN_ARCHS:

        ## Keep the chroot's native QEMU out of our way: otherwise we'll
        ## stop being able to run programs in the chroot.  There's a hack
        ## here because the `--no-rename' option was required in the same
        ## version in which it was introduced, so there's no single
        ## incantation that will work across the boundary.
        progress("divert emulator")
        run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c", """
          if dpkg-divert >/dev/null 2>&1 --no-rename --help
          then no_rename=--no-rename
          else no_rename=
          fi

          dpkg-divert --package install-cross-tools $no_rename \
            --divert /usr/bin/%(qemu)s.%(arch)s --add /usr/bin/%(qemu)s
        """ % dict(arch = arch, qemu = "qemu-%s-static" % C.QEMUARCH[arch])])

        ## Install faster native tools.
        me._install_cross_tools()

      ## Finishing touches.
      progress("finishing touches")
      run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c",
                                      DEBCONF_TWEAKS + """
        apt-get update
        apt-get -y upgrade
        apt-get -y install "$@"

        apt-get -y autoremove
        apt-get clean
      """, "."] + C.EXTRA_PACKAGES, stdin = DISCARD)

      ## Mark the chroot as done.
      me._meta.update = zulu()
      me._meta.write()

  def _update_chroot(me):
    """Refresh the source-chroot by upgrading its installed packages."""
    with Cleanup() as clean:
      dist, arch = me._dist, me._arch
      clean.enter(lockfile(chroot_src_lockfile(dist, arch)))
      run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c",
                                      DEBCONF_TWEAKS + """
        apt-get update
        apt-get -y dist-upgrade
        apt-get -y autoremove
        apt-get clean
      """], stdin = DISCARD)
      if arch in C.FOREIGN_ARCHS: me._install_cross_tools()
      me._meta.update = zulu(); me._meta.write()

  def run(me):
    if me._meta.update is not None: me._update_chroot()
    else: me._make_chroot()
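
## The jobs above lean on `check_fresh' to decide whether an existing
## artifact is recent enough.  The real function is defined earlier in
## this file; the following illustrative sketch (assuming UPDATE is a
## `zulu'-format timestamp or `None', and that the time-string parser
## shown near the top of the file is named `unzulu') merely records the
## decision table the calling code relies on: `CREATE' only builds what's
## missing, `FORCE' always rebuilds, and a number is a maximum age in
## seconds.  It is never called.
def _check_fresh_sketch(fresh, update):
  if update is None: return READY, "must create"
  elif fresh is CREATE: return DONE, "already exists"
  elif fresh is FORCE: return READY, "rebuild forced"
  elif T.time() > unzulu(update) + fresh: return READY, "too stale"
  else: return DONE, "sufficiently fresh"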

###--------------------------------------------------------------------------
### Extracting the cross tools.

class CrossToolsJob (BaseJob):
  """Extract cross-tools from a donor chroot."""

  SPECS = C.NATIVE_CHROOTS

  def __init__(me, spec, fresh = CREATE, *args, **kw):
    super(CrossToolsJob, me).__init__(*args, **kw)
    me._dist, me._arch = split_dist_arch(spec)
    me._meta = CrossToolsMetadata.read(me._dist, me._arch)
    me._fresh = fresh
    me._chroot = None

  def _mkname(me): return "cross-tools.%s-%s" % (me._dist, me._arch)

  def prepare(me):
    st, r = check_fresh(me._fresh, me._meta.update)
    if st is DONE: return
    me._chroot = ChrootJob.ensure("%s-%s" % (me._dist, me._arch), FRESH)
    me.await(me._chroot)

  def check(me):
    status, reason = super(CrossToolsJob, me).check()
    if status is not READY: return status, reason
    if me._chroot is not None and me._chroot.started:
      return READY, "prerequisites run"
    return check_fresh(me._fresh, me._meta.update)

  def run(me):
    with Cleanup() as clean:

      dist, arch = me._dist, me._arch

      mymulti = run_program(["dpkg-architecture", "-a" + arch,
                             "-qDEB_HOST_MULTIARCH"],
                            stdout = RETURN).rstrip("\n")
      crossarchs = [run_program(["dpkg-architecture", "-A" + a,
                                 "-qDEB_TARGET_GNU_TYPE"],
                                stdout = RETURN).rstrip("\n")
                    for a in C.FOREIGN_ARCHS]

      crossdir = OS.path.join(C.LOCAL, "cross", "%s-%s" % (dist, arch))
      crossold = crossdir + ".old"; crossnew = crossdir + ".new"
      usrbin = OS.path.join(crossnew, "usr/bin")

      clean.enter(lockfile(crosstools_lockfile(dist, arch)))
      run_program(["rm", "-rf", crossnew])
      OS.mkdir(crossnew)

      ## Open a session to the donor chroot.
      progress("establish snapshot")
      session, root = clean.enter(chroot_session(dist, arch))

      ## Make sure the donor tree is up-to-date, and install the extra
      ## packages we need.
      progress("install tools packages")
      run_schroot_session(session, ["eatmydata", "sh", "-e", "-c",
                                    DEBCONF_TWEAKS + """
        apt-get update
        apt-get -y upgrade
        apt-get -y install "$@"
      """, "."] + C.CROSS_PACKAGES, rootp = True, stdin = DISCARD)

      ## A function to copy a path, and everything needed to make the
      ## path's name work, from the donor tree into the new cross-tools
      ## tree.
      def chase(path):
        dest = ""

        ## Work through the remaining components of the PATH.
        while path != "":

          ## Split off and analyse the first component.
          try: sl = path.index("/")
          except ValueError: step = path; path = ""
          else: step, path = path[:sl], path[sl + 1:]
          if step == "" or step == ".":
            ## A redundant `/' or `./'.  Skip it.
            pass
          elif step == "..":
            ## A `../'.  Strip off the trailing component of DEST.
            dest = dest[:dest.rindex("/")]
          else:
            ## Something else.  Transfer the component name to DEST.
            dest += "/" + step

          ## If DEST refers to something in the cross-tools tree then
          ## we're done.
          crossdest = crossnew + dest
          try: st = OS.lstat(crossdest)
          except OSError, err:
            if err.errno == E.ENOENT:
              ## No.  We need to copy something from the donor tree so
              ## that the name works.
              st = OS.lstat(root + dest)
              if ST.S_ISDIR(st.st_mode):
                OS.mkdir(crossdest)
              else:
                progress("copy `%s'" % dest)
                run_program(["rsync", "-aHR",
                             "%s/.%s" % (root, dest),
                             crossnew])
            else:
              raise

          ## If DEST refers to a symbolic link, then prepend the link
          ## target to PATH so that we can be sure the link will work.
          if ST.S_ISLNK(st.st_mode):
            link = OS.readlink(crossdest)
            if link.startswith("/"): dest = ""; link = link[1:]
            else:
              try: dest = dest[:dest.rindex("/")]
              except ValueError: dest = ""
            if path == "": path = link
            else: path = "%s/%s" % (link, path)
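
      ## For orientation, a hedged sketch of the component-splitting step
      ## used by `chase' above (an illustrative, hypothetical helper which
      ## is never called): splitting `"usr/bin/cc"' yields
      ## `("usr", "bin/cc")', and splitting `"cc"' yields `("cc", "")'.
      def _split_component_sketch(path):
        try: sl = path.index("/")
        except ValueError: return path, ""
        else: return path[:sl], path[sl + 1:]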

      ## Work through the shopping list, copying the things it names into
      ## the cross-tools tree.
      ##
      ## Each thing in the `CROSS_PATHS' list is a `|'-separated list of
      ## glob patterns, optionally preceded by `?'.  Unless the list
      ## starts with `?', at least one of the patterns must match at least
      ## one file.  Patterns may contain the token `MULTI', which is
      ## replaced by the donor architecture's multiarch triplet.  (A
      ## hedged sketch of this expansion follows the loop below.)
      for pat in C.CROSS_PATHS:
        any = False
        pat = pat.replace("MULTI", mymulti)
        if pat.startswith("?"):
          any = True; pat = pat[1:]
        for subpat in pat.split("|"):
          for rootpath in GLOB.iglob(root + subpat):
            any = True
            path = rootpath[len(root):]
            progress("copy `%s'" % path)
            run_program(["rsync", "-aHR", "%s/.%s" % (root, path),
                         crossnew])
        if not any:
          raise RuntimeError("no matches for cross-tool pattern `%s'" % pat)
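
      ## The promised sketch of the `CROSS_PATHS' syntax, with a
      ## hypothetical pattern: `"?/usr/lib/MULTI/libfoo*|/lib/MULTI/libfoo*"'
      ## first has `MULTI' replaced by, e.g., `x86_64-linux-gnu'; then the
      ## leading `?' (match optional) is stripped; and finally the
      ## `|'-separated globs are tried in turn.  Illustrative only, never
      ## called:
      def _cross_pattern_sketch(pat, multi):
        pat = pat.replace("MULTI", multi)
        optionalp = pat.startswith("?")
        if optionalp: pat = pat[1:]
        return optionalp, pat.split("|")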

      ## Scan the new tree: chase down symbolic links, copying extra stuff
      ## that we'll need; and examine ELF binaries to make sure we get the
      ## necessary shared libraries.
      scan = []
      def visit(_, dir, files):
        for f in files:
          path = OS.path.join(dir, f)
          inside = switch_prefix(path, [(crossnew + "/", "/")])
          if OS.path.islink(path): chase(inside)
          if elf_binary_p(arch, path): scan.append(inside)
      OS.path.walk(crossnew, visit, None)

      ## Work through the ELF binaries in `scan', determining which shared
      ## libraries they'll need.
      ##
      ## The rune running in the chroot session reads ELF binary names on
      ## stdin, one per line, and runs `ldd' on them to discover the
      ## binaries' needed libraries and resolve them into pathnames.  Each
      ## pathname is printed to stdout as a line `+PATHNAME', followed by
      ## a final line consisting only of `-' as a terminator.  This is
      ## necessary so that we can tell when we've finished, because newly
      ## discovered libraries need to be fed back to discover their
      ## recursive dependencies.  (This is why the `WriteLinesSelector'
      ## interface is quite so hairy.)
      with subprocess(["schroot", "-r", "-c", session, "--",
                       "sh", "-e", "-c", """
        while read path; do
          ldd "$path" | while read a b c d; do
            case "$a:$b:$c:$d" in
              not:a:dynamic:executable) ;;
              statically:linked::) ;;
              /*) echo "+$a" ;;
              *:=\\>:/*) echo "+$c" ;;
              *:=\\>:\\(*) ;;
              *) echo >&2 "failed to find shared library \\`$a'"; exit 2 ;;
            esac
          done
          echo -
        done
      """], stdin = PIPE, stdout = PIPE) as (fd_in, fd_out, _):

        ## Keep track of the number of binaries we've reported to the
        ## `ldd' process for which we haven't yet seen all of their
        ## dependencies.  (This is wrapped in a `Struct' because of
        ## Python's daft scoping rules.)
        v = Struct(n = 0)

        def line_in():
          ## Provide a line of input to the `ldd' process; if we can't,
          ## raise `StopIteration' to signal this.

          ## See if there's something to scan.
          try:
            path = scan.pop()
          except IndexError:

            ## There's nothing currently waiting to be scanned.
            if v.n:
              ## There are still outstanding replies, so stall.
              return None
            else:
              ## There are no outstanding replies left, and we have
              ## nothing more to scan, so we must be finished.
              raise StopIteration

          ## The `scan' list isn't empty, so return an item from that, and
          ## remember that there's one more thing we expect to see answers
          ## for.
          v.n += 1; return path

        def line_out(line):
          ## We've received a line from the `ldd' process.

          ## If it's the terminator, then it's finished processing one of
          ## our binaries.  Note this.  Maybe it's time to stop.
          if line == "-":
            v.n -= 1
            return

          ## Strip the leading marker (which is just there so that the
          ## terminating `-' is unambiguous).
          assert line.startswith("+")
          lib = line[1:]

          ## If we already have this binary then we'll already have
          ## submitted it for scanning.
          path = crossnew + lib
          try: OS.lstat(path)
          except OSError, err:
            if err.errno == E.ENOENT: pass
            else: raise
          else: return

          ## Copy it into the tools tree, together with any symbolic links
          ## needed to reach it.
          chase(lib)

          ## If this is an ELF binary (and it ought to be!) then submit it
          ## for further scanning.
          if elf_binary_p(arch, path):
            scan.append(switch_prefix(path, [(crossnew + "/", "/")]))

        ## And run this entire contraption.  When this is done, we should
        ## have all of the library dependencies for all of our binaries.
        select_loop([WriteLinesSelector(fd_in, line_in),
                     ReadLinesSelector(fd_out, line_out)])
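
      ## A hedged, self-contained sketch of the framing protocol described
      ## above (illustrative only, never called): each `+PATH' line names
      ## a discovered library, and each bare `-' closes one submitted
      ## binary, which is how the caller counts outstanding replies.
      def _parse_ldd_frames_sketch(lines, outstanding):
        libs = []
        for line in lines:
          if line == "-": outstanding -= 1
          elif line.startswith("+"): libs.append(line[1:])
        return libs, outstanding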

      ## Set up the cross-compiler and emulator.  Start by moving the
      ## cross compilers and emulator into their specific places, so that
      ## they don't end up cluttering chroots for non-matching
      ## architectures.
      progress("establish TOOLCHAIN and QEMU")
      OS.mkdir(OS.path.join(crossnew, "TOOLCHAIN"))
      qemudir = OS.path.join(crossnew, "QEMU")
      OS.mkdir(qemudir)
      for gnu in C.FOREIGN_GNUARCHS:
        OS.mkdir(OS.path.join(crossnew, "TOOLCHAIN", gnu))
      for f in OS.listdir(usrbin):
        for gnu in C.FOREIGN_GNUARCHS:
          gnuprefix = gnu + "-"
          if f.startswith(gnuprefix):
            tooldir = OS.path.join(crossnew, "TOOLCHAIN", gnu)
            OS.rename(OS.path.join(usrbin, f), OS.path.join(tooldir, f))
            OS.symlink(f, OS.path.join(tooldir, f[len(gnuprefix):]))
            break
        else:
          if f.startswith("qemu-") and f.endswith("-static"):
            OS.rename(OS.path.join(usrbin, f), OS.path.join(qemudir, f))

      ## The GNU cross compilers try to find their additional pieces via a
      ## relative path, which isn't going to end well.  Add a symbolic
      ## link at the right place to where the things are actually going to
      ## live.
      toollib = OS.path.join(crossnew, "TOOLCHAIN", "lib")
      OS.mkdir(toollib)
      OS.symlink("../../usr/lib/gcc-cross",
                 OS.path.join(toollib, "gcc-cross"))

      ## We're done.  Replace the old cross-tools with our new one.
      me._meta.update = zulu()
      me._meta.write(crossnew)
      if OS.path.exists(crossdir): run_program(["mv", crossdir, crossold])
      OS.rename(crossnew, crossdir)
      run_program(["rm", "-rf", crossold])

###--------------------------------------------------------------------------
### Building and installing local packages.

def pkg_metadata_lockfile(pkg):
  return lockfile_path("pkg-meta.%s" % pkg)

def pkg_srcdir_lockfile(pkg, ver):
  return lockfile_path("pkg-source.%s-%s" % (pkg, ver))

def pkg_srcdir(pkg, ver):
  return OS.path.join(C.LOCAL, "src", "%s-%s" % (pkg, ver))

def pkg_builddir(pkg, ver, arch):
  return OS.path.join(pkg_srcdir(pkg, ver), "build.%s" % arch)

class PackageMetadata (BaseMetadata):
  VARS = ["pkg"] + list(C.ALL_ARCHS)

  @classmethod
  def read(cls, pkg):
    try:
      return super(PackageMetadata, cls)\
        .read(OS.path.join(C.LOCAL, "src", "META.%s" % pkg))
    except IOError, err:
      if err.errno == E.ENOENT: pass
      else: raise
    return cls(pkg = pkg)

  def write(me):
    super(PackageMetadata, me)\
      .write(OS.path.join(C.LOCAL, "src", "META.%s" % me.pkg))

class PackageSourceJob (BaseJob):

  SPECS = C.LOCALPKGS

  def __init__(me, pkg, fresh = CREATE, *args, **kw):
    super(PackageSourceJob, me).__init__(*args, **kw)
    me._pkg = pkg
    tar = None; ver = None
    r = RX.compile(r"^%s-(\d.*)\.tar\.(?:Z|z|gz|bz2|xz|lzma)$" %
                   RX.escape(pkg))
    for f in OS.listdir("pkg"):
      m = r.match(f)
      if not m: pass
      elif tar is not None:
        raise ExpectedError("multiple source tarballs of package `%s'" % pkg)
      else: tar, ver = f, m.group(1)
    if tar is None:
      raise ExpectedError("no source tarball of package `%s'" % pkg)
    me.version = ver
    me.tarball = OS.path.join("pkg", tar)
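
    ## A hedged example of the tarball matching above: for a hypothetical
    ## package `foo', the file `foo-1.2.tar.xz' matches, giving version
    ## `1.2'; `foo-1.2.zip' and `bar-1.2.tar.gz' are ignored; and a second
    ## match, say `foo-1.3.tar.gz', raises `ExpectedError'.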

  def _mkname(me): return "pkg-source.%s" % me._pkg

  def check(me):
    status, reason = super(PackageSourceJob, me).check()
    if status is not READY: return status, reason
    if OS.path.isdir(pkg_srcdir(me._pkg, me.version)):
      return DONE, "already unpacked"
    else:
      return READY, "no source tree"

  def run(me):
    with Cleanup() as clean:
      pkg, ver, tar = me._pkg, me.version, me.tarball
      srcdir = pkg_srcdir(pkg, ver)
      newdir = srcdir + ".new"

      progress("unpack `%s'" % me.tarball)
      clean.enter(lockfile(pkg_srcdir_lockfile(pkg, ver)))
      run_program(["rm", "-rf", newdir])
      OS.mkdir(newdir)
      run_program(["tar", "xf", OS.path.join(OS.getcwd(), me.tarball)],
                  cwd = newdir)
      things = OS.listdir(newdir)
      if len(things) == 1:
        OS.rename(OS.path.join(newdir, things[0]), srcdir)
        OS.rmdir(newdir)
      else:
        OS.rename(newdir, srcdir)

class PackageBuildJob (BaseJob):

  SPECS = ["%s:%s" % (pkg, arch)
           for pkg in C.LOCALPKGS
           for arch in C.ALL_ARCHS]

  def __init__(me, spec, fresh = CREATE, *args, **kw):
    super(PackageBuildJob, me).__init__(*args, **kw)
    colon = spec.index(":")
    me._pkg, me._arch = spec[:colon], spec[colon + 1:]

  def _mkname(me): return "pkg-build.%s:%s" % (me._pkg, me._arch)

  def prepare(me):
    me.await(ChrootJob.ensure("%s-%s" % (C.PRIMARY_DIST, me._arch), CREATE))
    me._meta = PackageMetadata.read(me._pkg)
    me._src = PackageSourceJob.ensure(me._pkg, FRESH); me.await(me._src)
    me._prereq = [PackageBuildJob.ensure("%s:%s" % (prereq, me._arch), FRESH)
                  for prereq in C.PKGDEPS[me._pkg]]
    for j in me._prereq: me.await(j)

  def check(me):
    status, reason = super(PackageBuildJob, me).check()
    if status is not READY: return status, reason
    if me._src.started: return READY, "fresh source directory"
    for j in me._prereq:
      if j.started:
        return READY, "dependency `%s' freshly installed" % j._pkg
    if getattr(me._meta, me._arch) == me._src.version:
      return DONE, "already installed"
    return READY, "not yet installed"

  def run(me):
    with Cleanup() as clean:
      pkg, ver, arch = me._pkg, me._src.version, me._arch

      session, _ = clean.enter(chroot_session(C.PRIMARY_DIST, arch))
      builddir = OS.path.join(pkg_srcdir(pkg, ver), "build.%s" % arch)
      chroot_builddir = host_to_chroot(builddir)
      run_program(["rm", "-rf", builddir])
      OS.mkdir(builddir)

      progress("prepare %s chroot" % arch)
      run_schroot_session(session,
                          ["eatmydata", "apt-get", "update"],
                          rootp = True, stdin = DISCARD)
      run_schroot_session(session,
                          ["eatmydata", "apt-get", "-y", "upgrade"],
                          rootp = True, stdin = DISCARD)
      run_schroot_session(session,
                          ["eatmydata", "apt-get", "-y",
                           "install", "pkg-config"],
                          rootp = True, stdin = DISCARD)
      run_schroot_session(session,
                          ["mount", "-oremount,rw", "/usr/local.schroot"],
                          rootp = True, stdin = DISCARD)
      run_schroot_session(session,
                          ["mount", "--bind",
                           "/usr/local.schroot/%s/include.aside" % arch,
                           "/usr/local.schroot/%s/include" % arch],
                          rootp = True, stdin = DISCARD)

      progress("configure `%s' %s for %s" % (pkg, ver, arch))
      run_schroot_session(session, ["sh", "-e", "-c", """
        cd "$1" &&
        ../configure PKG_CONFIG_PATH=/usr/local/lib/pkgconfig.hidden
      """, ".", chroot_builddir])

      progress("compile `%s' %s for %s" % (pkg, ver, arch))
      run_schroot_session(session, ["sh", "-e", "-c", """
        cd "$1" && make -j4 && make -j4 check
      """, ".", chroot_builddir])

      existing = getattr(me._meta, arch, None)
      if existing is not None and existing != ver:
        progress("uninstall existing `%s' %s for %s" % (pkg, existing, arch))
        run_schroot_session(session, ["sh", "-e", "-c", """
          cd "$1" && make uninstall
        """, ".", OS.path.join(pkg_srcdir(pkg, existing),
                               "build.%s" % arch)],
                            rootp = True)

      ## Install the new version.  (The `pkgconfig.hidden' dance below
      ## keeps locally-installed `.pc' files out of the default search
      ## path, so builds in the chroot only see them via the explicit
      ## `PKG_CONFIG_PATH' passed to `configure' above.)
      progress("install `%s' %s for %s" % (pkg, ver, arch))
      run_schroot_session(session, ["sh", "-e", "-c", """
        cd "$1" && make install
        mkdir -p /usr/local/lib/pkgconfig.hidden
        mv /usr/local/lib/pkgconfig/*.pc /usr/local/lib/pkgconfig.hidden || :
      """, ".", chroot_builddir], rootp = True)

      clean.enter(lockfile(pkg_metadata_lockfile(pkg)))
      me._meta = PackageMetadata.read(pkg)
      setattr(me._meta, arch, ver); me._meta.write()

      with lockfile(chroot_src_lockfile(C.PRIMARY_DIST, arch)):
        run_schroot_source(C.PRIMARY_DIST, arch, ["ldconfig"])

###--------------------------------------------------------------------------
### Process the configuration and options.

OPTIONS = OP.OptionParser\
  (usage = "chroot-maint [-diknqs] [-fFRESH] [-jN] JOB[.SPEC,...] ...")
for short, long, props in [
        ("-d", "--debug", {
          'dest': 'debug', 'default': False, 'action': 'store_true',
          'help': "print lots of debugging drivel" }),
        ("-f", "--fresh", {
          'dest': 'fresh', 'metavar': 'FRESH', 'default': "create",
          'help': "how fresh (`create', `force', or `N[s|m|h|d|w]')" }),
        ("-i", "--ignore-errors", {
          'dest': 'ignerr', 'default': False, 'action': 'store_true',
          'help': "ignore all errors encountered while processing" }),
        ("-j", "--jobs", {
          'dest': 'njobs', 'metavar': 'N', 'default': 1, 'type': 'int',
          'help': "run up to N jobs in parallel" }),
        ("-J", "--forkbomb", {
          'dest': 'njobs', 'action': 'store_true',
          'help': "run as many jobs in parallel as possible" }),
        ("-k", "--keep-going", {
          'dest': 'keepon', 'default': False, 'action': 'store_true',
          'help': "keep going even if independent jobs fail" }),
        ("-n", "--dry-run", {
          'dest': 'dryrun', 'default': False, 'action': 'store_true',
          'help': "don't actually do anything" }),
        ("-q", "--quiet", {
          'dest': 'quiet', 'default': False, 'action': 'store_true',
          'help': "don't print the output from successful jobs" }),
        ("-s", "--silent", {
          'dest': 'silent', 'default': False, 'action': 'store_true',
          'help': "don't print progress messages" })]:
  OPTIONS.add_option(short, long, **props)

###--------------------------------------------------------------------------
### Main program.

R_JOBSERV = RX.compile(r'^--jobserver-(?:fds|auth)=(\d+),(\d+)$')

JOBMAP = { "chroot": ChrootJob,
           "cross-tools": CrossToolsJob,
           "pkg-source": PackageSourceJob,
           "pkg-build": PackageBuildJob }

R_FRESH = RX.compile(r"^(?:create|force|(\d+)(|[smhdw]))$")

def parse_fresh(spec):
  m = R_FRESH.match(spec)
  if not m: raise ExpectedError("bad freshness `%s'" % spec)
  if spec == "create": fresh = CREATE
  elif spec == "force": fresh = FORCE
  else:
    n, u = int(m.group(1)), m.group(2)
    if u == "" or u == "s": fresh = n
    elif u == "m": fresh = 60*n
    elif u == "h": fresh = 3600*n
    elif u == "d": fresh = 86400*n
    elif u == "w": fresh = 604800*n
  return fresh
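
## For example (hedged, following `R_FRESH' above): `parse_fresh("30m")'
## yields 1800 and `parse_fresh("2d")' yields 172800, while
## `parse_fresh("create")' and `parse_fresh("force")' yield the `CREATE'
## and `FORCE' tokens tested by `check_fresh'.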

with toplevel_handler():
  OPT, args = OPTIONS.parse_args()

  ## Pick over the `MAKEFLAGS' environment variable: if we were started
  ## by `make' then we can use its jobserver and honour the flags we
  ## understand.
  rfd, wfd = -1, -1
  njobs = OPT.njobs
  try: mkflags = OS.environ['MAKEFLAGS']
  except KeyError: pass
  else:
    ff = mkflags.split()
    for f in ff:
      m = R_JOBSERV.match(f)
      if m: rfd, wfd = int(m.group(1)), int(m.group(2))
      elif f == '-j': njobs = None
      elif not f.startswith('-'):
        for ch in f:
          if ch == 'i': OPT.ignerr = True
          elif ch == 'k': OPT.keepon = True
          elif ch == 'n': OPT.dryrun = True
          elif ch == 's': OPT.silent = True
  if OPT.njobs < 1:
    raise ExpectedError("running no more than %d jobs is silly" % OPT.njobs)

  FRESH = parse_fresh(OPT.fresh)

  SCHED = JobScheduler(rfd, wfd, njobs)
  OS.environ["http_proxy"] = OS.environ["https_proxy"] = C.PROXY

  ## Work out which jobs we're to run.  Each argument names a job type,
  ## optionally qualified by a list of glob patterns over its specs and a
  ## freshness, e.g., `chroot.*-amd64/force'.
  if not args: OPTIONS.print_usage(SYS.stderr); SYS.exit(2)
  jobs = []
  for arg in args:
    try: sl = arg.index("/")
    except ValueError: fresh = FRESH
    else: arg, fresh = arg[:sl], parse_fresh(arg[sl + 1:])
    try: dot = arg.index(".")
    except ValueError: jty, pats = arg, "*"
    else: jty, pats = arg[:dot], arg[dot + 1:]
    try: jcls = JOBMAP[jty]
    except KeyError: raise ExpectedError("unknown job type `%s'" % jty)
    specs = []
    for pat in pats.split(","):
      any = False
      for s in jcls.SPECS:
        if FM.fnmatch(s, pat): specs.append(s); any = True
      if not any: raise ExpectedError("no match for `%s'" % pat)
    for s in specs:
      jobs.append(jcls.ensure(s, fresh))
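
  ## Finally, run the collected jobs.  (A hedged sketch: the scheduler is
  ## provided by `jobclient', which isn't shown here, so the `run' entry
  ## point and its exit-status convention below are assumptions.)
  rc = SCHED.run()
  SYS.exit(rc)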

###----- That's all, folks --------------------------------------------------