1#! /usr/bin/python
2###
3### Create, upgrade, and maintain (native and cross-) chroots
4###
5### (c) 2018 Mark Wooding
6###
7
8###----- Licensing notice ---------------------------------------------------
9###
10### This file is part of the distorted.org.uk chroot maintenance tools.
11###
12### distorted-chroot is free software: you can redistribute it and/or
13### modify it under the terms of the GNU General Public License as
14### published by the Free Software Foundation; either version 2 of the
15### License, or (at your option) any later version.
16###
17### distorted-chroot is distributed in the hope that it will be useful,
18### but WITHOUT ANY WARRANTY; without even the implied warranty of
19### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20### General Public License for more details.
21###
22### You should have received a copy of the GNU General Public License
23### along with distorted-chroot. If not, write to the Free Software
24### Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
25### USA.
26
27## still to do:
28## tidy up
29
30import contextlib as CTX
31import errno as E
32import fcntl as FC
33import fnmatch as FM
34import glob as GLOB
35import itertools as I
36import optparse as OP
37import os as OS
38import random as R
39import re as RX
40import signal as SIG
41import select as SEL
42import stat as ST
43from cStringIO import StringIO
44import sys as SYS
45import time as T
46import traceback as TB
47
48import jobclient as JC
49
50QUIS = OS.path.basename(SYS.argv[0])
51TODAY = T.strftime("%Y-%m-%d")
52NOW = T.time()
53
54###--------------------------------------------------------------------------
55### Random utilities.
56
57RC = 0
58def moan(msg):
59 """Print MSG to stderr as a warning."""
60 if not OPT.silent: OS.write(2, "%s: %s\n" % (QUIS, msg))
61def error(msg):
62 """Print MSG to stderr, and remember to exit nonzero."""
63 global RC
64 moan(msg)
65 RC = 2
66
67class ExpectedError (Exception):
68 """A fatal error which shouldn't print a backtrace."""
69 pass
70
71@CTX.contextmanager
72def toplevel_handler():
73 """Catch `ExpectedError's and report Unixish error messages."""
74 try: yield None
75 except ExpectedError, err: moan(err); SYS.exit(2)
76
77def spew(msg):
78 """Print MSG to stderr as a debug trace."""
79 if OPT.debug: OS.write(2, ";; %s\n" % msg)
80
81class Tag (object):
82 """Unique objects with no internal structure."""
83 def __init__(me, label): me._label = label
84 def __str__(me): return '#<%s %s>' % (me.__class__.__name__, me._label)
85 def __repr__(me): return '#<%s %s>' % (me.__class__.__name__, me._label)
86
87class Struct (object):
88 def __init__(me, **kw): me.__dict__.update(kw)
89
90class Cleanup (object):
91 """
92 A context manager for stacking other context managers.
93
94 By itself, it does nothing. Attach other context managers with `enter' or
95 loose cleanup functions with `add'. On exit, contexts are left and
96 cleanups performed in reverse order.
97 """
98 def __init__(me):
99 me._cleanups = []
100 def __enter__(me):
101 return me
102 def __exit__(me, exty, exval, extb):
103 trap = False
104 for c in reversed(me._cleanups):
105 if c(exty, exval, extb): trap = True
106 return trap
107 def enter(me, ctx):
108 v = ctx.__enter__()
109 me._cleanups.append(ctx.__exit__)
110 return v
111 def add(me, func):
112 me._cleanups.append(lambda exty, exval, extb: func())
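
## A minimal usage sketch for `Cleanup' (illustrative only; the chroot
## parameters are made up, and `mount_chroot_src' is defined later in this
## file -- remember that it needs the source-chroot lock held):
##
##     with Cleanup() as clean:
##       fd = OS.open("/dev/null", OS.O_RDONLY)
##       clean.add(lambda: OS.close(fd))
##       mnt = clean.enter(mount_chroot_src("wheezy", "i386"))
##       ...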
113
114def zulu(t = None):
115 """Return the time T (default now) as a string."""
116 return T.strftime("%Y-%m-%dT%H:%M:%SZ", T.gmtime(t))
117
118R_ZULU = RX.compile(r"^(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)Z$")
119def unzulu(z):
120 """Convert the time string Z back to a Unix time."""
121 m = R_ZULU.match(z)
122 if not m: raise ValueError("bad time spec `%s'" % z)
123 yr, mo, dy, hr, mi, se = map(int, m.groups())
124 return T.mktime((yr, mo, dy, hr, mi, se, 0, 0, 0))
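
## For example, `zulu(0)' returns "1970-01-01T00:00:00Z". Note that `zulu'
## formats via `T.gmtime' (UTC) while `unzulu' converts back with
## `T.mktime', which works in local time, so the pair only invert each
## other exactly when the process timezone is UTC.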
125
126###--------------------------------------------------------------------------
127### Simple select(2) utilities.
128
129class BaseSelector (object):
130 """
131 A base class for hooking into `select_loop'.
132
133 See `select_loop' for details of the protocol.
134 """
135 def preselect(me, rfds, wfds): pass
136 def postselect_read(me, fd): pass
137 def postselect_write(me, fd): pass
138
139class WriteLinesSelector (BaseSelector):
140 """Write whole lines to an output file descriptor."""
141
142 def __init__(me, fd, nextfn = None, *args, **kw):
143 """
144 Initialize the WriteLinesSelector to write to the file descriptor FD.
145
146 The FD is marked non-blocking.
147
148 The lines are produced by the NEXTFN, which is called without arguments.
149 It can affect the output in three ways:
150
151 * It can return a string (or almost any other kind of object, which
152 will be converted into a string by `str'), which will be written to
153 the descriptor followed by a newline. Lines are written in the order
154 in which they are produced.
155
156 * It can return `None', which indicates that there are no more items to
157 be written for the moment. The function will be called again from
158 time to time, to see if it has changed its mind. This is the right
159 thing to do in order to stall output temporarily.
160
161 * It can raise `StopIteration', which indicates that there will never
162 be any more items. The file descriptor will be closed.
163
164 Subclasses can override this behaviour by defining a method `_next' and
165 passing `None' as the NEXTFN.
166 """
167 super(WriteLinesSelector, me).__init__(*args, **kw)
168 set_nonblocking(fd)
169 me._fd = fd
170 if nextfn is not None: me._next = nextfn
171
172 ## Selector state.
173 ##
174 ## * `_buf' contains a number of output items, already formatted, and
175 ## ready for output in a single batch. It might be empty.
176 ##
177 ## * `_pos' is the current output position in `_buf'.
178 ##
179 ## * `_more' is set unless the `_next' function has raised
180 ## `StopIteration': it indicates that we should close the descriptor
181 ## once all of the remaining data in the buffer has been sent.
182 me._buf = ""
183 me._pos = 0
184 me._more = True
185
186 def _refill(me):
187 """Refill `_buf' by calling `_next'."""
188 sio = StringIO(); n = 0
189 while n < 4096:
190 try: item = me._next()
191 except StopIteration: me._more = False; break
192 if item is None: break
193 item = str(item)
194 sio.write(item); sio.write("\n"); n += len(item) + 1
195 me._buf = sio.getvalue(); me._pos = 0
196
197 def preselect(me, rfds, wfds):
198 if me._fd == -1: return
199 if me._buf == "" and me._more: me._refill()
200 if me._buf != "" or not me._more: wfds.append(me._fd)
201
202 def postselect_write(me, fd):
203 if fd != me._fd: return
204 while True:
205 if me._pos >= len(me._buf):
206 if me._more: me._refill()
207 if not me._more: OS.close(me._fd); me._fd = -1; break
208 if not me._buf: break
209 try: n = OS.write(me._fd, me._buf[me._pos:])
210 except OSError, err:
211 if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: break
212 elif err.errno == E.EPIPE: OS.close(me._fd); me._fd = -1; break
213 else: raise
214 me._pos += n
215
216class ReadLinesSelector (BaseSelector):
217 """Report whole lines from an input file descriptor as they arrive."""
218
219 def __init__(me, fd, linefn = None, *args, **kw):
220 """
221 Initialize the ReadLinesSelector to read from the file descriptor FD.
222
223 The FD is marked non-blocking.
224
225 For each whole line, and the final partial line (if any), the selector
226 calls LINEFN with the line as an argument (without the terminating
227 newline, if any).
228
229 Subclasses can override this behaviour by defining a method `_line' and
230 passing `None' as the LINEFN.
231 """
232 super(ReadLinesSelector, me).__init__(*args, **kw)
233 set_nonblocking(fd)
234 me._fd = fd
235 me._buf = ""
236 if linefn is not None: me._line = linefn
237
238 def preselect(me, rfds, wfds):
239 if me._fd != -1: rfds.append(me._fd)
240
241 def postselect_read(me, fd):
242 if fd != me._fd: return
243 while True:
244 try: buf = OS.read(me._fd, 4096)
245 except OSError, err:
246 if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: break
247 else: raise
248 if buf == "":
249 OS.close(me._fd); me._fd = -1
250 if me._buf: me._line(me._buf)
251 break
252 buf = me._buf + buf
253 i = 0
254 while True:
255 try: j = buf.index("\n", i)
256 except ValueError: break
257 me._line(buf[i:j])
258 i = j + 1
259 me._buf = buf[i:]
260
261def select_loop(selectors):
262 """
263 Multiplex I/O between the various SELECTORS.
264
265 A `selector' SEL is an object which implements the selector protocol, which
266 consists of three methods.
267
268 * SEL.preselect(RFDS, WFDS) -- add any file descriptors which the
269 selector is interested in reading from to the list RFDS, and add file
270 descriptors it's interested in writing to to the list WFDS.
271
272 * SEL.postselect_read(FD) -- informs the selector that FD is ready for
273 reading.
274
275 * SEL.postselect_write(FD) -- informs the selector that FD is ready for
276 writing.
277
278 The `select_loop' function loops as follows.
279
280 * It calls the `preselect' method on each SELECTOR to determine what I/O
281 events it thinks are interesting.
282
283 * It waits for some interesting event to happen.
284
285 * It calls the `postselect_read' and/or `postselect_write' methods on all
286 of the selectors for each file descriptor which is ready.
287
288 The loop ends when no selector is interested in any events. This is simple
289 but rather inefficient.
290 """
291 while True:
292 rfds, wfds = [], []
293 for sel in selectors: sel.preselect(rfds, wfds)
294 if not rfds and not wfds: break
295 rfds, wfds, _ = SEL.select(rfds, wfds, [])
296 for fd in rfds:
297 for sel in selectors: sel.postselect_read(fd)
298 for fd in wfds:
299 for sel in selectors: sel.postselect_write(fd)
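
## A minimal sketch of wiring selectors together (illustrative only; the
## pipe and the messages are made up):
##
##     rfd, wfd = OS.pipe()
##     wr = WriteLinesSelector(wfd, iter(["hello", "world"]).next)
##     rd = ReadLinesSelector(rfd, lambda line: OS.write(1, line + "\n"))
##     select_loop([wr, rd])
##
## The writer sends both lines and closes `wfd' when the iterator is
## exhausted; the reader then sees end-of-file and the loop terminates.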
300
301###--------------------------------------------------------------------------
302### Running subprocesses.
303
304def wait_outcome(st):
305 """
306 Given a ST from `waitpid' (or similar), return a human-readable outcome.
307 """
308 if OS.WIFSIGNALED(st): return "killed by signal %d" % OS.WTERMSIG(st)
309 elif OS.WIFEXITED(st):
310 rc = OS.WEXITSTATUS(st)
311 if rc: return "failed: rc = %d" % rc
312 else: return "completed successfully"
313 else: return "died with incomprehensible status 0x%04x" % st
314
315class SubprocessFailure (Exception):
316 """An exception indicating that a subprocess failed."""
317 def __init__(me, what, st):
318 me.st = st
319 me.what = what
320 if OS.WIFEXITED(st): me.rc, me.sig = OS.WEXITSTATUS(st), None
321 elif OS.WIFSIGNALED(st): me.rc, me.sig = None, OS.WTERMSIG(st)
322 else: me.rc, me.sig = None, None
323 def __str__(me):
324 return "subprocess `%s' %s" % (me.what, wait_outcome(me.st))
325
326INHERIT = Tag('INHERIT')
327PIPE = Tag('PIPE')
328DISCARD = Tag('DISCARD')
329@CTX.contextmanager
330def subprocess(command,
331 stdin = INHERIT, stdout = INHERIT, stderr = INHERIT,
332 cwd = INHERIT, jobserver = DISCARD):
333 """
334 Hairy context manager for running subprocesses.
335
336 The COMMAND is a list of arguments; COMMAND[0] names the program to be
337 invoked. (There's currently no way to run a program with an unusual
338 `argv[0]'.)
339
340 The keyword arguments `stdin', `stdout', and `stderr' explain what to do
341 with the standard file descriptors.
342
343 * `INHERIT' means that they should be left alone: the child will use a
344 copy of the parent's descriptor. This is the default.
345
346 * `DISCARD' means that the descriptor should be re-opened onto
347 `/dev/null' (for reading or writing as appropriate).
348
349 * `PIPE' means that the descriptor should be re-opened as (the read or
350 write end, as appropriate, of) a pipe, and the other end returned to
351 the context body.
352
353 Similarly, the JOBSERVER may be `INHERIT' to pass the jobserver descriptors
354 and environment variable down to the child, or `DISCARD' to close it. The
355 default is `DISCARD'.
356
357 The CWD may be `INHERIT' to run the child with the same working directory
358 as the parent, or a pathname to change to an explicitly given working
359 directory.
360
361 Three values are returned to the context body: file descriptors for the
362 other ends of the stdin, stdout, and stderr pipes respectively, or -1 if
363 there is no pipe.
364
365 The context owns the pipe descriptors, and is expected to close them
366 itself. (Timing of closure is significant, particularly for `stdin'.)
367 """
368
369 ## Set up.
370 r_in, w_in = -1, -1
371 r_out, w_out = -1, -1
372 r_err, w_err = -1, -1
373 spew("running subprocess `%s'" % " ".join(command))
374
375 ## Clean up as necessary...
376 try:
377
378 ## Set up stdin.
379 if stdin is PIPE: r_in, w_in = OS.pipe()
380 elif stdin is DISCARD: r_in = OS.open("/dev/null", OS.O_RDONLY)
381 elif stdin is not INHERIT:
382 raise ValueError("bad `stdin' value `%r'" % stdin)
383
384 ## Set up stdout.
385 if stdout is PIPE: r_out, w_out = OS.pipe()
386 elif stdout is DISCARD: w_out = OS.open("/dev/null", OS.O_WRONLY)
387 elif stdout is not INHERIT:
388 raise ValueError("bad `stdout' value `%r'" % stdout)
389
390 ## Set up stderr.
391 if stderr is PIPE: r_err, w_err = OS.pipe()
392 elif stderr is DISCARD: w_err = OS.open("/dev/null", OS.O_WRONLY)
393 elif stderr is not INHERIT:
394 raise ValueError("bad `stderr' value `%r'" % stderr)
395
396 ## Start up the child.
397 kid = OS.fork()
398
399 if kid == 0:
400 ## Child process.
401
402 ## Fix up stdin.
403 if r_in != -1: OS.dup2(r_in, 0); OS.close(r_in)
404 if w_in != -1: OS.close(w_in)
405
406 ## Fix up stdout.
407 if w_out != -1: OS.dup2(w_out, 1); OS.close(w_out)
408 if r_out != -1: OS.close(r_out)
409
410 ## Fix up stderr.
411 if w_err != -1: OS.dup2(w_err, 2); OS.close(w_err)
412 if r_err != -1: OS.close(r_err)
413
414 ## Change directory.
415 if cwd is not INHERIT: OS.chdir(cwd)
416
417 ## Fix up the jobserver.
418 if jobserver is DISCARD: SCHED.close_jobserver()
419
420 ## Run the program.
421 try: OS.execvp(command[0], command)
422 except OSError, err:
423 moan("failed to run `%s': %s" % (command[0], err.strerror))
424 OS._exit(127)
425
426 ## Close the other ends of the pipes.
427 if r_in != -1: OS.close(r_in); r_in = -1
428 if w_out != -1: OS.close(w_out); w_out = -1
429 if w_err != -1: OS.close(w_err); w_err = -1
430
431 ## Return control to the context body. Remember not to close its pipes.
432 yield w_in, r_out, r_err
433 w_in = r_out = r_err = -1
434
435 ## Collect the child process's exit status.
436 _, st = OS.waitpid(kid, 0)
437 spew("subprocess `%s' %s" % (" ".join(command), wait_outcome(st)))
438 if st: raise SubprocessFailure(" ".join(command), st)
439
440 ## Tidy up.
441 finally:
442
443 ## Close any left-over file descriptors.
444 for fd in [r_in, w_in, r_out, w_out, r_err, w_err]:
445 if fd != -1: OS.close(fd)
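
## A minimal sketch of using `subprocess' directly (illustrative only).
## The body owns the pipe descriptors and must close them itself:
##
##     with subprocess(["cat"], stdin = PIPE, stdout = PIPE) \
##             as (fd_in, fd_out, _):
##       OS.write(fd_in, "hello\n"); OS.close(fd_in)
##       buf = OS.read(fd_out, 4096); OS.close(fd_out)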
446
447def set_nonblocking(fd):
448 """Mark the descriptor FD as non-blocking."""
449 FC.fcntl(fd, FC.F_SETFL, FC.fcntl(fd, FC.F_GETFL) | OS.O_NONBLOCK)
450
451class DribbleOut (BaseSelector):
452 """A simple selector to feed a string to a descriptor, in pieces."""
453 def __init__(me, fd, string, *args, **kw):
454 super(DribbleOut, me).__init__(*args, **kw)
455 me._fd = fd
456 me._string = string
457 me._i = 0
458 set_nonblocking(me._fd)
459 me.result = None
460 def preselect(me, rfds, wfds):
461 if me._fd != -1: wfds.append(me._fd)
462 def postselect_write(me, fd):
463 if fd != me._fd: return
464 try: n = OS.write(me._fd, me._string)
465 except OSError, err:
466 if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: return
467 elif err.errno == E.EPIPE: OS.close(me._fd); me._fd = -1; return
468 else: raise
469 if n == len(me._string): OS.close(me._fd); me._fd = -1
470 else: me._string = me._string[n:]
471
472class DribbleIn (BaseSelector):
473 """A simple selector to collect all the input as a big string."""
474 def __init__(me, fd, *args, **kw):
475 super(DribbleIn, me).__init__(*args, **kw)
476 me._fd = fd
477 me._buf = StringIO()
478 set_nonblocking(me._fd)
479 def preselect(me, rfds, wfds):
480 if me._fd != -1: rfds.append(me._fd)
481 def postselect_read(me, fd):
482 if fd != me._fd: return
483 while True:
484 try: buf = OS.read(me._fd, 4096)
485 except OSError, err:
486 if err.errno == E.EAGAIN or err.errno == E.EWOULDBLOCK: break
487 else: raise
488 if buf == "": OS.close(me._fd); me._fd = -1; break
489 else: me._buf.write(buf)
490 @property
491 def result(me): return me._buf.getvalue()
492
493RETURN = Tag('RETURN')
494def run_program(command,
495 stdin = INHERIT, stdout = INHERIT, stderr = INHERIT,
496 *args, **kwargs):
497 """
498 A simplifying wrapper around `subprocess'.
499
500 The COMMAND is a list of arguments; COMMAND[0] names the program to be
501 invoked, as for `subprocess'.
502
503 The keyword arguments `stdin', `stdout', and `stderr' explain what to do
504 with the standard file descriptors.
505
506 * `INHERIT' means that they should be left alone: the child will use a
507 copy of the parent's descriptor.
508
509 * `DISCARD' means that the descriptor should be re-opened onto
510 `/dev/null' (for reading or writing as appropriate).
511
512 * `RETURN', for an output descriptor, means that all of the output
513 produced on that descriptor should be collected and returned as a
514 string.
515
516 * A string, for stdin, means that the string should be provided on the
517 child's standard input.
518
519 (The value `PIPE' is not permitted here.)
520
521 Other arguments are passed on to `subprocess'.
522
523 If no descriptors are marked `RETURN', then the function returns `None'; if
524 exactly one descriptor is so marked, then the function returns that
525 descriptor's output as a string; otherwise, it returns a tuple of strings
526 for each such descriptor, in the usual order.
527 """
528 kw = dict(); kw.update(kwargs)
529 selfn = []
530
531 if isinstance(stdin, basestring):
532 kw['stdin'] = PIPE; selfn.append(lambda fds: DribbleOut(fds[0], stdin))
533 elif stdin is INHERIT or stdin is DISCARD:
534 kw['stdin'] = stdin
535 else:
536 raise ValueError("bad `stdin' value `%r'" % stdin)
537
538 if stdout is RETURN:
539 kw['stdout'] = PIPE; selfn.append(lambda fds: DribbleIn(fds[1]))
540 elif stdout is INHERIT or stdout is DISCARD:
541 kw['stdout'] = stdout
542 else:
543 raise ValueError("bad `stdout' value `%r'" % stdout)
544
545 if stderr is RETURN:
546 kw['stderr'] = PIPE; selfn.append(lambda fds: DribbleIn(fds[2]))
547 elif stderr is INHERIT or stderr is DISCARD:
548 kw['stderr'] = stderr
549 else:
550 raise ValueError("bad `stderr' value `%r'" % stderr)
551
552 with subprocess(command, *args, **kw) as fds:
553 sel = [fn(fds) for fn in selfn]
554 select_loop(sel)
555 rr = []
556 for s in sel:
557 r = s.result
558 if r is not None: rr.append(r)
559 if len(rr) == 0: return None
560 if len(rr) == 1: return rr[0]
561 else: return tuple(rr)
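
## Typical `run_program' calls (illustrative only; note that returned
## output keeps its trailing newline):
##
##     today = run_program(["date", "+%Y-%m-%d"], stdout = RETURN)
##     run_program(["sh", "-c", "cat >/dev/null"], stdin = "some text\n")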
562
563###--------------------------------------------------------------------------
564### Other system-ish utilities.
565
566@CTX.contextmanager
567def safewrite(path):
568 """
569 Context manager for writing to a file.
570
571 A new file, named `PATH.new', is opened for writing, and the file object
572 provided to the context body. If the body completes normally, the file is
573 closed and renamed to PATH. If the body raises an exception, the file is
574 still closed, but not renamed into place.
575 """
576 new = path + ".new"
577 with open(new, "w") as f: yield f
578 OS.rename(new, path)
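
## For example (the path here is made up):
##
##     with safewrite("/tmp/example.conf") as f:
##       f.write("key = value\n")
##
## writes `/tmp/example.conf.new' and renames it into place on success.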
579
580@CTX.contextmanager
581def safewrite_root(path, mode = None, uid = None, gid = None):
582 """
583 Context manager for writing to a file with root privileges.
584
585 This is as for `safewrite', but the file is opened and written as root.
586 """
587 new = path + ".new"
588 with subprocess(C.ROOTLY + ["tee", new],
589 stdin = PIPE, stdout = DISCARD) as (fd_in, _, _):
590 pipe = OS.fdopen(fd_in, 'w')
591 try: yield pipe
592 finally: pipe.close()
593 if mode is not None: run_program(C.ROOTLY + ["chmod", mode, new])
594 if uid is not None:
595 run_program(C.ROOTLY + ["chown",
596 uid + (gid is not None and ":" + gid or ""),
597 new])
598 elif gid is not None:
599 run_program(C.ROOTLY + ["chgrp", gid, new])
600 run_program(C.ROOTLY + ["mv", new, path])
601
602def mountpoint_p(dir):
603 """Return true if DIR is a mountpoint."""
604
605 ## A mountpoint can be distinguished because it is a directory whose device
606 ## number differs from its parent.
607 try: st1 = OS.stat(dir)
608 except OSError, err:
609 if err.errno == E.ENOENT: return False
610 else: raise
611 if not ST.S_ISDIR(st1.st_mode): return False
612 st0 = OS.stat(OS.path.join(dir, ".."))
613 return st0.st_dev != st1.st_dev
614
615def mkdir_p(dir, mode = 0777):
616 """
617 Make a directory DIR, and any parents, as necessary.
618
619 Unlike `OS.makedirs', this doesn't fail if DIR already exists.
620 """
621 if dir.startswith("/"): d = "/"; dir = dir[1:]
622 else: d = ""
623 for p in dir.split("/"):
624 d = OS.path.join(d, p)
625 if d == "": continue
626 try: OS.mkdir(d, mode)
627 except OSError, err:
628 if err.errno == E.EEXIST: pass
629 else: raise
630
631def umount(fs):
632 """
633 Unmount the filesystem FS.
634
635 The FS may be the block device holding the filesystem, or (more usually)
636 the mount point.
637 """
638
639 ## Sometimes random things can prevent unmounting. Be persistent.
640 for i in xrange(5):
641 try: run_program(C.ROOTLY + ["umount", fs], stderr = DISCARD)
642 except SubprocessFailure, err:
643 if err.rc == 32: pass
644 else: raise
645 else: return
646 T.sleep(0.2)
647 run_program(C.ROOTLY + ["umount", fs], stderr = DISCARD)
648
649@CTX.contextmanager
650def lockfile(lock, exclp = True, waitp = True):
651 """
652 Acquire an exclusive lock on a named file LOCK while executing the body.
653
654 If WAITP is true, wait until the lock is available; if false, then fail
655 immediately if the lock can't be acquired.
656 """
657 fd = -1
658 flag = 0
659 if exclp: flag |= FC.LOCK_EX
660 else: flag |= FC.LOCK_SH
661 if not waitp: flag |= FC.LOCK_NB
662 spew("acquiring %s lock on `%s'" %
663 (exclp and "exclusive" or "shared", lock))
664 try:
665 while True:
666
667 ## Open the file and take note of which file it is.
668 fd = OS.open(lock, OS.O_RDWR | OS.O_CREAT, 0666)
669 st0 = OS.fstat(fd)
670
671 ## Acquire the lock, waiting if necessary.
672 FC.lockf(fd, flag)
673
674 ## Check that the lock file is still the same one. It's permissible
675 ## for the lock holder to release the lock by unlinking or renaming the
676 ## lock file, in which case there might be a different lockfile there
677 ## now which we need to acquire instead.
678 ##
679 ## It's tempting to `optimize' this code by opening a new file
680 ## descriptor here so as to elide the additional call to fstat(2)
681 ## above. But this doesn't work: if we successfully acquire the lock,
682 ## we then have two file descriptors open on the lock file, so we have
683 ## to close one -- but, under the daft fcntl(2) rules, even closing
684 ## `nfd' will release the lock immediately.
685 try:
686 st1 = OS.stat(lock)
687 except OSError, err:
688 if err.errno == E.ENOENT: st1 = None
689 else: raise
690 if st1 is not None and st0.st_dev == st1.st_dev and st0.st_ino == st1.st_ino: break
691 OS.close(fd)
692
693 ## We have the lock, so away we go.
694 spew("lock `%s' acquired" % lock)
695 yield None
696 spew("lock `%s' released" % lock)
697
698 finally:
699 if fd != -1: OS.close(fd)
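
## A minimal sketch (the lock name is made up; `lockfile_path' is defined
## later in this file):
##
##     with lockfile(lockfile_path("example"), exclp = True, waitp = True):
##       ...critical section...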
700
701def block_device_p(dev):
702 """Return true if DEV names a block device."""
703 try: st = OS.stat(dev)
704 except OSError, err:
705 if err.errno == E.ENOENT: return False
706 else: raise
707 else: return ST.S_ISBLK(st.st_mode)
708
709###--------------------------------------------------------------------------
710### Running parallel jobs.
711
712## Return codes from `check'
713SLEEP = Tag('SLEEP')
714READY = Tag('READY')
715FAILED = Tag('FAILED')
716DONE = Tag('DONE')
717
718class BaseJob (object):
719 """
720 Base class for jobs.
721
722 Subclasses must implement `run' and `_mkname', and probably ought to extend
723 `prepare' and `check'.
724 """
725
726 ## A magic token to prevent sneaky uninterned jobs.
727 _MAGIC = Tag('MAGIC')
728
729 ## A map from job names to objects.
730 _MAP = {}
731
732 ## Number of tail lines of the log to print on failure.
733 LOGLINES = 20
734
735 def __init__(me, _token, *args, **kw):
736 """
737 Initialize a job.
738
739 Jobs are interned! Don't construct instances (of subclasses) directly:
740 use the `ensure' class method.
741 """
742 assert _token is me._MAGIC
743 super(BaseJob, me).__init__(*args, **kw)
744
745 ## Dependencies on other jobs.
746 me._deps = None
747 me._waiting = set()
748
749 ## Attributes maintained by the JobServer.
750 me.done = False
751 me.started = False
752 me.win = None
753 me._token = None
754 me._known = False
755 me._st = None
756 me._logkid = -1
757 me._logfile = None
758
759 def prepare(me):
760 """
761 Establish any prerequisite jobs.
762
763 Delaying this allows command-line settings to override those chosen by
764 dependent jobs.
765 """
766 pass
767
768 @classmethod
769 def ensure(cls, *args, **kw):
770 """
771 Return the unique job with the given parameters.
772
773 If a matching job already exists, then return it. Otherwise, create the
774 new job, register it in the table, and notify the scheduler about it.
775 """
776 me = cls(_token = cls._MAGIC, *args, **kw)
777 try:
778 job = cls._MAP[me.name]
779 except KeyError:
780 cls._MAP[me.name] = me
781 SCHED.add(me)
782 return me
783 else:
784 return job
785
786 ## Naming.
787 @property
788 def name(me):
789 """Return the job's name, as calculated by `_mkname'."""
790 try: name = me._name
791 except AttributeError: name = me._name = me._mkname()
792 return name
793
794 ## Subclass responsibilities.
795 def _mkname(me):
796 """
797 Return the job's name.
798
799 By default, this is an unhelpful string which is distinct for every job.
800 Subclasses should normally override this method to return a name as an
801 injective function of the job parameters.
802 """
803 return "%s.%x" % (me.__class__.__name__, id(me))
804
805 def check(me):
806 """
807 Return whether the job is ready to run.
808
809 Returns a pair STATE, REASON. The REASON is a human-readable string
810 explaining what's going on, or `None' if it's not worth explaining. The
811 STATE is one of the following.
812
813 * `READY' -- the job can be run at any time.
814
815 * `FAILED' -- the job can't be started. Usually, this means that some
816 prerequisite job failed, there was some error in the job's
817 parameters, or the environment is unsuitable for the job to run.
818
819 * `DONE' -- the job has nothing to do. Usually, this means that the
820 thing the job acts on is already up-to-date. It's bad form to do
821 even minor work in `check'.
822
823 * `SLEEP' -- the job can't be run right now. It has arranged to be
824 retried if conditions change. (Spurious wakeups are permitted and
825 must be handled correctly.)
826
827 The default behaviour checks the set of dependencies, as built by the
828 `await' method, and returns `SLEEP' or `FAILED' as appropriate, or
829 `READY' if all the prerequisite jobs have completed successfully.
830 """
831 for job in me._deps:
832 if not job.done:
833 job._waiting.add(me)
834 return SLEEP, "waiting for job `%s'" % job.name
835 elif not job.win and not OPT.ignerr:
836 return FAILED, "dependent on failed job `%s'" % job.name
837 return READY, None
838
839 ## Subclass utilities.
840 def await(me, job):
841 """Make sure that JOB completes before allowing this job to start."""
842 me._deps.add(job)
843
844 def _logtail(me):
845 """
846 Dump the last `LOGLINES' lines of the logfile.
847
848 This is called if the job fails and was being run quietly, to provide the
849 user with some context for the failure.
850 """
851
852 ## Gather blocks from the end of the log until we have enough lines.
853 with open(me._logfile, 'r') as f:
854 nlines = 0
855 bufs = []
856 bufsz = 4096
857 f.seek(0, 2); off = f.tell()
858 spew("start: off = %d" % off)
859 while nlines <= me.LOGLINES and off > 0:
860 off = max(0, off - bufsz)
861 f.seek(off, 0)
862 spew("try at off = %d" % off)
863 buf = f.read(bufsz)
864 nlines += buf.count("\n")
865 spew("now lines = %d" % nlines)
866 bufs.append(buf)
867 buf = ''.join(reversed(bufs))
868
869 ## We probably overshot. Skip the extra lines from the start.
870 i = 0
871 while nlines > me.LOGLINES: i = buf.index("\n", i) + 1; nlines -= 1
872
873 ## If we ended up trimming the log, print an ellipsis.
874 if off > 0 or i > 0: print "%-*s * [...]" % (TAGWD, me.name)
875
876 ## Print the log tail.
877 lines = buf[i:].split("\n")
878 if lines and lines[-1] == '': lines.pop()
879 for line in lines: print "%-*s %s" % (TAGWD, me.name, line)
880
881class BaseJobToken (object):
882 """
883 A job token is the authorization for a job to be run.
884
885 Subclasses must implement `recycle' to allow some other job to use the
886 token.
887 """
888 pass
889
890class TrivialJobToken (BaseJobToken):
891 """
892 A trivial reusable token, for when issuing jobs in parallel without limit.
893
894 There only needs to be one of these.
895 """
896 def recycle(me):
897 spew("no token needed; nothing to recycle")
898TRIVIAL_TOKEN = TrivialJobToken()
899
900class JobServerToken (BaseJobToken):
901 """A job token storing a byte from the jobserver pipe."""
902 def __init__(me, char, pipefd, *args, **kw):
903 super(JobServerToken, me).__init__(*args, **kw)
904 me._char = char
905 me._fd = pipefd
906 def recycle(me):
907 spew("returning token to jobserver pipe")
908 OS.write(me._fd, me._char)
909
910class PrivateJobToken (BaseJobToken):
911 """
912 The private job token belonging to a scheduler.
913
914 When running under a GNU Make jobserver, there is a token for each byte in
915 the pipe, and an additional one which represents the slot we're actually
916 running in. This class represents that additional token.
917 """
918 def __init__(me, sched, *args, **kw):
919 super(PrivateJobToken, me).__init__(*args, **kw)
920 me._sched = sched
921 def recycle(me):
922 assert me._sched._privtoken is None
923 spew("recycling private token")
924 me._sched._privtoken = me
925
926TAGWD = 29
927LOGKEEP = 20
928
929class JobScheduler (object):
930 """
931 The main machinery for running and ordering jobs.
932
933 This handles all of the details of job scheduling.
934 """
935
936 def __init__(me, rfd = -1, wfd = -1, npar = 1):
937 """
938 Initialize a scheduler.
939
940 * RFD and WFD are the read and write ends of the jobserver pipe, as
941 determined from the `MAKEFLAGS' environment variable, or -1.
942
943 * NPAR is the maximum number of jobs to run in parallel, or `True' if
944 there is no maximum (i.e., we're in `forkbomb' mode).
945 """
946
947 ## Set the parallelism state. The `_rfd' and `_wfd' are the read and
948 ## write ends of the jobserver pipe, or -1 if there is no jobserver.
949 ## `_par' is true if we're meant to run jobs in parallel. The case where
950 ## `_par' is set but `_rfd' is -1 means unconstrained parallelism.
951 ##
952 ## The jobserver pipe contains a byte for each shared job slot. A
953 ## scheduler reads a byte from the pipe for each job it wants to run
954 ## (nearly -- see `_privtoken' below), and puts the byte back when the
955 ## job finishes. The GNU Make jobserver protocol specification insists
956 ## that we preserve the value of the byte in the pipe (though doesn't
957 ## currently make any use of this flexibility), so we record it in a
958 ## `JobToken' object's `_char' attribute.
959 me._par = rfd != -1 or npar is True or npar != 1
960 spew("par is %r" % me._par)
961 if rfd == -1 and npar > 1:
962 rfd, wfd = OS.pipe()
963 OS.write(wfd, (npar - 1)*'+')
964 OS.environ["MAKEFLAGS"] = \
965 (" -j --jobserver-auth=%(rfd)d,%(wfd)d " +
966 "--jobserver-fds=%(rfd)d,%(wfd)d") % dict(rfd = rfd, wfd = wfd)
967 me._rfd = rfd; me._wfd = wfd
968
969 ## The scheduler state. A job starts in the `_check' list. Each
970 ## iteration of the scheduler loop will inspect the jobs here and see
971 ## whether it's ready to run: if not, it gets put in the `_sleep' list,
972 ## where it will languish until something moves it back; if it is ready,
973 ## it gets moved to the `_ready' list to wait for a token from the
974 ## jobserver. At that point the job can be started, and it moves to the
975 ## `_kidmap', which associates a process-id with each running job.
976 ## Finally, jobs which have completed are simply forgotten. The `_njobs'
977 ## counter keeps track of how many jobs are outstanding, so that we can
978 ## stop when there are none left.
979 me._check = set()
980 me._sleep = set()
981 me._ready = set()
982 me._kidmap = {}
983 me._logkidmap = {}
984 me._njobs = 0
985
986 ## As well as the jobserver pipe, we implicitly have one extra job slot,
987 ## which is the one we took when we were started by our parent. The
988 ## right to do processing in this slot is represented by the `private
989 ## token' here, distinguished from tokens from the jobserver pipe by
990 ## having `None' as its `_char' value.
991 me._privtoken = PrivateJobToken(me)
992
993 def add(me, job):
994 """Notice a new job and arrange for it to (try to) run."""
995 if job._known: return
996 spew("adding new job `%s'" % job.name)
997 job._known = True
998 me._check.add(job)
999 me._njobs += 1
1000
1001 def close_jobserver(me):
1002 """
1003 Close the jobserver file descriptors.
1004
1005 This should be called within child processes to prevent them from messing
1006 with the jobserver.
1007 """
1008 if me._rfd != -1: OS.close(me._rfd); me._rfd = -1
1009 if me._wfd != -1: OS.close(me._wfd); me._wfd = -1
1010 try: del OS.environ["MAKEFLAGS"]
1011 except KeyError: pass
1012
1013 def _killall(me):
1014 """Zap all jobs which aren't yet running."""
1015 for jobset in [me._sleep, me._check, me._ready]:
1016 while jobset:
1017 job = jobset.pop()
1018 job.done = True
1019 job.win = False
1020 me._njobs -= 1
1021
1022 def _retire(me, job, win, outcome):
1023 """
1024 Declare that a job has stopped, and deal with the consequences.
1025
1026 JOB is the completed job, which should not be on any of the job queues.
1027 WIN is true if the job succeeded, and false otherwise. OUTCOME is a
1028 human-readable string explaining how the job came to its end, or `None'
1029 if no message should be reported.
1030 """
1031
1032 global RC
1033
1034 ## Return the job's token to the pool.
1035 if job._token is not None: job._token.recycle()
1036 job._token = None
1037 me._njobs -= 1
1038
1039 ## Update and maybe report the job's status.
1040 job.done = True
1041 job.win = win
1042 if outcome is not None and not OPT.silent:
1043 if OPT.quiet and not job.win and job._logfile: job._logtail()
1044 if not job.win or not OPT.quiet:
1045 print "%-*s %c (%s)" % \
1046 (TAGWD, job.name, job.win and '|' or '*', outcome)
1047
1048 ## If the job failed, and we care, arrange to exit nonzero.
1049 if not win and not OPT.ignerr: RC = 2
1050
1051 ## If the job failed, and we're supposed to give up after the first
1052 ## error, then zap all of the waiting jobs.
1053 if not job.win and not OPT.keepon and not OPT.ignerr: me._killall()
1054
1055 ## If this job has dependents then wake them up and see whether they're
1056 ## ready to run.
1057 for j in job._waiting:
1058 try: me._sleep.remove(j)
1059 except KeyError: pass
1060 else:
1061 spew("waking dependent job `%s'" % j.name)
1062 me._check.add(j)
1063
1064 def _reap(me, kid, st):
1065 """
1066 Deal with the child with process-id KID having exited with status ST.
1067 """
1068
1069 ## Figure out what kind of child this is. Note that it has finished.
1070 try: job = me._kidmap[kid]
1071 except KeyError:
1072 try: job = me._logkidmap[kid]
1073 except KeyError:
1074 spew("unknown child %d exits with status 0x%04x" % (kid, st))
1075 return
1076 else:
1077 ## It's a logging child.
1078 del me._logkidmap[kid]
1079 job._logkid = DONE
1080 spew("logging process for job `%s' exits with status 0x%04x" %
1081 (job.name, st))
1082 else:
1083 job._st = st
1084 del me._kidmap[kid]
1085 spew("main process for job `%s' exits with status 0x%04x" %
1086 (job.name, st))
1087
1088 ## If either of the job's associated processes is still running then we
1089 ## should stop now and give the other one a chance.
1090 if job._st is None or job._logkid is not DONE:
1091 spew("deferring retirement for job `%s'" % job.name)
1092 return
1093 spew("completing deferred retirement for job `%s'" % job.name)
1094
1095 ## Update and (maybe) report the job status.
1096 if job._st == 0: win = True; outcome = None
1097 else: win = False; outcome = wait_outcome(job._st)
1098
1099 ## Retire the job.
1100 me._retire(job, win, outcome)
1101
1102 def _reapkids(me):
1103 """Reap all finished child processes."""
1104 while True:
1105 try: kid, st = OS.waitpid(-1, OS.WNOHANG)
1106 except OSError, err:
1107 if err.errno == E.ECHILD: break
1108 else: raise
1109 if kid == 0: break
1110 me._reap(kid, st)
1111
1112 def run_job(me, job):
1113 """Start running the JOB."""
1114
1115 job.started = True
1116 if OPT.dryrun: return None, None
1117
1118 ## Make pipes to collect the job's output and error reports.
1119 r_out, w_out = OS.pipe()
1120 r_err, w_err = OS.pipe()
1121
1122 ## Find a log file to write. Avoid races over the log names; but this
1123 ## means that the log descriptor needs to be handled somewhat carefully.
1124 logdir = OS.path.join(C.STATE, "log"); mkdir_p(logdir)
1125 logseq = 1
1126 while True:
1127 logfile = OS.path.join(logdir, "%s-%s#%d" % (job.name, TODAY, logseq))
1128 try:
1129 logfd = OS.open(logfile, OS.O_WRONLY | OS.O_CREAT | OS.O_EXCL, 0666)
1130 except OSError, err:
1131 if err.errno == E.EEXIST: logseq += 1; continue
1132 else: raise
1133 else:
1134 break
1135 job._logfile = logfile
1136
1137 ## Make sure there's no pending output, or we might get two copies. (I
1138 ## don't know how to flush all output streams in Python, but this is good
1139 ## enough for our purposes.)
1140 SYS.stdout.flush()
1141
1142 ## Set up the logging child first. If we can't, take down the whole job.
1143 try: job._logkid = OS.fork()
1144 except OSError, err: OS.close(logfd); return None, err
1145 if not job._logkid:
1146 ## The main logging loop.
1147
1148 ## Close the jobserver descriptors, and the write ends of the pipes.
1149 me.close_jobserver()
1150 OS.close(w_out); OS.close(w_err)
1151
1152 ## Capture the job's stdout and stderr and wait for everything to
1153 ## happen.
1154 def log_lines(fd, marker):
1155 def fn(line):
1156 if not OPT.quiet:
1157 OS.write(1, "%-*s %s %s\n" % (TAGWD, job.name, marker, line))
1158 OS.write(logfd, "%s %s\n" % (marker, line))
1159 return ReadLinesSelector(fd, fn)
1160 select_loop([log_lines(r_out, "|"), log_lines(r_err, "*")])
1161
1162 ## We're done. (Closing the descriptors here would be like polishing
1163 ## the floors before the building is demolished.)
1164 OS._exit(0)
1165
1166 ## Back in the main process: record the logging child. At this point we
1167 ## no longer need the logfile descriptor.
1168 me._logkidmap[job._logkid] = job
1169 OS.close(logfd)
1170
1171 ## Start the main job process.
1172 try: kid = OS.fork()
1173 except OSError, err: return None, err
1174 if not kid:
1175 ## The main job.
1176
1177 ## Close the read ends of the pipes, and move the write ends to the
1178 ## right places. (This will go wrong if we were started without enough
1179 ## descriptors. Fingers crossed.)
1180 OS.dup2(w_out, 1); OS.dup2(w_err, 2)
1181 OS.close(r_out); OS.close(w_out)
1182 OS.close(r_err); OS.close(w_err)
1183 spew("running job `%s' as pid %d" % (job.name, OS.getpid()))
1184
1185 ## Run the job, catching nonlocal flow.
1186 try:
1187 job.run()
1188 except ExpectedError, err:
1189 moan(str(err))
1190 OS._exit(2)
1191 except Exception, err:
1192 TB.print_exc(file = SYS.stderr)
1193 OS._exit(3)
1194 except BaseException, err:
1195 moan("caught unexpected exception: %r" % err)
1196 OS._exit(112)
1197 else:
1198 spew("job `%s' ran to completion" % job.name)
1199
1200 ## Clean up old logs.
1201 match = []
1202 pat = RX.compile(r"^%s-(\d{4})-(\d{2})-(\d{2})\#(\d+)$" %
1203 RX.escape(job.name))
1204 for f in OS.listdir(logdir):
1205 m = pat.match(f)
1206 if m: match.append((f, int(m.group(1)), int(m.group(2)),
1207 int(m.group(3)), int(m.group(4))))
1208 match.sort(key = lambda (_, y, m, d, q): (y, m, d, q))
1209 if len(match) > LOGKEEP:
1210 for (f, _, _, _, _) in match[:-LOGKEEP]:
1211 try: OS.unlink(OS.path.join(logdir, f))
1212 except OSError, err:
1213 if err.errno == E.ENOENT: pass
1214 else: raise
1215
1216 ## All done.
1217 OS._exit(0)
1218
1219 ## Back in the main process: close both the pipes and return the child
1220 ## process.
1221 OS.close(r_out); OS.close(w_out)
1222 OS.close(r_err); OS.close(w_err)
1223 if OPT.quiet: print "%-*s | (started)" % (TAGWD, job.name)
1224 return kid, None
1225
1226 def run(me):
1227 """Run the scheduler."""
1228
1229 spew("JobScheduler starts")
1230
1231 while True:
1232 ## The main scheduler loop. We go through three main phases:
1233 ##
1234 ## * Inspect the jobs in the `check' list to see whether they can
1235 ## run. After this, the `check' list will be empty.
1236 ##
1237 ## * If there are running jobs, check to see whether any of them have
1238 ## stopped, and deal with the results. Also, if there are jobs
1239 ## ready to start and a job token has become available, then
1240 ## retrieve the token. (Doing these at the same time is the tricky
1241 ## part.)
1242 ##
1243 ## * If there is a job ready to run, and we retrieved a token, then
1244 ## start running the job.
1245
1246 ## Check the pending jobs to see if they can make progress: run each
1247 ## job's `check' method and move it to the appropriate queue. (It's OK
1248 ## if `check' methods add more jobs to the list, as long as things
1249 ## settle down eventually.)
1250 while True:
1251 try: job = me._check.pop()
1252 except KeyError: break
1253 if job._deps is None:
1254 job._deps = set()
1255 job.prepare()
1256 state, reason = job.check()
1257 tail = reason is not None and ": %s" % reason or ""
1258 if state is READY:
1259 spew("job `%s' ready to run%s" % (job.name, tail))
1260 me._ready.add(job)
1261 elif state is FAILED:
1262 spew("job `%s' refused to run%s" % (job.name, tail))
1263 me._retire(job, False, "refused to run%s" % tail)
1264 elif state is DONE:
1265 spew("job `%s' has nothing to do%s" % (job.name, tail))
1266 me._retire(job, True, reason)
1267 elif state is SLEEP:
1268 spew("job `%s' can't run yet%s" % (job.name, tail))
1269 me._sleep.add(job)
1270 else:
1271 raise ValueError("unexpected job check from `%s': %r, %r" %
1272 (job.name, state, reason))
1273
1274 ## If there are no jobs left, then we're done.
1275 if not me._njobs:
1276 spew("all jobs completed")
1277 break
1278
1279 ## Make sure we can make progress. There are no jobs on the check list
1280 ## any more, because we just cleared it. We assume that jobs which are
1281 ## ready to run will eventually receive a token. So we only end up in
1282 ## trouble if there are jobs asleep, but none running or ready to run.
1283 ##spew("#jobs = %d" % me._njobs)
1284 ##spew("sleeping: %s" % ", ".join([j.name for j in me._sleep]))
1285 ##spew("ready: %s" % ", ".join([j.name for j in me._ready]))
1286 ##spew("running: %s" % ", ".join([j.name for j in me._kidmap.itervalues()]))
1287 assert not me._sleep or me._kidmap or me._logkidmap or me._ready
1288
1289 ## Wait for something to happen.
1290 if not me._ready or (not me._par and me._privtoken is None):
1291 ## If we have no jobs ready to run, then we must wait for an existing
1292 ## child to exit. Hopefully, a sleeping job will be able to make
1293 ## progress after this.
1294 ##
1295 ## Alternatively, if we're not supposed to be running jobs in
1296 ## parallel and we don't have the private token, then we have no
1297 ## choice but to wait for the running job to complete.
1298 ##
1299 ## There's no check here for `ECHILD'. We really shouldn't be here
1300 ## if there are no children to wait for. (The check list must be
1301 ## empty because we just drained it. If the ready list is empty,
1302 ## then all of the jobs must be running or sleeping; but the
1303 ## assertion above means that either there are no jobs at all, in
1304 ## which case we should have stopped, or at least one is running, in
1305 ## which case it's safe to wait for it. The other case is that we're
1306 ## running jobs sequentially, and one is currently running, so
1307 ## there's nothing for it but to wait for it -- and hope that it will
1308 ## wake up one of the sleeping jobs. The remaining possibility is
1309 ## that we've miscounted somewhere, which will cause a crash.)
1310 if not me._ready:
1311 spew("no new jobs ready: waiting for outstanding jobs to complete")
1312 else:
1313 spew("job running without parallelism: waiting for it to finish")
1314 kid, st = OS.waitpid(-1, 0)
1315 me._reap(kid, st)
1316 me._reapkids()
1317 continue
1318
1319 ## We have jobs ready to run, so try to acquire a token.
1320 if me._rfd == -1 and me._par:
1321 ## We're running with unlimited parallelism, so we don't need a token
1322 ## to run a job.
1323 spew("running new job without token")
1324 token = TRIVIAL_TOKEN
1325 elif me._privtoken:
1326 ## Our private token is available, so we can use that to start
1327 ## a new job.
1328 spew("private token available: assigning to new job")
1329 token = me._privtoken
1330 me._privtoken = None
1331 else:
1332 ## We have to read from the jobserver pipe. Unfortunately, we're not
1333 ## allowed to set the pipe nonblocking, because make is also using it
1334 ## and will get into a serious mess. And we must deal with `SIGCHLD'
1335 ## arriving at any moment. We use the same approach as GNU Make. We
1336 ## start by making a copy of the jobserver descriptor: it's this
1337 ## descriptor we actually try to read from. We set a signal handler
1338 ## to close this descriptor if a child exits. And we try one last
1339 ## time to reap any children which have exited just before we try
1340 ## reading the jobserver pipe. This way we're covered:
1341 ##
1342 ## * If a child exits during the main loop, before we establish the
1343 ## descriptor copy then we'll notice when we try reaping
1344 ## children.
1345 ##
1346 ## * If a child exits between the last-chance reap and the read,
1347 ## the signal handler will close the descriptor and the `read'
1348 ## call will fail with `EBADF'.
1349 ##
1350 ## * If a child exits while we're inside the `read' system call,
1351 ## then the syscall will fail with `EINTR'.
1352 ##
1353 ## The only problem is that we can't do this from Python, because
1354 ## Python signal handlers are delayed. This is what the `jobclient'
1355 ## module is for.
1356 ##
1357 ## The `jobclient' function is called as
1358 ##
1359 ## jobclient(FD)
1360 ##
1361 ## It returns a tuple of three values: TOKEN, PID, STATUS. If TOKEN
1362 ## is not `None', then reading the pipe succeeded; if TOKEN is empty,
1363 ## then the pipe returned EOF, so we should abort; otherwise, TOKEN
1364 ## is a singleton string holding the token character. If PID is not
1365 ## `None', then PID is the process id of a child which exited, and
1366 ## STATUS is its exit status.
1367 spew("waiting for token from jobserver")
1368 tokch, kid, st = JC.jobclient(me._rfd)
1369
1370 if kid is not None:
1371 me._reap(kid, st)
1372 me._reapkids()
1373 if tokch is None:
1374 spew("no token; trying again")
1375 continue
1376 elif tokch == '':
1377 error("jobserver pipe closed; giving up")
1378 me._killall()
1379 continue
1380 spew("received token from jobserver")
1381 token = JobServerToken(tokch, me._wfd)
1382
1383 ## We have a token, so we should start up the job.
1384 job = me._ready.pop()
1385 job._token = token
1386 spew("start new job `%s'" % job.name)
1387 kid, err = me.run_job(job)
1388 if err is not None:
1389 me._retire(job, False, "failed to fork: %s" % err)
1390 continue
1391 if kid is None: me._retire(job, True, "dry run")
1392 else: me._kidmap[kid] = job
1393
1394 ## We ran out of work to do.
1395 spew("JobScheduler done")
1396
1397###--------------------------------------------------------------------------
1398### Configuration.
1399
1400R_CONFIG = RX.compile(r"^([a-zA-Z0-9_]+)='(.*)'$")
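
## `R_CONFIG' matches the shell-style assignments written out by the
## configuration script, one per line, e.g. (values illustrative):
##
##     DISTS='wheezy jessie'
##     LVPREFIX='chroot.'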
1401
1402class Config (object):
1403
1404 def _conv_str(s): return s
1405 def _conv_list(s): return s.split()
1406 def _conv_set(s): return set(s.split())
1407
1408 _CONVERT = {
1409 "ROOTLY": _conv_list,
1410 "DISTS": _conv_set,
1411 "MYARCH": _conv_set,
1412 "NATIVE_ARCHS": _conv_set,
1413 "FOREIGN_ARCHS": _conv_set,
1414 "FOREIGN_GNUARCHS": _conv_list,
1415 "ALL_ARCHS": _conv_set,
1416 "NATIVE_CHROOTS": _conv_set,
1417 "FOREIGN_CHROOTS": _conv_set,
1418 "ALL_CHROOTS": _conv_set,
1419 "BASE_PACKAGES": _conv_list,
1420 "EXTRA_PACKAGES": _conv_list,
1421 "CROSS_PACKAGES": _conv_list,
1422 "CROSS_PATHS": _conv_list,
1423 "APTCONF": _conv_list,
1424 "LOCALPKGS": _conv_list,
1425 "SCHROOT_COPYFILES": _conv_list,
1426 "SCHROOT_NSSDATABASES": _conv_list
1427 }
1428
1429 _CONV_MAP = {
1430 "*_APTCONFSRC": ("APTCONFSRC", _conv_str),
1431 "*_DEPS": ("PKGDEPS", _conv_list),
1432 "*_QEMUHOST": ("QEMUHOST", _conv_str),
1433 "*_QEMUARCH": ("QEMUARCH", _conv_str),
1434 "*_ALIASES": ("DISTALIAS", _conv_str)
1435 }
1436
1437 _conv_str = staticmethod(_conv_str)
1438 _conv_list = staticmethod(_conv_list)
1439 _conv_set = staticmethod(_conv_set)
1440
1441 def __init__(me):
1442 raw = r"""
1443 """; raw = open('state/config.sh').read(); _ignore = """ @@@config@@@
1444 """
1445 me._conf = {}
1446 for line in raw.split("\n"):
1447 line = line.strip()
1448 if not line or line.startswith('#'): continue
1449 m = R_CONFIG.match(line)
1450 if not m: raise ExpectedError("bad config line `%s'" % line)
1451 k, v = m.group(1), m.group(2).replace("'\\''", "'")
1452 d = me._conf
1453 try: conv = me._CONVERT[k]
1454 except KeyError:
1455 i = 0
1456 while True:
1457 try: i = k.index("_", i + 1)
1458 except ValueError: conv = me._conv_str; break
1459 try: map, conv = me._CONV_MAP["*" + k[i:]]
1460 except KeyError: pass
1461 else:
1462 d = me._conf.setdefault(map, dict())
1463 k = k[:i]
1464 if k.startswith("_"): k = k[1:]
1465 break
1466 d[k] = conv(v)
1467
1468 def __getattr__(me, attr):
1469 try: return me._conf[attr]
1470 except KeyError, err: raise AttributeError(err.args[0])
1471
1472with toplevel_handler(): C = Config()
1473
1474###--------------------------------------------------------------------------
1475### Chroot maintenance utilities.
1476
1477CREATE = Tag("CREATE")
1478FORCE = Tag("FORCE")
1479
1480DEBCONF_TWEAKS = """
1481 DEBIAN_FRONTEND=noninteractive; export DEBIAN_FRONTEND
1482 DEBIAN_PRIORITY=critical; export DEBIAN_PRIORITY
1483 DEBCONF_NONINTERACTIVE_SEEN=true; export DEBCONF_NONINTERACTIVE_SEEN
1484"""
1485
1486def check_fresh(fresh, update):
1487 """
1488 Compare a refresh mode FRESH against an UPDATE time.
1489
1490 Return a (STATUS, REASON) pair, suitable for returning from a job `check'
1491 method.
1492
1493 The FRESH argument may be one of the following:
1494
1495 * `CREATE' is satisfied if the thing exists at all: it returns `READY' if
1496 the thing doesn't yet exist (UPDATE is `None'), or `DONE' otherwise.
1497
1498 * `FORCE' is never satisfied: it always returns `READY'.
1499
1500 * an integer N is satisfied if the UPDATE time is at most N seconds
1501 earlier than the present: it returns `READY' if the UPDATE is too old,
1502 or `DONE' otherwise.
1503 """
1504 if update is None: return READY, "must create"
1505 elif fresh is FORCE: return READY, "update forced"
1506 elif fresh is CREATE: return DONE, "already created"
1507 elif NOW - unzulu(update) > fresh: return READY, "too stale: updating"
1508 else: return DONE, "already sufficiently up-to-date"
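
## For example:
##
##     check_fresh(CREATE, None)    ## => (READY, "must create")
##     check_fresh(CREATE, zulu())  ## => (DONE, "already created")
##     check_fresh(FORCE, zulu())   ## => (READY, "update forced")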
1509
1510def lockfile_path(file):
1511 """
1512 Return the full path for a lockfile named FILE.
1513
1514 Create the lock directory if necessary.
1515 """
1516 lockdir = OS.path.join(C.STATE, "lock"); mkdir_p(lockdir)
1517 return OS.path.join(lockdir, file)
1518
1519def chroot_src_lockfile(dist, arch):
1520 """
1521 Return the lockfile for the source-chroot for DIST on ARCH.
1522
1523 It is not allowed to acquire a source-chroot lock while holding any other
1524 locks.
1525 """
1526 return lockfile_path("source.%s-%s" % (dist, arch))
1527
1528def chroot_src_lv(dist, arch):
1529 """
1530 Return the logical volume name for the source-chroot for DIST on ARCH.
1531 """
1532 return "%s%s-%s" % (C.LVPREFIX, dist, arch)
1533
1534def chroot_src_blkdev(dist, arch):
1535 """
1536 Return the block-device name for the source-chroot for DIST on ARCH.
1537 """
1538 return OS.path.join("/dev", C.VG, chroot_src_lv(dist, arch))
1539
1540def chroot_src_mntpt(dist, arch):
1541 """
1542 Return mountpoint path for setting up the source-chroot for DIST on ARCH.
1543
1544 Note that this is not the mountpoint that schroot(1) uses.
1545 """
1546 mnt = OS.path.join(C.STATE, "mnt", "%s-%s" % (dist, arch))
1547 mkdir_p(mnt)
1548 return mnt
1549
1550def chroot_session_mntpt(session):
1551 """Return the mountpoint for an schroot session."""
1552 return OS.path.join("/schroot", session)
1553
1554def crosstools_lockfile(dist, arch):
1555 """
1556 Return the lockfile for the cross-build tools for DIST, hosted by ARCH.
1557
1558 When locking multiple cross-build tools, you must acquire the locks in
1559 lexicographically ascending order.
1560 """
1561 return lockfile_path("cross-tools.%s-%s" % (dist, arch))
1562
1563def switch_prefix(string, map):
1564 """
1565 Replace the prefix of a STRING, according to the given MAP.
1566
1567 MAP is a sequence of (OLD, NEW) pairs. For each such pair in turn, test
1568 whether STRING starts with OLD: if so, return STRING, but with the prefix
1569 OLD replaced by NEW. If no OLD prefix matches, then raise a `ValueError'.
1570 """
1571 for old, new in map:
1572 if string.startswith(old): return new + string[len(old):]
1573 raise ValueError("expected `%s' to start with one of %s" %
1574 (string, ", ".join(["`%s'" % old for old, new in map])))
1575
1576def host_to_chroot(path):
1577 """
1578 Convert a host path under `C.LOCAL' to the corresponding chroot path under
1579 `/usr/local.schroot'.
1580 """
1581 return switch_prefix(path, [(C.LOCAL + "/", "/usr/local.schroot/")])
1582
1583def chroot_to_host(path):
1584 """
1585 Convert a chroot path under `/usr/local.schroot' to the corresponding
1586 host path under `C.LOCAL'.
1587 """
1588 return switch_prefix(path, [("/usr/local.schroot/", C.LOCAL + "/")])
1589
1590def split_dist_arch(spec):
1591 """Split a SPEC of the form `DIST-ARCH' into the pair (DIST, ARCH)."""
1592 dash = spec.index("-")
1593 return spec[:dash], spec[dash + 1:]
1594
1595def elf_binary_p(arch, path):
1596 """Return whether PATH is an ELF binary for ARCH."""
1597 if not OS.path.isfile(path): return False
1598 with open(path, 'rb') as f: magic = f.read(20)
1599 if magic[0:4] != "\x7fELF": return False
1600 if magic[8:16] != 8*"\0": return False
1601 if arch == "i386":
1602 if magic[4:7] != "\x01\x01\x01": return False
1603 if magic[18:20] != "\x03\x00": return False
1604 elif arch == "amd64":
1605 if magic[4:7] != "\x02\x01\x01": return False
1606 if magic[18:20] != "\x3e\x00": return False
1607 else:
1608 raise ValueError("unsupported donor architecture `%s'" % arch)
1609 return True
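
## For example, on a typical amd64 Debian host (path illustrative):
##
##     elf_binary_p("amd64", "/bin/true")  ## => True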
1610
1611def progress(msg):
1612 """
1613 Print a progress message MSG.
1614
1615 This is intended to be called within a job's `run' method, so it doesn't
1616 check `OPT.quiet' or `OPT.silent'.
1617 """
1618 OS.write(1, ";; %s\n" % msg)
1619
1620class NoSuchChroot (Exception):
1621 """
1622 Exception indicating that a chroot does not exist.
1623
1624 Specifically, it means that it doesn't even have a logical volume.
1625 """
1626 def __init__(me, dist, arch):
1627 me.dist = dist
1628 me.arch = arch
1629 def __str__(me):
1630 return "chroot for `%s' on `%s' not found" % (me.dist, me.arch)
1631
1632@CTX.contextmanager
1633def mount_chroot_src(dist, arch):
1634 """
1635 Context manager for mounting the source-chroot for DIST on ARCH.
1636
1637 The context manager automatically unmounts the filesystem again when the
1638 body exits. You must hold the appropriate source-chroot lock before
1639 calling this routine.
1640 """
1641 dev = chroot_src_blkdev(dist, arch)
1642 if not block_device_p(dev): raise NoSuchChroot(dist, arch)
1643 mnt = chroot_src_mntpt(dist, arch)
1644 try:
1645 run_program(C.ROOTLY + ["mount", dev, mnt])
1646 yield mnt
1647 finally:
1648 umount(mnt)
1649
1650@CTX.contextmanager
1651def chroot_session(dist, arch, sourcep = False):
1652 """
1653 Context manager for running an schroot(1) session.
1654
1655 Returns the (ugly, automatically generated) session name to the context
1656 body. By default, a snapshot session is started: set SOURCEP true to start
1657 a source-chroot session. You must hold the appropriate source-chroot lock
1658 before starting a source-chroot session.
1659
1660 The context manager automatically closes the session again when the body
1661 exits.
1662 """
1663 chroot = chroot_src_lv(dist, arch)
1664 if sourcep: chroot = "source:" + chroot
1665 session = run_program(["schroot", "-uroot", "-b", "-c", chroot],
1666 stdout = RETURN).rstrip("\n")
1667 try:
1668 root = OS.path.join(chroot_session_mntpt(session), "fs")
1669 yield session, root
1670 finally:
1671 run_program(["schroot", "-e", "-c", session])
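## A typical use looks like this (sketch only; the spec is made up):
##
##      with chroot_session("buster", "amd64") as (session, root):
##        run_schroot_session(session, ["dpkg", "--print-architecture"])
##
## where ROOT names the session's filesystem root as seen from outside.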
1672
1673def run_root(command, **kw):
1674 """Run a COMMAND as root. Arguments are as for `run_program'."""
1675 return run_program(C.ROOTLY + command, **kw)
1676
1677def run_schroot_session(session, command, rootp = False, **kw):
1678 """
1679 Run a COMMAND within an schroot(1) session.
1680
1681 Arguments are as for `run_program'.
1682 """
1683 if rootp:
1684 return run_program(["schroot", "-uroot", "-r",
1685 "-c", session, "--"] + command, **kw)
1686 else:
1687 return run_program(["schroot", "-r",
1688 "-c", session, "--"] + command, **kw)
1689
1690def run_schroot_source(dist, arch, command, **kw):
1691 """
1692 Run a COMMAND through schroot(1), in the source-chroot for DIST on ARCH.
1693
1694 Arguments are as for `run_program'. You must hold the appropriate source-
1695 chroot lock before calling this routine.
1696 """
1697 return run_program(["schroot", "-uroot",
1698 "-c", "source:%s" % chroot_src_lv(dist, arch),
1699 "--"] + command, **kw)
1700
1701###--------------------------------------------------------------------------
1702### Metadata files.
1703
1704class MetadataClass (type):
1705 """
1706 Metaclass for metadata classes.
1707
1708 Notice a `VARS' attribute in the class dictionary, and augment it with a
1709 `_VARSET' attribute, constructed as a set containing the same items. (We
1710 need them both: the set gives us fast lookups, while the original sequence
1711 remembers the ordering.)
1712 """
1713 def __new__(me, name, supers, dict):
1714 try: vars = dict['VARS']
1715 except KeyError: pass
1716 else: dict['_VARSET'] = set(vars)
1717 return super(MetadataClass, me).__new__(me, name, supers, dict)
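## So, for example, a subclass defining `VARS = ['dist', 'arch']' is
## automatically given `_VARSET = set(['dist', 'arch'])'.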
1718
1719class BaseMetadata (object):
1720 """
1721 Base class for metadata objects.
1722
1723 Metadata bundles are simple collections of key/value pairs. Keys should
1724 usually be Python identifiers because they're used to name attributes.
1725 Values are strings, but shouldn't have leading or trailing whitespace, and
1726 can't contain newlines.
1727
1728 Metadata bundles are written to files. The format is simple enough: empty
1729 lines and lines starting with `#' are ignored; otherwise, the line must
1730 have the form
1731
1732 KEY = VALUE
1733
1734 where KEY does not contain `='; spaces around the `=' are optional, and
1735 spaces around the KEY and VALUE are stripped. The order of keys is
1736 unimportant; keys are always written in a standard order on output.
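
 A chroot's bundle might therefore look like this (the values shown
 are illustrative only):

 ### -*-conf-*-
 dist = buster
 arch = amd64
 update = 2019-01-01 12:00:00Z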
1737 """
1738 __metaclass__ = MetadataClass
1739
1740 def __init__(me, **kw):
1741 """Initialize a metadata bundle from keyword arguments."""
1742 for k, v in kw.iteritems():
1743 setattr(me, k, v)
1744 for v in me.VARS:
1745 try: getattr(me, v)
1746 except AttributeError: setattr(me, v, None)
1747
1748 def __setattr__(me, attr, value):
1749 """
1750 Try to set an attribute.
1751
1752 Only attribute names listed in the `VARS' class attribute are permitted.
1753 """
1754 if attr not in me._VARSET: raise AttributeError, attr
1755 super(BaseMetadata, me).__setattr__(attr, value)
1756
1757 @classmethod
1758 def read(cls, path):
1759 """Return a new metadata bundle read from a named PATH."""
1760 map = {}
1761 with open(path) as f:
1762 for line in f:
1763 line = line.strip()
1764 if line == "" or line.startswith("#"): continue
1765 k, v = line.split("=", 1)
1766 map[k.strip()] = v.strip()
1767 return cls(**map)
1768
1769 def _write(me, file):
1770 """
1771 Write the metadata bundle to the FILE (a file-like object).
1772
1773 This is intended for use by subclasses which want to override the default
1774 I/O behaviour of the main `write' method.
1775 """
1776 file.write("### -*-conf-*-\n")
1777 for k in me.VARS:
1778 try: v = getattr(me, k)
1779 except AttributeError: pass
1780 else:
1781 if v is not None: file.write("%s = %s\n" % (k, v))
1782
1783 def write(me, path):
1784 """
1785 Write the metadata bundle to a given PATH.
1786
1787 The file is replaced atomically.
1788 """
1789 with safewrite(path) as f: me._write(f)
1790
1791 def __repr__(me):
1792 return "#<%s: %s>" % (me.__class__.__name__,
1793 ", ".join("%s=%r" % (k, getattr(me, k, None))
1794 for k in me.VARS))
1795
1796class ChrootMetadata (BaseMetadata):
1797 VARS = ['dist', 'arch', 'update']
1798
1799 @classmethod
1800 def read(cls, dist, arch):
1801 try:
1802 with lockfile(chroot_src_lockfile(dist, arch), exclp = False):
1803 with mount_chroot_src(dist, arch) as mnt:
1804 return super(ChrootMetadata, cls).read(OS.path.join(mnt, "META"))
1805 except IOError, err:
1806 if err.errno == E.ENOENT: pass
1807 else: raise
1808 except NoSuchChroot: pass
1809 return cls(dist = dist, arch = arch)
1810
1811 def write(me):
1812 with mount_chroot_src(me.dist, me.arch) as mnt:
1813 with safewrite_root(OS.path.join(mnt, "META")) as f:
1814 me._write(f)
1815
1816class CrossToolsMetadata (BaseMetadata):
1817 VARS = ['dist', 'arch', 'update']
1818
1819 @classmethod
1820 def read(cls, dist, arch):
1821 try:
1822 return super(CrossToolsMetadata, cls)\
1823 .read(OS.path.join(C.LOCAL, "cross", "%s-%s" % (dist, arch), "META"))
1824 except IOError, err:
1825 if err.errno == E.ENOENT: pass
1826 else: raise
1827 return cls(dist = dist, arch = arch)
1828
1829 def write(me, dir = None):
1830 if dir is None:
1831 dir = OS.path.join(C.LOCAL, "cross", "%s-%s" % (me.dist, me.arch))
1832 with safewrite_root(OS.path.join(dir, "META")) as f:
1833 me._write(f)
1834
1835###--------------------------------------------------------------------------
1836### Constructing a chroot.
1837
1838R_DIVERT = RX.compile(r"^diversion of (.*) to .* by install-cross-tools$")
1839
1840class ChrootJob (BaseJob):
1841 """
1842 Create or update a chroot.
1843 """
1844
1845 SPECS = C.ALL_CHROOTS
1846
1847 def __init__(me, spec, fresh = CREATE, *args, **kw):
1848 super(ChrootJob, me).__init__(*args, **kw)
1849 me._dist, me._arch = split_dist_arch(spec)
1850 me._fresh = fresh
1851 me._meta = ChrootMetadata.read(me._dist, me._arch)
1852 me._tools_chroot = me._qemu_chroot = None
1853
1854 def _mkname(me): return "chroot.%s-%s" % (me._dist, me._arch)
1855
1856 def prepare(me):
1857 if me._arch in C.FOREIGN_ARCHS:
1858 me._tools_chroot = CrossToolsJob.ensure\
1859 ("%s-%s" % (me._dist, C.TOOLSARCH), FRESH)
1860 me._qemu_chroot = CrossToolsJob.ensure\
1861 ("%s-%s" % (me._dist, C.QEMUHOST[me._arch]), FRESH)
1862 me.await(me._tools_chroot)
1863 me.await(me._qemu_chroot)
1864
1865 def check(me):
1866 status, reason = super(ChrootJob, me).check()
1867 if status is not READY: return status, reason
1868 if (me._tools_chroot is not None and me._tools_chroot.started) or \
1869 (me._qemu_chroot is not None and me._qemu_chroot.started):
1870 return READY, "prerequisites run"
1871 return check_fresh(me._fresh, me._meta.update)
1872
1873 def _install_cross_tools(me):
1874 """
1875 Install or refresh cross-tools in the source-chroot.
1876
1877 This function version assumes that the source-chroot lock is already
1878 held.
1879
1880 Note that there isn't a job class corresponding to this function. It's
1881 done automatically as part of source-chroot setup and update for foreign
1882 architectures.
1883 """
1884 with Cleanup() as clean:
1885
1886 dist, arch = me._dist, me._arch
1887
1888 mymulti = run_program(["dpkg-architecture", "-a", C.TOOLSARCH,
1889 "-qDEB_HOST_MULTIARCH"],
1890 stdout = RETURN).rstrip("\n")
1891 gnuarch = run_program(["dpkg-architecture", "-A", arch,
1892 "-qDEB_TARGET_GNU_TYPE"],
1893 stdout = RETURN).rstrip("\n")
1894
1895 crossdir = OS.path.join(C.LOCAL, "cross",
1896 "%s-%s" % (dist, C.TOOLSARCH))
1897
1898 qarch, qhost = C.QEMUARCH[arch], C.QEMUHOST[arch]
1899 qemudir = OS.path.join(C.LOCAL, "cross",
1900 "%s-%s" % (dist, qhost), "QEMU")
1901
1902 ## Acquire lockfiles in a canonical order to prevent deadlocks.
1903 donors = [C.TOOLSARCH]
1904 if qarch != C.TOOLSARCH: donors.append(qarch)
1905 donors.sort()
1906 for a in donors:
1907 clean.enter(lockfile(crosstools_lockfile(dist, a), exclp = False))
1908
1909 ## Open a session.
1910 session, root = clean.enter(chroot_session(dist, arch, sourcep = True))
1911
1912 ## Search the cross-tools tree for tools, to decide what to do with
1913 ## each file. Make lists:
1914 ##
1915 ## * `want_div' is simply a set of all files in the chroot which need
1916 ## dpkg diversions to prevent foreign versions of the tools from
1917 ## clobbering our native versions.
1918 ##
1919 ## * `want_link' is a dictionary mapping paths which need symbolic
1920 ## links into the cross-tools trees to their link destinations.
1921 progress("scan cross-tools tree")
1922 want_div = set()
1923 want_link = dict()
1924 cross_prefix = crossdir + "/"
1925 qemu_prefix = qemudir + "/"
1926 toolchain_prefix = OS.path.join(crossdir, "TOOLCHAIN", gnuarch) + "/"
1927 def examine(path):
1928 dest = switch_prefix(path, [(qemu_prefix, "/usr/bin/"),
1929 (toolchain_prefix, "/usr/bin/"),
1930 (cross_prefix, "/")])
1931 if OS.path.islink(path): src = OS.readlink(path)
1932 else: src = host_to_chroot(path)
1933 want_link[dest] = src
1934 if not OS.path.isdir(path): want_div.add(dest)
1935 examine(OS.path.join(qemudir, "qemu-%s-static" % qarch))
1936 examine(OS.path.join(crossdir, "lib", mymulti))
1937 examine(OS.path.join(crossdir, "usr/lib", mymulti))
1938 examine(OS.path.join(crossdir, "usr/lib/gcc-cross"))
1939 def visit(_, dir, files):
1940 ff = []
1941 for f in files:
1942 if f == "META" or f == "QEMU" or f == "TOOLCHAIN" or \
1943 (dir.endswith("/lib") and (f == mymulti or f == "gcc-cross")):
1944 continue
1945 ff.append(f)
1946 path = OS.path.join(dir, f)
1947 if OS.path.islink(path) or not OS.path.isdir(path): examine(path)
1948 files[:] = ff
1949 OS.path.walk(crossdir, visit, None)
1950 OS.path.walk(OS.path.join(crossdir, "TOOLCHAIN", gnuarch),
1951 visit, None)
1952
1953 ## Build the set `have_div' of paths which already have diversions.
1954 progress("scan chroot")
1955 have_div = set()
1956 with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
1957 "dpkg-divert", "--list"],
1958 stdout = PIPE) as (_, fd_out, _):
1959 try:
1960 f = OS.fdopen(fd_out)
1961 for line in f:
1962 m = R_DIVERT.match(line.rstrip("\n"))
1963 if m: have_div.add(m.group(1))
1964 finally:
1965 f.close()
1966
1967 ## Build a dictionary `have_link' of symbolic links into the cross-
1968 ## tools trees. Also, be sure to collect all of the relative symbolic
1969 ## links which are in the cross-tools tree.
1970 have_link = dict()
1971 with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
1972 "sh", "-e", "-c", """
1973 find / -xdev -lname "/usr/local.schroot/cross/*" -printf "%p %l\n"
1974 """], stdout = PIPE) as (_, fd_out, _):
1975 try:
1976 f = OS.fdopen(fd_out)
1977 for line in f:
1978 dest, src = line.split()
1979 have_link[dest] = src
1980 finally:
1981 f.close()
1982 for path in want_link.iterkeys():
1983 real = root + path
1984 if not OS.path.islink(real): continue
1985 have_link[path] = OS.readlink(real)
1986
1987 ## Add diversions for the paths which need one, but don't have one.
1988 ## There's a hack here because the `--no-rename' option was required in
1989 ## the same version in which it was introduced, so there's no single
1990 ## incantation that will work across the boundary.
1991 progress("add missing diversions")
1992 with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
1993 "sh", "-e", "-c", """
1994 a="%(arch)s"
1995
1996 if dpkg-divert >/dev/null 2>&1 --no-rename --help
1997 then no_rename=--no-rename
1998 else no_rename=
1999 fi
2000
2001 while read path; do
2002 dpkg-divert --package "install-cross-tools" $no_rename \
2003 --divert "$path.$a" --add "$path"
2004 done
2005 """ % dict(arch = arch)], stdin = PIPE) as (fd_in, _, _):
2006 try:
2007 f = OS.fdopen(fd_in, 'w')
2008 for path in want_div:
2009 if path not in have_div: f.write(path + "\n")
2010 finally:
2011 f.close()
2012
2013 ## Go through each diverted tool, and, if it hasn't been moved aside,
2014 ## then /link/ it across now. If we rename it, then the chroot will
2015 ## stop working -- which is why we didn't allow `dpkg-divert' to do the
2016 ## rename. We can tell a tool that hasn't been moved, because it's a
2017 ## symlink into one of the cross trees.
2018 progress("preserve existing foreign files")
2019 chroot_cross_prefix = host_to_chroot(crossdir) + "/"
2020 chroot_qemu_prefix = host_to_chroot(qemudir) + "/"
2021 for path in want_div:
2022 real = root + path; div = real + "." + arch; cross = crossdir + path
2023 if OS.path.exists(div): continue
2024 if not OS.path.exists(real): continue
2025 if OS.path.islink(real):
2026 realdest = OS.readlink(real)
2027 if realdest.startswith(chroot_cross_prefix) or \
2028 realdest.startswith(chroot_qemu_prefix):
2029 continue
2030 if OS.path.islink(cross) and realdest == OS.readlink(cross):
2031 continue
2032 progress("preserve existing foreign file `%s'" % path)
2033 run_root(["ln", real, div])
2034
2035 ## Update all of the symbolic links which are currently wrong: add
2036 ## links which are missing, delete ones which are obsolete, and update
2037 ## ones which have the wrong target.
2038 progress("update symlinks")
2039 for path, src in want_link.iteritems():
2040 real = root + path
2041 try: old_src = have_link[path]
2042 except KeyError: pass
2043 else:
2044 if src == old_src: continue
2045 new = real + ".new"
2046 progress("link `%s' -> `%s'" % (path, src))
2047 dir = OS.path.dirname(real)
2048 if not OS.path.isdir(dir): run_root(["mkdir", "-p", dir])
2049 if OS.path.exists(new): run_root(["rm", "-f", new])
2050 run_root(["ln", "-s", src, new])
2051 run_root(["mv", new, real])
2052 for path in have_link.iterkeys():
2053 if path in want_link: continue
2054 real = root + path
2055 progress("remove obsolete link `%s' -> `%s'" %
2056 (path, OS.readlink(real)))
2057 run_root(["rm", "-f", real])
2058
2059 ## Remove diversions from paths which don't need them any more. Here
2060 ## it's safe to rename, because either the tool isn't there, in which
2061 ## case it obviously wasn't important, or it is, and `dpkg-divert' will
2062 ## atomically replace our link with the foreign version.
2063 progress("remove obsolete diversions")
2064 with subprocess(["schroot", "-uroot", "-r", "-c", session, "--",
2065 "sh", "-e", "-c", """
2066 a="%(arch)s"
2067
2068 while read path; do
2069 dpkg-divert --package "install-cross-tools" --rename \
2070 --divert "$path.$a" --remove "$path"
2071 done
2072 """ % dict(arch = arch)], stdin = PIPE) as (fd_in, _, _):
2073 try:
2074 f = OS.fdopen(fd_in, 'w')
2075 for path in have_div:
2076 if path not in want_div: f.write(path + "\n")
2077 finally:
2078 f.close()
2079
2080 def _make_chroot(me):
2081 """
2082 Create the source-chroot with chroot metadata META.
2083
2084 This will recreate a source-chroot from scratch, destroying the existing
2085 logical volume if necessary.
2086 """
2087 with Cleanup() as clean:
2088
2089 dist, arch = me._dist, me._arch
2090 clean.enter(lockfile(chroot_src_lockfile(dist, arch)))
2091
2092 mnt = chroot_src_mntpt(dist, arch)
2093 dev = chroot_src_blkdev(dist, arch)
2094 lv = chroot_src_lv(dist, arch)
2095 newlv = lv + ".new"
2096
2097 ## Clean up any leftover debris.
2098 if mountpoint_p(mnt): umount(mnt)
2099 if block_device_p(dev):
2100 run_root(["lvremove", "-f", "%s/%s" % (C.VG, lv)])
2101
2102 ## Create the logical volume and filesystem. It's important that the
2103 ## logical volume not have its official name until after it contains a
2104 ## mountable filesystem.
2105 progress("create filesystem")
2106 run_root(["lvcreate", "--yes", C.LVSZ, "-n", newlv, C.VG])
2107 run_root(["mkfs", "-j", "-L%s-%s" % (dist, arch),
2108 OS.path.join("/dev", C.VG, newlv)])
2109 run_root(["lvrename", C.VG, newlv, lv])
2110
2111 ## Start installing the chroot.
2112 with mount_chroot_src(dist, arch) as mnt:
2113
2114 ## Set the basic structure.
2115 run_root(["mkdir", "-m755", OS.path.join(mnt, "fs")])
2116 run_root(["chmod", "750", mnt])
2117
2118 ## Install the base system.
2119 progress("install base system")
2120 run_root(["eatmydata", "debootstrap", "--no-merged-usr"] +
2121 (arch in C.FOREIGN_ARCHS and ["--foreign"] or []) +
2122 ["--arch=" + arch, "--variant=minbase",
2123 "--include=" + ",".join(C.BASE_PACKAGES),
2124 dist, OS.path.join(mnt, "fs"), C.DEBMIRROR])
2125
2126 ## If this is a cross-installation, then install the necessary `qemu'
2127 ## and complete the installation.
2128 if arch in C.FOREIGN_ARCHS:
2129 qemu = OS.path.join("cross", "%s-%s" % (dist, C.QEMUHOST[arch]),
2130 "QEMU", "qemu-%s-static" % C.QEMUARCH[arch])
2131 run_root(["install", OS.path.join(C.LOCAL, qemu),
2132 OS.path.join(mnt, "fs/usr/bin")])
2133 run_root(["chroot", OS.path.join(mnt, "fs"),
2134 "/debootstrap/debootstrap", "--second-stage"])
2135 run_root(["ln", "-sf",
2136 OS.path.join("/usr/local.schroot", qemu),
2137 OS.path.join(mnt, "fs/usr/bin")])
2138
2139 ## Set up `/usr/local'.
2140 progress("install `/usr/local' symlink")
2141 run_root(["rm", "-rf", OS.path.join(mnt, "fs/usr/local")])
2142 run_root(["ln", "-s",
2143 OS.path.join("local.schroot", arch),
2144 OS.path.join(mnt, "fs/usr/local")])
2145
2146 ## Install the `apt' configuration.
2147 progress("configure package manager")
2148 run_root(["rm", "-f", OS.path.join(mnt, "fs/etc/apt/sources.list")])
2149 for c in C.APTCONF:
2150 run_root(["ln", "-s",
2151 OS.path.join("/usr/local.schroot/etc/apt/apt.conf.d", c),
2152 OS.path.join(mnt, "fs/etc/apt/apt.conf.d")])
2153 run_root(["ln", "-s",
2154 "/usr/local.schroot/etc/apt/sources.%s" % dist,
2155 OS.path.join(mnt, "fs/etc/apt/sources.list")])
2156
2157 with safewrite_root\
2158 (OS.path.join(mnt, "fs/etc/apt/apt.conf.d/20arch")) as f:
2159 f.write("""\
2160### -*-conf-*-
2161
2162APT {
2163 Architecture "%s";
2164};
2165""" % arch)
2166
2167 ## Set up the locale and time zone from the host system.
2168 progress("configure locales and timezone")
2169 run_root(["cp", "/etc/locale.gen", "/etc/timezone",
2170 OS.path.join(mnt, "fs/etc")])
2171 with open("/etc/timezone") as f: tz = f.readline().strip()
2172 run_root(["ln", "-sf",
2173 OS.path.join("/usr/share/zoneinfo", tz),
2174 OS.path.join(mnt, "fs/etc/localtime")])
2175 run_root(["cp", "/etc/default/locale",
2176 OS.path.join(mnt, "fs/etc/default")])
2177
2178 ## Fix `/etc/mtab'.
2179 progress("set `/etc/mtab'")
2180 run_root(["ln", "-sf", "/proc/mounts",
2181 OS.path.join(mnt, "fs/etc/mtab")])
2182
2183 ## Prevent daemons from starting within the chroot.
2184 progress("inhibit daemon startup")
2185 with safewrite_root(OS.path.join(mnt, "fs/usr/sbin/policy-rc.d"),
2186 mode = "755") as f:
2187 f.write("""\
2188#! /bin/sh
2189echo >&2 "policy-rc.d: Services disabled by policy."
2190exit 101
2191""")
2192
2193 ## Hack the dynamic linker to prefer libraries in `/usr' over
2194 ## `/usr/local'. This prevents `dpkg-shlibdeps' from becoming
2195 ## confused.
2196 progress("configure dynamic linker")
2197 with safewrite_root\
2198 (OS.path.join(mnt, "fs/etc/ld.so.conf.d/libc.conf")) as f:
2199 f.write("# libc default configuration\n")
2200 with safewrite_root\
2201 (OS.path.join(mnt, "fs/etc/ld.so.conf.d/zzz-local.conf")) as f:
2202 f.write("""\
2203### -*-conf-*-
2204### Local hack to make /usr/local/ late.
2205/usr/local/lib
2206""")
2207
2208 ## If this is a foreign architecture then we need to set it up.
2209 if arch in C.FOREIGN_ARCHS:
2210
2211 ## Keep the chroot's native Qemu out of our way: otherwise we'll stop
2212 ## being able to run programs in the chroot. There's a hack here
2213 ## because the `--no-rename' option was required in the same version
2214 ## in which it was introduced, so there's no single incantation that
2215 ## will work across the boundary.
2216 progress("divert emulator")
2217 run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c", """
2218 if dpkg-divert >/dev/null 2>&1 --no-rename --help
2219 then no_rename=--no-rename
2220 else no_rename=
2221 fi
2222
2223 dpkg-divert --package install-cross-tools $no_rename \
2224 --divert /usr/bin/%(qemu)s.%(arch)s --add /usr/bin/%(qemu)s
2225 """ % dict(arch = arch, qemu = "qemu-%s-static" % C.QEMUARCH[arch])])
2226
2227 ## Install faster native tools.
2228 me._install_cross_tools()
2229
2230 ## Finishing touches.
2231 progress("finishing touches")
2232 run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c",
2233 DEBCONF_TWEAKS + """
2234 apt-get update
2235 apt-get -y upgrade
2236 apt-get -y install "$@"
2237 ldconfig
2238 apt-get -y autoremove
2239 apt-get clean
2240 """, "."] + C.EXTRA_PACKAGES, stdin = DISCARD)
2241
2242 ## Mark the chroot as done.
2243 me._meta.update = zulu()
2244 me._meta.write()
2245
2246 def _update_chroot(me):
2247 """Refresh the source-chroot with chroot metadata META."""
2248 with Cleanup() as clean:
2249 dist, arch = me._dist, me._arch
2250 clean.enter(lockfile(chroot_src_lockfile(dist, arch)))
2251 run_schroot_source(dist, arch, ["eatmydata", "sh", "-e", "-c",
2252 DEBCONF_TWEAKS + """
2253 apt-get update
2254 apt-get -y dist-upgrade
2255 apt-get -y autoremove
2256 apt-get -y clean
2257 ldconfig
2258 """], stdin = DISCARD)
2259 if arch in C.FOREIGN_ARCHS: me._install_cross_tools()
2260 me._meta.update = zulu(); me._meta.write()
2261
2262 def run(me):
2263 if me._meta.update is not None: me._update_chroot()
2264 else: me._make_chroot()
2265
2266###--------------------------------------------------------------------------
2267### Extracting the cross tools.
2268
2269class CrossToolsJob (BaseJob):
2270 """Extract cross-tools from a donor chroot."""
2271
2272 SPECS = C.NATIVE_CHROOTS
2273
2274 def __init__(me, spec, fresh = CREATE, *args, **kw):
2275 super(CrossToolsJob, me).__init__(*args, **kw)
2276 me._dist, me._arch = split_dist_arch(spec)
2277 me._meta = CrossToolsMetadata.read(me._dist, me._arch)
2278 me._fresh = fresh
2279 me._chroot = None
2280
2281 def _mkname(me): return "cross-tools.%s-%s" % (me._dist, me._arch)
2282
2283 def prepare(me):
2284 st, r = check_fresh(me._fresh, me._meta.update)
2285 if st is DONE: return
2286 me._chroot = ChrootJob.ensure("%s-%s" % (me._dist, me._arch), FRESH)
2287 me.await(me._chroot)
2288
2289 def check(me):
2290 status, reason = super(CrossToolsJob, me).check()
2291 if status is not READY: return status, reason
2292 if me._chroot is not None and me._chroot.started:
2293 return READY, "prerequisites run"
2294 return check_fresh(me._fresh, me._meta.update)
2295
2296 def run(me):
2297 with Cleanup() as clean:
2298
2299 dist, arch = me._dist, me._arch
2300
2301 mymulti = run_program(["dpkg-architecture", "-a" + arch,
2302 "-qDEB_HOST_MULTIARCH"],
2303 stdout = RETURN).rstrip("\n")
2304 crossarchs = [run_program(["dpkg-architecture", "-A" + a,
2305 "-qDEB_TARGET_GNU_TYPE"],
2306 stdout = RETURN).rstrip("\n")
2307 for a in C.FOREIGN_ARCHS]
2308
2309 crossdir = OS.path.join(C.LOCAL, "cross", "%s-%s" % (dist, arch))
2310 crossold = crossdir + ".old"; crossnew = crossdir + ".new"
2311 usrbin = OS.path.join(crossnew, "usr/bin")
2312
2313 clean.enter(lockfile(crosstools_lockfile(dist, arch)))
2314 run_program(["rm", "-rf", crossnew])
2315 mkdir_p(crossnew)
2316
2317 ## Open a session to the donor chroot.
2318 progress("establish snapshot")
2319 session, root = clean.enter(chroot_session(dist, arch))
2320
2321 ## Make sure the donor tree is up-to-date, and install the extra
2322 ## packages we need.
2323 progress("install tools packages")
2324 run_schroot_session(session, ["eatmydata", "sh", "-e", "-c",
2325 DEBCONF_TWEAKS + """
2326 apt-get update
2327 apt-get -y upgrade
2328 apt-get -y install "$@"
2329 """, "."] + C.CROSS_PACKAGES, rootp = True, stdin = DISCARD)
2330
2331 def chase(path):
2332 dest = ""
2333
2334 ## Work through the remaining components of the PATH.
2335 while path != "":
2336 try: sl = path.index("/")
2337 except ValueError: step = path; path = ""
2338 else: step, path = path[:sl], path[sl + 1:]
2339
2340 ## Split off and analyse the first component.
2341 if step == "" or step == ".":
2342 ## A redundant `/' or `./'. Skip it.
2343 pass
2344 elif step == "..":
2345 ## A `../'. Strip off the trailing component of DEST.
2346 dest = dest[:dest.rindex("/")]
2347 else:
2348 ## Something else. Transfer the component name to DEST.
2349 dest += "/" + step
2350
2351 ## If DEST refers to something in the cross-tools tree then we're
2352 ## good.
2353 crossdest = crossnew + dest
2354 try: st = OS.lstat(crossdest)
2355 except OSError, err:
2356 if err.errno == E.ENOENT:
2357 ## No. We need to copy something from the donor tree so that
2358 ## the name works.
2359
2360 st = OS.lstat(root + dest)
2361 if ST.S_ISDIR(st.st_mode):
2362 OS.mkdir(crossdest)
2363 else:
2364 progress("copy `%s'" % dest)
2365 run_program(["rsync", "-aHR",
2366 "%s/.%s" % (root, dest),
2367 crossnew])
2368 else:
2369 raise
2370
2371 ## If DEST refers to a symbolic link, then prepend the link target
2372 ## to PATH so that we can be sure the link will work.
2373 if ST.S_ISLNK(st.st_mode):
2374 link = OS.readlink(crossdest)
2375 if link.startswith("/"): dest = ""; link = link[1:]
2376 else:
2377 try: dest = dest[:dest.rindex("/")]
2378 except ValueError: dest = ""
2379 if path == "": path = link
2380 else: path = "%s/%s" % (link, path)
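## An illustrative walk-through (paths made up): chasing
## "usr/bin/cc", where the donor's `cc' is a symlink to
## "../lib/ccache/cc", first copies the `cc' link itself, then
## restarts on "usr/lib/ccache/cc", copying any component still
## missing from CROSSNEW -- so the chain resolves within the
## cross-tools tree just as it did in the donor chroot.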
2381
2382 ## Work through the shopping list, copying the things it names into the
2383 ## cross-tools tree.
2384 scan = []
2385 for pat in C.CROSS_PATHS:
2386 pat = pat.replace("MULTI", mymulti)
2387 any = False
2388 for rootpath in GLOB.iglob(root + pat):
2389 any = True
2390 path = rootpath[len(root):]
2391 progress("copy `%s'" % path)
2392 run_program(["rsync", "-aHR", "%s/.%s" % (root, path), crossnew])
2393 if not any:
2394 raise RuntimeError("no matches for cross-tool pattern `%s'" % pat)
2395
2396 ## Scan the new tree: chase down symbolic links, copying extra stuff
2397 ## that we'll need; and examine ELF binaries to make sure we get the
2398 ## necessary shared libraries.
2399 def visit(_, dir, files):
2400 for f in files:
2401 path = OS.path.join(dir, f)
2402 inside = switch_prefix(path, [(crossnew + "/", "/")])
2403 if OS.path.islink(path): chase(inside)
2404 if elf_binary_p(arch, path): scan.append(inside)
2405 OS.path.walk(crossnew, visit, None)
2406
2407 ## Work through the ELF binaries in `scan', determining which shared
2408 ## libraries they'll need.
2409 ##
2410 ## The rune running in the chroot session reads ELF binary names on
2411 ## stdin, one per line, and runs `ldd' on them to discover the binary's
2412 ## needed libraries and resolve them into pathnames. Each pathname is
2413 ## printed to stderr as a line `+PATHNAME', followed by a final line
2414 ## consisting only of `-' as a terminator. This is necessary so that
2415 ## we can tell when we've finished, because newly discovered libraries
2416 ## need to be fed back to discover their recursive dependencies. (This
2417 ## is why the `WriteLinesSelector' interface is quite so hairy.)
2418 with subprocess(["schroot", "-r", "-c", session, "--",
2419 "sh", "-e", "-c", """
2420 while read path; do
2421 ldd "$path" | while read a b c d; do
2422 case $a:$b:$c:$d in
2423 not:a:dynamic:executable) ;;
2424 statically:linked::) ;;
2425 /*) echo "+$a" ;;
2426 *:=\\>:/*) echo "+$c" ;;
2427 linux-*) ;;
2428 *) echo >&2 "failed to find shared library \\`$a'"; exit 2 ;;
2429 esac
2430 done
2431 echo -
2432 done
2433 """], stdin = PIPE, stdout = PIPE) as (fd_in, fd_out, _):
2434
2435 ## Keep track of the number of binaries we've reported to the `ldd'
2436 ## process for which we haven't yet seen all of their dependencies.
2437 ## (This is wrapped in a `Struct' because of Python's daft scoping
2438 ## rules.)
2439 v = Struct(n = 0)
2440
2441 def line_in():
2442 ## Provide a line of input for the `ldd' rune; raise `StopIteration' when there's nothing left to do.
2443
2444 try:
2445 ## See if there's something to scan.
2446 path = scan.pop()
2447
2448 except IndexError:
2449 ## There's nothing currently waiting to be scanned.
2450 if v.n:
2451 ## There are still outstanding replies, so stall.
2452 return None
2453 else:
2454 ## There are no outstanding replies left, and we have nothing
2455 ## more to scan, so we must be finished.
2456 raise StopIteration
2457
2458 else:
2459 ## The `scan' list isn't empty, so return an item from that, and
2460 ## remember that there's one more thing we expect to see answers
2461 ## from.
2462 v.n += 1; return path
2463
2464 def line_out(line):
2465 ## We've received a line from the `ldd' process.
2466
2467 if line == "-":
2468 ## It's finished processing one of our binaries. Note this.
2469 ## Maybe it's time to stop.
2470 v.n -= 1
2471 return
2472
2473 ## Strip the leading marker (which is just there so that the
2474 ## terminating `-' is unambiguous).
2475 assert line.startswith("+")
2476 lib = line[1:]
2477
2478 ## If we already have this binary then we'll already have submitted
2479 ## it.
2480 path = crossnew + lib
2481 try: OS.lstat(path)
2482 except OSError, err:
2483 if err.errno == E.ENOENT: pass
2484 else: raise
2485 else: return
2486
2487 ## Copy it into the tools tree, together with any symbolic links
2488 ## along the path.
2489 chase(lib)
2490
2491 ## If this is an ELF binary (and it ought to be!) then submit it
2492 ## for further scanning.
2493 if elf_binary_p(arch, path):
2494 scan.append(switch_prefix(path, [(crossnew + "/", "/")]))
2495
2496 ## And run this entire contraption. When this is done, we should
2497 ## have all of the library dependencies for all of our binaries.
2498 select_loop([WriteLinesSelector(fd_in, line_in),
2499 ReadLinesSelector(fd_out, line_out)])
2500
2501 ## Set up the cross-compiler and emulator. Start by moving the cross
2502 ## compilers and emulator into their specific places, so they don't end
2503 ## up cluttering chroots for non-matching architectures.
2504 progress("establish TOOLCHAIN and QEMU")
2505 OS.mkdir(OS.path.join(crossnew, "TOOLCHAIN"))
2506 qemudir = OS.path.join(crossnew, "QEMU")
2507 OS.mkdir(qemudir)
2508 for gnu in C.FOREIGN_GNUARCHS:
2509 OS.mkdir(OS.path.join(crossnew, "TOOLCHAIN", gnu))
2510 for f in OS.listdir(usrbin):
2511 for gnu in C.FOREIGN_GNUARCHS:
2512 gnuprefix = gnu + "-"
2513 if f.startswith(gnuprefix):
2514 tooldir = OS.path.join(crossnew, "TOOLCHAIN", gnu)
2515 OS.rename(OS.path.join(usrbin, f), OS.path.join(tooldir, f))
2516 OS.symlink(f, OS.path.join(tooldir, f[len(gnuprefix):]))
2517 break
2518 else:
2519 if f.startswith("qemu-") and f.endswith("-static"):
2520 OS.rename(OS.path.join(usrbin, f), OS.path.join(qemudir, f))
2521
2522 ## The GNU cross compilers try to find their additional pieces via a
2523 ## relative path, which isn't going to end well. Add a symbolic link
2524 ## at the right place to where the things are actually going to live.
2525 toollib = OS.path.join(crossnew, "TOOLCHAIN", "lib")
2526 OS.mkdir(toollib)
2527 OS.symlink("../../usr/lib/gcc-cross",
2528 OS.path.join(toollib, "gcc-cross"))
2529
2530 ## We're done. Replace the old cross-tools with our new one.
2531 me._meta.update = zulu()
2532 me._meta.write(crossnew)
2533 if OS.path.exists(crossdir): run_program(["mv", crossdir, crossold])
2534 OS.rename(crossnew, crossdir)
2535 run_program(["rm", "-rf", crossold])
2536
2537###--------------------------------------------------------------------------
2538### Building and installing local packages.
2539
2540def pkg_metadata_lockfile(pkg):
2541 return lockfile_path("pkg-meta.%s" % pkg)
2542
2543def pkg_srcdir_lockfile(pkg, ver):
2544 return lockfile_path("pkg-source.%s-%s" % (pkg, ver))
2545
2546def pkg_srcdir(pkg, ver):
2547 return OS.path.join(C.LOCAL, "src", "%s-%s" % (pkg, ver))
2548
2549def pkg_builddir(pkg, ver, arch):
2550 return OS.path.join(pkg_srcdir(pkg, ver), "build.%s" % arch)
2551
2552class PackageMetadata (BaseMetadata):
2553 VARS = ["pkg"] + list(C.ALL_ARCHS)
2554
2555 @classmethod
2556 def read(cls, pkg):
2557 try:
2558 return super(PackageMetadata, cls)\
2559 .read(OS.path.join(C.LOCAL, "src", "META.%s" % pkg))
2560 except IOError, err:
2561 if err.errno == E.ENOENT: pass
2562 else: raise
2563 return cls(pkg = pkg)
2564
2565 def write(me):
2566 super(PackageMetadata, me)\
2567 .write(OS.path.join(C.LOCAL, "src", "META.%s" % me.pkg))
2568
2569class PackageSourceJob (BaseJob):
2570
2571 SPECS = C.LOCALPKGS
2572
2573 def __init__(me, pkg, fresh = CREATE, *args, **kw):
2574 super(PackageSourceJob, me).__init__(*args, **kw)
2575 me._pkg = pkg
2576 tar = None; ver = None
2577 r = RX.compile("^%s-(\d.*)\.tar\.(?:Z|z|gz|bz2|xz|lzma)$" %
2578 RX.escape(pkg))
2579 for f in OS.listdir("pkg"):
2580 m = r.match(f)
2581 if not m: pass
2582 elif tar is not None:
2583 raise ExpectedError("multiple source tarballs of package `%s'" % pkg)
2584 else: tar, ver = f, m.group(1)
2585 me.version = ver
2586 me.tarball = OS.path.join("pkg", tar)
2587
2588 def _mkname(me): return "pkg-source.%s" % me._pkg
2589
2590 def check(me):
2591 status, reason = super(PackageSourceJob, me).check()
2592 if status is not READY: return status, reason
2593 if OS.path.isdir(pkg_srcdir(me._pkg, me.version)):
2594 return DONE, "already unpacked"
2595 else:
2596 return READY, "no source tree"
2597
2598 def run(me):
2599 with Cleanup() as clean:
2600 pkg, ver, tar = me._pkg, me.version, me.tarball
2601 srcdir = pkg_srcdir(pkg, ver)
2602 newdir = srcdir + ".new"
2603
2604 progress("unpack `%s'" % me.tarball)
2605 clean.enter(lockfile(pkg_srcdir_lockfile(pkg, ver)))
2606 run_program(["rm", "-rf", newdir])
2607 mkdir_p(newdir)
2608 run_program(["tar", "xf", OS.path.join(OS.getcwd(), me.tarball)],
2609 cwd = newdir)
2610 things = OS.listdir(newdir)
2611 if len(things) == 1:
2612 OS.rename(OS.path.join(newdir, things[0]), srcdir)
2613 OS.rmdir(newdir)
2614 else:
2615 OS.rename(newdir, srcdir)
2616
2617class PackageBuildJob (BaseJob):
2618
2619 SPECS = ["%s:%s" % (pkg, arch)
2620 for pkg in C.LOCALPKGS
2621 for arch in C.ALL_ARCHS]
2622
2623 def __init__(me, spec, fresh = CREATE, *args, **kw):
2624 super(PackageBuildJob, me).__init__(*args, **kw)
2625 colon = spec.index(":")
2626 me._pkg, me._arch = spec[:colon], spec[colon + 1:]
2627
2628 def _mkname(me): return "pkg-build.%s:%s" % (me._pkg, me._arch)
2629
2630 def prepare(me):
2631 me.await(ChrootJob.ensure("%s-%s" % (C.PRIMARY_DIST, me._arch), CREATE))
2632 me._meta = PackageMetadata.read(me._pkg)
2633 me._src = PackageSourceJob.ensure(me._pkg, FRESH); me.await(me._src)
2634 me._prereq = [PackageBuildJob.ensure("%s:%s" % (prereq, me._arch), FRESH)
2635 for prereq in C.PKGDEPS[me._pkg]]
2636 for j in me._prereq: me.await(j)
2637
2638 def check(me):
2639 status, reason = super(PackageBuildJob, me).check()
2640 if status is not READY: return status, reason
2641 if me._src.started: return READY, "fresh source directory"
2642 for j in me._prereq:
2643 if j.started:
2644 return READY, "dependency `%s' freshly installed" % j._pkg
2645 if getattr(me._meta, me._arch) == me._src.version:
2646 return DONE, "already installed"
2647 return READY, "not yet installed"
2648
2649 def run(me):
2650 with Cleanup() as clean:
2651 pkg, ver, arch = me._pkg, me._src.version, me._arch
2652
2653 session, _ = clean.enter(chroot_session(C.PRIMARY_DIST, arch))
2654 builddir = OS.path.join(pkg_srcdir(pkg, ver), "build.%s" % arch)
2655 chroot_builddir = host_to_chroot(builddir)
2656 run_program(["rm", "-rf", builddir])
2657 OS.mkdir(builddir)
2658
2659 progress("prepare %s chroot" % (arch))
2660 run_schroot_session(session,
2661 ["eatmydata", "apt-get", "update"],
2662 rootp = True, stdin = DISCARD)
2663 run_schroot_session(session,
2664 ["eatmydata", "apt-get", "-y", "upgrade"],
2665 rootp = True, stdin = DISCARD)
2666 run_schroot_session(session,
2667 ["eatmydata", "apt-get", "-y",
2668 "install", "pkg-config"],
2669 rootp = True, stdin = DISCARD)
2670 run_schroot_session(session,
2671 ["mount", "-oremount,rw", "/usr/local.schroot"],
2672 rootp = True, stdin = DISCARD)
2673 run_schroot_session(session,
2674 ["mount", "--bind",
2675 "/usr/local.schroot/%s/include.aside" % arch,
2676 "/usr/local.schroot/%s/include" % arch],
2677 rootp = True, stdin = DISCARD)
2678
2679 progress("configure `%s' %s for %s" % (pkg, ver, arch))
2680 run_schroot_session(session, ["sh", "-e", "-c", """
2681 cd "$1" &&
2682 ../configure PKG_CONFIG_PATH=/usr/local/lib/pkgconfig.hidden
2683 """, ".", chroot_builddir])
2684
2685 progress("compile `%s' %s for %s" % (pkg, ver, arch))
2686 run_schroot_session(session, ["sh", "-e", "-c", """
2687 cd "$1" && make -j4 && make -j4 check
2688 """, ".", chroot_builddir])
2689
2690 existing = getattr(me._meta, arch, None)
2691 if existing is not None and existing != ver:
2692 progress("uninstall existing `%s' %s for %s" % (pkg, existing, arch))
2693 run_schroot_session(session, ["sh", "-e", "-c", """
2694 cd "$1" && make uninstall
2695 """, ".", OS.path.join(pkg_srcdir(pkg, existing),
2696 "build.%s" % arch)],
2697 rootp = True)
2698
2699 progress("install `%s' %s for %s" % (pkg, ver, arch))
2700 run_schroot_session(session, ["sh", "-e", "-c", """
2701 cd "$1" && make install
2702 mkdir -p /usr/local/lib/pkgconfig.hidden
2703 mv /usr/local/lib/pkgconfig/*.pc /usr/local/lib/pkgconfig.hidden || :
2704 """, ".", chroot_builddir], rootp = True)
2705
2706 clean.enter(lockfile(pkg_metadata_lockfile(pkg)))
2707 me._meta = PackageMetadata.read(pkg)
2708 setattr(me._meta, arch, ver); me._meta.write()
2709
2710 with lockfile(chroot_src_lockfile(C.PRIMARY_DIST, arch)):
2711 run_schroot_source(C.PRIMARY_DIST, arch, ["ldconfig"])
2712
2713###--------------------------------------------------------------------------
2714### Process the configuration and options.
2715
2716OPTIONS = OP.OptionParser\
2717 (usage = "chroot-maint [-diknqs] [-fFRESH] [-jN] JOB[.SPEC,...] ...")
2718for short, long, props in [
2719 ("-d", "--debug", {
2720 'dest': 'debug', 'default': False, 'action': 'store_true',
2721 'help': "print lots of debugging drivel" }),
2722 ("-f", "--fresh", {
2723 'dest': 'fresh', 'metavar': 'FRESH', 'default': "create",
2724 'help': "how fresh (`create', `force', or `N[s|m|h|d|w]')" }),
2725 ("-i", "--ignore-errors", {
2726 'dest': 'ignerr', 'default': False, 'action': 'store_true',
2727 'help': "ignore all errors encountered while processing" }),
2728 ("-j", "--jobs", {
2729 'dest': 'njobs', 'metavar': 'N', 'default': 1, 'type': 'int',
2730 'help': 'run up to N jobs in parallel' }),
2731 ("-J", "--forkbomb", {
2732 'dest': 'njobs', 'action': 'store_true',
2733 'help': 'run as many jobs in parallel as possible' }),
2734 ("-k", "--keep-going", {
2735 'dest': 'keepon', 'default': False, 'action': 'store_true',
2736 'help': "keep going even if independent jobs fail" }),
2737 ("-n", "--dry-run", {
2738 'dest': 'dryrun', 'default': False, 'action': 'store_true',
2739 'help': "don't actually do anything" }),
2740 ("-q", "--quiet", {
2741 'dest': 'quiet', 'default': False, 'action': 'store_true',
2742 'help': "don't print the output from successful jobs" }),
2743 ("-s", "--silent", {
2744 'dest': 'silent', 'default': False, 'action': 'store_true',
2745 'help': "don't print progress messages" })]:
2746 OPTIONS.add_option(short, long, **props)
2747
2748###--------------------------------------------------------------------------
2749### Main program.
2750
2751R_JOBSERV = RX.compile(r'^--jobserver-(?:fds|auth)=(\d+),(\d+)$')
2752
2753JOBMAP = { "chroot": ChrootJob,
2754 "cross-tools": CrossToolsJob,
2755 "pkg-source": PackageSourceJob,
2756 "pkg-build": PackageBuildJob }
2757
2758R_FRESH = RX.compile(r"^(?:create|force|(\d+)(|[smhdw]))$")
2759
2760def parse_fresh(spec):
2761 m = R_FRESH.match(spec)
2762 if not m: raise ExpectedError("bad freshness `%s'" % spec)
2763 if spec == "create": fresh = CREATE
2764 elif spec == "force": fresh = FORCE
2765 else:
2766 n, u = int(m.group(1)), m.group(2)
2767 if u == "" or u == "s": fresh = n
2768 elif u == "m": fresh = 60*n
2769 elif u == "h": fresh = 3600*n
2770 elif u == "d": fresh = 86400*n
2771 elif u == "w": fresh = 604800*n
2772 else: assert False
2773 return fresh
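## For instance, `parse_fresh("30m")' gives 1800 (seconds), while
## `parse_fresh("create")' and `parse_fresh("force")' give the
## `CREATE' and `FORCE' tokens respectively.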
2774
2775with toplevel_handler():
2776 OPT, args = OPTIONS.parse_args()
2777 rfd, wfd = -1, -1
2778 njobs = OPT.njobs
2779 try: mkflags = OS.environ['MAKEFLAGS']
2780 except KeyError: pass
2781 else:
2782 ff = mkflags.split()
2783 for f in ff:
2784 if f == "--": break
2785 m = R_JOBSERV.match(f)
2786 if m: rfd, wfd = int(m.group(1)), int(m.group(2))
2787 elif f == '-j': njobs = None
2788 elif not f.startswith('-'):
2789 for ch in f:
2790 if ch == 'i': OPT.ignerr = True
2791 elif ch == 'k': OPT.keepon = True
2792 elif ch == 'n': OPT.dryrun = True
2793 elif ch == 's': OPT.silent = True
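 ## So, given e.g. MAKEFLAGS="ks -j --jobserver-auth=3,4", we inherit
 ## make's jobserver on file descriptors 3 and 4, uncap our own job
 ## limit, and copy the `keep-going' and `silent' flags.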
2794 if OPT.njobs < 1:
2795 raise ExpectedError("running no more than %d jobs is silly" % OPT.njobs)
2796
2797 FRESH = parse_fresh(OPT.fresh)
2798
2799 SCHED = JobScheduler(rfd, wfd, njobs)
2800 OS.environ["http_proxy"] = C.PROXY
2801
2802 jobs = []
2803 if not args: OPTIONS.print_usage(SYS.stderr); SYS.exit(2)
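 ## A worked example (the spec is made up): the argument
 ## `chroot.buster-*/4d' selects `ChrootJob', expands the pattern
 ## `buster-*' against the class's SPECS, and refreshes each matching
 ## chroot that's more than four days old.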
2804 for arg in args:
2805 try: sl = arg.index("/")
2806 except ValueError: fresh = FRESH
2807 else: arg, fresh = arg[:sl], parse_fresh(arg[sl + 1:])
2808 try: dot = arg.index(".")
2809 except ValueError: jty, pats = arg, "*"
2810 else: jty, pats = arg[:dot], arg[dot + 1:]
2811 try: jcls = JOBMAP[jty]
2812 except KeyError: raise ExpectedError("unknown job type `%s'" % jty)
2813 specs = []
2814 for pat in pats.split(","):
2815 any = False
2816 for s in jcls.SPECS:
2817 if FM.fnmatch(s, pat): specs.append(s); any = True
2818 if not any: raise ExpectedError("no match for `%s'" % pat)
2819 for s in specs:
2820 jobs.append(jcls.ensure(s, fresh))
2821
2822 SCHED.run()
2823
2824SYS.exit(RC)
2825
2826###----- That's all, folks --------------------------------------------------