chiark / gitweb /
import: add new minimal tool "systemd-import" for pulling down foreign containers...
authorLennart Poettering <lennart@poettering.net>
Fri, 19 Dec 2014 00:59:52 +0000 (01:59 +0100)
committerLennart Poettering <lennart@poettering.net>
Fri, 19 Dec 2014 01:08:14 +0000 (02:08 +0100)
This adds a simple but powerful tool for downloading container images
from the most popular container solution used today. Use it like
this:

       # systemd-import pull-dck mattdm/fedora
       # systemd-nspawn -M fedora

This will download the layers for "mattdm/fedora", and make them
available locally as /var/lib/container/fedora.

The tool is pretty complete, as long as it's only about pulling down
images, or updating them. Pushing or searching is not supported yet.

.gitignore
Makefile.am
src/import/Makefile [new symlink]
src/import/aufs-util.c [new file with mode: 0644]
src/import/aufs-util.h [new file with mode: 0644]
src/import/curl-util.c [new file with mode: 0644]
src/import/curl-util.h [new file with mode: 0644]
src/import/import-dck.c [new file with mode: 0644]
src/import/import-dck.h [new file with mode: 0644]
src/import/import.c [new file with mode: 0644]
src/shared/util.h

index c849c4d..1b5d60f 100644 (file)
@@ -82,6 +82,7 @@
 /systemd-hibernate-resume-generator
 /systemd-hostnamed
 /systemd-hwdb
+/systemd-import
 /systemd-inhibit
 /systemd-initctl
 /systemd-journal-gatewayd
index 5a41ba6..2cc19a9 100644 (file)
@@ -5094,6 +5094,32 @@ libnss_mymachines_la_LIBADD = \
 
 lib_LTLIBRARIES += \
        libnss_mymachines.la
+
+if HAVE_LIBCURL
+
+bin_PROGRAMS += \
+       systemd-import
+
+systemd_import_SOURCES = \
+       src/import/import.c \
+       src/import/import-dck.c \
+       src/import/import-dck.h \
+       src/import/curl-util.c \
+       src/import/curl-util.h \
+       src/import/aufs-util.c \
+       src/import/aufs-util.h
+
+systemd_import_CFLAGS = \
+       $(AM_CFLAGS) \
+       $(LIBCURL_CFLAGS)
+
+systemd_import_LDADD = \
+       libsystemd-internal.la \
+       libsystemd-shared.la \
+       $(LIBCURL_LIBS) \
+       -lm
+endif
+
 endif
 
 # ------------------------------------------------------------------------------
diff --git a/src/import/Makefile b/src/import/Makefile
new file mode 120000 (symlink)
index 0000000..d0b0e8e
--- /dev/null
@@ -0,0 +1 @@
+../Makefile
\ No newline at end of file
diff --git a/src/import/aufs-util.c b/src/import/aufs-util.c
new file mode 100644 (file)
index 0000000..c1301cd
--- /dev/null
@@ -0,0 +1,73 @@
+/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
+
+/***
+  This file is part of systemd.
+
+  Copyright 2014 Lennart Poettering
+
+  systemd is free software; you can redistribute it and/or modify it
+  under the terms of the GNU Lesser General Public License as published by
+  the Free Software Foundation; either version 2.1 of the License, or
+  (at your option) any later version.
+
+  systemd is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  Lesser General Public License for more details.
+
+  You should have received a copy of the GNU Lesser General Public License
+  along with systemd; If not, see <http://www.gnu.org/licenses/>.
+***/
+
+#include <ftw.h>
+
+#include "util.h"
+#include "aufs-util.h"
+
+/* nftw() visitor: deletes aufs whiteout marker files (".wh.*") and the
+ * files/directories those markers flag as deleted. Returns FTW_STOP to
+ * abort the walk on any removal failure. */
+static int nftw_cb(
+                const char *fpath,
+                const struct stat *sb,
+                int flag,
+                struct FTW *ftwbuf) {
+
+        const char *fn, *original;
+        char *p;
+        int r;
+
+        fn = fpath + ftwbuf->base;
+
+        /* We remove all whiteout marker files, and the entries they mark */
+
+        original = startswith(fn, ".wh.");
+        if (!original)
+                return FTW_CONTINUE;
+
+        log_debug("Removing whiteout indicator %s.", fpath);
+        r = rm_rf_dangerous(fpath, false, true, false);
+        if (r < 0)
+                return FTW_STOP;
+
+        /* ".wh..wh." entries are aufs-internal metadata, not deletions */
+        if (!startswith(fn, ".wh..wh.")) {
+
+                /* Fix: reserve an extra byte for the terminating NUL that
+                 * strcpy() appends after mempcpy() copies the dir prefix;
+                 * the original allocation was one byte too small. */
+                p = alloca(ftwbuf->base + strlen(original) + 1);
+                strcpy(mempcpy(p, fpath, ftwbuf->base), original);
+
+                log_debug("Removing deleted file %s.", p);
+                r = rm_rf_dangerous(p, false, true, false);
+                if (r < 0)
+                        return FTW_STOP;
+        }
+
+        return FTW_CONTINUE;
+}
+
+/* Flattens an extracted aufs layer at 'path' by applying all whiteout
+ * markers in place. FTW_MOUNT keeps the walk on one file system,
+ * FTW_PHYS avoids following symlinks. Returns 0 or a negative errno. */
+int aufs_resolve(const char *path) {
+        int r;
+
+        /* errno is cleared so a FTW_STOP without errno set can be
+         * mapped to a generic -EIO below */
+        errno = 0;
+        r = nftw(path, nftw_cb, 64, FTW_MOUNT|FTW_PHYS|FTW_ACTIONRETVAL);
+        if (r == FTW_STOP)
+                return errno ? -errno : -EIO;
+
+        return 0;
+}
diff --git a/src/import/aufs-util.h b/src/import/aufs-util.h
new file mode 100644 (file)
index 0000000..712fb81
--- /dev/null
@@ -0,0 +1,24 @@
+/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
+
+#pragma once
+
+/***
+  This file is part of systemd.
+
+  Copyright 2014 Lennart Poettering
+
+  systemd is free software; you can redistribute it and/or modify it
+  under the terms of the GNU Lesser General Public License as published by
+  the Free Software Foundation; either version 2.1 of the License, or
+  (at your option) any later version.
+
+  systemd is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  Lesser General Public License for more details.
+
+  You should have received a copy of the GNU Lesser General Public License
+  along with systemd; If not, see <http://www.gnu.org/licenses/>.
+***/
+
+int aufs_resolve(const char *path);
diff --git a/src/import/curl-util.c b/src/import/curl-util.c
new file mode 100644 (file)
index 0000000..eaaebae
--- /dev/null
@@ -0,0 +1,415 @@
+/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
+
+/***
+  This file is part of systemd.
+
+  Copyright 2014 Lennart Poettering
+
+  systemd is free software; you can redistribute it and/or modify it
+  under the terms of the GNU Lesser General Public License as published by
+  the Free Software Foundation; either version 2.1 of the License, or
+  (at your option) any later version.
+
+  systemd is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  Lesser General Public License for more details.
+
+  You should have received a copy of the GNU Lesser General Public License
+  along with systemd; If not, see <http://www.gnu.org/licenses/>.
+***/
+
+#include "curl-util.h"
+
+/* Checks the multi handle for a completed transfer and notifies the
+ * user callback. NOTE(review): only the first queued message is
+ * examined per call; additional completed transfers are picked up on
+ * subsequent invocations. */
+static void curl_glue_check_finished(CurlGlue *g) {
+        CURLMsg *msg;
+        int k = 0;
+
+        assert(g);
+
+        msg = curl_multi_info_read(g->curl, &k);
+        if (!msg)
+                return;
+
+        if (msg->msg != CURLMSG_DONE)
+                return;
+
+        if (g->on_finished)
+                g->on_finished(g, msg->easy_handle, msg->data.result);
+}
+
+/* sd-event IO callback: maps our duplicated epoll fd back to curl's
+ * original socket (via the translate_fds hashmap, values stored +1 so
+ * 0 can mean "not found") and feeds the poll events into curl. */
+static int curl_glue_on_io(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
+        CurlGlue *g = userdata;
+        int action, k = 0, translated_fd;
+
+        assert(s);
+        assert(g);
+
+        translated_fd = PTR_TO_INT(hashmap_get(g->translate_fds, INT_TO_PTR(fd+1)));
+        assert(translated_fd > 0);
+        translated_fd--;
+
+        if ((revents & (EPOLLIN|EPOLLOUT)) == (EPOLLIN|EPOLLOUT))
+                action = CURL_POLL_INOUT;
+        else if (revents & EPOLLIN)
+                action = CURL_POLL_IN;
+        else if (revents & EPOLLOUT)
+                action = CURL_POLL_OUT;
+        else
+                action = 0;
+
+        if (curl_multi_socket_action(g->curl, translated_fd, action, &k) < 0) {
+                log_debug("Failed to propagate IO event.");
+                return -EINVAL;
+        }
+
+        /* The IO activity may have completed a transfer */
+        curl_glue_check_finished(g);
+        return 0;
+}
+
+/* CURLMOPT_SOCKETFUNCTION handler: registers/updates/removes an
+ * sd-event IO source for the socket curl wants us to watch. Keys in
+ * both hashmaps are stored with +1 so that fd/socket 0 is not
+ * confused with a NULL lookup result. */
+static int curl_glue_socket_callback(CURLM *curl, curl_socket_t s, int action, void *userdata, void *socketp) {
+        sd_event_source *io;
+        CurlGlue *g = userdata;
+        uint32_t events = 0;
+        int r;
+
+        assert(curl);
+        assert(g);
+
+        io = hashmap_get(g->ios, INT_TO_PTR(s+1));
+
+        if (action == CURL_POLL_REMOVE) {
+                if (io) {
+                        int fd;
+
+                        fd = sd_event_source_get_io_fd(io);
+                        assert(fd >= 0);
+
+                        sd_event_source_set_enabled(io, SD_EVENT_OFF);
+                        sd_event_source_unref(io);
+
+                        hashmap_remove(g->ios, INT_TO_PTR(s+1));
+                        hashmap_remove(g->translate_fds, INT_TO_PTR(fd+1));
+
+                        /* Close our duplicate; curl already closed its own */
+                        safe_close(fd);
+                }
+
+                return 0;
+        }
+
+        r = hashmap_ensure_allocated(&g->ios, &trivial_hash_ops);
+        if (r < 0) {
+                log_oom();
+                return -1;
+        }
+
+        r = hashmap_ensure_allocated(&g->translate_fds, &trivial_hash_ops);
+        if (r < 0) {
+                log_oom();
+                return -1;
+        }
+
+        if (action == CURL_POLL_IN)
+                events = EPOLLIN;
+        else if (action == CURL_POLL_OUT)
+                events = EPOLLOUT;
+        else if (action == CURL_POLL_INOUT)
+                events = EPOLLIN|EPOLLOUT;
+
+        if (io) {
+                /* Already watching this socket: just adjust the event mask */
+                if (sd_event_source_set_io_events(io, events) < 0)
+                        return -1;
+
+                if (sd_event_source_set_enabled(io, SD_EVENT_ON) < 0)
+                        return -1;
+        } else {
+                _cleanup_close_ int fd = -1;
+
+                /* When curl needs to remove an fd from us it closes
+                 * the fd first, and only then calls into us. This is
+                 * nasty, since we cannot pass the fd on to epoll()
+                 * anymore. Hence, duplicate the fds here, and keep a
+                 * copy for epoll which we control after use. */
+
+                fd = fcntl(s, F_DUPFD_CLOEXEC, 3);
+                if (fd < 0)
+                        return -1;
+
+                if (sd_event_add_io(g->event, &io, fd, events, curl_glue_on_io, g) < 0)
+                        return -1;
+
+                sd_event_source_set_description(io, "curl-io");
+
+                r = hashmap_put(g->ios, INT_TO_PTR(s+1), io);
+                if (r < 0) {
+                        log_oom();
+                        sd_event_source_unref(io);
+                        return -1;
+                }
+
+                r = hashmap_put(g->translate_fds, INT_TO_PTR(fd+1), INT_TO_PTR(s+1));
+                if (r < 0) {
+                        log_oom();
+                        hashmap_remove(g->ios, INT_TO_PTR(s+1));
+                        sd_event_source_unref(io);
+                        return -1;
+                }
+
+                /* Ownership of the fd passed to the hashmap/event source */
+                fd = -1;
+        }
+
+        return 0;
+}
+
+/* sd-event timer callback: tells curl its timeout expired so it can
+ * advance internal state machines, then checks for finished transfers. */
+static int curl_glue_on_timer(sd_event_source *s, uint64_t usec, void *userdata) {
+        CurlGlue *g = userdata;
+        int k = 0;
+
+        assert(s);
+        assert(g);
+
+        if (curl_multi_socket_action(g->curl, CURL_SOCKET_TIMEOUT, 0, &k) != CURLM_OK) {
+                log_debug("Failed to propagate timeout.");
+                return -EINVAL;
+        }
+
+        curl_glue_check_finished(g);
+        return 0;
+}
+
+/* CURLMOPT_TIMERFUNCTION handler: arms (or disables, for negative
+ * timeouts) a oneshot sd-event timer as requested by curl. */
+static int curl_glue_timer_callback(CURLM *curl, long timeout_ms, void *userdata) {
+        CurlGlue *g = userdata;
+        usec_t usec;
+
+        assert(curl);
+        assert(g);
+
+        if (timeout_ms < 0) {
+                /* Negative timeout means: disable the timer */
+                if (g->timer) {
+                        if (sd_event_source_set_enabled(g->timer, SD_EVENT_OFF) < 0)
+                                return -1;
+                }
+
+                return 0;
+        }
+
+        /* Round the ms deadline up to the next full microsecond */
+        usec = now(clock_boottime_or_monotonic()) + (usec_t) timeout_ms * USEC_PER_MSEC + USEC_PER_MSEC - 1;
+
+        if (g->timer) {
+                if (sd_event_source_set_time(g->timer, usec) < 0)
+                        return -1;
+
+                if (sd_event_source_set_enabled(g->timer, SD_EVENT_ONESHOT) < 0)
+                        return -1;
+        } else {
+                if (sd_event_add_time(g->event, &g->timer, clock_boottime_or_monotonic(), usec, 0, curl_glue_on_timer, g) < 0)
+                        return -1;
+
+                sd_event_source_set_description(g->timer, "curl-timer");
+        }
+
+        return 0;
+}
+
+/* Destroys a CurlGlue object: tears down the multi handle, all IO
+ * event sources and their duplicated fds, the timer, and the event
+ * loop reference. Always returns NULL so callers can write
+ * "g = curl_glue_unref(g);". NULL input is a no-op. */
+CurlGlue *curl_glue_unref(CurlGlue *g) {
+        sd_event_source *io;
+
+        if (!g)
+                return NULL;
+
+        if (g->curl)
+                curl_multi_cleanup(g->curl);
+
+        while ((io = hashmap_steal_first(g->ios))) {
+                int fd;
+
+                fd = sd_event_source_get_io_fd(io);
+                assert(fd >= 0);
+
+                hashmap_remove(g->translate_fds, INT_TO_PTR(fd+1));
+
+                safe_close(fd);
+                sd_event_source_unref(io);
+        }
+
+        hashmap_free(g->ios);
+
+        /* Fix: the original leaked both the fd translation hashmap and
+         * the CurlGlue allocation itself. */
+        hashmap_free(g->translate_fds);
+
+        sd_event_source_unref(g->timer);
+        sd_event_unref(g->event);
+
+        free(g);
+
+        return NULL;
+}
+
+/* Allocates a CurlGlue object and wires a fresh curl multi handle into
+ * the given sd-event loop ('event' may be NULL to use the default
+ * loop). On success *glue receives the new object; on failure a
+ * negative errno-style code is returned and nothing is leaked (the
+ * cleanup attribute handles partial construction). */
+int curl_glue_new(CurlGlue **glue, sd_event *event) {
+        _cleanup_(curl_glue_unrefp) CurlGlue *g = NULL;
+        int r;
+
+        g = new0(CurlGlue, 1);
+        if (!g)
+                return -ENOMEM;
+
+        if (event)
+                g->event = sd_event_ref(event);
+        else {
+                r = sd_event_default(&g->event);
+                if (r < 0)
+                        return r;
+        }
+
+        g->curl = curl_multi_init();
+        if (!g->curl)
+                return -ENOMEM;
+
+        /* Route curl's socket and timer management through our glue */
+        if (curl_multi_setopt(g->curl, CURLMOPT_SOCKETDATA, g) != CURLM_OK)
+                return -EINVAL;
+
+        if (curl_multi_setopt(g->curl, CURLMOPT_SOCKETFUNCTION, curl_glue_socket_callback) != CURLM_OK)
+                return -EINVAL;
+
+        if (curl_multi_setopt(g->curl, CURLMOPT_TIMERDATA, g) != CURLM_OK)
+                return -EINVAL;
+
+        if (curl_multi_setopt(g->curl, CURLMOPT_TIMERFUNCTION, curl_glue_timer_callback) != CURLM_OK)
+                return -EINVAL;
+
+        /* Transfer ownership to the caller */
+        *glue = g;
+        g = NULL;
+
+        return 0;
+}
+
+/* Creates a curl easy handle for 'url' with our standard options set
+ * (redirect following, user agent, caller's 'userdata' as CURLOPT_PRIVATE).
+ * On success *ret owns the handle; on failure it is cleaned up. */
+int curl_glue_make(CURL **ret, const char *url, void *userdata) {
+        const char *useragent;
+        CURL *c;
+        int r;
+
+        assert(ret);
+        assert(url);
+
+        c = curl_easy_init();
+        if (!c)
+                return -ENOMEM;
+
+        /* curl_easy_setopt(c, CURLOPT_VERBOSE, 1L); */
+
+        if (curl_easy_setopt(c, CURLOPT_URL, url) != CURLE_OK) {
+                r = -EIO;
+                goto fail;
+        }
+
+        if (curl_easy_setopt(c, CURLOPT_PRIVATE, userdata) != CURLE_OK) {
+                r = -EIO;
+                goto fail;
+        }
+
+        /* e.g. "systemd-import/218" */
+        useragent = strappenda(program_invocation_short_name, "/" PACKAGE_VERSION);
+        if (curl_easy_setopt(c, CURLOPT_USERAGENT, useragent) != CURLE_OK) {
+                r = -EIO;
+                goto fail;
+        }
+
+        if (curl_easy_setopt(c, CURLOPT_FOLLOWLOCATION, 1L) != CURLE_OK) {
+                r = -EIO;
+                goto fail;
+        }
+
+        *ret = c;
+        return 0;
+
+fail:
+        curl_easy_cleanup(c);
+        return r;
+}
+
+/* Hands an easy handle over to the multi handle for processing. */
+int curl_glue_add(CurlGlue *g, CURL *c) {
+        assert(g);
+        assert(c);
+
+        if (curl_multi_add_handle(g->curl, c) != CURLM_OK)
+                return -EIO;
+
+        return 0;
+}
+
+/* Detaches an easy handle from the multi handle (if still attached)
+ * and destroys it. NULL 'c' is a no-op. */
+void curl_glue_remove_and_free(CurlGlue *g, CURL *c) {
+        assert(g);
+
+        if (!c)
+                return;
+
+        if (g->curl)
+                curl_multi_remove_handle(g->curl, c);
+
+        curl_easy_cleanup(c);
+}
+
+/* Builds a curl_slist from a NULL-terminated list of string arguments.
+ * Returns NULL if 'first' is NULL or on allocation failure (in which
+ * case any partial list is freed). */
+struct curl_slist *curl_slist_new(const char *first, ...) {
+        struct curl_slist *l;
+        va_list ap;
+
+        if (!first)
+                return NULL;
+
+        l = curl_slist_append(NULL, first);
+        if (!l)
+                return NULL;
+
+        va_start(ap, first);
+
+        for (;;) {
+                struct curl_slist *n;
+                const char *i;
+
+                i = va_arg(ap, const char*);
+                if (!i)
+                        break;
+
+                n = curl_slist_append(l, i);
+                if (!n) {
+                        va_end(ap);
+                        curl_slist_free_all(l);
+                        return NULL;
+                }
+
+                l = n;
+        }
+
+        va_end(ap);
+        return l;
+}
+
+/* If the header line in 'contents' (length 'sz', not NUL-terminated)
+ * begins with 'field' (e.g. "X-Foo-Token:"), duplicates its
+ * whitespace-trimmed value into *value. Returns 1 on a match, 0 if
+ * the field does not match or the value is malformed, -ENOMEM on OOM. */
+int curl_header_strdup(const void *contents, size_t sz, const char *field, char **value) {
+        const char *p = contents;
+        size_t l;
+        char *s;
+
+        l = strlen(field);
+        if (sz < l)
+                return 0;
+
+        if (memcmp(p, field, l) != 0)
+                return 0;
+
+        p += l;
+        sz -= l;
+
+        /* Refuse values containing embedded NUL bytes */
+        if (memchr(p, 0, sz))
+                return 0;
+
+        /* Skip over leading whitespace */
+        while (sz > 0 && strchr(WHITESPACE, p[0])) {
+                p++;
+                sz--;
+        }
+
+        /* Truncate trailing whitespace */
+        while (sz > 0 && strchr(WHITESPACE, p[sz-1]))
+                sz--;
+
+        s = strndup(p, sz);
+        if (!s)
+                return -ENOMEM;
+
+        *value = s;
+        return 1;
+}
diff --git a/src/import/curl-util.h b/src/import/curl-util.h
new file mode 100644 (file)
index 0000000..5a7550d
--- /dev/null
@@ -0,0 +1,56 @@
+/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
+
+#pragma once
+
+/***
+  This file is part of systemd.
+
+  Copyright 2014 Lennart Poettering
+
+  systemd is free software; you can redistribute it and/or modify it
+  under the terms of the GNU Lesser General Public License as published by
+  the Free Software Foundation; either version 2.1 of the License, or
+  (at your option) any later version.
+
+  systemd is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  Lesser General Public License for more details.
+
+  You should have received a copy of the GNU Lesser General Public License
+  along with systemd; If not, see <http://www.gnu.org/licenses/>.
+***/
+
+#include <sys/types.h>
+#include <curl/curl.h>
+
+#include "hashmap.h"
+#include "sd-event.h"
+
+typedef struct CurlGlue CurlGlue;
+
+/* Glue object binding a curl multi handle to an sd-event loop. */
+struct CurlGlue {
+        sd_event *event;               /* event loop driving the transfers */
+        CURLM *curl;                   /* the curl multi handle */
+        sd_event_source *timer;        /* oneshot timer armed on curl's request */
+        Hashmap *ios;                  /* curl socket+1 -> sd_event_source */
+        Hashmap *translate_fds;        /* duplicated fd+1 -> curl socket+1 */
+
+        /* invoked whenever a transfer completes */
+        void (*on_finished)(CurlGlue *g, CURL *curl, CURLcode code);
+        void *userdata;
+};
+
+int curl_glue_new(CurlGlue **glue, sd_event *event);
+CurlGlue* curl_glue_unref(CurlGlue *glue);
+
+DEFINE_TRIVIAL_CLEANUP_FUNC(CurlGlue*, curl_glue_unref);
+
+int curl_glue_make(CURL **ret, const char *url, void *userdata);
+int curl_glue_add(CurlGlue *g, CURL *c);
+void curl_glue_remove_and_free(CurlGlue *g, CURL *c);
+
+struct curl_slist *curl_slist_new(const char *first, ...) _sentinel_;
+int curl_header_strdup(const void *contents, size_t sz, const char *field, char **value);
+
+DEFINE_TRIVIAL_CLEANUP_FUNC(CURL*, curl_easy_cleanup);
+DEFINE_TRIVIAL_CLEANUP_FUNC(struct curl_slist*, curl_slist_free_all);
diff --git a/src/import/import-dck.c b/src/import/import-dck.c
new file mode 100644 (file)
index 0000000..7c83ff7
--- /dev/null
@@ -0,0 +1,1156 @@
+/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
+
+/***
+  This file is part of systemd.
+
+  Copyright 2014 Lennart Poettering
+
+  systemd is free software; you can redistribute it and/or modify it
+  under the terms of the GNU Lesser General Public License as published by
+  the Free Software Foundation; either version 2.1 of the License, or
+  (at your option) any later version.
+
+  systemd is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  Lesser General Public License for more details.
+
+  You should have received a copy of the GNU Lesser General Public License
+  along with systemd; If not, see <http://www.gnu.org/licenses/>.
+***/
+
+#include <curl/curl.h>
+#include <sys/prctl.h>
+
+#include "hashmap.h"
+#include "set.h"
+#include "json.h"
+#include "strv.h"
+#include "curl-util.h"
+#include "import-dck.h"
+#include "btrfs-util.h"
+#include "aufs-util.h"
+
+/* TODO:
+  - convert json bits
+  - man page
+  - fall back to btrfs loop pool device
+*/
+
+typedef struct DckImportJob DckImportJob;
+typedef struct DckImportName DckImportName;
+
+/* The distinct registry requests made while pulling one image */
+typedef enum DckImportJobType {
+        DCK_IMPORT_JOB_IMAGES,
+        DCK_IMPORT_JOB_TAGS,
+        DCK_IMPORT_JOB_ANCESTRY,
+        DCK_IMPORT_JOB_JSON,
+        DCK_IMPORT_JOB_LAYER,
+} DckImportJobType;
+
+/* One in-flight HTTP transfer, possibly shared by several names */
+struct DckImportJob {
+        DckImport *import;
+        DckImportJobType type;
+        bool done;
+
+        char *url;
+
+        Set *needed_by; /* DckImport Name objects */
+
+        CURL *curl;
+        struct curl_slist *request_header;
+        void *payload;          /* downloaded body, grows as data arrives */
+        size_t payload_size;
+
+        char *response_token;        /* auth token from the index server */
+        char **response_registries;  /* registry endpoints from the index */
+
+        char *temp_path;   /* btrfs subvolume the layer is unpacked into */
+        char *final_path;  /* renamed to this once complete */
+
+        pid_t tar_pid;     /* child unpacking the layer stream */
+        FILE *tar_stream;  /* pipe feeding the tar child */
+};
+
+/* One image reference ("name:tag") being pulled */
+struct DckImportName {
+        DckImport *import;
+
+        char *name;
+        char *tag;
+        char *id;     /* resolved image id */
+        char *local;  /* local container name to create, if any */
+
+        DckImportJob *job_images, *job_tags, *job_ancestry, *job_json, *job_layer;
+
+        char **ancestry;            /* layer ids, base first */
+        unsigned current_ancestry;  /* index of the layer being fetched */
+
+        bool force_local;  /* replace an existing local image */
+};
+
+/* Top-level pull context */
+struct DckImport {
+        sd_event *event;
+        CurlGlue *glue;
+
+        Hashmap *names;
+        Hashmap *jobs;
+
+        dck_import_on_finished on_finished;
+        void *userdata;
+};
+
+#define PROTOCOL_PREFIX "https://"
+#define INDEX_HOST "index.do" /* the URL we get the data from */ "cker.io"
+
+#define HEADER_TOKEN "X-Do" /* the HTTP header for the auth token */ "cker-Token:"
+#define HEADER_REGISTRY "X-Do" /*the HTTP header for the registry */ "cker-Endpoints:"
+
+#define PAYLOAD_MAX (16*1024*1024)
+#define LAYERS_MAX 2048
+
+static int dck_import_name_add_job(DckImportName *name, DckImportJobType type, const char *url, DckImportJob **ret);
+
+/* Destroys a job: detaches and frees its curl handle, stops any tar
+ * child, and removes a half-written temporary subvolume. Returns NULL.
+ * NOTE(review): the tar child is only sent SIGTERM, not reaped here —
+ * presumably collected elsewhere; verify no zombie is left behind. */
+static DckImportJob *dck_import_job_unref(DckImportJob *job) {
+        if (!job)
+                return NULL;
+
+        if (job->import)
+                curl_glue_remove_and_free(job->import->glue, job->curl);
+        curl_slist_free_all(job->request_header);
+
+        if (job->tar_stream)
+                fclose(job->tar_stream);
+
+        free(job->final_path);
+
+        if (job->temp_path) {
+                /* Incomplete download: drop the partial subvolume */
+                btrfs_subvol_remove(job->temp_path);
+                free(job->temp_path);
+        }
+
+        set_free(job->needed_by);
+
+        if (job->tar_pid > 0)
+                kill(job->tar_pid, SIGTERM);
+
+        free(job->url);
+        free(job->payload);
+        free(job->response_token);
+        strv_free(job->response_registries);
+
+        free(job);
+
+        return NULL;
+}
+
+/* Destroys a name object, first unregistering it from the needed_by
+ * sets of every job that was fetching data for it. Returns NULL. */
+static DckImportName *dck_import_name_unref(DckImportName *name) {
+        if (!name)
+                return NULL;
+
+        if (name->job_images)
+                set_remove(name->job_images->needed_by, name);
+
+        if (name->job_tags)
+                set_remove(name->job_tags->needed_by, name);
+
+        if (name->job_ancestry)
+                set_remove(name->job_ancestry->needed_by, name);
+
+        if (name->job_json)
+                set_remove(name->job_json->needed_by, name);
+
+        if (name->job_layer)
+                set_remove(name->job_layer->needed_by, name);
+
+        free(name->name);
+        free(name->id);
+        free(name->tag);
+        free(name->local);
+
+        strv_free(name->ancestry);
+        free(name);
+
+        return NULL;
+}
+
+DEFINE_TRIVIAL_CLEANUP_FUNC(DckImportJob*, dck_import_job_unref);
+DEFINE_TRIVIAL_CLEANUP_FUNC(DckImportName*, dck_import_name_unref);
+
+/* Reports completion (or failure, via negative 'error') either through
+ * the user-supplied callback or by exiting the event loop. */
+static void dck_import_finish(DckImport *import, int error) {
+        assert(import);
+
+        if (import->on_finished)
+                import->on_finished(import, error, import->userdata);
+        else
+                sd_event_exit(import->event, error);
+}
+
+/* Parses a JSON payload expected to contain exactly one string — an
+ * image/layer id — and returns it in *ret. Rejects embedded NULs,
+ * trailing tokens and malformed ids with -EBADMSG. */
+static int parse_id(const void *payload, size_t size, char **ret) {
+        _cleanup_free_ char *buf = NULL, *id = NULL, *other = NULL;
+        union json_value v = {};
+        void *json_state = NULL;
+        const char *p;
+        int t;
+
+        assert(payload);
+        assert(ret);
+
+        if (size <= 0)
+                return -EBADMSG;
+
+        if (memchr(payload, 0, size))
+                return -EBADMSG;
+
+        /* NUL-terminate for the tokenizer */
+        buf = strndup(payload, size);
+        if (!buf)
+                return -ENOMEM;
+
+        p = buf;
+        t = json_tokenize(&p, &id, &v, &json_state, NULL);
+        if (t < 0)
+                return t;
+        if (t != JSON_STRING)
+                return -EBADMSG;
+
+        /* Exactly one token allowed */
+        t = json_tokenize(&p, &other, &v, &json_state, NULL);
+        if (t < 0)
+                return t;
+        if (t != JSON_END)
+                return -EBADMSG;
+
+        if (!dck_id_is_valid(id))
+                return -EBADMSG;
+
+        *ret = id;
+        id = NULL;
+
+        return 0;
+}
+
+/* Parses a JSON array of layer-id strings ("ancestry") into a strv,
+ * reversed so the base layer comes first. Enforces LAYERS_MAX,
+ * uniqueness and non-emptiness; returns -EBADMSG on malformed input. */
+static int parse_ancestry(const void *payload, size_t size, char ***ret) {
+        _cleanup_free_ char *buf = NULL;
+        void *json_state = NULL;
+        const char *p;
+        enum {
+                STATE_BEGIN,
+                STATE_ITEM,
+                STATE_COMMA,
+                STATE_END,
+        } state = STATE_BEGIN;
+        _cleanup_strv_free_ char **l = NULL;
+        size_t n = 0, allocated = 0;
+
+        if (size <= 0)
+                return -EBADMSG;
+
+        if (memchr(payload, 0, size))
+                return -EBADMSG;
+
+        /* NUL-terminate for the tokenizer */
+        buf = strndup(payload, size);
+        if (!buf)
+                return -ENOMEM;
+
+        p = buf;
+        for (;;) {
+                /* Fix: initialize to NULL — if json_tokenize() fails
+                 * before assigning str, the cleanup handler would
+                 * otherwise free an indeterminate pointer. */
+                _cleanup_free_ char *str = NULL;
+                union json_value v = {};
+                int t;
+
+                t = json_tokenize(&p, &str, &v, &json_state, NULL);
+                if (t < 0)
+                        return t;
+
+                switch (state) {
+
+                case STATE_BEGIN:
+                        if (t == JSON_ARRAY_OPEN)
+                                state = STATE_ITEM;
+                        else
+                                return -EBADMSG;
+
+                        break;
+
+                case STATE_ITEM:
+                        if (t == JSON_STRING) {
+                                if (!dck_id_is_valid(str))
+                                        return -EBADMSG;
+
+                                if (n+1 > LAYERS_MAX)
+                                        return -EFBIG;
+
+                                if (!GREEDY_REALLOC(l, allocated, n + 2))
+                                        return -ENOMEM;
+
+                                l[n++] = str;
+                                str = NULL;
+                                l[n] = NULL;
+
+                                state = STATE_COMMA;
+
+                        } else if (t == JSON_ARRAY_CLOSE)
+                                state = STATE_END;
+                        else
+                                return -EBADMSG;
+
+                        break;
+
+                case STATE_COMMA:
+                        if (t == JSON_COMMA)
+                                state = STATE_ITEM;
+                        else if (t == JSON_ARRAY_CLOSE)
+                                state = STATE_END;
+                        else
+                                return -EBADMSG;
+                        break;
+
+                case STATE_END:
+                        if (t == JSON_END) {
+
+                                if (strv_isempty(l))
+                                        return -EBADMSG;
+
+                                if (!strv_is_uniq(l))
+                                        return -EBADMSG;
+
+                                /* The registry lists newest first; we want base first */
+                                l = strv_reverse(l);
+
+                                *ret = l;
+                                l = NULL;
+                                return 0;
+                        } else
+                                return -EBADMSG;
+                }
+
+        }
+}
+
+/* Returns the layer id currently being fetched, or NULL when the
+ * ancestry is unknown or exhausted. */
+static const char *dck_import_name_current_layer(DckImportName *name) {
+        assert(name);
+
+        if (strv_isempty(name->ancestry))
+                return NULL;
+
+        return name->ancestry[name->current_ancestry];
+}
+
+/* Returns the parent of the layer currently being fetched, or NULL for
+ * the base layer (which has no parent). */
+static const char *dck_import_name_current_base_layer(DckImportName *name) {
+        assert(name);
+
+        if (strv_isempty(name->ancestry))
+                return NULL;
+
+        if (name->current_ancestry <= 0)
+                return NULL;
+
+        return name->ancestry[name->current_ancestry-1];
+}
+
+/* Returns the registry endpoints reported by the completed "images"
+ * job, or NULL while that job is missing, unfinished or empty. */
+static char** dck_import_name_get_registries(DckImportName *name) {
+        assert(name);
+
+        if (!name->job_images)
+                return NULL;
+
+        if (!name->job_images->done)
+                return NULL;
+
+        if (strv_isempty(name->job_images->response_registries))
+                return NULL;
+
+        return name->job_images->response_registries;
+}
+
+/* Returns the auth token from the completed "images" job, or NULL
+ * while that job is missing or unfinished. */
+static const char*dck_import_name_get_token(DckImportName *name) {
+        assert(name);
+
+        if (!name->job_images)
+                return NULL;
+
+        if (!name->job_images->done)
+                return NULL;
+
+        return name->job_images->response_token;
+}
+
+/* Called whenever a job finishes: if every job this name depends on is
+ * complete and no further layer remains, snapshot the final image to
+ * its local name (if requested) and report overall completion. */
+static void dck_import_name_maybe_finish(DckImportName *name) {
+        int r;
+
+        assert(name);
+
+        if (!name->job_images || !name->job_images->done)
+                return;
+
+        if (!name->job_ancestry || !name->job_ancestry->done)
+                return;
+
+        if (!name->job_json || !name->job_json->done)
+                return;
+
+        /* Fix: the original re-tested job_json->done here (copy/paste
+         * error); the layer job must be checked for its own completion. */
+        if (name->job_layer && !name->job_layer->done)
+                return;
+
+        /* More layers still to download? */
+        if (dck_import_name_current_layer(name))
+                return;
+
+        if (name->local) {
+                const char *p, *q;
+
+                assert(name->id);
+
+                p = strappenda("/var/lib/container/", name->local);
+                q = strappenda("/var/lib/container/.dck-", name->id);
+
+                if (name->force_local) {
+                        /* Best effort: drop any pre-existing image */
+                        (void) btrfs_subvol_remove(p);
+                        (void) rm_rf(p, false, true, false);
+                }
+
+                r = btrfs_subvol_snapshot(q, p, false, false);
+                if (r < 0) {
+                        log_error_errno(r, "Failed to snapshot final image: %m");
+                        dck_import_finish(name->import, r);
+                        return;
+                }
+
+                log_info("Created new image %s.", p);
+        }
+
+        dck_import_finish(name->import, 0);
+}
+
+static int dck_import_job_run_tar(DckImportJob *job) {
+        _cleanup_close_pair_ int pipefd[2] = { -1, -1 };
+        bool gzip;
+
+        assert(job);
+
+        /* Lazily forks off "tar" to extract the downloaded layer into
+         * job->temp_path, connected via a pipe wrapped in job->tar_stream.
+         * Called repeatedly as payload arrives; a no-op until we know
+         * enough to start, and once tar is already running. */
+
+        /* A stream to run tar on? */
+        if (!job->temp_path)
+                return 0;
+
+        /* tar already forked off? */
+        if (job->tar_stream)
+                return 0;
+
+        /* Maybe fork off tar, if we have enough to figure out that
+         * something is gzip compressed or not */
+
+        if (job->payload_size < 2)
+                return 0;
+
+        /* Detect gzip signature */
+        gzip = ((uint8_t*) job->payload)[0] == 0x1f &&
+               ((uint8_t*) job->payload)[1] == 0x8b;
+
+        assert(!job->tar_stream);
+        assert(job->tar_pid <= 0);
+
+        if (pipe2(pipefd, O_CLOEXEC) < 0)
+                return log_error_errno(errno, "Failed to create pipe for tar: %m");
+
+        job->tar_pid = fork();
+        if (job->tar_pid < 0)
+                return log_error_errno(errno, "Failed to fork off tar: %m");
+        if (job->tar_pid == 0) {
+                int null_fd;
+
+                /* Child: sanitize signal state and die with the parent. */
+                reset_all_signal_handlers();
+                reset_signal_mask();
+                assert_se(prctl(PR_SET_PDEATHSIG, SIGTERM) == 0);
+
+                /* Close the write end; the read end becomes our stdin. */
+                pipefd[1] = safe_close(pipefd[1]);
+
+                if (dup2(pipefd[0], STDIN_FILENO) != STDIN_FILENO) {
+                        log_error_errno(errno, "Failed to dup2() fd: %m");
+                        _exit(EXIT_FAILURE);
+                }
+
+                if (pipefd[0] != STDIN_FILENO)
+                        safe_close(pipefd[0]);
+                if (pipefd[1] != STDIN_FILENO)
+                        safe_close(pipefd[1]);
+
+                /* Silence tar's stdout; stderr is left for diagnostics. */
+                null_fd = open("/dev/null", O_WRONLY|O_NOCTTY);
+                if (null_fd < 0) {
+                        log_error_errno(errno, "Failed to open /dev/null: %m");
+                        _exit(EXIT_FAILURE);
+                }
+
+                if (dup2(null_fd, STDOUT_FILENO) != STDOUT_FILENO) {
+                        log_error_errno(errno, "Failed to dup2() fd: %m");
+                        _exit(EXIT_FAILURE);
+                }
+
+                if (null_fd != STDOUT_FILENO)
+                        safe_close(null_fd);
+
+                /* Pick -xz vs -x based on the gzip signature detected above. */
+                execlp("tar", "tar", "-C", job->temp_path, gzip ? "-xz" : "-x", NULL);
+                _exit(EXIT_FAILURE);
+        }
+
+        /* Parent: keep only the write end, wrapped as a stdio stream. */
+        pipefd[0] = safe_close(pipefd[0]);
+
+        job->tar_stream = fdopen(pipefd[1], "w");
+        if (!job->tar_stream)
+                return log_error_errno(errno, "Failed to allocate tar stream: %m");
+
+        pipefd[1] = -1;
+
+        /* Flush the payload buffered so far into tar and drop our copy;
+         * further data is streamed directly by the write callback. */
+        if (fwrite(job->payload, 1, job->payload_size, job->tar_stream) != job->payload_size)
+                return log_error_errno(errno, "Couldn't write payload: %m");
+
+        free(job->payload);
+        job->payload = NULL;
+        job->payload_size = 0;
+
+        return 0;
+}
+
+static int dck_import_name_pull_layer(DckImportName *name) {
+        _cleanup_free_ char *path = NULL, *temp = NULL;
+        const char *url, *layer = NULL, *base = NULL;
+        char **rg;
+        int r;
+
+        assert(name);
+
+        /* Walks down the ancestry list, skipping layers that already exist
+         * locally, and queues a download job for the first missing one. */
+
+        if (name->job_layer) {
+                set_remove(name->job_layer->needed_by, name);
+                name->job_layer = NULL;
+        }
+
+        for (;;) {
+                layer = dck_import_name_current_layer(name);
+                if (!layer) {
+                        /* All layers are in place; maybe we are done. */
+                        dck_import_name_maybe_finish(name);
+                        return 0;
+                }
+
+                path = strjoin("/var/lib/container/.dck-", layer, NULL);
+                if (!path)
+                        return log_oom();
+
+                if (laccess(path, F_OK) < 0) {
+                        if (errno == ENOENT)
+                                break;
+
+                        return log_error_errno(errno, "Failed to check for container: %m");
+                }
+
+                log_info("Layer %s already exists, skipping.", layer);
+
+                name->current_ancestry++;
+
+                free(path);
+                path = NULL;
+        }
+
+        rg = dck_import_name_get_registries(name);
+        assert(rg && rg[0]);
+
+        url = strappenda(PROTOCOL_PREFIX, rg[0], "/v1/images/", layer, "/layer");
+        r = dck_import_name_add_job(name, DCK_IMPORT_JOB_LAYER, url, &name->job_layer);
+        if (r < 0) {
+                log_error_errno(r, "Failed to issue HTTP request: %m");
+                return r;
+        }
+        if (r == 0) /* Already downloading this one? */
+                return 0;
+
+        log_info("Pulling layer %s...", layer);
+
+        r = tempfn_random(path, &temp);
+        if (r < 0)
+                return log_oom();
+
+        /* Extract into a snapshot of the base layer if there is one, so this
+         * layer only adds its delta; otherwise start from an empty subvolume. */
+        base = dck_import_name_current_base_layer(name);
+        if (base) {
+                const char *base_path;
+
+                /* Fix: use the alloca-based strappenda() — the heap-allocating
+                 * strappend() was never NULL-checked and its result leaked. */
+                base_path = strappenda("/var/lib/container/.dck-", base);
+                r = btrfs_subvol_snapshot(base_path, temp, false, true);
+        } else
+                r = btrfs_subvol_make(temp);
+
+        if (r < 0)
+                return log_error_errno(r, "Failed to make btrfs subvolume %s", temp);
+
+        /* Hand ownership of both strings over to the job. */
+        name->job_layer->final_path = path;
+        name->job_layer->temp_path = temp;
+        path = temp = NULL;
+
+        return 0;
+}
+
+static void dck_import_name_job_finished(DckImportName *name, DckImportJob *job) {
+        int r;
+
+        assert(name);
+        assert(job);
+
+        /* Pull state machine: each finished job triggers the next step —
+         * images -> tags -> ancestry + json -> layer downloads -> finish. */
+
+        if (name->job_images == job) {
+                const char *url;
+                char **rg;
+
+                assert(!name->job_tags);
+                assert(!name->job_ancestry);
+                assert(!name->job_json);
+                assert(!name->job_layer);
+
+                rg = dck_import_name_get_registries(name);
+                if (strv_isempty(rg)) {
+                        log_error("Didn't get registry information.");
+                        r = -EBADMSG;
+                        goto fail;
+                }
+
+                log_info("Index lookup succeeded, directed to registry %s.", rg[0]);
+
+                url = strappenda(PROTOCOL_PREFIX, rg[0], "/v1/repositories/", name->name, "/tags/", name->tag);
+
+                r = dck_import_name_add_job(name, DCK_IMPORT_JOB_TAGS, url, &name->job_tags);
+                if (r < 0) {
+                        log_error_errno(r, "Failed to issue HTTP request: %m");
+                        goto fail;
+                }
+
+        } else if (name->job_tags == job) {
+                const char *url;
+                char *id = NULL, **rg;
+
+                assert(!name->job_ancestry);
+                assert(!name->job_json);
+                assert(!name->job_layer);
+
+                r = parse_id(job->payload, job->payload_size, &id);
+                if (r < 0) {
+                        log_error_errno(r, "Failed to parse JSON id.");
+                        goto fail;
+                }
+
+                free(name->id);
+                name->id = id;
+
+                rg = dck_import_name_get_registries(name);
+                assert(rg && rg[0]);
+
+                log_info("Tag lookup succeeded, resolved to layer %s.", name->id);
+
+                /* Kick off the ancestry and json queries in parallel. */
+                url = strappenda(PROTOCOL_PREFIX, rg[0], "/v1/images/", name->id, "/ancestry");
+                r = dck_import_name_add_job(name, DCK_IMPORT_JOB_ANCESTRY, url, &name->job_ancestry);
+                if (r < 0) {
+                        log_error_errno(r, "Failed to issue HTTP request: %m");
+                        goto fail;
+                }
+
+                url = strappenda(PROTOCOL_PREFIX, rg[0], "/v1/images/", name->id, "/json");
+                r = dck_import_name_add_job(name, DCK_IMPORT_JOB_JSON, url, &name->job_json);
+                if (r < 0) {
+                        log_error_errno(r, "Failed to issue HTTP request: %m");
+                        goto fail;
+                }
+
+        } else if (name->job_ancestry == job) {
+                char **ancestry = NULL, **i;
+                unsigned n;
+
+                r = parse_ancestry(job->payload, job->payload_size, &ancestry);
+                if (r < 0) {
+                        /* Fix: message said "id", copy-pasted from the tags branch. */
+                        log_error_errno(r, "Failed to parse JSON ancestry.");
+                        goto fail;
+                }
+
+                /* The list must end in the resolved top layer. ('n' is
+                 * unsigned, so test == 0 rather than <= 0.) */
+                n = strv_length(ancestry);
+                if (n == 0 || !streq(ancestry[n-1], name->id)) {
+                        log_error("Ancestry doesn't end in main layer.");
+                        /* Fix: free the parsed list on this error path; it leaked. */
+                        strv_free(ancestry);
+                        r = -EBADMSG;
+                        goto fail;
+                }
+
+                log_info("Ancestor lookup succeeded, requires layers:\n");
+                STRV_FOREACH(i, ancestry)
+                        log_info("\t%s", *i);
+
+                strv_free(name->ancestry);
+                name->ancestry = ancestry;
+
+                name->current_ancestry = 0;
+                r = dck_import_name_pull_layer(name);
+                if (r < 0)
+                        goto fail;
+
+        } else if (name->job_json == job) {
+
+                dck_import_name_maybe_finish(name);
+
+        } else if (name->job_layer == job) {
+
+                name->current_ancestry ++;
+                r = dck_import_name_pull_layer(name);
+                if (r < 0)
+                        goto fail;
+
+        } else
+                assert_not_reached("Got finished event for unknown curl object");
+
+        return;
+
+fail:
+        dck_import_finish(name->import, r);
+}
+
+static void dck_import_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {
+        DckImportJob *job = NULL;
+        CURLcode code;
+        DckImportName *n;
+        long status;
+        Iterator i;
+        int r;
+
+        /* Invoked by the curl glue whenever a transfer completes: validates
+         * the HTTP status, post-processes layer downloads (reap tar, resolve
+         * aufs whiteouts, publish the subvolume), then notifies every name
+         * waiting on this job. */
+
+        if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, &job) != CURLE_OK)
+                return;
+
+        if (!job)
+                return;
+
+        job->done = true;
+
+        if (result != CURLE_OK) {
+                /* Fix: use 'result' here — 'code' was read uninitialized. */
+                log_error("Transfer failed: %s", curl_easy_strerror(result));
+                r = -EIO;
+                goto fail;
+        }
+
+        code = curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status);
+        if (code != CURLE_OK) {
+                log_error("Failed to retrieve response code: %s", curl_easy_strerror(code));
+                r = -EIO;
+                goto fail;
+        } else if (status >= 300) {
+                log_error("HTTP request to %s failed with code %li.", job->url, status);
+                r = -EIO;
+                goto fail;
+        } else if (status < 200) {
+                log_error("HTTP request to %s finished with unexpected code %li.", job->url, status);
+                r = -EIO;
+                goto fail;
+        }
+
+        switch (job->type) {
+
+        case DCK_IMPORT_JOB_LAYER: {
+                siginfo_t si;
+
+                if (!job->tar_stream) {
+                        log_error("Downloaded layer too short.");
+                        r = -EIO;
+                        goto fail;
+                }
+
+                /* Close the pipe so tar sees EOF, then reap it. */
+                fclose(job->tar_stream);
+                job->tar_stream = NULL;
+
+                assert(job->tar_pid > 0);
+
+                r = wait_for_terminate(job->tar_pid, &si);
+                if (r < 0) {
+                        log_error_errno(r, "Failed to wait for tar process: %m");
+                        goto fail;
+                }
+
+                job->tar_pid = 0;
+
+                if (si.si_code != CLD_EXITED || si.si_status != EXIT_SUCCESS) {
+                        /* Fix: 'r' is 0 here, so log_error_errno(r, ...) was wrong. */
+                        log_error("tar failed abnormally.");
+                        r = -EIO;
+                        goto fail;
+                }
+
+                r = aufs_resolve(job->temp_path);
+                if (r < 0) {
+                        log_error_errno(r, "Couldn't resolve aufs whiteouts: %m");
+                        goto fail;
+                }
+
+                r = btrfs_subvol_read_only(job->temp_path, true);
+                if (r < 0) {
+                        log_error_errno(r, "Failed to mark snapshot read-only: %m");
+                        goto fail;
+                }
+
+                if (rename(job->temp_path, job->final_path) < 0) {
+                        /* Fix: rename() reports its error via errno, not 'r'. */
+                        r = log_error_errno(errno, "Failed to rename snapshot: %m");
+                        goto fail;
+                }
+
+                log_info("Completed writing to layer %s", job->final_path);
+                break;
+        }
+
+        default:
+                ;
+        }
+
+        SET_FOREACH(n, job->needed_by, i)
+                dck_import_name_job_finished(n, job);
+
+        return;
+
+fail:
+        dck_import_finish(job->import, r);
+}
+
+static size_t dck_import_job_write_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
+        DckImportJob *j = userdata;
+        size_t sz = size * nmemb;
+        char *p;
+        int r;
+
+        /* curl CURLOPT_WRITEFUNCTION: once tar is running, stream the data
+         * straight into it; otherwise buffer into j->payload and check
+         * whether tar can be started yet. Returning a value != sz makes
+         * curl abort the transfer. */
+
+        assert(contents);
+        assert(j);
+
+        if (j->tar_stream) {
+                size_t l;
+
+                l = fwrite(contents, size, nmemb, j->tar_stream);
+                if (l != nmemb) {
+                        /* NOTE(review): fwrite() is not guaranteed to set
+                         * errno on a short write — confirm -errno is the
+                         * intended error code here. */
+                        r = -errno;
+                        goto fail;
+                }
+
+                return l;
+        }
+
+        /* Cap the in-memory buffer so a hostile server cannot OOM us. */
+        if (j->payload_size + sz > PAYLOAD_MAX) {
+                r = -EFBIG;
+                goto fail;
+        }
+
+        p = realloc(j->payload, j->payload_size + sz);
+        if (!p) {
+                r = -ENOMEM;
+                goto fail;
+        }
+
+        memcpy(p + j->payload_size, contents, sz);
+        j->payload_size += sz;
+        j->payload = p;
+
+        /* Maybe we buffered enough now to fork off tar. */
+        r = dck_import_job_run_tar(j);
+        if (r < 0)
+                goto fail;
+
+        return sz;
+
+fail:
+        dck_import_finish(j->import, r);
+        return 0;
+}
+
+static size_t dck_import_job_header_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
+        _cleanup_free_ char *registry = NULL;
+        size_t sz = size * nmemb;
+        DckImportJob *j = userdata;
+        char *token;
+        int r;
+
+        /* curl CURLOPT_HEADERFUNCTION: invoked once per response header
+         * line; picks up the session token and registry endpoint headers
+         * for subsequent requests. Returning != sz aborts the transfer. */
+
+        assert(contents);
+        assert(j);
+
+        r = curl_header_strdup(contents, sz, HEADER_TOKEN, &token);
+        if (r < 0) {
+                log_oom();
+                goto fail;
+        }
+        if (r > 0) {
+                /* A later token header replaces an earlier one. */
+                free(j->response_token);
+                j->response_token = token;
+        }
+
+        r = curl_header_strdup(contents, sz, HEADER_REGISTRY, &registry);
+        if (r < 0) {
+                log_oom();
+                goto fail;
+        }
+        if (r > 0) {
+                char **l, **i;
+
+                /* The header carries a comma-separated list of registry hosts;
+                 * validate each one before trusting it. */
+                l = strv_split(registry, ",");
+                if (!l) {
+                        r = log_oom();
+                        goto fail;
+                }
+
+                STRV_FOREACH(i, l) {
+                        if (!hostname_is_valid(*i)) {
+                                log_error("Registry hostname is not valid.");
+                                strv_free(l);
+                                r = -EBADMSG;
+                                goto fail;
+                        }
+                }
+
+                strv_free(j->response_registries);
+                j->response_registries = l;
+        }
+
+        return sz;
+
+fail:
+        dck_import_finish(j->import, r);
+        return 0;
+}
+
+static int dck_import_name_add_job(DckImportName *name, DckImportJobType type, const char *url, DckImportJob **ret) {
+        _cleanup_(dck_import_job_unrefp) DckImportJob *j = NULL;
+        DckImportJob *f = NULL;
+        const char *t, *token;
+        int r;
+
+        /* Creates a download job for 'url' and registers it with the curl
+         * glue. Returns 1 if a new job was created (stored in *ret), 0 if
+         * an existing job for the same URL was joined, negative on error.
+         * NOTE(review): when joining an existing job, *ret is left
+         * untouched, so the caller's job pointer stays NULL — verify the
+         * name still observes that job's completion. */
+
+        assert(name);
+        assert(url);
+        assert(ret);
+
+        log_info("Getting %s.", url);
+        /* Deduplicate: another name may already be fetching this URL. */
+        f = hashmap_get(name->import->jobs, url);
+        if (f) {
+                if (f->type != type)
+                        return -EINVAL;
+
+                r = set_put(f->needed_by, name);
+                if (r < 0)
+                        return r;
+
+                return 0;
+        }
+
+        r = hashmap_ensure_allocated(&name->import->jobs, &string_hash_ops);
+        if (r < 0)
+                return r;
+
+        j = new0(DckImportJob, 1);
+        if (!j)
+                return -ENOMEM;
+
+        j->import = name->import;
+        j->type = type;
+        j->url = strdup(url);
+        if (!j->url)
+                return -ENOMEM;
+
+        r = set_ensure_allocated(&j->needed_by, &trivial_hash_ops);
+        if (r < 0)
+                return r;
+
+        r = curl_glue_make(&j->curl, j->url, j);
+        if (r < 0)
+                return r;
+
+        /* Pass the session token back if we have one, otherwise request one. */
+        token = dck_import_name_get_token(name);
+        if (token)
+                t = strappenda("Authorization: Token ", token);
+        else
+                t = HEADER_TOKEN " true";
+
+        j->request_header = curl_slist_new("Accept: application/json", t, NULL);
+        if (!j->request_header)
+                return -ENOMEM;
+
+        if (curl_easy_setopt(j->curl, CURLOPT_HTTPHEADER, j->request_header) != CURLE_OK)
+                return -EIO;
+
+        if (curl_easy_setopt(j->curl, CURLOPT_WRITEFUNCTION, dck_import_job_write_callback) != CURLE_OK)
+                return -EIO;
+
+        if (curl_easy_setopt(j->curl, CURLOPT_WRITEDATA, j) != CURLE_OK)
+                return -EIO;
+
+        if (curl_easy_setopt(j->curl, CURLOPT_HEADERFUNCTION, dck_import_job_header_callback) != CURLE_OK)
+                return -EIO;
+
+        if (curl_easy_setopt(j->curl, CURLOPT_HEADERDATA, j) != CURLE_OK)
+                return -EIO;
+
+        r = curl_glue_add(name->import->glue, j->curl);
+        if (r < 0)
+                return r;
+
+        /* The jobs table keys by j->url, which lives as long as the job. */
+        r = hashmap_put(name->import->jobs, j->url, j);
+        if (r < 0)
+                return r;
+
+        r = set_put(j->needed_by, name);
+        if (r < 0) {
+                hashmap_remove(name->import->jobs, url);
+                return r;
+        }
+
+        *ret = j;
+        j = NULL;
+
+        return 1;
+}
+
+static int dck_import_name_begin(DckImportName *name) {
+        const char *endpoint;
+
+        assert(name);
+        assert(!name->job_images);
+
+        /* Kicks off the pull by asking the index server for the list of
+         * images belonging to this repository. */
+        endpoint = strappenda(PROTOCOL_PREFIX, INDEX_HOST, "/v1/repositories/", name->name, "/images");
+
+        return dck_import_name_add_job(name, DCK_IMPORT_JOB_IMAGES, endpoint, &name->job_images);
+}
+
+int dck_import_new(DckImport **import, sd_event *event, dck_import_on_finished on_finished, void *userdata) {
+        _cleanup_(dck_import_unrefp) DckImport *imp = NULL;
+        int r;
+
+        assert(import);
+
+        /* Allocates a new importer, attaching it to the given event loop,
+         * or to the default loop if none is passed in. */
+
+        imp = new0(DckImport, 1);
+        if (!imp)
+                return -ENOMEM;
+
+        imp->on_finished = on_finished;
+        imp->userdata = userdata;
+
+        if (event)
+                imp->event = sd_event_ref(event);
+        else {
+                r = sd_event_default(&imp->event);
+                if (r < 0)
+                        return r;
+        }
+
+        r = curl_glue_new(&imp->glue, imp->event);
+        if (r < 0)
+                return r;
+
+        /* Route curl completion events back into this importer. */
+        imp->glue->on_finished = dck_import_curl_on_finished;
+        imp->glue->userdata = imp;
+
+        *import = imp;
+        imp = NULL;
+
+        return 0;
+}
+
+DckImport* dck_import_unref(DckImport *import) {
+        DckImportName *name;
+        DckImportJob *job;
+
+        /* Releases the importer and everything it owns. Safe on NULL;
+         * always returns NULL so callers can reset their pointer. */
+
+        if (!import)
+                return NULL;
+
+        /* Drain and release the name table. */
+        while ((name = hashmap_steal_first(import->names)))
+                dck_import_name_unref(name);
+        hashmap_free(import->names);
+
+        /* Drain and release the job table. */
+        while ((job = hashmap_steal_first(import->jobs)))
+                dck_import_job_unref(job);
+        hashmap_free(import->jobs);
+
+        curl_glue_unref(import->glue);
+        sd_event_unref(import->event);
+
+        free(import);
+        return NULL;
+}
+
+int dck_import_cancel(DckImport *import, const char *name) {
+        DckImportName *found;
+
+        assert(import);
+        assert(name);
+
+        /* Returns > 0 if a pull by this name existed and was cancelled,
+         * 0 if there was nothing to cancel. */
+
+        found = hashmap_remove(import->names, name);
+        if (!found)
+                return 0;
+
+        dck_import_name_unref(found);
+        return 1;
+}
+
+int dck_import_pull(DckImport *import, const char *name, const char *tag, const char *local, bool force_local) {
+        _cleanup_(dck_import_name_unrefp) DckImportName *n = NULL;
+        int r;
+
+        assert(import);
+        assert(dck_name_is_valid(name));
+        assert(dck_tag_is_valid(tag));
+        assert(!local || machine_name_is_valid(local));
+
+        /* Begins pulling "name:tag"; if 'local' is non-NULL the resulting
+         * image is additionally published as /var/lib/container/<local>,
+         * replacing an existing one if 'force_local' is set. */
+
+        if (hashmap_get(import->names, name))
+                return -EEXIST;
+
+        r = hashmap_ensure_allocated(&import->names, &string_hash_ops);
+        if (r < 0)
+                return r;
+
+        n = new0(DckImportName, 1);
+        if (!n)
+                return -ENOMEM;
+
+        n->import = import;
+
+        n->name = strdup(name);
+        if (!n->name)
+                return -ENOMEM;
+
+        n->tag = strdup(tag);
+        if (!n->tag)
+                return -ENOMEM;
+
+        if (local) {
+                n->local = strdup(local);
+                if (!n->local)
+                        return -ENOMEM;
+                n->force_local = force_local;
+        }
+
+        /* Fix: key the table with our own copy of the string — the caller's
+         * 'name' may be stack-allocated (pull_dck passes a strndupa() result)
+         * and must not be required to outlive this entry. */
+        r = hashmap_put(import->names, n->name, n);
+        if (r < 0)
+                return r;
+
+        r = dck_import_name_begin(n);
+        if (r < 0) {
+                /* Cancelling removes and unrefs n; clear it so the cleanup
+                 * handler doesn't unref it a second time. */
+                dck_import_cancel(import, n->name);
+                n = NULL;
+                return r;
+        }
+
+        n = NULL;
+
+        return 0;
+}
+
+bool dck_name_is_valid(const char *name) {
+        const char *slash, *prefix;
+
+        /* A remote name must have the form "owner/name", with both halves
+         * being valid file names. */
+
+        if (isempty(name))
+                return false;
+
+        slash = strchr(name, '/');
+        if (!slash)
+                return false;
+
+        /* Validate the part after the slash first. */
+        if (!filename_is_valid(slash + 1))
+                return false;
+
+        /* Then the part before it, copied out on the stack. */
+        prefix = strndupa(name, slash - name);
+        return filename_is_valid(prefix);
+}
+
+bool dck_id_is_valid(const char *id) {
+        /* Layer IDs are lowercase hex strings that must also be usable as
+         * file names. */
+        return filename_is_valid(id) &&
+               in_charset(id, "0123456789abcdef");
+}
diff --git a/src/import/import-dck.h b/src/import/import-dck.h
new file mode 100644 (file)
index 0000000..cd2d27c
--- /dev/null
@@ -0,0 +1,39 @@
+/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
+
+/***
+  This file is part of systemd.
+
+  Copyright 2014 Lennart Poettering
+
+  systemd is free software; you can redistribute it and/or modify it
+  under the terms of the GNU Lesser General Public License as published by
+  the Free Software Foundation; either version 2.1 of the License, or
+  (at your option) any later version.
+
+  systemd is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  Lesser General Public License for more details.
+
+  You should have received a copy of the GNU Lesser General Public License
+  along with systemd; If not, see <http://www.gnu.org/licenses/>.
+***/
+
+#pragma once
+
+#include "sd-event.h"
+#include "util.h"
+
+typedef struct DckImport DckImport;
+
+/* Invoked once the whole pull operation completes; error is 0 on success,
+ * or a negative errno-style code on failure. */
+typedef void (*dck_import_on_finished)(DckImport *import, int error, void *userdata);
+
+int dck_import_new(DckImport **import, sd_event *event, dck_import_on_finished on_finished, void *userdata);
+DckImport* dck_import_unref(DckImport *import);
+
+DEFINE_TRIVIAL_CLEANUP_FUNC(DckImport*, dck_import_unref);
+
+int dck_import_pull(DckImport *import, const char *name, const char *tag, const char *local, bool force_local);
+int dck_import_cancel(DckImport *import, const char *name);
+
+bool dck_name_is_valid(const char *name);
+bool dck_id_is_valid(const char *id);
+#define dck_tag_is_valid(tag) filename_is_valid(tag)
diff --git a/src/import/import.c b/src/import/import.c
new file mode 100644 (file)
index 0000000..9bade38
--- /dev/null
@@ -0,0 +1,213 @@
+/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
+
+/***
+  This file is part of systemd.
+
+  Copyright 2014 Lennart Poettering
+
+  systemd is free software; you can redistribute it and/or modify it
+  under the terms of the GNU Lesser General Public License as published by
+  the Free Software Foundation; either version 2.1 of the License, or
+  (at your option) any later version.
+
+  systemd is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  Lesser General Public License for more details.
+
+  You should have received a copy of the GNU Lesser General Public License
+  along with systemd; If not, see <http://www.gnu.org/licenses/>.
+***/
+
+#include <getopt.h>
+
+#include "sd-event.h"
+#include "event-util.h"
+#include "verbs.h"
+#include "build.h"
+#include "import-dck.h"
+
+static bool arg_force = false;
+
+static void on_finished(DckImport *import, int error, void *userdata) {
+        sd_event *event = userdata;
+        assert(import);
+
+        /* Completion callback: report the outcome and leave the event loop,
+         * propagating the error code as the loop's exit status. */
+
+        if (error == 0)
+                log_info("Operation completed successfully.");
+        else
+                /* Fix: report failures at error level, not info level. */
+                log_error_errno(error, "Operation failed: %m");
+
+        sd_event_exit(event, error);
+}
+
+static int pull_dck(int argc, char *argv[], void *userdata) {
+        _cleanup_(dck_import_unrefp) DckImport *import = NULL;
+        _cleanup_event_unref_ sd_event *event = NULL;
+        const char *name, *tag, *local;
+        int r;
+
+        /* "pull-dck REMOTE[:TAG] [NAME]": parse the remote spec, derive a
+         * local image name, then drive the importer to completion. */
+
+        tag = strchr(argv[1], ':');
+        if (tag) {
+                name = strndupa(argv[1], tag - argv[1]);
+                tag++;
+        } else {
+                name = argv[1];
+                tag = "latest";
+        }
+
+        /* Default the local name to the part after the remote's slash. */
+        if (argc >= 3)
+                local = argv[2];
+        else {
+                local = strchr(name, '/');
+                if (local)
+                        local++;
+                else
+                        local = name;
+        }
+
+        /* "-" or an empty name means: download only, don't publish locally. */
+        if (streq(local, "-") || isempty(local))
+                local = NULL;
+
+        if (!dck_name_is_valid(name)) {
+                log_error("Remote name '%s' is not valid.", name);
+                return -EINVAL;
+        }
+
+        if (!dck_tag_is_valid(tag)) {
+                log_error("Tag name '%s' is not valid.", tag);
+                return -EINVAL;
+        }
+
+        if (local) {
+                const char *p;
+
+                /* Fix: validate 'local' here — the original tested 'tag'
+                 * (copy-paste), while the message already referred to the
+                 * local image name. */
+                if (!machine_name_is_valid(local)) {
+                        log_error("Local image name '%s' is not valid.", local);
+                        return -EINVAL;
+                }
+
+                p = strappenda("/var/lib/container/", local);
+                if (laccess(p, F_OK) >= 0) {
+                        if (!arg_force) {
+                                log_info("Image '%s' already exists.", local);
+                                return 0;
+                        }
+                } else if (errno != ENOENT)
+                        return log_error_errno(errno, "Can't check if image '%s' already exists: %m", local);
+
+                log_info("Pulling '%s' with tag '%s', saving as '%s'.", name, tag, local);
+        } else
+                log_info("Pulling '%s' with tag '%s'.", name, tag);
+
+        r = sd_event_default(&event);
+        if (r < 0)
+                return log_error_errno(r, "Failed to allocate event loop: %m");
+
+        /* Let SIGTERM/SIGINT terminate the event loop cleanly. */
+        assert_se(sigprocmask_many(SIG_BLOCK, SIGTERM, SIGINT, -1) == 0);
+        sd_event_add_signal(event, NULL, SIGTERM, NULL,  NULL);
+        sd_event_add_signal(event, NULL, SIGINT, NULL, NULL);
+
+        r = dck_import_new(&import, event, on_finished, event);
+        if (r < 0)
+                return log_error_errno(r, "Failed to allocate importer: %m");
+
+        r = dck_import_pull(import, name, tag, local, arg_force);
+        if (r < 0)
+                return log_error_errno(r, "Failed to pull image: %m");
+
+        r = sd_event_loop(event);
+        if (r < 0)
+                return log_error_errno(r, "Failed to run event loop: %m");
+
+        log_info("Exiting.");
+
+        return 0;
+}
+
+static int help(int argc, char *argv[], void *userdata) {
+
+        /* Prints usage; also serves as the "help" verb and the -h handler. */
+        printf("%s [OPTIONS...] {COMMAND} ...\n\n"
+               "Import container or virtual machine image.\n\n"
+               "  -h --help                   Show this help\n"
+               "     --version                Show package version\n"
+               "     --force                  Force creation of image\n\n"
+               "Commands:\n"
+               "  pull-dck REMOTE [NAME]      Download an image\n",
+               program_invocation_short_name);
+
+        return 0;
+}
+
+static int parse_argv(int argc, char *argv[]) {
+
+        enum {
+                ARG_VERSION = 0x100,
+                ARG_FORCE,
+        };
+
+        static const struct option options[] = {
+                { "help",            no_argument,       NULL, 'h'                 },
+                { "version",         no_argument,       NULL, ARG_VERSION         },
+                { "force",           no_argument,       NULL, ARG_FORCE           },
+                {}
+        };
+
+        int c;
+
+        assert(argc >= 0);
+        assert(argv);
+
+        /* Returns > 0 to continue with verb dispatch, 0 to exit
+         * successfully (help/version), < 0 on invalid options. */
+
+        while ((c = getopt_long(argc, argv, "h", options, NULL)) >= 0)
+
+                switch (c) {
+
+                case 'h':
+                        /* help() returns 0, turning -h into a clean exit. */
+                        return help(argc, argv, NULL);
+
+                case ARG_VERSION:
+                        puts(PACKAGE_STRING);
+                        puts(SYSTEMD_FEATURES);
+                        return 0;
+
+                case ARG_FORCE:
+                        arg_force = true;
+                        break;
+
+                case '?':
+                        return -EINVAL;
+
+                default:
+                        assert_not_reached("Unhandled option");
+                }
+
+        return 1;
+}
+
+static int import_main(int argc, char *argv[]) {
+
+        /* Verb dispatch table; "pull-dck REMOTE [NAME]" does the real work. */
+        static const Verb table[] = {
+                { "help",     VERB_ANY, VERB_ANY, 0, help     },
+                { "pull-dck", 2,        3,        0, pull_dck },
+                {}
+        };
+
+        return dispatch_verb(argc, argv, table, NULL);
+}
+
+int main(int argc, char *argv[]) {
+        int r;
+
+        /* Standard systemd tool prologue: locale + logging setup, then
+         * option parsing followed by verb dispatch. */
+        setlocale(LC_ALL, "");
+        log_parse_environment();
+        log_open();
+
+        r = parse_argv(argc, argv);
+        if (r <= 0)
+                goto finish;
+
+        r = import_main(argc, argv);
+
+finish:
+        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
+}
index a8ccf20..96b8c1b 100644 (file)
@@ -1040,3 +1040,5 @@ int sethostname_idempotent(const char *s);
         for ((e) = (struct inotify_event*) (buffer);    \
              (uint8_t*) (e) < (uint8_t*) (buffer) + (sz); \
              (e) = (struct inotify_event*) ((uint8_t*) (e) + sizeof(struct inotify_event) + (e)->len))
+
+/* Like access(), relative to the cwd, without following a final symlink.
+ * NOTE(review): the Linux faccessat() syscall does not accept
+ * AT_SYMLINK_NOFOLLOW (glibc emulation is incomplete) — confirm this flag
+ * behaves as intended on all target libcs. */
+#define laccess(path, mode) faccessat(AT_FDCWD, (path), (mode), AT_SYMLINK_NOFOLLOW)