1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2014 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
22 #include <curl/curl.h>
23 #include <sys/prctl.h>
29 #include "curl-util.h"
30 #include "import-dkr.h"
31 #include "btrfs-util.h"
32 #include "aufs-util.h"
38 - fall back to btrfs loop pool device
/* Forward declarations: a Job is one HTTP transfer, a Name is one image
 * being pulled (which owns several jobs through its pipeline). */
41 typedef struct DkrImportJob DkrImportJob;
42 typedef struct DkrImportName DkrImportName;
/* The distinct request types a job can issue against the index/registry. */
44 typedef enum DkrImportJobType {
45 DKR_IMPORT_JOB_IMAGES,
47 DKR_IMPORT_JOB_ANCESTRY,
54 DkrImportJobType type;
/* Names blocked on this job; completion fans out to every member. */
59 Set *needed_by; /* DkrImport Name objects */
62 struct curl_slist *request_header;
67 char **response_registries;
76 struct DkrImportName {
/* One job pointer per stage of the pull pipeline. */
85 DkrImportJob *job_images, *job_tags, *job_ancestry, *job_json, *job_layer;
/* Index into ->ancestry of the layer currently being downloaded. */
88 unsigned current_ancestry;
100 dkr_import_on_finished on_finished;
104 #define PROTOCOL_PREFIX "https://"
/* Header names are split across two string literals with an interior
 * comment — presumably to keep the full header name out of simple
 * source greps; TODO confirm the intent. */
106 #define HEADER_TOKEN "X-Do" /* the HTTP header for the auth token */ "cker-Token:"
107 #define HEADER_REGISTRY "X-Do" /*the HTTP header for the registry */ "cker-Endpoints:"
/* Hard limits: refuse payloads over 16 MiB and ancestries over 2048 layers. */
109 #define PAYLOAD_MAX (16*1024*1024)
110 #define LAYERS_MAX 2048
112 static int dkr_import_name_add_job(DkrImportName *name, DkrImportJobType type, const char *url, DkrImportJob **ret);
/* Destructor for a job: tears down curl state, buffers, the tar stream
 * and any half-written temporary subvolume. (Interior lines elided in
 * this excerpt.) */
114 static DkrImportJob *dkr_import_job_unref(DkrImportJob *job) {
119 curl_glue_remove_and_free(job->import->glue, job->curl);
120 curl_slist_free_all(job->request_header);
123 fclose(job->tar_stream);
125 free(job->final_path);
/* A still-set temp path means the download never completed; remove the
 * partial subvolume before freeing the path string. */
127 if (job->temp_path) {
128 btrfs_subvol_remove(job->temp_path);
129 free(job->temp_path);
132 set_free(job->needed_by);
/* Best-effort: ask a still-running tar child to terminate. */
134 if (job->tar_pid > 0)
135 kill(job->tar_pid, SIGTERM);
139 free(job->response_token);
140 strv_free(job->response_registries);
/* Destructor for a name: detach it from each pending job's needed_by
 * set so no completion callback is delivered to a dead object, then
 * free the owned strings. */
147 static DkrImportName *dkr_import_name_unref(DkrImportName *name) {
151 if (name->job_images)
152 set_remove(name->job_images->needed_by, name);
155 set_remove(name->job_tags->needed_by, name);
157 if (name->job_ancestry)
158 set_remove(name->job_ancestry->needed_by, name);
161 set_remove(name->job_json->needed_by, name);
164 set_remove(name->job_layer->needed_by, name);
166 free(name->index_url);
172 strv_free(name->ancestry);
/* Generate *_unrefp helpers for use with _cleanup_() auto cleanup. */
178 DEFINE_TRIVIAL_CLEANUP_FUNC(DkrImportJob*, dkr_import_job_unref);
179 DEFINE_TRIVIAL_CLEANUP_FUNC(DkrImportName*, dkr_import_name_unref);
/* Report overall completion: invoke the user's callback when one was
 * registered, otherwise exit the event loop with the error code. */
181 static void dkr_import_finish(DkrImport *import, int error) {
184 if (import->on_finished)
185 import->on_finished(import, error, import->userdata);
187 sd_event_exit(import->event, error);
/* Parse a minimal JSON document consisting of a single string token and
 * return the validated layer ID via *ret. */
190 static int parse_id(const void *payload, size_t size, char **ret) {
191 _cleanup_free_ char *buf = NULL, *id = NULL, *other = NULL;
192 union json_value v = {};
193 void *json_state = NULL;
/* Embedded NUL bytes cannot occur in valid JSON text; reject early. */
203 if (memchr(payload, 0, size))
206 buf = strndup(payload, size);
/* First token must be the ID string itself... */
211 t = json_tokenize(&p, &id, &v, &json_state, NULL);
214 if (t != JSON_STRING)
/* ...and a second tokenize verifies nothing follows it. */
217 t = json_tokenize(&p, &other, &v, &json_state, NULL);
223 if (!dkr_id_is_valid(id))
/* Parse a JSON array of layer-ID strings into a NULL-terminated strv
 * returned via *ret, driven by a small state machine over the
 * tokenizer. (Interior lines elided in this excerpt.) */
232 static int parse_ancestry(const void *payload, size_t size, char ***ret) {
233 _cleanup_free_ char *buf = NULL;
234 void *json_state = NULL;
241 } state = STATE_BEGIN;
242 _cleanup_strv_free_ char **l = NULL;
243 size_t n = 0, allocated = 0;
/* JSON text may not contain NUL bytes. */
248 if (memchr(payload, 0, size))
251 buf = strndup(payload, size);
257 _cleanup_free_ char *str;
258 union json_value v = {};
261 t = json_tokenize(&p, &str, &v, &json_state, NULL);
268 if (t == JSON_ARRAY_OPEN)
276 if (t == JSON_STRING) {
277 if (!dkr_id_is_valid(str))
/* Enforce LAYERS_MAX to bound memory use on hostile input. */
280 if (n+1 > LAYERS_MAX)
/* n + 2: room for the new entry plus the terminating NULL. */
283 if (!GREEDY_REALLOC(l, allocated, n + 2))
292 } else if (t == JSON_ARRAY_CLOSE)
302 else if (t == JSON_ARRAY_CLOSE)
/* Duplicate layer IDs would make the pull loop cycle; refuse them. */
314 if (!strv_is_uniq(l))
/* Return the layer currently being processed, or NULL when the
 * ancestry list is empty (further guards likely elided here). */
329 static const char *dkr_import_name_current_layer(DkrImportName *name) {
332 if (strv_isempty(name->ancestry))
335 return name->ancestry[name->current_ancestry];
/* Return the parent of the current layer, or NULL for the first layer,
 * which has no base to snapshot from. */
338 static const char *dkr_import_name_current_base_layer(DkrImportName *name) {
341 if (strv_isempty(name->ancestry))
344 if (name->current_ancestry <= 0)
347 return name->ancestry[name->current_ancestry-1];
/* Return the registry endpoint list captured by the completed "images"
 * job, or NULL while the job is missing, unfinished, or came back empty. */
350 static char** dkr_import_name_get_registries(DkrImportName *name) {
353 if (!name->job_images)
356 if (!name->job_images->done)
359 if (strv_isempty(name->job_images->response_registries))
362 return name->job_images->response_registries;
/* Return the auth token captured from the index response headers, or
 * NULL if the "images" job has not completed yet. */
365 static const char*dkr_import_name_get_token(DkrImportName *name) {
368 if (!name->job_images)
371 if (!name->job_images->done)
374 return name->job_images->response_token;
/* Called after every job completion: when all jobs for this name are
 * done and no further layer remains, snapshot the assembled layer
 * stack into the final machine image and signal completion. */
377 static void dkr_import_name_maybe_finish(DkrImportName *name) {
382 if (!name->job_images || !name->job_images->done)
385 if (!name->job_ancestry || !name->job_ancestry->done)
388 if (!name->job_json || !name->job_json->done)
/* Fixed copy/paste bug: this previously re-tested job_json->done, so
 * an unfinished layer job could not delay completion. */
391 if (name->job_layer && !name->job_layer->done)
394 if (dkr_import_name_current_layer(name))
/* p: the target machine image; q: the hidden per-layer subvolume of
 * the top-most ancestry layer. */
402 p = strappenda("/var/lib/container/", name->local);
403 q = strappenda("/var/lib/container/.dkr-", name->id);
/* With --force, drop any pre-existing image first (both as a btrfs
 * subvolume and as a plain directory tree). */
405 if (name->force_local) {
406 (void) btrfs_subvol_remove(p);
407 (void) rm_rf(p, false, true, false);
410 r = btrfs_subvol_snapshot(q, p, false, false);
412 log_error_errno(r, "Failed to snapshot final image: %m");
413 dkr_import_finish(name->import, r);
417 log_info("Created new image %s.", p);
420 dkr_import_finish(name->import, 0);
/* Lazily spawn a tar child once enough payload has arrived to sniff
 * whether the stream is gzip-compressed, and open a FILE* pipe into
 * its stdin through which the payload is streamed. */
423 static int dkr_import_job_run_tar(DkrImportJob *job) {
424 _cleanup_close_pair_ int pipefd[2] = { -1, -1 };
429 /* A stream to run tar on? */
436 /* Maybe fork off tar, if we have enough to figure out that
437 * something is gzip compressed or not */
/* Need at least the two magic bytes before deciding. */
439 if (job->payload_size < 2)
442 /* Detect gzip signature */
443 gzip = ((uint8_t*) job->payload)[0] == 0x1f &&
444 ((uint8_t*) job->payload)[1] == 0x8b;
446 assert(!job->tar_stream);
447 assert(job->tar_pid <= 0);
449 if (pipe2(pipefd, O_CLOEXEC) < 0)
450 return log_error_errno(errno, "Failed to create pipe for tar: %m");
452 job->tar_pid = fork();
453 if (job->tar_pid < 0)
454 return log_error_errno(errno, "Failed to fork off tar: %m");
455 if (job->tar_pid == 0) {
/* Child: wire the pipe's read end to stdin, discard stdout, exec tar. */
458 reset_all_signal_handlers();
/* Die if the importer exits before the archive is fully written. */
460 assert_se(prctl(PR_SET_PDEATHSIG, SIGTERM) == 0);
462 pipefd[1] = safe_close(pipefd[1]);
464 if (dup2(pipefd[0], STDIN_FILENO) != STDIN_FILENO) {
465 log_error_errno(errno, "Failed to dup2() fd: %m");
469 if (pipefd[0] != STDIN_FILENO)
470 safe_close(pipefd[0]);
471 if (pipefd[1] != STDIN_FILENO)
472 safe_close(pipefd[1]);
474 null_fd = open("/dev/null", O_WRONLY|O_NOCTTY);
476 log_error_errno(errno, "Failed to open /dev/null: %m");
480 if (dup2(null_fd, STDOUT_FILENO) != STDOUT_FILENO) {
481 log_error_errno(errno, "Failed to dup2() fd: %m");
485 if (null_fd != STDOUT_FILENO)
/* Extract into the job's temp subvolume; -z only when gzip detected. */
488 execlp("tar", "tar", "-C", job->temp_path, gzip ? "-xz" : "-x", NULL);
/* Parent: keep only the write end, wrapped in a stdio stream. */
492 pipefd[0] = safe_close(pipefd[0]);
494 job->tar_stream = fdopen(pipefd[1], "w");
495 if (!job->tar_stream)
496 return log_error_errno(errno, "Failed to allocate tar stream: %m");
/* Flush the already-buffered payload; from now on the write callback
 * streams directly to the pipe. */
500 if (fwrite(job->payload, 1, job->payload_size, job->tar_stream) != job->payload_size)
501 return log_error_errno(errno, "Couldn't write payload: %m");
505 job->payload_size = 0;
/* Start the download of the current ancestry layer: skip it when the
 * hidden per-layer subvolume already exists, otherwise prepare a temp
 * subvolume (snapshotted from the base layer when there is one) and
 * queue an HTTP job for the layer blob. */
510 static int dkr_import_name_pull_layer(DkrImportName *name) {
511 _cleanup_free_ char *path = NULL, *temp = NULL;
512 const char *url, *layer = NULL, *base = NULL;
/* Detach from any previous layer job before starting the next one. */
518 if (name->job_layer) {
519 set_remove(name->job_layer->needed_by, name);
520 name->job_layer = NULL;
524 layer = dkr_import_name_current_layer(name);
/* No layer left: the ancestry is fully processed. */
526 dkr_import_name_maybe_finish(name);
530 path = strjoin("/var/lib/container/.dkr-", layer, NULL);
534 if (laccess(path, F_OK) < 0) {
538 return log_error_errno(errno, "Failed to check for container: %m");
/* Layer already present on disk: advance to the next ancestry entry. */
541 log_info("Layer %s already exists, skipping.", layer);
543 name->current_ancestry++;
549 rg = dkr_import_name_get_registries(name);
552 url = strappenda(PROTOCOL_PREFIX, rg[0], "/v1/images/", layer, "/layer");
553 r = dkr_import_name_add_job(name, DKR_IMPORT_JOB_LAYER, url, &name->job_layer);
555 log_error_errno(r, "Failed to issue HTTP request: %m");
558 if (r == 0) /* Already downloading this one? */
561 log_info("Pulling layer %s...", layer);
563 r = tempfn_random(path, &temp);
567 base = dkr_import_name_current_base_layer(name);
569 const char *base_path;
/* Fixed: use the alloca-based strappenda() like the rest of this file —
 * strappend() heap-allocates and the result was never freed. */
571 base_path = strappenda("/var/lib/container/.dkr-", base);
572 r = btrfs_subvol_snapshot(base_path, temp, false, true);
574 r = btrfs_subvol_make(temp);
577 return log_error_errno(r, "Failed to make btrfs subvolume %s", temp);
/* Hand the path strings over to the job — presumably the locals are
 * NULLed in lines elided here so the _cleanup_ handlers skip them;
 * TODO confirm. */
579 name->job_layer->final_path = path;
580 name->job_layer->temp_path = temp;
/* State machine driving one name's pull pipeline: each completed job
 * triggers the next request (images -> tags -> ancestry/json -> layers).
 * (Interior lines elided in this excerpt.) */
586 static void dkr_import_name_job_finished(DkrImportName *name, DkrImportJob *job) {
/* Stage 1: index lookup finished — we now know the registry hosts. */
592 if (name->job_images == job) {
596 assert(!name->job_tags);
597 assert(!name->job_ancestry);
598 assert(!name->job_json);
599 assert(!name->job_layer);
601 rg = dkr_import_name_get_registries(name);
602 if (strv_isempty(rg)) {
603 log_error("Didn't get registry information.");
608 log_info("Index lookup succeeded, directed to registry %s.", rg[0]);
/* Next: resolve the tag to a layer ID. */
610 url = strappenda(PROTOCOL_PREFIX, rg[0], "/v1/repositories/", name->name, "/tags/", name->tag);
612 r = dkr_import_name_add_job(name, DKR_IMPORT_JOB_TAGS, url, &name->job_tags);
614 log_error_errno(r, "Failed to issue HTTP request: %m");
/* Stage 2: tag lookup finished — request ancestry and metadata. */
618 } else if (name->job_tags == job) {
620 char *id = NULL, **rg;
622 assert(!name->job_ancestry);
623 assert(!name->job_json);
624 assert(!name->job_layer);
626 r = parse_id(job->payload, job->payload_size, &id);
628 log_error_errno(r, "Failed to parse JSON id.");
635 rg = dkr_import_name_get_registries(name);
638 log_info("Tag lookup succeeded, resolved to layer %s.", name->id);
640 url = strappenda(PROTOCOL_PREFIX, rg[0], "/v1/images/", name->id, "/ancestry");
641 r = dkr_import_name_add_job(name, DKR_IMPORT_JOB_ANCESTRY, url, &name->job_ancestry);
643 log_error_errno(r, "Failed to issue HTTP request: %m");
647 url = strappenda(PROTOCOL_PREFIX, rg[0], "/v1/images/", name->id, "/json");
648 r = dkr_import_name_add_job(name, DKR_IMPORT_JOB_JSON, url, &name->job_json);
650 log_error_errno(r, "Failed to issue HTTP request: %m");
/* Stage 3: ancestry received — validate it and start the layer loop. */
654 } else if (name->job_ancestry == job) {
655 char **ancestry = NULL, **i;
658 r = parse_ancestry(job->payload, job->payload_size, &ancestry);
660 log_error_errno(r, "Failed to parse JSON id.");
/* Sanity check: the requested layer must be the last ancestry entry. */
664 n = strv_length(ancestry);
665 if (n <= 0 || !streq(ancestry[n-1], name->id)) {
666 log_error("Ancestry doesn't end in main layer.");
671 log_info("Ancestor lookup succeeded, requires layers:\n");
672 STRV_FOREACH(i, ancestry)
673 log_info("\t%s", *i);
675 strv_free(name->ancestry);
676 name->ancestry = ancestry;
/* Begin pulling layers from the root of the ancestry. */
678 name->current_ancestry = 0;
679 r = dkr_import_name_pull_layer(name);
683 } else if (name->job_json == job) {
685 dkr_import_name_maybe_finish(name);
/* A layer finished: advance and pull the next one. */
687 } else if (name->job_layer == job) {
689 name->current_ancestry ++;
690 r = dkr_import_name_pull_layer(name);
695 assert_not_reached("Got finished event for unknown curl object");
700 dkr_import_finish(name->import, r);
/* curl completion callback: validate the transfer result and HTTP
 * status, finalize layer jobs (drain tar, resolve aufs whiteouts, mark
 * the subvolume read-only, rename into place), then fan the completion
 * out to every name waiting on this job. */
703 static void dkr_import_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {
704 DkrImportJob *job = NULL;
711 if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, &job) != CURLE_OK)
719 if (result != CURLE_OK) {
720 log_error("Transfer failed: %s", curl_easy_strerror(result));
725 code = curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status);
726 if (code != CURLE_OK) {
727 log_error("Failed to retrieve response code: %s", curl_easy_strerror(code));
/* Only 2xx responses are acceptable. */
730 } else if (status >= 300) {
731 log_error("HTTP request to %s failed with code %li.", job->url, status);
734 } else if (status < 200) {
735 log_error("HTTP request to %s finished with unexpected code %li.", job->url, status);
742 case DKR_IMPORT_JOB_LAYER: {
/* tar is only spawned once >=2 payload bytes arrived, so a missing
 * stream means the download was truncated. */
745 if (!job->tar_stream) {
746 log_error("Downloaded layer too short.");
751 fclose(job->tar_stream);
752 job->tar_stream = NULL;
754 assert(job->tar_pid > 0);
756 r = wait_for_terminate(job->tar_pid, &si);
758 log_error_errno(r, "Failed to wait for tar process: %m");
764 if (si.si_code != CLD_EXITED || si.si_status != EXIT_SUCCESS) {
/* Fixed: r is >= 0 here (wait_for_terminate() succeeded), so passing
 * it to log_error_errno() logged a bogus error code. */
765 log_error("tar failed abnormally.");
770 r = aufs_resolve(job->temp_path);
772 log_error_errno(r, "Couldn't resolve aufs whiteouts: %m");
776 r = btrfs_subvol_read_only(job->temp_path, true);
778 log_error_errno(r, "Failed to mark snapshot read-only: %m");
782 if (rename(job->temp_path, job->final_path) < 0) {
/* Fixed: rename() reports its failure in errno; r held a stale value
 * from an earlier call, so the logged error was wrong. */
783 log_error_errno(errno, "Failed to rename snapshot: %m");
787 log_info("Completed writing to layer %s", job->final_path);
/* Notify every name blocked on this job. */
795 SET_FOREACH(n, job->needed_by, i)
796 dkr_import_name_job_finished(n, job);
801 dkr_import_finish(job->import, r);
/* curl write callback: while a tar child is running, stream the data
 * straight into its stdin; otherwise append to the in-memory payload
 * buffer (bounded by PAYLOAD_MAX) and retry spawning tar. */
804 static size_t dkr_import_job_write_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
805 DkrImportJob *j = userdata;
806 size_t sz = size * nmemb;
816 l = fwrite(contents, size, nmemb, j->tar_stream);
/* Cap total buffered payload to defend against hostile servers. */
825 if (j->payload_size + sz > PAYLOAD_MAX) {
830 p = realloc(j->payload, j->payload_size + sz);
836 memcpy(p + j->payload_size, contents, sz);
837 j->payload_size += sz;
/* Enough data may now be present to detect gzip and fork off tar. */
840 r = dkr_import_job_run_tar(j);
847 dkr_import_finish(j->import, r);
/* curl header callback: capture the auth token and the registry
 * endpoint list from the index response headers. */
851 static size_t dkr_import_job_header_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
852 _cleanup_free_ char *registry = NULL;
853 size_t sz = size * nmemb;
854 DkrImportJob *j = userdata;
861 r = curl_header_strdup(contents, sz, HEADER_TOKEN, &token);
/* Replace any previously captured token. */
867 free(j->response_token);
868 j->response_token = token;
/* Fixed mojibake: "&registry" had been corrupted into the HTML entity
 * "®istry", which is not valid C. */
871 r = curl_header_strdup(contents, sz, HEADER_REGISTRY, &registry);
/* The endpoints header carries a comma-separated host list. */
879 l = strv_split(registry, ",");
/* Validate each endpoint before trusting it as a download host. */
886 if (!hostname_is_valid(*i)) {
887 log_error("Registry hostname is not valid.");
894 strv_free(j->response_registries);
895 j->response_registries = l;
901 dkr_import_finish(j->import, r);
/* Create (or join) a download job for the given URL, register the name
 * in its needed_by set, and start the transfer. Presumably returns > 0
 * for a new job and 0 when an existing one was joined — callers test
 * r == 0; TODO confirm against the elided lines. */
905 static int dkr_import_name_add_job(DkrImportName *name, DkrImportJobType type, const char *url, DkrImportJob **ret) {
906 _cleanup_(dkr_import_job_unrefp) DkrImportJob *j = NULL;
907 DkrImportJob *f = NULL;
908 const char *t, *token;
915 log_info("Getting %s.", url);
/* Reuse an in-flight job for the same URL if there is one. */
916 f = hashmap_get(name->import->jobs, url);
921 r = set_put(f->needed_by, name);
928 r = hashmap_ensure_allocated(&name->import->jobs, &string_hash_ops);
932 j = new0(DkrImportJob, 1);
936 j->import = name->import;
938 j->url = strdup(url);
942 r = set_ensure_allocated(&j->needed_by, &trivial_hash_ops);
946 r = curl_glue_make(&j->curl, j->url, j);
/* Send the auth token if we already have one; otherwise ask the index
 * to issue one via the token request header. */
950 token = dkr_import_name_get_token(name);
952 t = strappenda("Authorization: Token ", token);
954 t = HEADER_TOKEN " true";
956 j->request_header = curl_slist_new("Accept: application/json", t, NULL);
957 if (!j->request_header)
960 if (curl_easy_setopt(j->curl, CURLOPT_HTTPHEADER, j->request_header) != CURLE_OK)
963 if (curl_easy_setopt(j->curl, CURLOPT_WRITEFUNCTION, dkr_import_job_write_callback) != CURLE_OK)
966 if (curl_easy_setopt(j->curl, CURLOPT_WRITEDATA, j) != CURLE_OK)
969 if (curl_easy_setopt(j->curl, CURLOPT_HEADERFUNCTION, dkr_import_job_header_callback) != CURLE_OK)
972 if (curl_easy_setopt(j->curl, CURLOPT_HEADERDATA, j) != CURLE_OK)
975 r = curl_glue_add(name->import->glue, j->curl);
979 r = hashmap_put(name->import->jobs, j->url, j);
983 r = set_put(j->needed_by, name);
/* Roll back the hashmap entry if registering the waiter failed. */
985 hashmap_remove(name->import->jobs, url);
/* Kick off the pull pipeline: the first request queries the index for
 * the repository, yielding registry endpoints and an auth token. */
995 static int dkr_import_name_begin(DkrImportName *name) {
999 assert(!name->job_images);
1001 url = strappenda(name->index_url, "/v1/repositories/", name->name, "/images");
1003 return dkr_import_name_add_job(name, DKR_IMPORT_JOB_IMAGES, url, &name->job_images);
/* Allocate a DkrImport context: stores the completion callback, takes
 * a reference on the caller's event loop (or acquires the thread
 * default), and sets up the shared curl glue. */
1006 int dkr_import_new(DkrImport **import, sd_event *event, dkr_import_on_finished on_finished, void *userdata) {
1007 _cleanup_(dkr_import_unrefp) DkrImport *i = NULL;
1012 i = new0(DkrImport, 1);
1016 i->on_finished = on_finished;
1017 i->userdata = userdata;
/* Use the caller's loop when given, else fall back to the default. */
1020 i->event = sd_event_ref(event);
1022 r = sd_event_default(&i->event);
1027 r = curl_glue_new(&i->glue, i->event);
1031 i->glue->on_finished = dkr_import_curl_on_finished;
1032 i->glue->userdata = i;
/* Free the whole import context including every pending name and job;
 * steal-first loops empty each table before freeing it. */
1040 DkrImport* dkr_import_unref(DkrImport *import) {
1047 while ((n = hashmap_steal_first(import->names)))
1048 dkr_import_name_unref(n);
1049 hashmap_free(import->names);
1051 while ((j = hashmap_steal_first(import->jobs)))
1052 dkr_import_job_unref(j);
1053 hashmap_free(import->jobs);
1055 curl_glue_unref(import->glue);
1056 sd_event_unref(import->event);
/* Cancel a pull by name; removing it from the table and unreffing it
 * detaches it from its jobs via the destructor. */
1063 int dkr_import_cancel(DkrImport *import, const char *name) {
1069 n = hashmap_remove(import->names, name);
1073 dkr_import_name_unref(n);
/* Public entry point: start pulling "name:tag" from index_url into the
 * local machine image "local", optionally replacing an existing one. */
1077 int dkr_import_pull(DkrImport *import, const char *index_url, const char *name, const char *tag, const char *local, bool force_local) {
1078 _cleanup_(dkr_import_name_unrefp) DkrImportName *n = NULL;
1083 assert(dkr_url_is_valid(index_url));
1084 assert(dkr_name_is_valid(name));
1085 assert(dkr_tag_is_valid(tag));
1086 assert(!local || machine_name_is_valid(local));
/* Refuse to start a second pull of the same name. */
1088 if (hashmap_get(import->names, name))
1091 r = hashmap_ensure_allocated(&import->names, &string_hash_ops);
1095 n = new0(DkrImportName, 1);
1101 n->index_url = strdup(index_url);
/* Strip a trailing slash so URL concatenation stays well-formed. */
1104 e = endswith(n->index_url, "/");
1108 n->name = strdup(name);
1112 n->tag = strdup(tag);
1117 n->local = strdup(local);
1120 n->force_local = force_local;
/* Fixed: key the table with the object's own copy of the name — the
 * caller's string may be freed while the entry is still live, leaving
 * a dangling hashmap key. */
1123 r = hashmap_put(import->names, n->name, n);
1127 r = dkr_import_name_begin(n);
1129 dkr_import_cancel(import, n->name);
/* A valid remote name is "prefix/suffix" where both components are
 * valid filenames — presumably exactly one slash is enforced in lines
 * elided here; TODO confirm. */
1139 bool dkr_name_is_valid(const char *name) {
1140 const char *slash, *p;
1145 slash = strchr(name, '/');
1149 if (!filename_is_valid(slash + 1))
/* Check the prefix component up to (not including) the slash. */
1152 p = strndupa(name, slash - name);
1153 if (!filename_is_valid(p))
/* A layer ID must be a valid filename consisting only of lowercase
 * hex digits. */
1159 bool dkr_id_is_valid(const char *id) {
1161 if (!filename_is_valid(id))
1164 if (!in_charset(id, "0123456789abcdef"))
/* An index URL must use the http:// or https:// scheme and be pure
 * ASCII. */
1170 bool dkr_url_is_valid(const char *url) {
1172 if (!startswith(url, "http://") &&
1173 !startswith(url, "https://"))
1176 return ascii_is_valid(url);