1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2014 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
22 #include <curl/curl.h>
23 #include <sys/prctl.h>
29 #include "curl-util.h"
30 #include "import-dkr.h"
31 #include "btrfs-util.h"
32 #include "aufs-util.h"
38 - fall back to btrfs loop pool device
41 typedef struct DkrImportJob DkrImportJob;
42 typedef struct DkrImportName DkrImportName;
/* A DkrImportJob is one HTTP transfer (index lookup, tag resolution,
 * ancestry list, JSON metadata or layer tarball). Jobs are shared:
 * several names may wait on the same URL via the needed_by set. */
44 typedef enum DkrImportJobType {
45 DKR_IMPORT_JOB_IMAGES,
47 DKR_IMPORT_JOB_ANCESTRY,
54 DkrImportJobType type;
59 Set *needed_by; /* DkrImport Name objects */
62 struct curl_slist *request_header;
67 char **response_registries;
/* A DkrImportName is one requested image (name + tag) together with
 * the chain of jobs needed to pull it. */
76 struct DkrImportName {
84 DkrImportJob *job_images, *job_tags, *job_ancestry, *job_json, *job_layer;
87 unsigned current_ancestry; /* index into ->ancestry of the layer currently being pulled */
101 dkr_import_on_finished on_finished;
107 #define PROTOCOL_PREFIX "https://"
/* The header names are built from two adjacent string literals,
 * seemingly so the full header text does not appear verbatim in the
 * source; the embedded comments state each header's role. */
109 #define HEADER_TOKEN "X-Do" /* the HTTP header for the auth token */ "cker-Token:"
110 #define HEADER_REGISTRY "X-Do" /* the HTTP header for the registry */ "cker-Endpoints:"
112 #define PAYLOAD_MAX (16*1024*1024) /* refuse in-memory response payloads beyond 16 MiB */
113 #define LAYERS_MAX 2048 /* refuse ancestry chains longer than this */
/* Forward declaration: job creation is needed before its definition below. */
115 static int dkr_import_name_add_job(DkrImportName *name, DkrImportJobType type, const char *url, DkrImportJob **ret);
/* Release everything a job owns: the curl handle, request headers,
 * the tar pipe stream, path strings, the temporary btrfs subvolume
 * (removed on disk, since it is incomplete), the needed_by set, any
 * still-running tar child (SIGTERM), and the response buffers. */
117 static DkrImportJob *dkr_import_job_unref(DkrImportJob *job) {
122 curl_glue_remove_and_free(job->import->glue, job->curl);
123 curl_slist_free_all(job->request_header);
126 fclose(job->tar_stream);
128 free(job->final_path);
130 if (job->temp_path) {
/* A non-NULL temp_path means the layer never completed; drop its subvolume. */
131 btrfs_subvol_remove(job->temp_path);
132 free(job->temp_path);
135 set_free(job->needed_by);
137 if (job->tar_pid > 0)
138 kill(job->tar_pid, SIGTERM);
142 free(job->response_token);
143 strv_free(job->response_registries);
/* Detach a name from every job whose needed_by set references it, then
 * free its owned strings. Jobs themselves are owned by the DkrImport's
 * jobs hashmap, not by the name. */
150 static DkrImportName *dkr_import_name_unref(DkrImportName *name) {
154 if (name->job_images)
155 set_remove(name->job_images->needed_by, name);
158 set_remove(name->job_tags->needed_by, name);
160 if (name->job_ancestry)
161 set_remove(name->job_ancestry->needed_by, name);
164 set_remove(name->job_json->needed_by, name);
167 set_remove(name->job_layer->needed_by, name);
174 strv_free(name->ancestry);
/* _cleanup_ helpers (dkr_import_job_unrefp / dkr_import_name_unrefp)
 * for scope-exit release of the two object types above. */
180 DEFINE_TRIVIAL_CLEANUP_FUNC(DkrImportJob*, dkr_import_job_unref);
181 DEFINE_TRIVIAL_CLEANUP_FUNC(DkrImportName*, dkr_import_name_unref);
/* Mark the whole import finished exactly once: invoke the user
 * callback if set, otherwise exit the event loop with the error code. */
183 static void dkr_import_finish(DkrImport *import, int error) {
186 if (import->finished)
/* Already finished: the guard makes repeated completion events harmless. */
189 import->finished = true;
191 if (import->on_finished)
192 import->on_finished(import, error, import->userdata);
194 sd_event_exit(import->event, error);
/* Parse a payload expected to be a JSON document consisting of exactly
 * one string token, validate it as a dkr layer id and return it in
 * *ret. Payloads containing embedded NUL bytes are rejected up front. */
197 static int parse_id(const void *payload, size_t size, char **ret) {
198 _cleanup_free_ char *buf = NULL, *id = NULL, *other = NULL;
199 union json_value v = {};
200 void *json_state = NULL;
210 if (memchr(payload, 0, size))
/* NUL-terminate a private copy so the tokenizer can treat it as a string. */
213 buf = strndup(payload, size);
218 t = json_tokenize(&p, &id, &v, &json_state, NULL);
221 if (t != JSON_STRING)
/* A second tokenize call checks nothing follows the single string. */
224 t = json_tokenize(&p, &other, &v, &json_state, NULL);
230 if (!dkr_id_is_valid(id))
/* Parse a payload expected to be a JSON array of layer-id strings,
 * returning them as a strv in *ret. Enforces: no embedded NUL bytes,
 * every element a valid dkr id, at most LAYERS_MAX entries, and no
 * duplicate ids in the chain. */
239 static int parse_ancestry(const void *payload, size_t size, char ***ret) {
240 _cleanup_free_ char *buf = NULL;
241 void *json_state = NULL;
248 } state = STATE_BEGIN;
249 _cleanup_strv_free_ char **l = NULL;
250 size_t n = 0, allocated = 0;
255 if (memchr(payload, 0, size))
258 buf = strndup(payload, size);
264 _cleanup_free_ char *str;
265 union json_value v = {};
268 t = json_tokenize(&p, &str, &v, &json_state, NULL);
/* Small state machine: expect '[', then strings until ']'. */
275 if (t == JSON_ARRAY_OPEN)
283 if (t == JSON_STRING) {
284 if (!dkr_id_is_valid(str))
287 if (n+1 > LAYERS_MAX)
/* n + 2: room for the new element plus the strv's NULL terminator. */
290 if (!GREEDY_REALLOC(l, allocated, n + 2))
299 } else if (t == JSON_ARRAY_CLOSE)
309 else if (t == JSON_ARRAY_CLOSE)
/* Duplicate layer ids would make the pull loop cycle; reject them. */
321 if (!strv_is_uniq(l))
/* Return the id of the layer currently being pulled, or NULL if the
 * ancestry list is empty (or exhausted, per the off-screen bounds check). */
336 static const char *dkr_import_name_current_layer(DkrImportName *name) {
339 if (strv_isempty(name->ancestry))
342 return name->ancestry[name->current_ancestry];
/* Return the id of the layer underneath the current one (its snapshot
 * base), or NULL when the current layer is the bottom of the chain. */
345 static const char *dkr_import_name_current_base_layer(DkrImportName *name) {
348 if (strv_isempty(name->ancestry))
351 if (name->current_ancestry <= 0)
354 return name->ancestry[name->current_ancestry-1];
/* Return the registry endpoints reported by the completed index job,
 * or NULL while the job is absent, unfinished, or reported none. */
357 static char** dkr_import_name_get_registries(DkrImportName *name) {
360 if (!name->job_images)
363 if (!name->job_images->done)
366 if (strv_isempty(name->job_images->response_registries))
369 return name->job_images->response_registries;
/* Return the auth token captured from the completed index job's
 * response headers, or NULL if not (yet) available. */
372 static const char*dkr_import_name_get_token(DkrImportName *name) {
375 if (!name->job_images)
378 if (!name->job_images->done)
381 return name->job_images->response_token;
/* Finalize a pull once every job for this name has completed and no
 * further layer remains: snapshot the fully-assembled top layer to the
 * requested local machine name and report overall success. Returns
 * early (doing nothing) while anything is still in flight. */
384 static void dkr_import_name_maybe_finish(DkrImportName *name) {
389 if (!name->job_images || !name->job_images->done)
392 if (!name->job_ancestry || !name->job_ancestry->done)
395 if (!name->job_json || !name->job_json->done)
/* BUGFIX: this previously re-tested job_json->done, so a still-running
 * layer job could not hold back completion. Test the layer job itself. */
398 if (name->job_layer && !name->job_layer->done)
401 if (dkr_import_name_current_layer(name))
409 p = strappenda("/var/lib/container/", name->local);
410 q = strappenda("/var/lib/container/.dkr-", name->id);
412 if (name->force_local) {
/* --force: remove any pre-existing image under the local name first. */
413 (void) btrfs_subvol_remove(p);
414 (void) rm_rf(p, false, true, false);
417 r = btrfs_subvol_snapshot(q, p, false, false);
419 log_error_errno(r, "Failed to snapshot final image: %m");
420 dkr_import_finish(name->import, r);
424 log_info("Created new image %s.", p);
427 dkr_import_finish(name->import, 0);
/* Once enough payload has arrived to sniff the compression, fork off a
 * tar child that extracts into the job's temp subvolume, wire its stdin
 * to a pipe, and flush the already-buffered payload into it. Subsequent
 * data is streamed directly via job->tar_stream. */
430 static int dkr_import_job_run_tar(DkrImportJob *job) {
431 _cleanup_close_pair_ int pipefd[2] = { -1, -1 };
436 /* A stream to run tar on? */
443 /* Maybe fork off tar, if we have enough to figure out that
444 * something is gzip compressed or not */
446 if (job->payload_size < 2)
449 /* Detect gzip signature */
450 gzip = ((uint8_t*) job->payload)[0] == 0x1f &&
451 ((uint8_t*) job->payload)[1] == 0x8b;
453 assert(!job->tar_stream);
454 assert(job->tar_pid <= 0);
456 if (pipe2(pipefd, O_CLOEXEC) < 0)
457 return log_error_errno(errno, "Failed to create pipe for tar: %m");
459 job->tar_pid = fork();
460 if (job->tar_pid < 0)
461 return log_error_errno(errno, "Failed to fork off tar: %m");
462 if (job->tar_pid == 0) {
/* Child: detach signal handling, die with the parent, make the pipe stdin. */
465 reset_all_signal_handlers();
467 assert_se(prctl(PR_SET_PDEATHSIG, SIGTERM) == 0);
469 pipefd[1] = safe_close(pipefd[1]);
471 if (dup2(pipefd[0], STDIN_FILENO) != STDIN_FILENO) {
472 log_error_errno(errno, "Failed to dup2() fd: %m");
476 if (pipefd[0] != STDIN_FILENO)
477 safe_close(pipefd[0]);
478 if (pipefd[1] != STDIN_FILENO)
479 safe_close(pipefd[1]);
481 null_fd = open("/dev/null", O_WRONLY|O_NOCTTY);
483 log_error_errno(errno, "Failed to open /dev/null: %m");
487 if (dup2(null_fd, STDOUT_FILENO) != STDOUT_FILENO) {
488 log_error_errno(errno, "Failed to dup2() fd: %m");
492 if (null_fd != STDOUT_FILENO)
/* BUGFIX: the exec* sentinel must be a null POINTER; a bare NULL may
 * expand to plain int 0 and break the variadic call on some ABIs. */
495 execlp("tar", "tar", "-C", job->temp_path, gzip ? "-xz" : "-x", (char*) NULL);
/* Parent: keep the write end as a stdio stream for incremental feeding. */
499 pipefd[0] = safe_close(pipefd[0]);
501 job->tar_stream = fdopen(pipefd[1], "w");
502 if (!job->tar_stream)
503 return log_error_errno(errno, "Failed to allocate tar stream: %m");
507 if (fwrite(job->payload, 1, job->payload_size, job->tar_stream) != job->payload_size)
508 return log_error_errno(errno, "Couldn't write payload: %m");
/* Buffered payload handed to tar; reset the accumulator. */
512 job->payload_size = 0;
/* Start (or skip) the download of the current ancestry layer: if the
 * layer already exists on disk advance past it; otherwise create a
 * btrfs snapshot of the base layer (or a fresh subvolume for the
 * bottom layer) and queue the HTTP job that untars into it. */
517 static int dkr_import_name_pull_layer(DkrImportName *name) {
518 _cleanup_free_ char *path = NULL, *temp = NULL;
519 const char *url, *layer = NULL, *base = NULL;
525 if (name->job_layer) {
/* Detach from the previous layer's job before starting the next one. */
526 set_remove(name->job_layer->needed_by, name);
527 name->job_layer = NULL;
531 layer = dkr_import_name_current_layer(name);
/* No layer left: the whole chain is pulled, try to finalize. */
533 dkr_import_name_maybe_finish(name);
537 path = strjoin("/var/lib/container/.dkr-", layer, NULL);
541 if (laccess(path, F_OK) < 0) {
545 return log_error_errno(errno, "Failed to check for container: %m");
548 log_info("Layer %s already exists, skipping.", layer);
550 name->current_ancestry++;
556 rg = dkr_import_name_get_registries(name);
559 url = strappenda(PROTOCOL_PREFIX, rg[0], "/v1/images/", layer, "/layer");
560 r = dkr_import_name_add_job(name, DKR_IMPORT_JOB_LAYER, url, &name->job_layer);
562 log_error_errno(r, "Failed to issue HTTP request: %m");
565 if (r == 0) /* Already downloading this one? */
568 log_info("Pulling layer %s...", layer);
570 r = tempfn_random(path, &temp);
574 base = dkr_import_name_current_base_layer(name);
576 const char *base_path;
/* BUGFIX: strappend() heap-allocates and the result was never freed;
 * use the on-stack strappenda() like every other path string here. */
578 base_path = strappenda("/var/lib/container/.dkr-", base);
579 r = btrfs_subvol_snapshot(base_path, temp, false, true);
581 r = btrfs_subvol_make(temp);
584 return log_error_errno(r, "Failed to make btrfs subvolume %s", temp);
/* Hand the path strings to the job; it owns (and frees) them from now on. */
586 name->job_layer->final_path = path;
587 name->job_layer->temp_path = temp;
/* Drive the pull state machine when one of this name's jobs completes:
 * index lookup -> tag resolution -> (ancestry + json in parallel) ->
 * layer-by-layer download. Each branch validates the response and
 * queues the next job(s); errors fall through to dkr_import_finish. */
593 static void dkr_import_name_job_finished(DkrImportName *name, DkrImportJob *job) {
599 if (name->job_images == job) {
/* Index lookup done: it must be the first job to finish. */
603 assert(!name->job_tags);
604 assert(!name->job_ancestry);
605 assert(!name->job_json);
606 assert(!name->job_layer);
608 rg = dkr_import_name_get_registries(name);
609 if (strv_isempty(rg)) {
610 log_error("Didn't get registry information.");
615 log_info("Index lookup succeeded, directed to registry %s.", rg[0]);
/* Only the first reported registry endpoint is ever used. */
617 url = strappenda(PROTOCOL_PREFIX, rg[0], "/v1/repositories/", name->name, "/tags/", name->tag);
619 r = dkr_import_name_add_job(name, DKR_IMPORT_JOB_TAGS, url, &name->job_tags);
621 log_error_errno(r, "Failed to issue HTTP request: %m");
625 } else if (name->job_tags == job) {
627 char *id = NULL, **rg;
629 assert(!name->job_ancestry);
630 assert(!name->job_json);
631 assert(!name->job_layer);
633 r = parse_id(job->payload, job->payload_size, &id);
635 log_error_errno(r, "Failed to parse JSON id.");
642 rg = dkr_import_name_get_registries(name);
645 log_info("Tag lookup succeeded, resolved to layer %s.", name->id);
/* Kick off ancestry list and JSON metadata fetches in parallel. */
647 url = strappenda(PROTOCOL_PREFIX, rg[0], "/v1/images/", name->id, "/ancestry");
648 r = dkr_import_name_add_job(name, DKR_IMPORT_JOB_ANCESTRY, url, &name->job_ancestry);
650 log_error_errno(r, "Failed to issue HTTP request: %m");
654 url = strappenda(PROTOCOL_PREFIX, rg[0], "/v1/images/", name->id, "/json");
655 r = dkr_import_name_add_job(name, DKR_IMPORT_JOB_JSON, url, &name->job_json);
657 log_error_errno(r, "Failed to issue HTTP request: %m");
661 } else if (name->job_ancestry == job) {
662 char **ancestry = NULL, **i;
665 r = parse_ancestry(job->payload, job->payload_size, &ancestry);
667 log_error_errno(r, "Failed to parse JSON id.");
/* Sanity check: the resolved id must be the top of the ancestry chain. */
671 n = strv_length(ancestry);
672 if (n <= 0 || !streq(ancestry[n-1], name->id)) {
673 log_error("Ancestry doesn't end in main layer.");
678 log_info("Ancestor lookup succeeded, requires layers:\n");
679 STRV_FOREACH(i, ancestry)
680 log_info("\t%s", *i);
682 strv_free(name->ancestry);
683 name->ancestry = ancestry;
/* Start the layer downloads from the bottom of the chain. */
685 name->current_ancestry = 0;
686 r = dkr_import_name_pull_layer(name);
690 } else if (name->job_json == job) {
692 dkr_import_name_maybe_finish(name);
694 } else if (name->job_layer == job) {
/* One layer landed; advance and fetch the next. */
696 name->current_ancestry ++;
697 r = dkr_import_name_pull_layer(name);
702 assert_not_reached("Got finished event for unknown curl object");
/* Shared error exit: abort the whole import with the failure code. */
707 dkr_import_finish(name->import, r);
/* curl completion callback: validate transfer + HTTP status, and for
 * layer jobs finish the tar child, resolve aufs whiteouts, seal the
 * subvolume read-only and rename it into place. Then notify every name
 * waiting on this job. Errors abort the whole import. */
710 static void dkr_import_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {
711 DkrImportJob *job = NULL;
718 if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, &job) != CURLE_OK)
726 if (result != CURLE_OK) {
727 log_error("Transfer failed: %s", curl_easy_strerror(result));
732 code = curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status);
733 if (code != CURLE_OK) {
734 log_error("Failed to retrieve response code: %s", curl_easy_strerror(code));
737 } else if (status >= 300) {
738 log_error("HTTP request to %s failed with code %li.", job->url, status);
741 } else if (status < 200) {
742 log_error("HTTP request to %s finished with unexpected code %li.", job->url, status);
749 case DKR_IMPORT_JOB_LAYER: {
752 if (!job->tar_stream) {
/* tar is only forked once >= 2 payload bytes arrived; none did. */
753 log_error("Downloaded layer too short.");
758 fclose(job->tar_stream);
759 job->tar_stream = NULL;
761 assert(job->tar_pid > 0);
763 r = wait_for_terminate(job->tar_pid, &si);
765 log_error_errno(r, "Failed to wait for tar process: %m");
771 if (si.si_code != CLD_EXITED || si.si_status != EXIT_SUCCESS) {
/* BUGFIX: r is >= 0 here (wait succeeded), so it is not an error
 * code; log without bogus errno information. */
772 log_error("tar failed abnormally.");
777 r = aufs_resolve(job->temp_path);
779 log_error_errno(r, "Couldn't resolve aufs whiteouts: %m");
783 r = btrfs_subvol_read_only(job->temp_path, true);
785 log_error_errno(r, "Failed to mark snapshot read-only: %m");
789 if (rename(job->temp_path, job->final_path) < 0) {
/* BUGFIX: rename() reports through errno; r still held the stale
 * wait result, so %m printed the wrong error. */
790 log_error_errno(errno, "Failed to rename snapshot: %m");
794 log_info("Completed writing to layer %s", job->final_path);
/* Fan the completion out to every name that waits on this job. */
802 SET_FOREACH(n, job->needed_by, i)
803 dkr_import_name_job_finished(n, job);
808 dkr_import_finish(job->import, r);
/* curl write callback: while no tar child exists yet, accumulate bytes
 * in job->payload (bounded by PAYLOAD_MAX) and try to start tar; once
 * tar runs, stream bytes straight into its stdin pipe. */
811 static size_t dkr_import_job_write_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
812 DkrImportJob *j = userdata;
813 size_t sz = size * nmemb;
823 l = fwrite(contents, size, nmemb, j->tar_stream);
825 r = log_error_errno(errno, "Failed to write to tar: %m");
832 if (j->payload_size + sz > PAYLOAD_MAX) {
833 log_error("Payload too large.");
838 p = realloc(j->payload, j->payload_size + sz);
844 memcpy(p + j->payload_size, contents, sz);
845 j->payload_size += sz;
/* Enough buffered to sniff gzip? run_tar is a no-op until then. */
848 r = dkr_import_job_run_tar(j);
/* Error exit: tear the whole import down with the failure code. */
855 dkr_import_finish(j->import, r);
/* curl header callback: capture the auth token and the registry
 * endpoint list from the index server's response headers, validating
 * each advertised registry hostname. */
859 static size_t dkr_import_job_header_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
860 _cleanup_free_ char *registry = NULL;
861 size_t sz = size * nmemb;
862 DkrImportJob *j = userdata;
869 r = curl_header_strdup(contents, sz, HEADER_TOKEN, &token);
/* A later token header replaces an earlier one. */
875 free(j->response_token);
876 j->response_token = token;
/* BUGFIX: the '&' of "&registry" had been swallowed into the HTML
 * entity "&reg" (rendered as the registered-trademark sign); restore
 * the address-of out-parameter, matching the &token call above. */
879 r = curl_header_strdup(contents, sz, HEADER_REGISTRY, &registry);
887 l = strv_split(registry, ",");
894 if (!hostname_is_valid(*i)) {
895 log_error("Registry hostname is not valid.");
902 strv_free(j->response_registries);
903 j->response_registries = l;
/* Error exit: abort the whole import. */
909 dkr_import_finish(j->import, r);
/* Queue an HTTP job for a name. If a job for the same URL already
 * exists, just register the name in its needed_by set (returns 0 for
 * "already in flight"); otherwise build a new curl transfer with the
 * JSON Accept header and either the captured auth token or the
 * token-request header, register it in the import's jobs hashmap, and
 * hand ownership of the job to that map. */
913 static int dkr_import_name_add_job(DkrImportName *name, DkrImportJobType type, const char *url, DkrImportJob **ret) {
914 _cleanup_(dkr_import_job_unrefp) DkrImportJob *j = NULL;
915 DkrImportJob *f = NULL;
916 const char *t, *token;
923 log_info("Getting %s.", url);
/* Deduplicate: reuse an existing job for the same URL. */
924 f = hashmap_get(name->import->jobs, url);
929 r = set_put(f->needed_by, name);
936 r = hashmap_ensure_allocated(&name->import->jobs, &string_hash_ops);
940 j = new0(DkrImportJob, 1);
944 j->import = name->import;
946 j->url = strdup(url);
950 r = set_ensure_allocated(&j->needed_by, &trivial_hash_ops);
954 r = curl_glue_make(&j->curl, j->url, j);
958 token = dkr_import_name_get_token(name);
/* With a token: authenticate; without: ask the index to issue one. */
960 t = strappenda("Authorization: Token ", token);
962 t = HEADER_TOKEN " true";
964 j->request_header = curl_slist_new("Accept: application/json", t, NULL);
965 if (!j->request_header)
968 if (curl_easy_setopt(j->curl, CURLOPT_HTTPHEADER, j->request_header) != CURLE_OK)
971 if (curl_easy_setopt(j->curl, CURLOPT_WRITEFUNCTION, dkr_import_job_write_callback) != CURLE_OK)
974 if (curl_easy_setopt(j->curl, CURLOPT_WRITEDATA, j) != CURLE_OK)
977 if (curl_easy_setopt(j->curl, CURLOPT_HEADERFUNCTION, dkr_import_job_header_callback) != CURLE_OK)
980 if (curl_easy_setopt(j->curl, CURLOPT_HEADERDATA, j) != CURLE_OK)
983 r = curl_glue_add(name->import->glue, j->curl);
/* The jobs hashmap keys on j->url, so the job must outlive the entry. */
987 r = hashmap_put(name->import->jobs, j->url, j);
991 r = set_put(j->needed_by, name);
/* Roll the hashmap entry back if registering the waiter failed. */
993 hashmap_remove(name->import->jobs, url);
/* Start pulling a name: issue the initial index lookup
 * (/v1/repositories/<name>/images) against the configured index URL. */
1003 static int dkr_import_name_begin(DkrImportName *name) {
1007 assert(!name->job_images);
1009 url = strappenda(name->import->index_url, "/v1/repositories/", name->name, "/images");
1011 return dkr_import_name_add_job(name, DKR_IMPORT_JOB_IMAGES, url, &name->job_images);
/* Allocate a DkrImport context: copy the index URL (stripping a
 * trailing '/'), take a ref on the supplied event loop or fall back to
 * the default one, and hook the curl glue's completion callback. On
 * success ownership is transferred to *import. */
1014 int dkr_import_new(DkrImport **import, sd_event *event, const char *index_url, dkr_import_on_finished on_finished, void *userdata) {
1015 _cleanup_(dkr_import_unrefp) DkrImport *i = NULL;
1020 assert(dkr_url_is_valid(index_url));
1022 i = new0(DkrImport, 1);
1026 i->on_finished = on_finished;
1027 i->userdata = userdata;
1029 i->index_url = strdup(index_url);
/* Normalize: drop a trailing slash so later strappenda() joins cleanly. */
1033 e = endswith(i->index_url, "/");
1038 i->event = sd_event_ref(event);
/* No loop supplied by the caller: attach to the thread's default loop. */
1040 r = sd_event_default(&i->event);
1045 r = curl_glue_new(&i->glue, i->event);
1049 i->glue->on_finished = dkr_import_curl_on_finished;
1050 i->glue->userdata = i;
/* Destroy an import context: release every pending name and job (steal
 * them from their maps first so unref cannot double-touch the maps),
 * then the curl glue, the event loop ref and the index URL. */
1058 DkrImport* dkr_import_unref(DkrImport *import) {
1065 while ((n = hashmap_steal_first(import->names)))
1066 dkr_import_name_unref(n);
1067 hashmap_free(import->names);
1069 while ((j = hashmap_steal_first(import->jobs)))
1070 dkr_import_job_unref(j);
1071 hashmap_free(import->jobs);
1073 curl_glue_unref(import->glue);
1074 sd_event_unref(import->event);
1076 free(import->index_url);
/* Cancel a pull in progress, identified by its remote name: remove it
 * from the names map and drop it (which detaches it from its jobs). */
1083 int dkr_import_cancel(DkrImport *import, const char *name) {
1089 n = hashmap_remove(import->names, name);
1093 dkr_import_name_unref(n);
/* Public entry point: start pulling name:tag into the given local
 * machine name. Validates all identifiers, refuses a duplicate pull of
 * the same name, registers the new DkrImportName (keyed by its own
 * name copy) and kicks off the index lookup; on failure the half-added
 * name is cancelled again. */
1097 int dkr_import_pull(DkrImport *import, const char *name, const char *tag, const char *local, bool force_local) {
1098 _cleanup_(dkr_import_name_unrefp) DkrImportName *n = NULL;
1102 assert(dkr_name_is_valid(name));
1103 assert(dkr_tag_is_valid(tag));
1104 assert(!local || machine_name_is_valid(local));
/* Refuse to start the same remote name twice in parallel. */
1106 if (hashmap_get(import->names, name))
1109 r = hashmap_ensure_allocated(&import->names, &string_hash_ops);
1113 n = new0(DkrImportName, 1);
1119 n->name = strdup(name);
1123 n->tag = strdup(tag);
1128 n->local = strdup(local);
1131 n->force_local = force_local;
/* Keyed by n->name, so the map entry stays valid as long as n lives. */
1134 r = hashmap_put(import->names, n->name, n);
1138 r = dkr_import_name_begin(n);
/* Undo the registration if the first job could not be issued. */
1140 dkr_import_cancel(import, n->name);
/* A valid dkr name is "<prefix>/<suffix>" where both components are
 * valid filenames (the strchr picks the first '/'; further checks on
 * the remainder are off-screen in this listing). */
1149 bool dkr_name_is_valid(const char *name) {
1150 const char *slash, *p;
1155 slash = strchr(name, '/');
1159 if (!filename_is_valid(slash + 1))
/* Validate the part before the slash via an on-stack copy. */
1162 p = strndupa(name, slash - name);
1163 if (!filename_is_valid(p))
/* A valid dkr layer id is a filename-safe string consisting solely of
 * lowercase hex digits. */
1169 bool dkr_id_is_valid(const char *id) {
1171 if (!filename_is_valid(id))
1174 if (!in_charset(id, "0123456789abcdef"))
/* A valid index URL starts with http:// or https:// and is pure ASCII. */
1180 bool dkr_url_is_valid(const char *url) {
1184 if (!startswith(url, "http://") &&
1185 !startswith(url, "https://"))
1188 return ascii_is_valid(url);