1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2014 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
22 #include <curl/curl.h>
23 #include <sys/prctl.h>
29 #include "curl-util.h"
30 #include "import-dck.h"
31 #include "btrfs-util.h"
32 #include "aufs-util.h"
/* TODO:
 *  - fall back to btrfs loop pool device */

typedef struct DckImportJob DckImportJob;
typedef struct DckImportName DckImportName;

/* The kinds of HTTP GET requests we issue against index/registry. */
typedef enum DckImportJobType {
        DCK_IMPORT_JOB_IMAGES,
        DCK_IMPORT_JOB_ANCESTRY,

        /* NOTE(review): the following are fields of struct DckImportJob;
         * the struct header is not visible in this chunk. */
        DckImportJobType type;

        Set *needed_by; /* DckImportName objects waiting on this job */

        struct curl_slist *request_header;

        char **response_registries;

struct DckImportName {

        /* One job slot per pipeline step; NULL when not (yet) running */
        DckImportJob *job_images, *job_tags, *job_ancestry, *job_json, *job_layer;

        /* Index into ->ancestry of the layer currently being pulled */
        unsigned current_ancestry;

        dck_import_on_finished on_finished;

/* All index/registry traffic goes over https */
#define PROTOCOL_PREFIX "https://"

#define INDEX_HOST "index.do" /* the URL we get the data from */ "cker.io"

#define HEADER_TOKEN "X-Do" /* the HTTP header for the auth token */ "cker-Token:"
#define HEADER_REGISTRY "X-Do" /* the HTTP header for the registry */ "cker-Endpoints:"

/* Refuse to buffer more payload than this (see write callback) */
#define PAYLOAD_MAX (16*1024*1024)
/* Refuse ancestry chains longer than this (see parse_ancestry()) */
#define LAYERS_MAX 2048

/* Forward declaration: job completion handlers queue follow-up jobs. */
static int dck_import_name_add_job(DckImportName *name, DckImportJobType type, const char *url, DckImportJob **ret);
/* Frees a DckImportJob: detaches its curl handle, removes a half-written
 * temporary btrfs subvolume, signals a still-running tar child and releases
 * all owned memory. */
static DckImportJob *dck_import_job_unref(DckImportJob *job) {

        curl_glue_remove_and_free(job->import->glue, job->curl);
        curl_slist_free_all(job->request_header);

        fclose(job->tar_stream);

        free(job->final_path);

        if (job->temp_path) {
                /* Download did not complete: drop the temporary subvolume again */
                btrfs_subvol_remove(job->temp_path);
                free(job->temp_path);

        set_free(job->needed_by);

        /* Ask a still-running tar child to go away */
        if (job->tar_pid > 0)
                kill(job->tar_pid, SIGTERM);

        free(job->response_token);
        strv_free(job->response_registries);
/* Frees a DckImportName, first deregistering it from every job that was
 * downloading on its behalf so finished jobs no longer try to notify it. */
static DckImportName *dck_import_name_unref(DckImportName *name) {

        if (name->job_images)
                set_remove(name->job_images->needed_by, name);

                set_remove(name->job_tags->needed_by, name);

        if (name->job_ancestry)
                set_remove(name->job_ancestry->needed_by, name);

                set_remove(name->job_json->needed_by, name);

                set_remove(name->job_layer->needed_by, name);

        strv_free(name->ancestry);
/* Pointer-typed cleanup helpers for use with _cleanup_() */
DEFINE_TRIVIAL_CLEANUP_FUNC(DckImportJob*, dck_import_job_unref);
DEFINE_TRIVIAL_CLEANUP_FUNC(DckImportName*, dck_import_name_unref);
/* Report completion of the whole import operation: invoke the caller's
 * callback if one is set. NOTE(review): sd_event_exit() is presumably the
 * else branch — the "else" line is not visible in this chunk. */
static void dck_import_finish(DckImport *import, int error) {

        if (import->on_finished)
                import->on_finished(import, error, import->userdata);

                sd_event_exit(import->event, error);
/* Parses a JSON response consisting of exactly one string token, returning
 * it as the image id after validating it with dck_id_is_valid(). */
static int parse_id(const void *payload, size_t size, char **ret) {
        _cleanup_free_ char *buf = NULL, *id = NULL, *other = NULL;
        union json_value v = {};
        void *json_state = NULL;

        /* Embedded NUL bytes cannot occur in valid JSON text */
        if (memchr(payload, 0, size))

        buf = strndup(payload, size);

        t = json_tokenize(&p, &id, &v, &json_state, NULL);

        /* The first token must be the string we are after */
        if (t != JSON_STRING)

        /* ... and it must be the only token in the document */
        t = json_tokenize(&p, &other, &v, &json_state, NULL);

        if (!dck_id_is_valid(id))
/* Parses a JSON array of layer-id strings into a NULL-terminated string
 * vector, enforcing LAYERS_MAX and rejecting duplicate entries. */
static int parse_ancestry(const void *payload, size_t size, char ***ret) {
        _cleanup_free_ char *buf = NULL;
        void *json_state = NULL;

        } state = STATE_BEGIN;
        _cleanup_strv_free_ char **l = NULL;
        size_t n = 0, allocated = 0;

        /* Embedded NUL bytes cannot occur in valid JSON text */
        if (memchr(payload, 0, size))

        buf = strndup(payload, size);

                _cleanup_free_ char *str;
                union json_value v = {};

                t = json_tokenize(&p, &str, &v, &json_state, NULL);

                        if (t == JSON_ARRAY_OPEN)

                        if (t == JSON_STRING) {
                                /* Every array element must be a valid layer id */
                                if (!dck_id_is_valid(str))

                                /* Cap the number of layers we are willing to track */
                                if (n+1 > LAYERS_MAX)

                                /* +2: room for the new entry plus terminating NULL */
                                if (!GREEDY_REALLOC(l, allocated, n + 2))

                        } else if (t == JSON_ARRAY_CLOSE)

                else if (t == JSON_ARRAY_CLOSE)

        /* A layer may not appear twice in its own ancestry */
        if (!strv_is_uniq(l))
/* Returns the layer id currently being processed, indexed by
 * ->current_ancestry, or (presumably) NULL when the ancestry is empty. */
static const char *dck_import_name_current_layer(DckImportName *name) {

        if (strv_isempty(name->ancestry))

        return name->ancestry[name->current_ancestry];
/* Returns the layer the current one builds upon (the previous ancestry
 * entry), or (presumably) NULL for the very first layer. */
static const char *dck_import_name_current_base_layer(DckImportName *name) {

        if (strv_isempty(name->ancestry))

        if (name->current_ancestry <= 0)

        return name->ancestry[name->current_ancestry-1];
/* Returns the registry endpoint list collected by the finished "images"
 * job; bails out early (return lines elided here) while it is pending. */
static char** dck_import_name_get_registries(DckImportName *name) {

        if (!name->job_images)

        if (!name->job_images->done)

        if (strv_isempty(name->job_images->response_registries))

        return name->job_images->response_registries;
/* Returns the auth token collected by the "images" job, once it finished. */
static const char*dck_import_name_get_token(DckImportName *name) {

        if (!name->job_images)

        if (!name->job_images->done)

        return name->job_images->response_token;
375 static void dck_import_name_maybe_finish(DckImportName *name) {
380 if (!name->job_images || !name->job_images->done)
383 if (!name->job_ancestry || !name->job_ancestry->done)
386 if (!name->job_json || !name->job_json->done)
389 if (name->job_layer && !name->job_json->done)
392 if (dck_import_name_current_layer(name))
400 p = strappenda("/var/lib/container/", name->local);
401 q = strappenda("/var/lib/container/.dck-", name->id);
403 if (name->force_local) {
404 (void) btrfs_subvol_remove(p);
405 (void) rm_rf(p, false, true, false);
408 r = btrfs_subvol_snapshot(q, p, false, false);
410 log_error_errno(r, "Failed to snapshot final image: %m");
411 dck_import_finish(name->import, r);
415 log_info("Created new image %s.", p);
418 dck_import_finish(name->import, 0);
/* Once enough payload has arrived to sniff the compression format, forks a
 * tar child extracting into job->temp_path, connected via a pipe, and
 * flushes the already-buffered payload into it. */
static int dck_import_job_run_tar(DckImportJob *job) {
        _cleanup_close_pair_ int pipefd[2] = { -1, -1 };

        /* A stream to run tar on? */

        /* Maybe fork off tar, if we have enough to figure out that
         * something is gzip compressed or not */

        /* Need at least two bytes to check for the gzip magic */
        if (job->payload_size < 2)

        /* Detect gzip signature */
        gzip = ((uint8_t*) job->payload)[0] == 0x1f &&
               ((uint8_t*) job->payload)[1] == 0x8b;

        assert(!job->tar_stream);
        assert(job->tar_pid <= 0);

        if (pipe2(pipefd, O_CLOEXEC) < 0)
                return log_error_errno(errno, "Failed to create pipe for tar: %m");

        job->tar_pid = fork();
        if (job->tar_pid < 0)
                return log_error_errno(errno, "Failed to fork off tar: %m");
        if (job->tar_pid == 0) {
                /* Child: become the tar process, reading from the pipe */

                reset_all_signal_handlers();

                /* Die when the parent goes away */
                assert_se(prctl(PR_SET_PDEATHSIG, SIGTERM) == 0);

                pipefd[1] = safe_close(pipefd[1]);

                if (dup2(pipefd[0], STDIN_FILENO) != STDIN_FILENO) {
                        log_error_errno(errno, "Failed to dup2() fd: %m");

                if (pipefd[0] != STDIN_FILENO)
                        safe_close(pipefd[0]);
                if (pipefd[1] != STDIN_FILENO)
                        safe_close(pipefd[1]);

                /* Point tar's stdout at /dev/null, we only care about extraction */
                null_fd = open("/dev/null", O_WRONLY|O_NOCTTY);

                        log_error_errno(errno, "Failed to open /dev/null: %m");

                if (dup2(null_fd, STDOUT_FILENO) != STDOUT_FILENO) {
                        log_error_errno(errno, "Failed to dup2() fd: %m");

                if (null_fd != STDOUT_FILENO)

                /* Pick -xz vs -x based on the sniffed gzip magic */
                execlp("tar", "tar", "-C", job->temp_path, gzip ? "-xz" : "-x", NULL);

        /* Parent: keep the write end as a stdio stream */
        pipefd[0] = safe_close(pipefd[0]);

        job->tar_stream = fdopen(pipefd[1], "w");
        if (!job->tar_stream)
                return log_error_errno(errno, "Failed to allocate tar stream: %m");

        /* Flush what we buffered before tar existed */
        if (fwrite(job->payload, 1, job->payload_size, job->tar_stream) != job->payload_size)
                return log_error_errno(errno, "Couldn't write payload: %m");

        job->payload_size = 0;
508 static int dck_import_name_pull_layer(DckImportName *name) {
509 _cleanup_free_ char *path = NULL, *temp = NULL;
510 const char *url, *layer = NULL, *base = NULL;
516 if (name->job_layer) {
517 set_remove(name->job_layer->needed_by, name);
518 name->job_layer = NULL;
522 layer = dck_import_name_current_layer(name);
524 dck_import_name_maybe_finish(name);
528 path = strjoin("/var/lib/container/.dck-", layer, NULL);
532 if (laccess(path, F_OK) < 0) {
536 return log_error_errno(errno, "Failed to check for container: %m");
539 log_info("Layer %s already exists, skipping.", layer);
541 name->current_ancestry++;
547 rg = dck_import_name_get_registries(name);
550 url = strappenda(PROTOCOL_PREFIX, rg[0], "/v1/images/", layer, "/layer");
551 r = dck_import_name_add_job(name, DCK_IMPORT_JOB_LAYER, url, &name->job_layer);
553 log_error_errno(r, "Failed to issue HTTP request: %m");
556 if (r == 0) /* Already downloading this one? */
559 log_info("Pulling layer %s...", layer);
561 r = tempfn_random(path, &temp);
565 base = dck_import_name_current_base_layer(name);
567 const char *base_path;
569 base_path = strappend("/var/lib/container/.dck-", base);
570 r = btrfs_subvol_snapshot(base_path, temp, false, true);
572 r = btrfs_subvol_make(temp);
575 return log_error_errno(r, "Failed to make btrfs subvolume %s", temp);
577 name->job_layer->final_path = path;
578 name->job_layer->temp_path = temp;
/* Pipeline state machine, advanced whenever one of this name's jobs
 * completes: images -> tags -> (ancestry + json) -> layers, issuing the
 * next HTTP request at each step. */
static void dck_import_name_job_finished(DckImportName *name, DckImportJob *job) {

        if (name->job_images == job) {
                /* Step 1: index lookup finished, gave us registry + token */

                assert(!name->job_tags);
                assert(!name->job_ancestry);
                assert(!name->job_json);
                assert(!name->job_layer);

                rg = dck_import_name_get_registries(name);
                if (strv_isempty(rg)) {
                        log_error("Didn't get registry information.");

                log_info("Index lookup succeeded, directed to registry %s.", rg[0]);

                /* Next, resolve the tag into a concrete image id */
                url = strappenda(PROTOCOL_PREFIX, rg[0], "/v1/repositories/", name->name, "/tags/", name->tag);

                r = dck_import_name_add_job(name, DCK_IMPORT_JOB_TAGS, url, &name->job_tags);

                        log_error_errno(r, "Failed to issue HTTP request: %m");

        } else if (name->job_tags == job) {
                /* Step 2: tag resolved to an image id */

                char *id = NULL, **rg;

                assert(!name->job_ancestry);
                assert(!name->job_json);
                assert(!name->job_layer);

                r = parse_id(job->payload, job->payload_size, &id);

                        log_error_errno(r, "Failed to parse JSON id.");

                rg = dck_import_name_get_registries(name);

                log_info("Tag lookup succeeded, resolved to layer %s.", name->id);

                /* Request the ancestry chain and the image metadata in parallel */
                url = strappenda(PROTOCOL_PREFIX, rg[0], "/v1/images/", name->id, "/ancestry");
                r = dck_import_name_add_job(name, DCK_IMPORT_JOB_ANCESTRY, url, &name->job_ancestry);

                        log_error_errno(r, "Failed to issue HTTP request: %m");

                url = strappenda(PROTOCOL_PREFIX, rg[0], "/v1/images/", name->id, "/json");
                r = dck_import_name_add_job(name, DCK_IMPORT_JOB_JSON, url, &name->job_json);

                        log_error_errno(r, "Failed to issue HTTP request: %m");

        } else if (name->job_ancestry == job) {
                /* Step 3: got the ancestry list; start pulling layers in order */
                char **ancestry = NULL, **i;

                r = parse_ancestry(job->payload, job->payload_size, &ancestry);

                        log_error_errno(r, "Failed to parse JSON id.");

                /* Sanity check: the chain must terminate in the resolved id */
                n = strv_length(ancestry);
                if (n <= 0 || !streq(ancestry[n-1], name->id)) {
                        log_error("Ancestry doesn't end in main layer.");

                log_info("Ancestor lookup succeeded, requires layers:\n");
                STRV_FOREACH(i, ancestry)
                        log_info("\t%s", *i);

                strv_free(name->ancestry);
                name->ancestry = ancestry;

                /* Pull from the bottom of the chain upward */
                name->current_ancestry = 0;
                r = dck_import_name_pull_layer(name);

        } else if (name->job_json == job) {

                dck_import_name_maybe_finish(name);

        } else if (name->job_layer == job) {
                /* One layer finished; continue with the next */

                name->current_ancestry ++;
                r = dck_import_name_pull_layer(name);

                assert_not_reached("Got finished event for unknown curl object");

        dck_import_finish(name->import, r);
701 static void dck_import_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {
702 DckImportJob *job = NULL;
709 if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, &job) != CURLE_OK)
717 if (result != CURLE_OK) {
718 log_error("Transfer failed: %s", curl_easy_strerror(result));
723 code = curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status);
724 if (code != CURLE_OK) {
725 log_error("Failed to retrieve response code: %s", curl_easy_strerror(code));
728 } else if (status >= 300) {
729 log_error("HTTP request to %s failed with code %li.", job->url, status);
732 } else if (status < 200) {
733 log_error("HTTP request to %s finished with unexpected code %li.", job->url, status);
740 case DCK_IMPORT_JOB_LAYER: {
743 if (!job->tar_stream) {
744 log_error("Downloaded layer too short.");
749 fclose(job->tar_stream);
750 job->tar_stream = NULL;
752 assert(job->tar_pid > 0);
754 r = wait_for_terminate(job->tar_pid, &si);
756 log_error_errno(r, "Failed to wait for tar process: %m");
762 if (si.si_code != CLD_EXITED || si.si_status != EXIT_SUCCESS) {
763 log_error_errno(r, "tar failed abnormally.");
768 r = aufs_resolve(job->temp_path);
770 log_error_errno(r, "Couldn't resolve aufs whiteouts: %m");
774 r = btrfs_subvol_read_only(job->temp_path, true);
776 log_error_errno(r, "Failed to mark snapshot read-only: %m");
780 if (rename(job->temp_path, job->final_path) < 0) {
781 log_error_errno(r, "Failed to rename snapshot: %m");
785 log_info("Completed writing to layer %s", job->final_path);
793 SET_FOREACH(n, job->needed_by, i)
794 dck_import_name_job_finished(n, job);
799 dck_import_finish(job->import, r);
/* libcurl write callback: streams layer payload into the tar child once it
 * runs; otherwise buffers it (bounded by PAYLOAD_MAX) until enough data has
 * arrived for dck_import_job_run_tar() to sniff the format. */
static size_t dck_import_job_write_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
        DckImportJob *j = userdata;
        size_t sz = size * nmemb;

        /* tar already running: hand the data straight to it */
        l = fwrite(contents, size, nmemb, j->tar_stream);

        /* Enforce an overall cap on how much we are willing to buffer */
        if (j->payload_size + sz > PAYLOAD_MAX) {

        p = realloc(j->payload, j->payload_size + sz);

        memcpy(p + j->payload_size, contents, sz);
        j->payload_size += sz;

        /* Maybe we have enough now to start the tar child */
        r = dck_import_job_run_tar(j);

        dck_import_finish(j->import, r);
849 static size_t dck_import_job_header_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
850 _cleanup_free_ char *registry = NULL;
851 size_t sz = size * nmemb;
852 DckImportJob *j = userdata;
859 r = curl_header_strdup(contents, sz, HEADER_TOKEN, &token);
865 free(j->response_token);
866 j->response_token = token;
869 r = curl_header_strdup(contents, sz, HEADER_REGISTRY, ®istry);
877 l = strv_split(registry, ",");
884 if (!hostname_is_valid(*i)) {
885 log_error("Registry hostname is not valid.");
892 strv_free(j->response_registries);
893 j->response_registries = l;
899 dck_import_finish(j->import, r);
/* Creates (or reuses) a download job for the given URL, registers the name
 * as an interested party, and enqueues the curl transfer. A value of 0
 * presumably means an existing job for the same URL was joined (see the
 * "Already downloading" check at the call site). */
static int dck_import_name_add_job(DckImportName *name, DckImportJobType type, const char *url, DckImportJob **ret) {
        _cleanup_(dck_import_job_unrefp) DckImportJob *j = NULL;
        DckImportJob *f = NULL;
        const char *t, *token;

        log_info("Getting %s.", url);

        /* Deduplicate: join a job that is already fetching this URL */
        f = hashmap_get(name->import->jobs, url);

        r = set_put(f->needed_by, name);

        r = hashmap_ensure_allocated(&name->import->jobs, &string_hash_ops);

        j = new0(DckImportJob, 1);

        j->import = name->import;

        j->url = strdup(url);

        r = set_ensure_allocated(&j->needed_by, &trivial_hash_ops);

        r = curl_glue_make(&j->curl, j->url, j);

        /* Pass the auth token along once the index job gave us one */
        token = dck_import_name_get_token(name);

        t = strappenda("Authorization: Token ", token);

        t = HEADER_TOKEN " true";

        j->request_header = curl_slist_new("Accept: application/json", t, NULL);
        if (!j->request_header)

        if (curl_easy_setopt(j->curl, CURLOPT_HTTPHEADER, j->request_header) != CURLE_OK)

        if (curl_easy_setopt(j->curl, CURLOPT_WRITEFUNCTION, dck_import_job_write_callback) != CURLE_OK)

        if (curl_easy_setopt(j->curl, CURLOPT_WRITEDATA, j) != CURLE_OK)

        if (curl_easy_setopt(j->curl, CURLOPT_HEADERFUNCTION, dck_import_job_header_callback) != CURLE_OK)

        if (curl_easy_setopt(j->curl, CURLOPT_HEADERDATA, j) != CURLE_OK)

        r = curl_glue_add(name->import->glue, j->curl);

        r = hashmap_put(name->import->jobs, j->url, j);

        r = set_put(j->needed_by, name);
                /* Roll back the hashmap entry on failure */
                hashmap_remove(name->import->jobs, url);
/* Kicks off the pull by requesting the image list from the central index;
 * its completion drives the rest of the pipeline. */
static int dck_import_name_begin(DckImportName *name) {

        assert(!name->job_images);

        url = strappenda(PROTOCOL_PREFIX, INDEX_HOST, "/v1/repositories/", name->name, "/images");

        return dck_import_name_add_job(name, DCK_IMPORT_JOB_IMAGES, url, &name->job_images);
/* Allocates a new DckImport context bound to the given event loop and hooks
 * up the curl glue's completion callback. */
int dck_import_new(DckImport **import, sd_event *event, dck_import_on_finished on_finished, void *userdata) {
        _cleanup_(dck_import_unrefp) DckImport *i = NULL;

        i = new0(DckImport, 1);

        i->on_finished = on_finished;
        i->userdata = userdata;

        i->event = sd_event_ref(event);

        /* NOTE(review): presumably the fallback branch for event == NULL —
         * the conditional is not visible in this chunk. */
        r = sd_event_default(&i->event);

        r = curl_glue_new(&i->glue, i->event);

        i->glue->on_finished = dck_import_curl_on_finished;
        i->glue->userdata = i;
/* Destroys a DckImport: releases all names and jobs (stealing them from
 * their hashmaps so the unref functions see consistent state), then drops
 * the curl glue and the event loop reference. */
DckImport* dck_import_unref(DckImport *import) {

        while ((n = hashmap_steal_first(import->names)))
                dck_import_name_unref(n);
        hashmap_free(import->names);

        while ((j = hashmap_steal_first(import->jobs)))
                dck_import_job_unref(j);
        hashmap_free(import->jobs);

        curl_glue_unref(import->glue);
        sd_event_unref(import->event);
/* Cancels an in-progress pull, identified by name, dropping its
 * registration and all per-name state. */
int dck_import_cancel(DckImport *import, const char *name) {

        n = hashmap_remove(import->names, name);

        dck_import_name_unref(n);
1074 int dck_import_pull(DckImport *import, const char *name, const char *tag, const char *local, bool force_local) {
1075 _cleanup_(dck_import_name_unrefp) DckImportName *n = NULL;
1079 assert(dck_name_is_valid(name));
1080 assert(dck_tag_is_valid(tag));
1081 assert(!local || machine_name_is_valid(local));
1083 if (hashmap_get(import->names, name))
1086 r = hashmap_ensure_allocated(&import->names, &string_hash_ops);
1090 n = new0(DckImportName, 1);
1096 n->name = strdup(name);
1100 n->tag = strdup(tag);
1105 n->local = strdup(local);
1108 n->force_local = force_local;
1111 r = hashmap_put(import->names, name, n);
1115 r = dck_import_name_begin(n);
1117 dck_import_cancel(import, n->name);
/* Validates a remote image name of the form "prefix/name": both the part
 * before and after the slash must be valid file names. */
bool dck_name_is_valid(const char *name) {
        const char *slash, *p;

        slash = strchr(name, '/');

        if (!filename_is_valid(slash + 1))

        /* Validate the part before the slash on its own */
        p = strndupa(name, slash - name);
        if (!filename_is_valid(p))
/* Validates an image/layer id: a valid file name consisting only of
 * lowercase hexadecimal characters. */
bool dck_id_is_valid(const char *id) {

        if (!filename_is_valid(id))

        if (!in_charset(id, "0123456789abcdef"))