From 56ebfaf1ca185a93ffb372b6e1a1fa3a957d93cd Mon Sep 17 00:00:00 2001
From: Lennart Poettering <lennart@poettering.net>
Date: Tue, 20 Jan 2015 01:36:11 +0100
Subject: [PATCH] import: add support for pulling raw tarballs as containers

Ubuntu provides its cloud images optionally as tarballs, hence also
support downloading those.
---
 Makefile.am              |   6 +
 src/import/import-dkr.c  |   2 +-
 src/import/import-job.c  | 629 +++++++++++++++++++++++++++++++++++++++
 src/import/import-job.h  | 101 +++++++
 src/import/import-raw.c  |  14 +-
 src/import/import-tar.c  | 296 ++++++++++++++++++
 src/import/import-tar.h  |  36 +++
 src/import/import-util.c | 201 +++++++++++++
 src/import/import-util.h |  31 ++
 src/import/import.c      | 152 +++++++++-
 10 files changed, 1450 insertions(+), 18 deletions(-)
 create mode 100644 src/import/import-job.c
 create mode 100644 src/import/import-job.h
 create mode 100644 src/import/import-tar.c
 create mode 100644 src/import/import-tar.h
 create mode 100644 src/import/import-util.c
 create mode 100644 src/import/import-util.h

diff --git a/Makefile.am b/Makefile.am
index 788e63498..f165042cb 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -5246,8 +5246,14 @@ systemd_import_SOURCES = \
 	src/import/import.c \
 	src/import/import-raw.c \
 	src/import/import-raw.h \
+	src/import/import-tar.c \
+	src/import/import-tar.h \
 	src/import/import-dkr.c \
 	src/import/import-dkr.h \
+	src/import/import-job.c \
+	src/import/import-job.h \
+	src/import/import-util.c \
+	src/import/import-util.h \
 	src/import/curl-util.c \
 	src/import/curl-util.h \
 	src/import/aufs-util.c \
diff --git a/src/import/import-dkr.c b/src/import/import-dkr.c
index b54a1a6d9..8dfd2707e 100644
--- a/src/import/import-dkr.c
+++ b/src/import/import-dkr.c
@@ -422,7 +422,7 @@ static void dkr_import_name_maybe_finish(DkrImportName *name) {
                         return;
                 }
 
-                log_info("Created new local image %s.", p);
+                log_info("Created new local image '%s'.", name->local);
         }
 
         dkr_import_finish(name->import, 0);
 
diff --git a/src/import/import-job.c b/src/import/import-job.c
new file mode 100644
index 000000000..5a4ea69cb
--- /dev/null
+++ b/src/import/import-job.c
@@ -0,0 +1,629 @@
+/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
+
+/***
+  This file is part of systemd.
+
+  Copyright 2015 Lennart Poettering
+
+  systemd is free software; you can redistribute it and/or modify it
+  under the terms of the GNU Lesser General Public License as published by
+  the Free Software Foundation; either version 2.1 of the License, or
+  (at your option) any later version.
+
+  systemd is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  Lesser General Public License for more details.
+
+  You should have received a copy of the GNU Lesser General Public License
+  along with systemd; If not, see <http://www.gnu.org/licenses/>.
+***/ + +#include + +#include "strv.h" +#include "import-job.h" + +ImportJob* import_job_unref(ImportJob *j) { + if (!j) + return NULL; + + curl_glue_remove_and_free(j->glue, j->curl); + curl_slist_free_all(j->request_header); + + safe_close(j->disk_fd); + + if (j->compressed == IMPORT_JOB_XZ) + lzma_end(&j->xz); + else if (j->compressed == IMPORT_JOB_GZIP) + inflateEnd(&j->gzip); + + free(j->url); + free(j->etag); + strv_free(j->old_etags); + free(j->payload); + + free(j); + + return NULL; +} + +DEFINE_TRIVIAL_CLEANUP_FUNC(ImportJob*, import_job_unref); + +static void import_job_finish(ImportJob *j, int ret) { + assert(j); + + if (j->state == IMPORT_JOB_DONE || + j->state == IMPORT_JOB_FAILED) + return; + + if (ret == 0) + j->state = IMPORT_JOB_DONE; + else { + j->state = IMPORT_JOB_FAILED; + j->error = ret; + } + + if (j->on_finished) + j->on_finished(j); +} + +void import_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) { + ImportJob *j = NULL; + CURLcode code; + long status; + int r; + + if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, &j) != CURLE_OK) + return; + + if (!j || j->state == IMPORT_JOB_DONE || j->state == IMPORT_JOB_FAILED) + return; + + if (result != CURLE_OK) { + log_error("Transfer failed: %s", curl_easy_strerror(result)); + r = -EIO; + goto finish; + } + + code = curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status); + if (code != CURLE_OK) { + log_error("Failed to retrieve response code: %s", curl_easy_strerror(code)); + r = -EIO; + goto finish; + } else if (status == 304) { + log_info("Image already downloaded. Skipping download."); + r = 0; + goto finish; + } else if (status >= 300) { + log_error("HTTP request to %s failed with code %li.", j->url, status); + r = -EIO; + goto finish; + } else if (status < 200) { + log_error("HTTP request to %s finished with unexpected code %li.", j->url, status); + r = -EIO; + goto finish; + } + + if (j->state != IMPORT_JOB_RUNNING) { + log_error("Premature connection termination."); + r = -EIO; + goto finish; + } + + if (j->content_length != (uint64_t) -1 && + j->content_length != j->written_compressed) { + log_error("Download truncated."); + r = -EIO; + goto finish; + } + + if (j->disk_fd >= 0 && j->allow_sparse) { + /* Make sure the file size is right, in case the file was + * sparse and we just seeked for the last part */ + + if (ftruncate(j->disk_fd, j->written_uncompressed) < 0) { + log_error_errno(errno, "Failed to truncate file: %m"); + r = -errno; + goto finish; + } + + if (j->etag) + (void) fsetxattr(j->disk_fd, "user.source_etag", j->etag, strlen(j->etag), 0); + if (j->url) + (void) fsetxattr(j->disk_fd, "user.source_url", j->url, strlen(j->url), 0); + + if (j->mtime != 0) { + struct timespec ut[2]; + + timespec_store(&ut[0], j->mtime); + ut[1] = ut[0]; + (void) futimens(j->disk_fd, ut); + + (void) fd_setcrtime(j->disk_fd, j->mtime); + } + } + + r = 0; + +finish: + import_job_finish(j, r); +} + + +static int import_job_write_uncompressed(ImportJob *j, void *p, size_t sz) { + ssize_t n; + + assert(j); + assert(p); + assert(sz > 0); + assert(j->disk_fd >= 0); + + if (j->written_uncompressed + sz < j->written_uncompressed) { + log_error("File too large, overflow"); + return -EOVERFLOW; + } + + if (j->written_uncompressed + sz > j->uncompressed_max) { + log_error("File overly large, refusing"); + return -EFBIG; + } + + if (j->disk_fd >= 0) { + + if (j->allow_sparse) + n = sparse_write(j->disk_fd, p, sz, 64); + else + n = write(j->disk_fd, p, sz); + if (n < 0) { + log_error_errno(errno, "Failed to write file: 
%m"); + return -errno; + } + if ((size_t) n < sz) { + log_error("Short write"); + return -EIO; + } + } else { + + if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz)) + return log_oom(); + + memcpy((uint8_t*) j->payload + j->payload_size, p, sz); + j->payload_size += sz; + } + + j->written_uncompressed += sz; + + return 0; +} + +static int import_job_write_compressed(ImportJob *j, void *p, size_t sz) { + int r; + + assert(j); + assert(p); + assert(sz > 0); + assert(j->disk_fd >= 0); + + if (j->written_compressed + sz < j->written_compressed) { + log_error("File too large, overflow"); + return -EOVERFLOW; + } + + if (j->written_compressed + sz > j->compressed_max) { + log_error("File overly large, refusing."); + return -EFBIG; + } + + if (j->content_length != (uint64_t) -1 && + j->written_compressed + sz > j->content_length) { + log_error("Content length incorrect."); + return -EFBIG; + } + + switch (j->compressed) { + + case IMPORT_JOB_UNCOMPRESSED: + r = import_job_write_uncompressed(j, p, sz); + if (r < 0) + return r; + + break; + + case IMPORT_JOB_XZ: + j->xz.next_in = p; + j->xz.avail_in = sz; + + while (j->xz.avail_in > 0) { + uint8_t buffer[16 * 1024]; + lzma_ret lzr; + + j->xz.next_out = buffer; + j->xz.avail_out = sizeof(buffer); + + lzr = lzma_code(&j->xz, LZMA_RUN); + if (lzr != LZMA_OK && lzr != LZMA_STREAM_END) { + log_error("Decompression error."); + return -EIO; + } + + r = import_job_write_uncompressed(j, buffer, sizeof(buffer) - j->xz.avail_out); + if (r < 0) + return r; + } + + break; + + case IMPORT_JOB_GZIP: + j->gzip.next_in = p; + j->gzip.avail_in = sz; + + while (j->gzip.avail_in > 0) { + uint8_t buffer[16 * 1024]; + + j->gzip.next_out = buffer; + j->gzip.avail_out = sizeof(buffer); + + r = inflate(&j->gzip, Z_NO_FLUSH); + if (r != Z_OK && r != Z_STREAM_END) { + log_error("Decompression error."); + return -EIO; + } + + r = import_job_write_uncompressed(j, buffer, sizeof(buffer) - j->gzip.avail_out); + if (r < 0) + return r; + } + + break; + + default: + assert_not_reached("Unknown compression"); + } + + j->written_compressed += sz; + + return 0; +} + +static int import_job_open_disk(ImportJob *j) { + int r; + + assert(j); + + if (j->on_open_disk) { + r = j->on_open_disk(j); + if (r < 0) + return r; + } + + if (j->disk_fd >= 0) { + /* Check if we can do sparse files */ + + if (lseek(j->disk_fd, SEEK_SET, 0) == 0) + j->allow_sparse = true; + else { + if (errno != ESPIPE) + return log_error_errno(errno, "Failed to seek on file descriptor: %m"); + + j->allow_sparse = false; + } + } + + return 0; +} + +static int import_job_detect_compression(ImportJob *j) { + static const uint8_t xz_signature[] = { + 0xfd, '7', 'z', 'X', 'Z', 0x00 + }; + static const uint8_t gzip_signature[] = { + 0x1f, 0x8b + }; + + _cleanup_free_ uint8_t *stub = NULL; + size_t stub_size; + + int r; + + assert(j); + + if (j->payload_size < MAX(sizeof(xz_signature), sizeof(gzip_signature))) + return 0; + + if (memcmp(j->payload, xz_signature, sizeof(xz_signature)) == 0) + j->compressed = IMPORT_JOB_XZ; + else if (memcmp(j->payload, gzip_signature, sizeof(gzip_signature)) == 0) + j->compressed = IMPORT_JOB_GZIP; + else + j->compressed = IMPORT_JOB_UNCOMPRESSED; + + log_debug("Stream is XZ compressed: %s", yes_no(j->compressed == IMPORT_JOB_XZ)); + log_debug("Stream is GZIP compressed: %s", yes_no(j->compressed == IMPORT_JOB_GZIP)); + + if (j->compressed == IMPORT_JOB_XZ) { + lzma_ret xzr; + + xzr = lzma_stream_decoder(&j->xz, UINT64_MAX, LZMA_TELL_UNSUPPORTED_CHECK); + if (xzr != 
LZMA_OK) { + log_error("Failed to initialize XZ decoder."); + return -EIO; + } + } + if (j->compressed == IMPORT_JOB_GZIP) { + r = inflateInit2(&j->gzip, 15+16); + if (r != Z_OK) { + log_error("Failed to initialize gzip decoder."); + return -EIO; + } + } + + r = import_job_open_disk(j); + if (r < 0) + return r; + + /* Now, take the payload we read so far, and decompress it */ + stub = j->payload; + stub_size = j->payload_size; + + j->payload = NULL; + j->payload_size = 0; + + j->state = IMPORT_JOB_RUNNING; + + r = import_job_write_compressed(j, stub, stub_size); + if (r < 0) + return r; + + return 0; +} + +static size_t import_job_write_callback(void *contents, size_t size, size_t nmemb, void *userdata) { + ImportJob *j = userdata; + size_t sz = size * nmemb; + int r; + + assert(contents); + assert(j); + + switch (j->state) { + + case IMPORT_JOB_ANALYZING: + /* Let's first check what it actually is */ + + if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz)) { + r = log_oom(); + goto fail; + } + + memcpy((uint8_t*) j->payload + j->payload_size, contents, sz); + j->payload_size += sz; + + r = import_job_detect_compression(j); + if (r < 0) + goto fail; + + break; + + case IMPORT_JOB_RUNNING: + + r = import_job_write_compressed(j, contents, sz); + if (r < 0) + goto fail; + + break; + + case IMPORT_JOB_DONE: + case IMPORT_JOB_FAILED: + r = -ESTALE; + goto fail; + + default: + assert_not_reached("Impossible state."); + } + + return sz; + +fail: + import_job_finish(j, r); + return 0; +} + +static size_t import_job_header_callback(void *contents, size_t size, size_t nmemb, void *userdata) { + ImportJob *j = userdata; + size_t sz = size * nmemb; + _cleanup_free_ char *length = NULL, *last_modified = NULL; + char *etag; + int r; + + assert(contents); + assert(j); + + if (j->state == IMPORT_JOB_DONE || j->state == IMPORT_JOB_FAILED) { + r = -ESTALE; + goto fail; + } + + assert(j->state == IMPORT_JOB_ANALYZING); + + r = curl_header_strdup(contents, sz, "ETag:", &etag); + if (r < 0) { + log_oom(); + goto fail; + } + if (r > 0) { + free(j->etag); + j->etag = etag; + + if (strv_contains(j->old_etags, j->etag)) { + log_info("Image already downloaded. 
Skipping download."); + import_job_finish(j, 0); + return sz; + } + + return sz; + } + + r = curl_header_strdup(contents, sz, "Content-Length:", &length); + if (r < 0) { + log_oom(); + goto fail; + } + if (r > 0) { + (void) safe_atou64(length, &j->content_length); + + if (j->content_length != (uint64_t) -1) { + char bytes[FORMAT_BYTES_MAX]; + + if (j->content_length > j->compressed_max) { + log_error("Content too large."); + r = -EFBIG; + goto fail; + } + + log_info("Downloading %s.", format_bytes(bytes, sizeof(bytes), j->content_length)); + } + + return sz; + } + + r = curl_header_strdup(contents, sz, "Last-Modified:", &last_modified); + if (r < 0) { + log_oom(); + goto fail; + } + if (r > 0) { + (void) curl_parse_http_time(last_modified, &j->mtime); + return sz; + } + + return sz; + +fail: + import_job_finish(j, r); + return 0; +} + +static int import_job_progress_callback(void *userdata, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow) { + ImportJob *j = userdata; + unsigned percent; + usec_t n; + + assert(j); + + if (dltotal <= 0) + return 0; + + percent = ((100 * dlnow) / dltotal); + n = now(CLOCK_MONOTONIC); + + if (n > j->last_status_usec + USEC_PER_SEC && + percent != j->progress_percent) { + char buf[FORMAT_TIMESPAN_MAX]; + + if (n - j->start_usec > USEC_PER_SEC && dlnow > 0) { + usec_t left, done; + + done = n - j->start_usec; + left = (usec_t) (((double) done * (double) dltotal) / dlnow) - done; + + log_info("Got %u%% of %s. %s left.", percent, j->url, format_timespan(buf, sizeof(buf), left, USEC_PER_SEC)); + } else + log_info("Got %u%% of %s.", percent, j->url); + + j->progress_percent = percent; + j->last_status_usec = n; + } + + return 0; +} + +int import_job_new(ImportJob **ret, const char *url, CurlGlue *glue, void *userdata) { + _cleanup_(import_job_unrefp) ImportJob *j = NULL; + + assert(url); + assert(glue); + assert(ret); + + j = new0(ImportJob, 1); + if (!j) + return -ENOMEM; + + j->state = IMPORT_JOB_INIT; + j->disk_fd = -1; + j->userdata = userdata; + j->glue = glue; + j->content_length = (uint64_t) -1; + j->start_usec = now(CLOCK_MONOTONIC); + j->compressed_max = j->uncompressed_max = 8LLU * 1024LLU * 1024LLU * 1024LLU; /* 8GB */ + + j->url = strdup(url); + if (!j->url) + return -ENOMEM; + + *ret = j; + j = NULL; + + return 0; +} + +int import_job_begin(ImportJob *j) { + int r; + + assert(j); + + if (j->state != IMPORT_JOB_INIT) + return -EBUSY; + + r = curl_glue_make(&j->curl, j->url, j); + if (r < 0) + return r; + + if (!strv_isempty(j->old_etags)) { + _cleanup_free_ char *cc = NULL, *hdr = NULL; + + cc = strv_join(j->old_etags, ", "); + if (!cc) + return -ENOMEM; + + hdr = strappend("If-None-Match: ", cc); + if (!hdr) + return -ENOMEM; + + j->request_header = curl_slist_new(hdr, NULL); + if (!j->request_header) + return -ENOMEM; + + if (curl_easy_setopt(j->curl, CURLOPT_HTTPHEADER, j->request_header) != CURLE_OK) + return -EIO; + } + + if (curl_easy_setopt(j->curl, CURLOPT_WRITEFUNCTION, import_job_write_callback) != CURLE_OK) + return -EIO; + + if (curl_easy_setopt(j->curl, CURLOPT_WRITEDATA, j) != CURLE_OK) + return -EIO; + + if (curl_easy_setopt(j->curl, CURLOPT_HEADERFUNCTION, import_job_header_callback) != CURLE_OK) + return -EIO; + + if (curl_easy_setopt(j->curl, CURLOPT_HEADERDATA, j) != CURLE_OK) + return -EIO; + + if (curl_easy_setopt(j->curl, CURLOPT_XFERINFOFUNCTION, import_job_progress_callback) != CURLE_OK) + return -EIO; + + if (curl_easy_setopt(j->curl, CURLOPT_XFERINFODATA, j) != CURLE_OK) + return -EIO; + + if 
(curl_easy_setopt(j->curl, CURLOPT_NOPROGRESS, 0) != CURLE_OK) + return -EIO; + + r = curl_glue_add(j->glue, j->curl); + if (r < 0) + return r; + + j->state = IMPORT_JOB_ANALYZING; + + return 0; +} diff --git a/src/import/import-job.h b/src/import/import-job.h new file mode 100644 index 000000000..843daa217 --- /dev/null +++ b/src/import/import-job.h @@ -0,0 +1,101 @@ +/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ + +#pragma once + +/*** + This file is part of systemd. + + Copyright 2015 Lennart Poettering + + systemd is free software; you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + systemd is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with systemd; If not, see . +***/ + +#include +#include + +#include "macro.h" +#include "curl-util.h" + +typedef struct ImportJob ImportJob; + +typedef void (*ImportJobFinished)(ImportJob *job); +typedef int (*ImportJobOpenDisk)(ImportJob *job); + +typedef enum ImportJobState { + IMPORT_JOB_INIT, + IMPORT_JOB_ANALYZING, /* Still reading into ->payload, to figure out what we have */ + IMPORT_JOB_RUNNING, /* Writing to destination */ + IMPORT_JOB_DONE, + IMPORT_JOB_FAILED, + _IMPORT_JOB_STATE_MAX, + _IMPORT_JOB_STATE_INVALID = -1, +} ImportJobState; + +typedef enum ImportJobCompression { + IMPORT_JOB_UNCOMPRESSED, + IMPORT_JOB_XZ, + IMPORT_JOB_GZIP, + _IMPORT_JOB_COMPRESSION_MAX, + _IMPORT_JOB_COMPRESSION_INVALID = -1, +} ImportJobCompression; + +struct ImportJob { + ImportJobState state; + int error; + + char *url; + + void *userdata; + ImportJobFinished on_finished; + ImportJobOpenDisk on_open_disk; + + CurlGlue *glue; + CURL *curl; + struct curl_slist *request_header; + + char *etag; + char **old_etags; + + uint64_t content_length; + uint64_t written_compressed; + uint64_t written_uncompressed; + + uint64_t uncompressed_max; + uint64_t compressed_max; + + uint8_t *payload; + size_t payload_size; + size_t payload_allocated; + + int disk_fd; + + usec_t mtime; + + ImportJobCompression compressed; + lzma_stream xz; + z_stream gzip; + + unsigned progress_percent; + usec_t start_usec; + usec_t last_status_usec; + + bool allow_sparse; +}; + +int import_job_new(ImportJob **job, const char *url, CurlGlue *glue, void *userdata); +ImportJob* import_job_unref(ImportJob *job); + +int import_job_begin(ImportJob *j); + +void import_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result); diff --git a/src/import/import-raw.c b/src/import/import-raw.c index 486157aa9..1fe27b693 100644 --- a/src/import/import-raw.c +++ b/src/import/import-raw.c @@ -30,6 +30,7 @@ #include "qcow2-util.h" #include "strv.h" #include "copy.h" +#include "import-util.h" #include "import-raw.h" typedef struct RawImportFile RawImportFile; @@ -691,17 +692,6 @@ static int raw_import_file_progress_callback(void *userdata, curl_off_t dltotal, return 0; } -static bool etag_is_valid(const char *etag) { - - if (!endswith(etag, "\"")) - return false; - - if (!startswith(etag, "\"") && !startswith(etag, "W/\"")) - return false; - - return true; -} - static int raw_import_file_find_old_etags(RawImportFile *f) { _cleanup_free_ char *escaped_url 
= NULL; _cleanup_closedir_ DIR *d = NULL; @@ -751,7 +741,7 @@ static int raw_import_file_find_old_etags(RawImportFile *f) { if (!u) return -ENOMEM; - if (!etag_is_valid(u)) { + if (!http_etag_is_valid(u)) { free(u); continue; } diff --git a/src/import/import-tar.c b/src/import/import-tar.c new file mode 100644 index 000000000..43227f61f --- /dev/null +++ b/src/import/import-tar.c @@ -0,0 +1,296 @@ +/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ + +/*** + This file is part of systemd. + + Copyright 2015 Lennart Poettering + + systemd is free software; you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + systemd is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with systemd; If not, see . +***/ + +#include +#include + +#include "hashmap.h" +#include "utf8.h" +#include "strv.h" +#include "copy.h" +#include "btrfs-util.h" +#include "util.h" +#include "macro.h" +#include "mkdir.h" +#include "curl-util.h" +#include "import-job.h" +#include "import-util.h" +#include "import-tar.h" + +struct TarImport { + sd_event *event; + CurlGlue *glue; + + char *image_root; + + ImportJob *tar_job; + + TarImportFinished on_finished; + void *userdata; + + bool finished; + + char *local; + bool force_local; + + pid_t tar_pid; + + char *temp_path; + char *final_path; +}; + +TarImport* tar_import_unref(TarImport *i) { + if (!i) + return NULL; + + if (i->tar_pid > 0) { + kill(i->tar_pid, SIGKILL); + wait_for_terminate(i->tar_pid, NULL); + } + + import_job_unref(i->tar_job); + + curl_glue_unref(i->glue); + sd_event_unref(i->event); + + if (i->temp_path) { + (void) btrfs_subvol_remove(i->temp_path); + (void) rm_rf_dangerous(i->temp_path, false, true, false); + } + + free(i->final_path); + free(i->image_root); + free(i->local); + + free(i); + + return NULL; +} + +int tar_import_new(TarImport **ret, sd_event *event, const char *image_root, TarImportFinished on_finished, void *userdata) { + _cleanup_(tar_import_unrefp) TarImport *i = NULL; + int r; + + assert(ret); + assert(event); + + i = new0(TarImport, 1); + if (!i) + return -ENOMEM; + + i->on_finished = on_finished; + i->userdata = userdata; + + i->image_root = strdup(image_root ?: "/var/lib/machines"); + if (!i->image_root) + return -ENOMEM; + + if (event) + i->event = sd_event_ref(event); + else { + r = sd_event_default(&i->event); + if (r < 0) + return r; + } + + r = curl_glue_new(&i->glue, i->event); + if (r < 0) + return r; + + i->glue->on_finished = import_job_curl_on_finished; + i->glue->userdata = i; + + *ret = i; + i = NULL; + + return 0; +} + +static void tar_import_job_on_finished(ImportJob *j) { + TarImport *i; + int r; + + assert(j); + assert(j->userdata); + + i = j->userdata; + + if (j->error != 0) { + r = j->error; + goto finish; + } + + /* This is invoked if either the download completed + * successfully, or the download was skipped because we + * already have the etag. 
*/ + + j->disk_fd = safe_close(j->disk_fd); + + if (i->tar_pid > 0) { + r = wait_for_terminate_and_warn("tar", i->tar_pid, true); + i->tar_pid = 0; + if (r < 0) + goto finish; + } + + if (i->temp_path) { + r = import_make_read_only(i->temp_path); + if (r < 0) + goto finish; + + if (rename(i->temp_path, i->final_path) < 0) { + r = log_error_errno(errno, "Failed to rename to final image name: %m"); + goto finish; + } + } + + if (i->local) { + if (!i->final_path) { + r = import_make_path(j->url, j->etag, i->image_root, ".tar-", NULL, &i->final_path); + if (r < 0) + goto finish; + } + + r = import_make_local_copy(i->final_path, i->image_root, i->local, i->force_local); + if (r < 0) + goto finish; + } + + r = 0; + +finish: + i->finished = true; + + if (i->on_finished) + i->on_finished(i, r, i->userdata); + else + sd_event_exit(i->event, r); +} + +static int tar_import_job_on_open_disk(ImportJob *j) { + _cleanup_close_pair_ int pipefd[2] = { -1 , -1 }; + TarImport *i; + int r; + + assert(j); + assert(j->userdata); + + i = j->userdata; + + r = import_make_path(j->url, j->etag, i->image_root, ".tar-", NULL, &i->final_path); + if (r < 0) + return log_oom(); + + r = tempfn_random(i->final_path, &i->temp_path); + if (r < 0) + return log_oom(); + + mkdir_parents_label(i->temp_path, 0700); + + r = btrfs_subvol_make(i->temp_path); + if (r == -ENOTTY) { + if (mkdir(i->temp_path, 0755) < 0) + return log_error_errno(errno, "Failed to create directory %s: %m", i->temp_path); + } else if (r < 0) + return log_error_errno(errno, "Failed to create subvolume %s: %m", i->temp_path); + + if (pipe2(pipefd, O_CLOEXEC) < 0) + return log_error_errno(errno, "Failed to create pipe for tar: %m"); + + i->tar_pid = fork(); + if (i->tar_pid < 0) + return log_error_errno(errno, "Failed to fork off tar: %m"); + if (i->tar_pid == 0) { + int null_fd; + + reset_all_signal_handlers(); + reset_signal_mask(); + assert_se(prctl(PR_SET_PDEATHSIG, SIGTERM) == 0); + + pipefd[1] = safe_close(pipefd[1]); + + if (dup2(pipefd[0], STDIN_FILENO) != STDIN_FILENO) { + log_error_errno(errno, "Failed to dup2() fd: %m"); + _exit(EXIT_FAILURE); + } + + if (pipefd[0] != STDIN_FILENO) + safe_close(pipefd[0]); + + null_fd = open("/dev/null", O_WRONLY|O_NOCTTY); + if (null_fd < 0) { + log_error_errno(errno, "Failed to open /dev/null: %m"); + _exit(EXIT_FAILURE); + } + + if (dup2(null_fd, STDOUT_FILENO) != STDOUT_FILENO) { + log_error_errno(errno, "Failed to dup2() fd: %m"); + _exit(EXIT_FAILURE); + } + + if (null_fd != STDOUT_FILENO) + safe_close(null_fd); + + execlp("tar", "tar", "--numeric-owner", "-C", i->temp_path, "-px", NULL); + _exit(EXIT_FAILURE); + } + + pipefd[0] = safe_close(pipefd[0]); + + j->disk_fd = pipefd[1]; + pipefd[1] = -1; + + return 0; +} + +int tar_import_pull(TarImport *i, const char *url, const char *local, bool force_local) { + int r; + + assert(i); + + if (i->tar_job) + return -EBUSY; + + if (!http_url_is_valid(url)) + return -EINVAL; + + if (local && !machine_name_is_valid(local)) + return -EINVAL; + + r = free_and_strdup(&i->local, local); + if (r < 0) + return r; + + i->force_local = force_local; + + r = import_job_new(&i->tar_job, url, i->glue, i); + if (r < 0) + return r; + + i->tar_job->on_finished = tar_import_job_on_finished; + i->tar_job->on_open_disk = tar_import_job_on_open_disk; + + r = import_find_old_etags(url, i->image_root, DT_DIR, ".tar-", NULL, &i->tar_job->old_etags); + if (r < 0) + return r; + + return import_job_begin(i->tar_job); +} diff --git a/src/import/import-tar.h b/src/import/import-tar.h new file 
mode 100644
index 000000000..6a7477fee
--- /dev/null
+++ b/src/import/import-tar.h
@@ -0,0 +1,36 @@
+/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
+
+#pragma once
+
+/***
+  This file is part of systemd.
+
+  Copyright 2015 Lennart Poettering
+
+  systemd is free software; you can redistribute it and/or modify it
+  under the terms of the GNU Lesser General Public License as published by
+  the Free Software Foundation; either version 2.1 of the License, or
+  (at your option) any later version.
+
+  systemd is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  Lesser General Public License for more details.
+
+  You should have received a copy of the GNU Lesser General Public License
+  along with systemd; If not, see <http://www.gnu.org/licenses/>.
+***/
+
+#include "sd-event.h"
+#include "macro.h"
+
+typedef struct TarImport TarImport;
+
+typedef void (*TarImportFinished)(TarImport *import, int error, void *userdata);
+
+int tar_import_new(TarImport **import, sd_event *event, const char *image_root, TarImportFinished on_finished, void *userdata);
+TarImport* tar_import_unref(TarImport *import);
+
+DEFINE_TRIVIAL_CLEANUP_FUNC(TarImport*, tar_import_unref);
+
+int tar_import_pull(TarImport *import, const char *url, const char *local, bool force_local);
diff --git a/src/import/import-util.c b/src/import/import-util.c
new file mode 100644
index 000000000..24c8ac1d1
--- /dev/null
+++ b/src/import/import-util.c
@@ -0,0 +1,201 @@
+/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
+
+/***
+  This file is part of systemd.
+
+  Copyright 2015 Lennart Poettering
+
+  systemd is free software; you can redistribute it and/or modify it
+  under the terms of the GNU Lesser General Public License as published by
+  the Free Software Foundation; either version 2.1 of the License, or
+  (at your option) any later version.
+
+  systemd is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  Lesser General Public License for more details.
+
+  You should have received a copy of the GNU Lesser General Public License
+  along with systemd; If not, see <http://www.gnu.org/licenses/>.
+***/ + +#include "util.h" +#include "strv.h" +#include "copy.h" +#include "btrfs-util.h" +#include "import-util.h" + +#define FILENAME_ESCAPE "/.#\"\'" + +bool http_etag_is_valid(const char *etag) { + if (!endswith(etag, "\"")) + return false; + + if (!startswith(etag, "\"") && !startswith(etag, "W/\"")) + return false; + + return true; +} + +int import_find_old_etags(const char *url, const char *image_root, int dt, const char *prefix, const char *suffix, char ***etags) { + _cleanup_free_ char *escaped_url = NULL; + _cleanup_closedir_ DIR *d = NULL; + _cleanup_strv_free_ char **l = NULL; + struct dirent *de; + int r; + + assert(url); + assert(etags); + + if (!image_root) + image_root = "/var/lib/machines"; + + escaped_url = xescape(url, FILENAME_ESCAPE); + if (!escaped_url) + return -ENOMEM; + + d = opendir(image_root); + if (!d) { + if (errno == ENOENT) { + *etags = NULL; + return 0; + } + + return -errno; + } + + FOREACH_DIRENT_ALL(de, d, return -errno) { + const char *a, *b; + char *u; + + if (de->d_type != DT_UNKNOWN && + de->d_type != dt) + continue; + + if (prefix) { + a = startswith(de->d_name, prefix); + if (!a) + continue; + } else + a = de->d_name; + + a = startswith(a, escaped_url); + if (!a) + continue; + + a = startswith(a, "."); + if (!a) + continue; + + if (suffix) { + b = endswith(de->d_name, suffix); + if (!b) + continue; + } else + b = strchr(de->d_name, 0); + + if (a >= b) + continue; + + u = cunescape_length(a, b - a); + if (!u) + return -ENOMEM; + + if (!http_etag_is_valid(u)) { + free(u); + continue; + } + + r = strv_consume(&l, u); + if (r < 0) + return r; + } + + *etags = l; + l = NULL; + + return 0; +} + +int import_make_local_copy(const char *final, const char *image_root, const char *local, bool force_local) { + const char *p; + int r; + + assert(final); + assert(local); + + if (!image_root) + image_root = "/var/lib/machines"; + + p = strappenda(image_root, "/", local); + + if (force_local) { + (void) btrfs_subvol_remove(p); + (void) rm_rf_dangerous(p, false, true, false); + } + + r = btrfs_subvol_snapshot(final, p, false, false); + if (r == -ENOTTY) { + r = copy_tree(final, p, false); + if (r < 0) + return log_error_errno(r, "Failed to copy image: %m"); + } else if (r < 0) + return log_error_errno(r, "Failed to create local image: %m"); + + log_info("Created new local image '%s'.", local); + + return 0; +} + +int import_make_read_only(const char *path) { + int r; + + r = btrfs_subvol_set_read_only(path, true); + if (r == -ENOTTY) { + struct stat st; + + r = stat(path, &st); + if (r < 0) + return log_error_errno(errno, "Failed to stat temporary image: %m"); + + if (chmod(path, st.st_mode & 0755) < 0) + return log_error_errno(errno, "Failed to chmod() final image: %m"); + + return 0; + } + if (r < 0) + return log_error_errno(r, "Failed to mark final image read-only: %m"); + + return 0; +} + +int import_make_path(const char *url, const char *etag, const char *image_root, const char *prefix, const char *suffix, char **ret) { + _cleanup_free_ char *escaped_url = NULL; + char *path; + + assert(url); + assert(ret); + + if (!image_root) + image_root = "/var/lib/machines"; + + escaped_url = xescape(url, FILENAME_ESCAPE); + if (!escaped_url) + return -ENOMEM; + + if (etag) { + _cleanup_free_ char *escaped_etag = NULL; + + escaped_etag = xescape(etag, FILENAME_ESCAPE); + if (!escaped_etag) + return -ENOMEM; + + path = strjoin(image_root, "/", strempty(prefix), escaped_url, ".", escaped_etag, strempty(suffix), NULL); + } else + path = strjoin(image_root, "/", 
strempty(prefix), escaped_url, strempty(suffix), NULL); + if (!path) + return -ENOMEM; + + *ret = path; + return 0; +} diff --git a/src/import/import-util.h b/src/import/import-util.h new file mode 100644 index 000000000..ad5ab502a --- /dev/null +++ b/src/import/import-util.h @@ -0,0 +1,31 @@ +/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ + +#pragma once + +/*** + This file is part of systemd. + + Copyright 2015 Lennart Poettering + + systemd is free software; you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + systemd is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with systemd; If not, see . +***/ + +#include + +bool http_etag_is_valid(const char *etag); + +int import_make_local_copy(const char *final, const char *root, const char *local, bool force_local); +int import_find_old_etags(const char *url, const char *root, int dt, const char *prefix, const char *suffix, char ***etags); +int import_make_read_only(const char *path); +int import_make_path(const char *url, const char *etag, const char *image_root, const char *prefix, const char *suffix, char **ret); diff --git a/src/import/import.c b/src/import/import.c index 9b10de555..c0fc22427 100644 --- a/src/import/import.c +++ b/src/import/import.c @@ -25,6 +25,8 @@ #include "event-util.h" #include "verbs.h" #include "build.h" +#include "machine-image.h" +#include "import-tar.h" #include "import-raw.h" #include "import-dkr.h" @@ -33,6 +35,144 @@ static const char *arg_image_root = "/var/lib/machines"; static const char* arg_dkr_index_url = DEFAULT_DKR_INDEX_URL; +static void on_tar_finished(TarImport *import, int error, void *userdata) { + sd_event *event = userdata; + assert(import); + + if (error == 0) + log_info("Operation completed successfully."); + else + log_error_errno(error, "Operation failed: %m"); + + sd_event_exit(event, error); +} + +static int url_final_component(const char *url, char **ret) { + const char *e, *p; + char *s; + + e = strchrnul(url, '?'); + + while (e > url && e[-1] == '/') + e--; + + p = e; + while (p > url && p[-1] != '/') + p--; + + if (e <= p) + return -EINVAL; + + s = strndup(p, e - p); + if (!s) + return -ENOMEM; + + *ret = s; + return 0; +} + +static int strip_tar_suffixes(const char *name, char **ret) { + const char *e; + char *s; + + e = endswith(name, ".tar"); + if (!e) + e = endswith(name, ".tar.gz"); + if (!e) + e = endswith(name, ".tar.xz"); + if (!e) + e = endswith(name, ".tgz"); + if (!e) + e = strchr(name, 0); + + if (e <= name) + return -EINVAL; + + s = strndup(name, e - name); + if (!s) + return -ENOMEM; + + *ret = s; + return 0; +} + +static int pull_tar(int argc, char *argv[], void *userdata) { + _cleanup_(tar_import_unrefp) TarImport *import = NULL; + _cleanup_event_unref_ sd_event *event = NULL; + const char *url, *local; + _cleanup_free_ char *l = NULL, *ll = NULL; + int r; + + url = argv[1]; + if (!http_url_is_valid(url)) { + log_error("URL '%s' is not valid.", url); + return -EINVAL; + } + + if (argc >= 3) + local = argv[2]; + else { + r = url_final_component(url, &l); + if (r < 0) + return log_error_errno(r, "Failed get final 
component of URL: %m"); + + local = l; + } + + if (isempty(local) || streq(local, "-")) + local = NULL; + + if (local) { + r = strip_tar_suffixes(local, &ll); + if (r < 0) + return log_oom(); + + local = ll; + + if (!machine_name_is_valid(local)) { + log_error("Local image name '%s' is not valid.", local); + return -EINVAL; + } + + if (!arg_force) { + r = image_find(local, NULL); + if (r < 0) + return log_error_errno(r, "Failed to check whether image '%s' exists: %m", local); + else if (r > 0) { + log_error_errno(EEXIST, "Image '%s' already exists.", local); + return -EEXIST; + } + } + + log_info("Pulling '%s', saving as '%s'.", url, local); + } else + log_info("Pulling '%s'.", url); + + r = sd_event_default(&event); + if (r < 0) + return log_error_errno(r, "Failed to allocate event loop: %m"); + + assert_se(sigprocmask_many(SIG_BLOCK, SIGTERM, SIGINT, -1) == 0); + sd_event_add_signal(event, NULL, SIGTERM, NULL, NULL); + sd_event_add_signal(event, NULL, SIGINT, NULL, NULL); + + r = tar_import_new(&import, event, arg_image_root, on_tar_finished, event); + if (r < 0) + return log_error_errno(r, "Failed to allocate importer: %m"); + + r = tar_import_pull(import, url, local, arg_force); + if (r < 0) + return log_error_errno(r, "Failed to pull image: %m"); + + r = sd_event_loop(event); + if (r < 0) + return log_error_errno(r, "Failed to run event loop: %m"); + + log_info("Exiting."); + + return 0; +} + static void on_raw_finished(RawImport *import, int error, void *userdata) { sd_event *event = userdata; assert(import); @@ -40,7 +180,7 @@ static void on_raw_finished(RawImport *import, int error, void *userdata) { if (error == 0) log_info("Operation completed successfully."); else - log_info_errno(error, "Operation failed: %m"); + log_error_errno(error, "Operation failed: %m"); sd_event_exit(event, error); } @@ -173,7 +313,7 @@ static void on_dkr_finished(DkrImport *import, int error, void *userdata) { if (error == 0) log_info("Operation completed successfully."); else - log_info_errno(error, "Operation failed: %m"); + log_error_errno(error, "Operation failed: %m"); sd_event_exit(event, error); } @@ -277,8 +417,9 @@ static int help(int argc, char *argv[], void *userdata) { " --image-root= Image root directory\n" " --dkr-index-url=URL Specify index URL to use for downloads\n\n" "Commands:\n" - " pull-dkr REMOTE [NAME] Download a DKR image\n" - " pull-raw URL [NAME] Download a RAW image\n", + " pull-tar URL Download a TAR image\n" + " pull-raw URL [NAME] Download a RAW image\n" + " pull-dkr REMOTE [NAME] Download a DKR image\n", program_invocation_short_name); return 0; @@ -350,8 +491,9 @@ static int import_main(int argc, char *argv[]) { static const Verb verbs[] = { { "help", VERB_ANY, VERB_ANY, 0, help }, - { "pull-dkr", 2, 3, 0, pull_dkr }, + { "pull-tar", 2, 3, 0, pull_tar }, { "pull-raw", 2, 3, 0, pull_raw }, + { "pull-dkr", 2, 3, 0, pull_dkr }, {} }; -- 2.30.2