1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2015 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
22 #include <sys/xattr.h>
25 #include "machine-pool.h"
28 /* Grow the /var/lib/machines directory after each 10MiB written */
29 #define PULL_GROW_INTERVAL_BYTES (UINT64_C(10) * UINT64_C(1024) * UINT64_C(1024))
/* Destructor for a PullJob: releases every resource the job owns.
 * NOTE(review): interior lines are elided in this view — the usual NULL
 * check, remaining frees (url, etag, payload, checksum, ...) and the
 * `return NULL;` tail are not visible here; confirm against full source. */
31 PullJob* pull_job_unref(PullJob *j) {
/* Detach the CURL easy handle from the glue event loop and free it. */
35         curl_glue_remove_and_free(j->glue, j->curl);
/* Free the custom request-header list (built in pull_job_begin()). */
36         curl_slist_free_all(j->request_header);
/* Close the destination file descriptor, if one was opened. */
38         safe_close(j->disk_fd);
/* Release decompressor state. */
40         import_compress_free(&j->compress);
/* Close the libgcrypt digest context only if checksumming was enabled. */
42         if (j->checksum_context)
43                 gcry_md_close(j->checksum_context);
/* Free the list of previously seen ETags. */
47         strv_free(j->old_etags);
/* Transition the job to its terminal state (DONE on ret == 0, FAILED
 * otherwise) and report the outcome. Idempotent: returns early if the job
 * already finished. NOTE(review): the early `return`, the success/failure
 * branch structure and the on_finished callback invocation are elided in
 * this view — hedged reading based on the visible assignments. */
56 static void pull_job_finish(PullJob *j, int ret) {
/* Already in a terminal state — nothing to do (elided return follows). */
59         if (j->state == PULL_JOB_DONE ||
60             j->state == PULL_JOB_FAILED)
/* Success path: mark done and force progress display to 100%. */
64                 j->state = PULL_JOB_DONE;
65                 j->progress_percent = 100;
66                 log_info("Download of %s complete.", j->url);
/* Failure path (presumably the `else` branch — elided here). */
68                 j->state = PULL_JOB_FAILED;
/* libcurl completion callback, invoked by the CurlGlue layer when a
 * transfer ends. Validates the transfer result and HTTP status, verifies
 * the download completed, finalizes the optional SHA256 checksum, fixes up
 * the destination file (size, xattrs, timestamps) and finishes the job.
 * NOTE(review): many error-path `goto finish`-style lines and local
 * declarations are elided in this sampled view. */
76 void pull_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {
/* Recover the PullJob pointer stashed in the easy handle's PRIVATE slot. */
82         if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, &j) != CURLE_OK)
/* Ignore callbacks for unknown or already-finished jobs. */
85         if (!j || j->state == PULL_JOB_DONE || j->state == PULL_JOB_FAILED)
/* Transport-level failure (DNS, TLS, connection reset, ...). */
88         if (result != CURLE_OK) {
89                 log_error("Transfer failed: %s", curl_easy_strerror(result));
/* Inspect the HTTP response code. */
94         code = curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status);
95         if (code != CURLE_OK) {
96                 log_error("Failed to retrieve response code: %s", curl_easy_strerror(code));
/* 304 Not Modified: our If-None-Match ETag matched — nothing to download. */
99         } else if (status == 304) {
100                 log_info("Image already downloaded. Skipping download.");
101                 j->etag_exists = true;
/* Any 3xx/4xx/5xx (other than 304) is treated as a hard failure. */
104         } else if (status >= 300) {
105                 log_error("HTTP request to %s failed with code %li.", j->url, status);
/* 1xx should never reach completion. */
108         } else if (status < 200) {
109                 log_error("HTTP request to %s finished with unexpected code %li.", j->url, status);
/* Body callbacks must have moved us to RUNNING; otherwise the connection
 * died before any payload arrived. */
114         if (j->state != PULL_JOB_RUNNING) {
115                 log_error("Premature connection termination.");
/* If the server announced a Content-Length, the compressed byte count we
 * actually consumed must match it exactly. */
120         if (j->content_length != (uint64_t) -1 &&
121             j->content_length != j->written_compressed) {
122                 log_error("Download truncated.");
/* Finalize the SHA256 digest over the compressed stream, if enabled. */
127         if (j->checksum_context) {
130                 k = gcry_md_read(j->checksum_context, GCRY_MD_SHA256);
132                         log_error("Failed to get checksum.");
/* Store the digest as a hex string for later verification by the caller. */
137                 j->checksum = hexmem(k, gcry_md_get_algo_dlen(GCRY_MD_SHA256));
143                 log_debug("SHA256 of %s is %s.", j->url, j->checksum);
146         if (j->disk_fd >= 0 && j->allow_sparse) {
147                 /* Make sure the file size is right, in case the file was
148                  * sparse and we just seeked for the last part */
150                 if (ftruncate(j->disk_fd, j->written_uncompressed) < 0) {
151                         log_error_errno(errno, "Failed to truncate file: %m");
/* Best-effort metadata: record source ETag and URL as user xattrs;
 * failures are deliberately ignored ((void) casts). */
157                 (void) fsetxattr(j->disk_fd, "user.source_etag", j->etag, strlen(j->etag), 0);
159                 (void) fsetxattr(j->disk_fd, "user.source_url", j->url, strlen(j->url), 0);
/* Propagate the server-reported Last-Modified time onto the file
 * (ut[1] setup elided in this view — presumably mirrors ut[0]). */
162                         struct timespec ut[2];
164                         timespec_store(&ut[0], j->mtime);
166                         (void) futimens(j->disk_fd, ut);
/* Also set the birth/creation time where the filesystem supports it. */
168                         (void) fd_setcrtime(j->disk_fd, j->mtime);
/* Common exit: r carries 0 or a negative errno (set on elided paths). */
175         pull_job_finish(j, r);
/* Sink for decompressed payload bytes (invoked by import_uncompress()).
 * Writes to the destination fd when one is open, otherwise buffers into
 * the in-memory payload array. Returns 0 on success, negative errno on
 * failure (return statements elided in this view). */
178 static int pull_job_write_uncompressed(const void *p, size_t sz, void *userdata) {
179         PullJob *j = userdata;
/* Unsigned-wraparound check: a + sz < a detects uint64 overflow safely
 * (well-defined for unsigned arithmetic, unlike signed overflow). */
188         if (j->written_uncompressed + sz < j->written_uncompressed) {
189                 log_error("File too large, overflow");
/* Enforce the configured decompressed-size ceiling (decompression-bomb
 * protection). */
193         if (j->written_uncompressed + sz > j->uncompressed_max) {
194                 log_error("File overly large, refusing");
198         if (j->disk_fd >= 0) {
/* Periodically grow /var/lib/machines while streaming to disk (see
 * PULL_GROW_INTERVAL_BYTES above). */
200                 if (j->grow_machine_directory && j->written_since_last_grow >= PULL_GROW_INTERVAL_BYTES) {
201                         j->written_since_last_grow = 0;
202                         grow_machine_directory();
/* Sparse-aware write when supported, plain write otherwise (the
 * branch structure between these two calls is elided here). */
206                         n = sparse_write(j->disk_fd, p, sz, 64);
208                         n = write(j->disk_fd, p, sz);
210                         log_error_errno(errno, "Failed to write file: %m");
/* Treat a short write as an error rather than retrying. */
213                 if ((size_t) n < sz) {
214                         log_error("Short write");
/* No fd: accumulate into the growable in-memory payload buffer. */
219                 if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz))
222                 memcpy(j->payload + j->payload_size, p, sz);
223                 j->payload_size += sz;
/* Book-keeping for size limits and the directory-grow heuristic. */
226         j->written_uncompressed += sz;
227         j->written_since_last_grow += sz;
/* Feed a chunk of raw (possibly compressed) wire bytes into the pipeline:
 * bounds checks, checksum update, then decompression via
 * pull_job_write_uncompressed(). Returns 0 or negative errno (error
 * returns elided in this view). */
232 static int pull_job_write_compressed(PullJob *j, void *p, size_t sz) {
/* Unsigned-wraparound overflow guard, same idiom as the uncompressed path. */
241         if (j->written_compressed + sz < j->written_compressed) {
242                 log_error("File too large, overflow");
/* Enforce the compressed-size ceiling. */
246         if (j->written_compressed + sz > j->compressed_max) {
247                 log_error("File overly large, refusing.");
/* If the server declared a Content-Length, never accept more than that. */
251         if (j->content_length != (uint64_t) -1 &&
252             j->written_compressed + sz > j->content_length) {
253                 log_error("Content length incorrect.");
/* The SHA256 checksum is computed over the *compressed* stream. */
257         if (j->checksum_context)
258                 gcry_md_write(j->checksum_context, p, sz);
/* Decompress; decompressed bytes are delivered to the callback. */
260         r = import_uncompress(&j->compress, p, sz, pull_job_write_uncompressed, j);
264         j->written_compressed += sz;
/* Prepare the output destination: let the owner open/provide disk_fd via
 * the on_open_disk hook, probe sparse-file support, and set up the
 * optional SHA256 context. Returns 0 or negative errno. */
269 static int pull_job_open_disk(PullJob *j) {
274         if (j->on_open_disk) {
275                 r = j->on_open_disk(j);
280         if (j->disk_fd >= 0) {
281                 /* Check if we can do sparse files */
/* NOTE(review): arguments look swapped — POSIX is lseek(fd, offset,
 * whence). This only works because SEEK_SET == 0, so both orderings
 * mean lseek(fd, 0, SEEK_SET); should be fixed for clarity upstream. */
283                 if (lseek(j->disk_fd, SEEK_SET, 0) == 0)
284                         j->allow_sparse = true;
/* Seek failed for a real reason (errno path; surrounding branch elided). */
287                                 return log_error_errno(errno, "Failed to seek on file descriptor: %m");
/* fd is not seekable (e.g. a pipe) — fall back to plain writes. */
289                         j->allow_sparse = false;
/* Lazily create the libgcrypt SHA256 context only when checksumming
 * was requested. */
293         if (j->calc_checksum) {
294                 if (gcry_md_open(&j->checksum_context, GCRY_MD_SHA256, 0) != 0) {
295                         log_error("Failed to initialize hash context.");
/* Called once enough initial payload has been buffered: detect the
 * compression format, open the output, then replay the buffered bytes
 * through the normal compressed-write path. Returns 0 or negative errno. */
303 static int pull_job_detect_compression(PullJob *j) {
304         _cleanup_free_ uint8_t *stub = NULL;
/* Sniff the magic bytes accumulated in j->payload. */
311         r = import_uncompress_detect(&j->compress, j->payload, j->payload_size);
313                 return log_error_errno(r, "Failed to initialize compressor: %m");
317         log_debug("Stream is compressed: %s", import_compress_type_to_string(j->compress.type));
/* Destination can only be opened once the format is known. */
319         r = pull_job_open_disk(j);
323         /* Now, take the payload we read so far, and decompress it */
/* Ownership of the buffered bytes moves into `stub` (the transfer of
 * j->payload into stub is elided in this view); the payload buffer is
 * reset so pull_job_write_uncompressed() can reuse it if needed. */
325         stub_size = j->payload_size;
329         j->payload_allocated = 0;
/* From here on, write callbacks stream directly instead of buffering. */
331         j->state = PULL_JOB_RUNNING;
/* Replay the sniffed prefix through the regular pipeline. */
333         r = pull_job_write_compressed(j, stub, stub_size);
/* libcurl CURLOPT_WRITEFUNCTION callback: receives body data. While
 * ANALYZING, buffers bytes until the compression format can be detected;
 * while RUNNING, streams them straight into the pipeline. Per the curl
 * contract, returning a value != size*nmemb aborts the transfer (the
 * success/fail return statements are elided in this view). */
340 static size_t pull_job_write_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
341         PullJob *j = userdata;
342         size_t sz = size * nmemb;
350         case PULL_JOB_ANALYZING:
351                 /* Let's first check what it actually is */
/* Accumulate into the payload buffer until detection can run. */
353                 if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz)) {
358                 memcpy(j->payload + j->payload_size, contents, sz);
359                 j->payload_size += sz;
/* May flip state to RUNNING and replay the buffered prefix. */
361                 r = pull_job_detect_compression(j);
367         case PULL_JOB_RUNNING:
/* Steady state: hand the chunk directly to the pipeline. */
369                 r = pull_job_write_compressed(j, contents, sz);
/* Data arriving for a finished/failed job is a logic error. */
376         case PULL_JOB_FAILED:
381                 assert_not_reached("Impossible state.");
/* Error path: mark the job failed with the captured errno-style code. */
387         pull_job_finish(j, r);
/* libcurl CURLOPT_HEADERFUNCTION callback: parses one response header
 * line at a time. Extracts ETag (for cache short-circuiting),
 * Content-Length (for size limits/progress) and Last-Modified (for file
 * timestamps), then forwards the raw header to the optional on_header
 * hook. Per the curl contract the return value must equal sz on success
 * (return statements elided in this view). */
391 static size_t pull_job_header_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
392         PullJob *j = userdata;
393         size_t sz = size * nmemb;
394         _cleanup_free_ char *length = NULL, *last_modified = NULL;
/* Headers for an already-finished job are ignored. */
401         if (j->state == PULL_JOB_DONE || j->state == PULL_JOB_FAILED) {
/* Headers only arrive before any body data is processed. */
406         assert(j->state == PULL_JOB_ANALYZING);
408         r = curl_header_strdup(contents, sz, "ETag:", &etag);
/* If the server's ETag matches one we already have, skip the download
 * entirely and finish successfully. */
417                 if (strv_contains(j->old_etags, j->etag)) {
418                         log_info("Image already downloaded. Skipping download.");
419                         j->etag_exists = true;
420                         pull_job_finish(j, 0);
427         r = curl_header_strdup(contents, sz, "Content-Length:", &length);
/* Parse errors are deliberately ignored; content_length stays -1. */
433                 (void) safe_atou64(length, &j->content_length);
435                 if (j->content_length != (uint64_t) -1) {
436                         char bytes[FORMAT_BYTES_MAX];
/* Reject up front anything larger than the configured ceiling. */
438                         if (j->content_length > j->compressed_max) {
439                                 log_error("Content too large.");
444                         log_info("Downloading %s for %s.", format_bytes(bytes, sizeof(bytes), j->content_length), j->url);
450         r = curl_header_strdup(contents, sz, "Last-Modified:", &last_modified);
/* Best-effort: an unparsable date simply leaves mtime unset. */
456                 (void) curl_parse_http_time(last_modified, &j->mtime);
/* Give the job owner a chance to inspect every raw header line. */
461                 r = j->on_header(j, contents, sz);
/* Error path: fail the job with the captured code. */
469         pull_job_finish(j, r);
/* libcurl CURLOPT_XFERINFOFUNCTION callback: rate-limited progress
 * logging (at most once per second, and only when the percentage
 * changed). Returning 0 lets the transfer continue.
 * NOTE(review): the dltotal <= 0 guard that must precede the division
 * below is elided in this sampled view — confirm it exists upstream. */
473 static int pull_job_progress_callback(void *userdata, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow) {
474         PullJob *j = userdata;
483         percent = ((100 * dlnow) / dltotal);
484         n = now(CLOCK_MONOTONIC);
/* Throttle: at least one second since the last report, and the visible
 * percentage actually moved (third condition elided). */
486         if (n > j->last_status_usec + USEC_PER_SEC &&
487             percent != j->progress_percent &&
489                 char buf[FORMAT_TIMESPAN_MAX];
/* With >1s of history and some data received we can estimate ETA
 * and throughput by linear extrapolation. */
491                 if (n - j->start_usec > USEC_PER_SEC && dlnow > 0) {
492                         char y[FORMAT_BYTES_MAX];
495                         done = n - j->start_usec;
/* left = projected total time minus elapsed time. */
496                         left = (usec_t) (((double) done * (double) dltotal) / dlnow) - done;
498                         log_info("Got %u%% of %s. %s left at %s/s.",
501                                  format_timespan(buf, sizeof(buf), left, USEC_PER_SEC),
502                                  format_bytes(y, sizeof(y), (uint64_t) ((double) dlnow / ((double) done / (double) USEC_PER_SEC))));
/* Too early for a meaningful rate estimate: percentage only. */
504                         log_info("Got %u%% of %s.", percent, j->url);
506                 j->progress_percent = percent;
507                 j->last_status_usec = n;
516 int pull_job_new(PullJob **ret, const char *url, CurlGlue *glue, void *userdata) {
517 _cleanup_(pull_job_unrefp) PullJob *j = NULL;
523 j = new0(PullJob, 1);
527 j->state = PULL_JOB_INIT;
529 j->userdata = userdata;
531 j->content_length = (uint64_t) -1;
532 j->start_usec = now(CLOCK_MONOTONIC);
533 j->compressed_max = j->uncompressed_max = 8LLU * 1024LLU * 1024LLU * 1024LLU; /* 8GB */
535 j->url = strdup(url);
/* Start the transfer: create the CURL easy handle, attach conditional
 * ETag headers, wire up all callbacks, register with the glue event loop
 * and move the job from INIT to ANALYZING. Returns 0 or negative errno
 * (most error returns are elided in this sampled view). */
545 int pull_job_begin(PullJob *j) {
/* A job can only be started once. */
550         if (j->state != PULL_JOB_INIT)
/* Pre-grow /var/lib/machines before any data arrives. */
553         if (j->grow_machine_directory)
554                 grow_machine_directory();
556         r = curl_glue_make(&j->curl, j->url, j);
/* If we have cached ETags, send them as If-None-Match so the server can
 * answer 304 and we skip the download. */
560         if (!strv_isempty(j->old_etags)) {
561                 _cleanup_free_ char *cc = NULL, *hdr = NULL;
563                 cc = strv_join(j->old_etags, ", ");
567                 hdr = strappend("If-None-Match: ", cc);
/* Either start a fresh slist or append to the existing one; on append
 * the head pointer is updated only after success, so the old list stays
 * valid if curl_slist_append fails. */
571                 if (!j->request_header) {
572                         j->request_header = curl_slist_new(hdr, NULL);
573                         if (!j->request_header)
576                         struct curl_slist *l;
578                         l = curl_slist_append(j->request_header, hdr);
582                         j->request_header = l;
586         if (j->request_header) {
587                 if (curl_easy_setopt(j->curl, CURLOPT_HTTPHEADER, j->request_header) != CURLE_OK)
/* Body data -> pull_job_write_callback(j). */
591         if (curl_easy_setopt(j->curl, CURLOPT_WRITEFUNCTION, pull_job_write_callback) != CURLE_OK)
594         if (curl_easy_setopt(j->curl, CURLOPT_WRITEDATA, j) != CURLE_OK)
/* Header lines -> pull_job_header_callback(j). */
597         if (curl_easy_setopt(j->curl, CURLOPT_HEADERFUNCTION, pull_job_header_callback) != CURLE_OK)
600         if (curl_easy_setopt(j->curl, CURLOPT_HEADERDATA, j) != CURLE_OK)
/* Progress reports -> pull_job_progress_callback(j); NOPROGRESS must be
 * cleared or libcurl never invokes the xferinfo callback. */
603         if (curl_easy_setopt(j->curl, CURLOPT_XFERINFOFUNCTION, pull_job_progress_callback) != CURLE_OK)
606         if (curl_easy_setopt(j->curl, CURLOPT_XFERINFODATA, j) != CURLE_OK)
609         if (curl_easy_setopt(j->curl, CURLOPT_NOPROGRESS, 0) != CURLE_OK)
/* Hand the configured handle to the event loop; transfer starts there. */
612         r = curl_glue_add(j->glue, j->curl);
/* First state transition; headers/body callbacks advance it further. */
616         j->state = PULL_JOB_ANALYZING;