1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2015 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
22 #include <sys/xattr.h>
25 #include "machine-pool.h"
/* Destructor for a PullJob: releases every resource the job owns — the
 * curl easy handle (detached from the glue event loop), the request
 * header list, the output fd, the decompressor state, the gcrypt hash
 * context and the recorded ETag list.
 * NOTE(review): this listing is sampled; the usual NULL guard, string
 * frees and the final free(j)/return are in elided lines. */
PullJob* pull_job_unref(PullJob *j) {
        /* Unregister the easy handle from the event loop and destroy it */
        curl_glue_remove_and_free(j->glue, j->curl);
        curl_slist_free_all(j->request_header);

        /* safe_close() tolerates negative fds, no guard needed */
        safe_close(j->disk_fd);

        import_compress_free(&j->compress);

        /* Only close the hash context if one was actually opened */
        if (j->checksum_context)
                gcry_md_close(j->checksum_context);

        strv_free(j->old_etags);
/* Moves the job into a terminal state: DONE on success (ret == 0),
 * FAILED otherwise, and logs accordingly. Idempotent — a job that is
 * already DONE or FAILED is left untouched.
 * NOTE(review): sampled listing; the branch structure and the
 * on_finished callback invocation are in elided lines. */
static void pull_job_finish(PullJob *j, int ret) {

        /* Already finished? Then do nothing. */
        if (j->state == PULL_JOB_DONE ||
            j->state == PULL_JOB_FAILED)

        /* Success path */
        j->state = PULL_JOB_DONE;
        j->progress_percent = 100;
        log_info("Download of %s complete.", j->url);

        /* Failure path (ret != 0) */
        j->state = PULL_JOB_FAILED;
/* curl completion callback, invoked by the glue layer when the transfer
 * behind 'curl' ended (successfully or not). Validates the curl result
 * and the HTTP status code, verifies the received length and checksum,
 * fixes up the output file (size, source xattrs, timestamps), and
 * finally marks the job finished.
 * NOTE(review): sampled listing; error-path returns/gotos between the
 * checks below are in elided lines. */
void pull_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {

        /* Map the easy handle back to its owning PullJob */
        if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, &j) != CURLE_OK)

        /* Ignore completions for jobs already in a terminal state */
        if (!j || j->state == PULL_JOB_DONE || j->state == PULL_JOB_FAILED)

        if (result != CURLE_OK) {
                log_error("Transfer failed: %s", curl_easy_strerror(result));

        code = curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status);
        if (code != CURLE_OK) {
                log_error("Failed to retrieve response code: %s", curl_easy_strerror(code));
        } else if (status == 304) {
                /* 304 Not Modified: the ETag we sent is still current */
                log_info("Image already downloaded. Skipping download.");
                j->etag_exists = true;
        } else if (status >= 300) {
                log_error("HTTP request to %s failed with code %li.", j->url, status);
        } else if (status < 200) {
                log_error("HTTP request to %s finished with unexpected code %li.", j->url, status);

        /* Transfer ended before payload processing ever started */
        if (j->state != PULL_JOB_RUNNING) {
                log_error("Premature connection termination.");

        /* If the server announced a Content-Length, verify we received it all */
        if (j->content_length != (uint64_t) -1 &&
            j->content_length != j->written_compressed) {
                log_error("Download truncated.");

        if (j->checksum_context) {
                /* Finalize the SHA256 digest and keep it as a hex string */
                k = gcry_md_read(j->checksum_context, GCRY_MD_SHA256);
                log_error("Failed to get checksum.");

                j->checksum = hexmem(k, gcry_md_get_algo_dlen(GCRY_MD_SHA256));

                log_debug("SHA256 of %s is %s.", j->url, j->checksum);

        if (j->disk_fd >= 0 && j->allow_sparse) {
                /* Make sure the file size is right, in case the file was
                 * sparse and we just seeked for the last part */
                if (ftruncate(j->disk_fd, j->written_uncompressed) < 0) {
                        r = log_error_errno(errno, "Failed to truncate file: %m");

        /* Best effort: record the source ETag and URL as extended attributes */
        (void) fsetxattr(j->disk_fd, "user.source_etag", j->etag, strlen(j->etag), 0);
        (void) fsetxattr(j->disk_fd, "user.source_url", j->url, strlen(j->url), 0);

        struct timespec ut[2];

        /* Propagate the server-reported mtime onto the local file */
        timespec_store(&ut[0], j->mtime);
        (void) futimens(j->disk_fd, ut);
        (void) fd_setcrtime(j->disk_fd, j->mtime);

        pull_job_finish(j, r);
/* Sink for decompressed data (called back by import_uncompress()):
 * writes 'sz' bytes at 'p' either to the output fd or, if no fd is
 * open, appends them to the in-memory payload buffer. Enforces the
 * uncompressed size cap.
 * NOTE(review): sampled listing; the branch selecting sparse_write()
 * vs plain write() and the error returns are in elided lines. */
static int pull_job_write_uncompressed(const void *p, size_t sz, void *userdata) {
        PullJob *j = userdata;

        /* Guard against the 64-bit byte counter wrapping around */
        if (j->written_uncompressed + sz < j->written_uncompressed) {
                log_error("File too large, overflow");

        if (j->written_uncompressed + sz > j->uncompressed_max) {
                log_error("File overly large, refusing");

        if (j->disk_fd >= 0) {

                /* Periodically grow the machine pool while streaming to disk */
                if (j->grow_machine_directory && j->written_since_last_grow >= GROW_INTERVAL_BYTES) {
                        j->written_since_last_grow = 0;
                        grow_machine_directory();

                /* 64: presumably the zero-run threshold for hole punching — confirm
                 * against sparse_write()'s definition */
                n = sparse_write(j->disk_fd, p, sz, 64);
                n = write(j->disk_fd, p, sz);
                        return log_error_errno(errno, "Failed to write file: %m");
                if ((size_t) n < sz) {
                        log_error("Short write");

                /* No fd: buffer in memory instead */
                if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz))

                memcpy(j->payload + j->payload_size, p, sz);
                j->payload_size += sz;

        j->written_uncompressed += sz;
        j->written_since_last_grow += sz;
/* Sink for raw (still compressed) bytes arriving from curl: enforces
 * the compressed size cap and the announced Content-Length, feeds the
 * checksum over the compressed stream, then pushes the data through
 * the decompressor into pull_job_write_uncompressed(). */
static int pull_job_write_compressed(PullJob *j, void *p, size_t sz) {

        /* Guard against the 64-bit byte counter wrapping around */
        if (j->written_compressed + sz < j->written_compressed) {
                log_error("File too large, overflow");

        if (j->written_compressed + sz > j->compressed_max) {
                log_error("File overly large, refusing.");

        /* More data than the server announced? */
        if (j->content_length != (uint64_t) -1 &&
            j->written_compressed + sz > j->content_length) {
                log_error("Content length incorrect.");

        /* Note: the checksum covers the compressed byte stream */
        if (j->checksum_context)
                gcry_md_write(j->checksum_context, p, sz);

        r = import_uncompress(&j->compress, p, sz, pull_job_write_uncompressed, j);

        j->written_compressed += sz;
263 static int pull_job_open_disk(PullJob *j) {
268 if (j->on_open_disk) {
269 r = j->on_open_disk(j);
274 if (j->disk_fd >= 0) {
275 /* Check if we can do sparse files */
277 if (lseek(j->disk_fd, SEEK_SET, 0) == 0)
278 j->allow_sparse = true;
281 return log_error_errno(errno, "Failed to seek on file descriptor: %m");
283 j->allow_sparse = false;
287 if (j->calc_checksum) {
288 if (gcry_md_open(&j->checksum_context, GCRY_MD_SHA256, 0) != 0) {
289 log_error("Failed to initialize hash context.");
/* Called once enough payload has been buffered to sniff the stream's
 * compression format: initializes the decompressor, opens the output,
 * then replays the buffered bytes through the normal compressed-write
 * path and switches the job to RUNNING.
 * NOTE(review): sampled listing; the transfer of the payload buffer
 * into 'stub' and the error returns are in elided lines. */
static int pull_job_detect_compression(PullJob *j) {
        _cleanup_free_ uint8_t *stub = NULL;

        r = import_uncompress_detect(&j->compress, j->payload, j->payload_size);
        return log_error_errno(r, "Failed to initialize compressor: %m");

        log_debug("Stream is compressed: %s", import_compress_type_to_string(j->compress.type));

        r = pull_job_open_disk(j);

        /* Now, take the payload we read so far, and decompress it */
        stub_size = j->payload_size;
        /* Ownership of the payload buffer presumably moves to 'stub' above —
         * payload bookkeeping is reset here */
        j->payload_allocated = 0;

        j->state = PULL_JOB_RUNNING;

        r = pull_job_write_compressed(j, stub, stub_size);
/* CURLOPT_WRITEFUNCTION: receives body data from curl. While the job
 * is ANALYZING, bytes are buffered until the compression format can be
 * detected; once RUNNING, they stream straight into
 * pull_job_write_compressed().
 * NOTE(review): sampled listing; the switch statement scaffolding and
 * per-case returns are in elided lines. */
static size_t pull_job_write_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
        PullJob *j = userdata;
        size_t sz = size * nmemb;

        case PULL_JOB_ANALYZING:
                /* Let's first check what it actually is */

                if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz)) {

                memcpy(j->payload + j->payload_size, contents, sz);
                j->payload_size += sz;

                r = pull_job_detect_compression(j);

        case PULL_JOB_RUNNING:

                r = pull_job_write_compressed(j, contents, sz);

        case PULL_JOB_FAILED:

                assert_not_reached("Impossible state.");

        pull_job_finish(j, r);
/* CURLOPT_HEADERFUNCTION: parses the ETag, Content-Length and
 * Last-Modified response headers, short-circuits the download when the
 * ETag matches one we already have, and forwards headers to the user's
 * on_header callback.
 * NOTE(review): sampled listing; per-header match guards and error
 * returns are in elided lines. */
static size_t pull_job_header_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
        PullJob *j = userdata;
        size_t sz = size * nmemb;
        _cleanup_free_ char *length = NULL, *last_modified = NULL;

        /* Terminal jobs ignore any further headers */
        if (j->state == PULL_JOB_DONE || j->state == PULL_JOB_FAILED) {

        /* Headers must all arrive before any payload is processed */
        assert(j->state == PULL_JOB_ANALYZING);

        r = curl_header_strdup(contents, sz, "ETag:", &etag);

        /* Our cached copy is still current — skip the body entirely */
        if (strv_contains(j->old_etags, j->etag)) {
                log_info("Image already downloaded. Skipping download.");
                j->etag_exists = true;
                pull_job_finish(j, 0);

        r = curl_header_strdup(contents, sz, "Content-Length:", &length);

        /* Best effort: an unparsable value leaves content_length untouched */
        (void) safe_atou64(length, &j->content_length);

        if (j->content_length != (uint64_t) -1) {
                char bytes[FORMAT_BYTES_MAX];

                /* Reject before downloading anything if the announced size
                 * already exceeds our cap */
                if (j->content_length > j->compressed_max) {
                        log_error("Content too large.");

                log_info("Downloading %s for %s.", format_bytes(bytes, sizeof(bytes), j->content_length), j->url);

        r = curl_header_strdup(contents, sz, "Last-Modified:", &last_modified);

        /* Best effort: parse failures are ignored */
        (void) curl_parse_http_time(last_modified, &j->mtime);

        r = j->on_header(j, contents, sz);

        pull_job_finish(j, r);
/* CURLOPT_XFERINFOFUNCTION: periodic progress callback. Logs download
 * progress at most once per second and only when the percentage
 * changed, including an ETA and transfer rate once enough time has
 * elapsed to estimate them.
 * NOTE(review): sampled listing; guards (e.g. dltotal == 0) and the
 * return are in elided lines. */
static int pull_job_progress_callback(void *userdata, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow) {
        PullJob *j = userdata;

        percent = ((100 * dlnow) / dltotal);
        n = now(CLOCK_MONOTONIC);

        /* Rate-limit output: at most once a second, and only on change */
        if (n > j->last_status_usec + USEC_PER_SEC &&
            percent != j->progress_percent &&
                char buf[FORMAT_TIMESPAN_MAX];

                /* After the first second, with data flowing, we can estimate
                 * remaining time and throughput */
                if (n - j->start_usec > USEC_PER_SEC && dlnow > 0) {
                        char y[FORMAT_BYTES_MAX];

                        done = n - j->start_usec;
                        /* ETA = elapsed scaled by total/received, minus elapsed */
                        left = (usec_t) (((double) done * (double) dltotal) / dlnow) - done;

                        log_info("Got %u%% of %s. %s left at %s/s.",
                                 format_timespan(buf, sizeof(buf), left, USEC_PER_SEC),
                                 format_bytes(y, sizeof(y), (uint64_t) ((double) dlnow / ((double) done / (double) USEC_PER_SEC))));
                        log_info("Got %u%% of %s.", percent, j->url);

                j->progress_percent = percent;
                j->last_status_usec = n;
/* Allocates a new PullJob for 'url' in the INIT state, with unknown
 * content length and an 8GB default cap on both compressed and
 * uncompressed size. On success the job is returned via 'ret'.
 * NOTE(review): sampled listing; OOM checks, the glue assignment and
 * the final *ret handoff are in elided lines. */
int pull_job_new(PullJob **ret, const char *url, CurlGlue *glue, void *userdata) {
        _cleanup_(pull_job_unrefp) PullJob *j = NULL;

        j = new0(PullJob, 1);

        j->state = PULL_JOB_INIT;
        j->userdata = userdata;
        /* (uint64_t) -1 is the "length unknown" sentinel used throughout */
        j->content_length = (uint64_t) -1;
        j->start_usec = now(CLOCK_MONOTONIC);
        j->compressed_max = j->uncompressed_max = 8LLU * 1024LLU * 1024LLU * 1024LLU; /* 8GB */

        j->url = strdup(url);
/* Kicks off the transfer for an INIT-state job: optionally grows the
 * machine pool, creates the curl easy handle, builds an If-None-Match
 * header from any previously recorded ETags, wires up the write/header/
 * progress callbacks, and registers the handle with the glue event
 * loop. (The tail of this function is beyond the visible listing.) */
int pull_job_begin(PullJob *j) {

        /* Starting twice is a caller error */
        if (j->state != PULL_JOB_INIT)

        if (j->grow_machine_directory)
                grow_machine_directory();

        r = curl_glue_make(&j->curl, j->url, j);

        if (!strv_isempty(j->old_etags)) {
                _cleanup_free_ char *cc = NULL, *hdr = NULL;

                /* Build "If-None-Match: <etag1>, <etag2>, ..." */
                cc = strv_join(j->old_etags, ", ");

                hdr = strappend("If-None-Match: ", cc);

                if (!j->request_header) {
                        j->request_header = curl_slist_new(hdr, NULL);
                        if (!j->request_header)

                        /* List already exists: append instead */
                        struct curl_slist *l;

                        l = curl_slist_append(j->request_header, hdr);

                        j->request_header = l;

        if (j->request_header) {
                if (curl_easy_setopt(j->curl, CURLOPT_HTTPHEADER, j->request_header) != CURLE_OK)

        /* Body, header and progress callbacks all get the job as userdata */
        if (curl_easy_setopt(j->curl, CURLOPT_WRITEFUNCTION, pull_job_write_callback) != CURLE_OK)
        if (curl_easy_setopt(j->curl, CURLOPT_WRITEDATA, j) != CURLE_OK)
        if (curl_easy_setopt(j->curl, CURLOPT_HEADERFUNCTION, pull_job_header_callback) != CURLE_OK)
        if (curl_easy_setopt(j->curl, CURLOPT_HEADERDATA, j) != CURLE_OK)
        if (curl_easy_setopt(j->curl, CURLOPT_XFERINFOFUNCTION, pull_job_progress_callback) != CURLE_OK)
        if (curl_easy_setopt(j->curl, CURLOPT_XFERINFODATA, j) != CURLE_OK)
        /* NOPROGRESS=0 is what actually enables the xferinfo callback */
        if (curl_easy_setopt(j->curl, CURLOPT_NOPROGRESS, 0) != CURLE_OK)

        r = curl_glue_add(j->glue, j->curl);

        j->state = PULL_JOB_ANALYZING;