X-Git-Url: https://www.chiark.greenend.org.uk/ucgi/~ianmdlvl/git?a=blobdiff_plain;f=src%2Fjournal%2Fjournal-vacuum.c;h=832c327b3101d3e3b56740a48ab17396628e2083;hb=56e6c2abb8f18bba2bb9d96d66ac7e633349ddfb;hp=c89014653784d88e5634dc27a6d07ec8966ef7a8;hpb=2b43f939a4b3ad5aeb2650868b0234ff42ec0045;p=elogind.git

diff --git a/src/journal/journal-vacuum.c b/src/journal/journal-vacuum.c
index c89014653..832c327b3 100644
--- a/src/journal/journal-vacuum.c
+++ b/src/journal/journal-vacuum.c
@@ -24,6 +24,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "journal-def.h"
 #include "journal-file.h"
@@ -32,7 +33,7 @@
 #include "util.h"
 
 struct vacuum_info {
-        off_t usage;
+        uint64_t usage;
         char *filename;
 
         uint64_t realtime;
@@ -68,25 +69,116 @@ static int vacuum_compare(const void *_a, const void *_b) {
         return strcmp(a->filename, b->filename);
 }
 
-int journal_directory_vacuum(const char *directory, uint64_t max_use, uint64_t min_free) {
-        DIR *d;
+static void patch_realtime(
+                const char *dir,
+                const char *fn,
+                const struct stat *st,
+                unsigned long long *realtime) {
+
+        _cleanup_free_ const char *path = NULL;
+        usec_t x, crtime;
+
+        /* The timestamp was determined by the file name, but let's
+         * see if the file might actually be older than the file name
+         * suggested... */
+
+        assert(dir);
+        assert(fn);
+        assert(st);
+        assert(realtime);
+
+        x = timespec_load(&st->st_ctim);
+        if (x > 0 && x != USEC_INFINITY && x < *realtime)
+                *realtime = x;
+
+        x = timespec_load(&st->st_atim);
+        if (x > 0 && x != USEC_INFINITY && x < *realtime)
+                *realtime = x;
+
+        x = timespec_load(&st->st_mtim);
+        if (x > 0 && x != USEC_INFINITY && x < *realtime)
+                *realtime = x;
+
+        /* Let's read the original creation time, if possible. Ideally
+         * we'd just query the creation time the FS might provide, but
+         * unfortunately there's currently no sane API to query
+         * it. Hence let's implement this manually... */
+
+        /* Unfortunately there is no fgetxattrat(), so we need to
+         * go via the path here. :-( */
+
+        path = strjoin(dir, "/", fn, NULL);
+        if (!path)
+                return;
+
+        if (path_getcrtime(path, &crtime) >= 0) {
+                if (crtime < *realtime)
+                        *realtime = crtime;
+        }
+}
+
+static int journal_file_empty(int dir_fd, const char *name) {
+        _cleanup_close_ int fd;
+        struct stat st;
+        le64_t n_entries;
+        ssize_t n;
+
+        fd = openat(dir_fd, name, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
+        if (fd < 0)
+                return -errno;
+
+        if (fstat(fd, &st) < 0)
+                return -errno;
+
+        /* If an offline file doesn't even have a header we consider it empty */
+        if (st.st_size < (off_t) sizeof(Header))
+                return 1;
+
+        /* If the number of entries is zero, we consider the file empty, too */
+        n = pread(fd, &n_entries, sizeof(n_entries), offsetof(Header, n_entries));
+        if (n < 0)
+                return -errno;
+        if (n != sizeof(n_entries))
+                return -EIO;
+
+        return le64toh(n_entries) <= 0;
+}
+
+int journal_directory_vacuum(
+                const char *directory,
+                uint64_t max_use,
+                usec_t max_retention_usec,
+                usec_t *oldest_usec,
+                bool verbose) {
+
+        _cleanup_closedir_ DIR *d = NULL;
         int r = 0;
         struct vacuum_info *list = NULL;
-        unsigned n_list = 0, n_allocated = 0, i;
-        uint64_t sum = 0;
+        unsigned n_list = 0, i;
+        size_t n_allocated = 0;
+        uint64_t sum = 0, freed = 0;
+        usec_t retention_limit = 0;
+        char sbytes[FORMAT_BYTES_MAX];
 
         assert(directory);
 
-        if (max_use <= 0)
+        if (max_use <= 0 && max_retention_usec <= 0)
                 return 0;
 
+        if (max_retention_usec > 0) {
+                retention_limit = now(CLOCK_REALTIME);
+                if (retention_limit > max_retention_usec)
+                        retention_limit -= max_retention_usec;
+                else
+                        max_retention_usec = retention_limit = 0;
+        }
+
         d = opendir(directory);
         if (!d)
                 return -errno;
 
         for (;;) {
-                int k;
-                struct dirent buf, *de;
+                struct dirent *de;
                 size_t q;
                 struct stat st;
                 char *p;
@@ -94,9 +186,10 @@ int journal_directory_vacuum(const char *directory, uint64_t max_use, uint64_t m
                 sd_id128_t seqnum_id;
                 bool have_seqnum;
 
-                k = readdir_r(d, &buf, &de);
-                if (k != 0) {
-                        r = -k;
+                errno = 0;
+                de = readdir(d);
+                if (!de && errno != 0) {
+                        r = -errno;
                         goto finish;
                 }
 
@@ -167,20 +260,30 @@ int journal_directory_vacuum(const char *directory, uint64_t max_use, uint64_t m
                         have_seqnum = false;
                 } else
+                        /* We do not vacuum active files or unknown files! */
                         continue;
 
-                if (n_list >= n_allocated) {
-                        struct vacuum_info *j;
+                if (journal_file_empty(dirfd(d), p)) {
+                        /* Always vacuum empty non-online files. */
 
-                        n_allocated = MAX(n_allocated * 2U, 8U);
-                        j = realloc(list, n_allocated * sizeof(struct vacuum_info));
-                        if (!j) {
-                                free(p);
-                                r = -ENOMEM;
-                                goto finish;
-                        }
+                        uint64_t size = 512UL * (uint64_t) st.st_blocks;
+
+                        if (unlinkat(dirfd(d), p, 0) >= 0) {
+                                log_full(verbose ? LOG_INFO : LOG_DEBUG, "Deleted empty archived journal %s/%s (%s).", directory, p, format_bytes(sbytes, sizeof(sbytes), size));
+                                freed += size;
+                        } else if (errno != ENOENT)
+                                log_warning_errno(errno, "Failed to delete empty archived journal %s/%s: %m", directory, p);
+
+                        free(p);
+                        continue;
+                }
+
+                patch_realtime(directory, p, &st, &realtime);
 
-                        list = j;
+                if (!GREEDY_REALLOC(list, n_allocated, n_list + 1)) {
+                        free(p);
+                        r = -ENOMEM;
+                        goto finish;
                 }
 
                 list[n_list].filename = p;
@@ -195,36 +298,35 @@ int journal_directory_vacuum(const char *directory, uint64_t max_use, uint64_t m
                 n_list ++;
         }
 
-        if (n_list > 0)
-                qsort(list, n_list, sizeof(struct vacuum_info), vacuum_compare);
-
-        for(i = 0; i < n_list; i++) {
-                struct statvfs ss;
-
-                if (fstatvfs(dirfd(d), &ss) < 0) {
-                        r = -errno;
-                        goto finish;
-                }
+        qsort_safe(list, n_list, sizeof(struct vacuum_info), vacuum_compare);
 
-                if (sum <= max_use &&
-                    (uint64_t) ss.f_bavail * (uint64_t) ss.f_bsize >= min_free)
+        for (i = 0; i < n_list; i++) {
+                if ((max_retention_usec <= 0 || list[i].realtime >= retention_limit) &&
+                    (max_use <= 0 || sum <= max_use))
                         break;
 
                 if (unlinkat(dirfd(d), list[i].filename, 0) >= 0) {
-                        log_debug("Deleted archived journal %s/%s.", directory, list[i].filename);
-                        sum -= list[i].usage;
+                        log_full(verbose ? LOG_INFO : LOG_DEBUG, "Deleted archived journal %s/%s (%s).", directory, list[i].filename, format_bytes(sbytes, sizeof(sbytes), list[i].usage));
+                        freed += list[i].usage;
+
+                        if (list[i].usage < sum)
+                                sum -= list[i].usage;
+                        else
+                                sum = 0;
+
                 } else if (errno != ENOENT)
-                        log_warning("Failed to delete %s/%s: %m", directory, list[i].filename);
+                        log_warning_errno(errno, "Failed to delete archived journal %s/%s: %m", directory, list[i].filename);
         }
 
+        if (oldest_usec && i < n_list && (*oldest_usec == 0 || list[i].realtime < *oldest_usec))
+                *oldest_usec = list[i].realtime;
+
 finish:
         for (i = 0; i < n_list; i++)
                 free(list[i].filename);
-
         free(list);
 
-        if (d)
-                closedir(d);
+        log_full(verbose ? LOG_INFO : LOG_DEBUG, "Vacuuming done, freed %s of archived journals on disk.", format_bytes(sbytes, sizeof(sbytes), freed));
 
         return r;
 }
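
For readers following the new retention logic: journal_directory_vacuum() now derives a cut-off as now(CLOCK_REALTIME) minus max_retention_usec, clamped to zero so the subtraction cannot underflow, and the vacuum loop stops once a file's (patched) realtime is at or past that cut-off while the size budget is also satisfied. The following is a minimal standalone sketch of that cut-off arithmetic only; now_usec() and past_retention() are illustrative names, not helpers from util.h.

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

typedef uint64_t usec_t;

/* Current CLOCK_REALTIME time in microseconds (stand-in for now() from util.h). */
static usec_t now_usec(void) {
        struct timespec ts;

        clock_gettime(CLOCK_REALTIME, &ts);
        return (usec_t) ts.tv_sec * 1000000ULL + (usec_t) ts.tv_nsec / 1000ULL;
}

/* Returns true if a file whose patched realtime is 'realtime' lies outside
 * the retention window and is therefore a candidate for deletion. */
static bool past_retention(usec_t realtime, usec_t max_retention_usec) {
        usec_t retention_limit;

        if (max_retention_usec == 0)
                return false;                          /* time-based vacuuming disabled */

        retention_limit = now_usec();
        if (retention_limit > max_retention_usec)
                retention_limit -= max_retention_usec; /* cut-off = now - retention window */
        else
                retention_limit = 0;                   /* clamp instead of underflowing */

        return realtime < retention_limit;
}

The clamp covers the case where max_retention_usec exceeds the current clock value; in that situation the patch zeroes both max_retention_usec and retention_limit, effectively disabling retention-based deletion for that run.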