X-Git-Url: http://www.chiark.greenend.org.uk/ucgi/~ianmdlvl/git?p=elogind.git;a=blobdiff_plain;f=src%2Fjournal%2Fjournal-file.c;h=c9030c56addb5e7dac5f6d19c23b9b70d9fae234;hp=274f22db170a5e79a3a0e8040e42420e1f4b1b9a;hb=b9a1617d75c16a48cccf4ff135013dca9af94e7d;hpb=14d10188de1fd58e663d73683a400d8d7dc67dba diff --git a/src/journal/journal-file.c b/src/journal/journal-file.c index 274f22db1..c9030c56a 100644 --- a/src/journal/journal-file.c +++ b/src/journal/journal-file.c @@ -27,6 +27,7 @@ #include #include +#include "btrfs-util.h" #include "journal-def.h" #include "journal-file.h" #include "journal-authenticate.h" @@ -40,7 +41,7 @@ #define COMPRESSION_SIZE_THRESHOLD (512ULL) /* This is the minimum journal file size */ -#define JOURNAL_FILE_SIZE_MIN (64ULL*1024ULL) /* 64 KiB */ +#define JOURNAL_FILE_SIZE_MIN (4ULL*1024ULL*1024ULL) /* 4 MiB */ /* These are the lower and upper bounds if we deduce the max_use value * from the file system size */ @@ -61,43 +62,103 @@ /* n_data was the first entry we added after the initial file format design */ #define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data)) +/* How many entries to keep in the entry array chain cache at max */ +#define CHAIN_CACHE_MAX 20 + +/* How much to increase the journal file size at once each time we allocate something new. */ +#define FILE_SIZE_INCREASE (8ULL*1024ULL*1024ULL) /* 8MB */ + +/* Reread fstat() of the file for detecting deletions at least this often */ +#define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC) + +/* The mmap context to use for the header we pick as one above the last defined typed */ +#define CONTEXT_HEADER _OBJECT_TYPE_MAX + +static int journal_file_set_online(JournalFile *f) { + assert(f); + + if (!f->writable) + return -EPERM; + + if (!(f->fd >= 0 && f->header)) + return -EINVAL; + + if (mmap_cache_got_sigbus(f->mmap, f->fd)) + return -EIO; + + switch(f->header->state) { + case STATE_ONLINE: + return 0; + + case STATE_OFFLINE: + f->header->state = STATE_ONLINE; + fsync(f->fd); + return 0; + + default: + return -EINVAL; + } +} + +int journal_file_set_offline(JournalFile *f) { + assert(f); + + if (!f->writable) + return -EPERM; + + if (!(f->fd >= 0 && f->header)) + return -EINVAL; + + if (f->header->state != STATE_ONLINE) + return 0; + + fsync(f->fd); + + if (mmap_cache_got_sigbus(f->mmap, f->fd)) + return -EIO; + + f->header->state = STATE_OFFLINE; + + if (mmap_cache_got_sigbus(f->mmap, f->fd)) + return -EIO; + + fsync(f->fd); + + return 0; +} + void journal_file_close(JournalFile *f) { assert(f); +#ifdef HAVE_GCRYPT /* Write the final tag */ - if (f->authenticate) + if (f->seal && f->writable) journal_file_append_tag(f); +#endif + + journal_file_set_offline(f); - /* Sync everything to disk, before we mark the file offline */ if (f->mmap && f->fd >= 0) mmap_cache_close_fd(f->mmap, f->fd); - if (f->writable && f->fd >= 0) - fdatasync(f->fd); - - if (f->header) { - /* Mark the file offline. 
Don't override the archived state if it already is set */ - if (f->writable && f->header->state == STATE_ONLINE) - f->header->state = STATE_OFFLINE; - - munmap(f->header, PAGE_ALIGN(sizeof(Header))); - } - - if (f->fd >= 0) - close_nointr_nofail(f->fd); + if (f->fd >= 0 && f->defrag_on_close) + btrfs_defrag_fd(f->fd); + safe_close(f->fd); free(f->path); if (f->mmap) mmap_cache_unref(f->mmap); -#ifdef HAVE_XZ + ordered_hashmap_free_free(f->chain_cache); + +#if defined(HAVE_XZ) || defined(HAVE_LZ4) free(f->compress_buffer); #endif #ifdef HAVE_GCRYPT - if (f->fsprg_file) - munmap(f->fsprg_file, PAGE_ALIGN(f->fsprg_file_size)); + if (f->fss_file) + munmap(f->fss_file, PAGE_ALIGN(f->fss_file_size)); else if (f->fsprg_state) free(f->fsprg_state); @@ -111,21 +172,21 @@ void journal_file_close(JournalFile *f) { } static int journal_file_init_header(JournalFile *f, JournalFile *template) { - Header h; + Header h = {}; ssize_t k; int r; assert(f); - zero(h); memcpy(h.signature, HEADER_SIGNATURE, 8); h.header_size = htole64(ALIGN64(sizeof(h))); - h.incompatible_flags = - htole32(f->compress ? HEADER_INCOMPATIBLE_COMPRESSED : 0); + h.incompatible_flags |= htole32( + f->compress_xz * HEADER_INCOMPATIBLE_COMPRESSED_XZ | + f->compress_lz4 * HEADER_INCOMPATIBLE_COMPRESSED_LZ4); - h.compatible_flags = - htole32(f->authenticate ? HEADER_COMPATIBLE_AUTHENTICATED : 0); + h.compatible_flags = htole32( + f->seal * HEADER_COMPATIBLE_SEALED); r = sd_id128_randomize(&h.file_id); if (r < 0) @@ -148,8 +209,8 @@ static int journal_file_init_header(JournalFile *f, JournalFile *template) { } static int journal_file_refresh_header(JournalFile *f) { - int r; sd_id128_t boot_id; + int r; assert(f); @@ -166,16 +227,17 @@ static int journal_file_refresh_header(JournalFile *f) { f->header->boot_id = boot_id; - f->header->state = STATE_ONLINE; + r = journal_file_set_online(f); /* Sync the online state to disk */ - msync(f->header, PAGE_ALIGN(sizeof(Header)), MS_SYNC); - fdatasync(f->fd); + fsync(f->fd); - return 0; + return r; } static int journal_file_verify_header(JournalFile *f) { + uint32_t flags; + assert(f); if (memcmp(f->header->signature, HEADER_SIGNATURE, 8)) @@ -183,35 +245,52 @@ static int journal_file_verify_header(JournalFile *f) { /* In both read and write mode we refuse to open files with * incompatible flags we don't know */ -#ifdef HAVE_XZ - if ((le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_COMPRESSED) != 0) - return -EPROTONOSUPPORT; -#else - if (f->header->incompatible_flags != 0) + flags = le32toh(f->header->incompatible_flags); + if (flags & ~HEADER_INCOMPATIBLE_SUPPORTED) { + if (flags & ~HEADER_INCOMPATIBLE_ANY) + log_debug("Journal file %s has unknown incompatible flags %"PRIx32, + f->path, flags & ~HEADER_INCOMPATIBLE_ANY); + flags = (flags & HEADER_INCOMPATIBLE_ANY) & ~HEADER_INCOMPATIBLE_SUPPORTED; + if (flags) + log_debug("Journal file %s uses incompatible flags %"PRIx32 + " disabled at compilation time.", f->path, flags); return -EPROTONOSUPPORT; -#endif + } /* When open for writing we refuse to open files with * compatible flags, too */ - if (f->writable) { -#ifdef HAVE_GCRYPT - if ((le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_AUTHENTICATED) != 0) - return -EPROTONOSUPPORT; -#else - if (f->header->compatible_flags != 0) - return -EPROTONOSUPPORT; -#endif + flags = le32toh(f->header->compatible_flags); + if (f->writable && (flags & ~HEADER_COMPATIBLE_SUPPORTED)) { + if (flags & ~HEADER_COMPATIBLE_ANY) + log_debug("Journal file %s has unknown compatible flags 
%"PRIx32, + f->path, flags & ~HEADER_COMPATIBLE_ANY); + flags = (flags & HEADER_COMPATIBLE_ANY) & ~HEADER_COMPATIBLE_SUPPORTED; + if (flags) + log_debug("Journal file %s uses compatible flags %"PRIx32 + " disabled at compilation time.", f->path, flags); + return -EPROTONOSUPPORT; } + if (f->header->state >= _STATE_MAX) + return -EBADMSG; + /* The first addition was n_data, so check that we are at least this large */ if (le64toh(f->header->header_size) < HEADER_SIZE_MIN) return -EBADMSG; - if ((le32toh(f->header->compatible_flags) & HEADER_COMPATIBLE_AUTHENTICATED) && - !JOURNAL_HEADER_CONTAINS(f->header, n_tags)) + if (JOURNAL_HEADER_SEALED(f->header) && !JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays)) return -EBADMSG; - if ((uint64_t) f->last_stat.st_size < (le64toh(f->header->header_size) + le64toh(f->header->arena_size))) + if ((le64toh(f->header->header_size) + le64toh(f->header->arena_size)) > (uint64_t) f->last_stat.st_size) + return -ENODATA; + + if (le64toh(f->header->tail_object_offset) > (le64toh(f->header->header_size) + le64toh(f->header->arena_size))) + return -ENODATA; + + if (!VALID64(le64toh(f->header->data_hash_table_offset)) || + !VALID64(le64toh(f->header->field_hash_table_offset)) || + !VALID64(le64toh(f->header->tail_object_offset)) || + !VALID64(le64toh(f->header->entry_array_offset))) return -ENODATA; if (f->writable) { @@ -239,8 +318,26 @@ static int journal_file_verify_header(JournalFile *f) { } } - f->compress = !!(le32toh(f->header->incompatible_flags) & HEADER_INCOMPATIBLE_COMPRESSED); - f->authenticate = !!(le32toh(f->header->compatible_flags) & HEADER_COMPATIBLE_AUTHENTICATED); + f->compress_xz = JOURNAL_HEADER_COMPRESSED_XZ(f->header); + f->compress_lz4 = JOURNAL_HEADER_COMPRESSED_LZ4(f->header); + + f->seal = JOURNAL_HEADER_SEALED(f->header); + + return 0; +} + +static int journal_file_fstat(JournalFile *f) { + assert(f); + assert(f->fd >= 0); + + if (fstat(f->fd, &f->last_stat) < 0) + return -errno; + + f->last_stat_usec = now(CLOCK_MONOTONIC); + + /* Refuse appending to files that are already deleted */ + if (f->last_stat.st_nlink <= 0) + return -EIDRM; return 0; } @@ -255,6 +352,9 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) * for sure, since we always call posix_fallocate() * ourselves */ + if (mmap_cache_got_sigbus(f->mmap, f->fd)) + return -EIO; + old_size = le64toh(f->header->header_size) + le64toh(f->header->arena_size); @@ -263,15 +363,26 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) if (new_size < le64toh(f->header->header_size)) new_size = le64toh(f->header->header_size); - if (new_size <= old_size) - return 0; + if (new_size <= old_size) { + + /* We already pre-allocated enough space, but before + * we write to it, let's check with fstat() if the + * file got deleted, in order make sure we don't throw + * away the data immediately. Don't check fstat() for + * all writes though, but only once ever 10s. */ + + if (f->last_stat_usec + LAST_STAT_REFRESH_USEC > now(CLOCK_MONOTONIC)) + return 0; - if (f->metrics.max_size > 0 && - new_size > f->metrics.max_size) + return journal_file_fstat(f); + } + + /* Allocate more space. 
*/ + + if (f->metrics.max_size > 0 && new_size > f->metrics.max_size) return -E2BIG; - if (new_size > f->metrics.min_size && - f->metrics.keep_free > 0) { + if (new_size > f->metrics.min_size && f->metrics.keep_free > 0) { struct statvfs svfs; if (fstatvfs(f->fd, &svfs) >= 0) { @@ -289,6 +400,11 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) } } + /* Increase by larger blocks at once */ + new_size = ((new_size+FILE_SIZE_INCREASE-1) / FILE_SIZE_INCREASE) * FILE_SIZE_INCREASE; + if (f->metrics.max_size > 0 && new_size > f->metrics.max_size) + new_size = f->metrics.max_size; + /* Note that the glibc fallocate() fallback is very inefficient, hence we try to minimize the allocation area as we can. */ @@ -296,36 +412,46 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) if (r != 0) return -r; - mmap_cache_close_fd_range(f->mmap, f->fd, old_size); - - if (fstat(f->fd, &f->last_stat) < 0) - return -errno; - f->header->arena_size = htole64(new_size - le64toh(f->header->header_size)); - return 0; + return journal_file_fstat(f); +} + +static unsigned type_to_context(ObjectType type) { + /* One context for each type, plus one catch-all for the rest */ + assert_cc(_OBJECT_TYPE_MAX <= MMAP_CACHE_MAX_CONTEXTS); + assert_cc(CONTEXT_HEADER < MMAP_CACHE_MAX_CONTEXTS); + return type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX ? type : 0; } -static int journal_file_move_to(JournalFile *f, int context, uint64_t offset, uint64_t size, void **ret) { +static int journal_file_move_to(JournalFile *f, ObjectType type, bool keep_always, uint64_t offset, uint64_t size, void **ret) { + int r; + assert(f); assert(ret); + if (size <= 0) + return -EINVAL; + /* Avoid SIGBUS on invalid accesses */ if (offset + size > (uint64_t) f->last_stat.st_size) { /* Hmm, out of range? Let's refresh the fstat() data * first, before we trust that check. */ - if (fstat(f->fd, &f->last_stat) < 0 || - offset + size > (uint64_t) f->last_stat.st_size) + r = journal_file_fstat(f); + if (r < 0) + return r; + + if (offset + size > (uint64_t) f->last_stat.st_size) return -EADDRNOTAVAIL; } - return mmap_cache_get(f->mmap, f->fd, f->prot, context, offset, size, ret); + return mmap_cache_get(f->mmap, f->fd, f->prot, type_to_context(type), keep_always, offset, size, &f->last_stat, ret); } static uint64_t minimum_header_size(Object *o) { - static uint64_t table[] = { + static const uint64_t table[] = { [OBJECT_DATA] = sizeof(DataObject), [OBJECT_FIELD] = sizeof(FieldObject), [OBJECT_ENTRY] = sizeof(EntryObject), @@ -341,20 +467,20 @@ static uint64_t minimum_header_size(Object *o) { return table[o->object.type]; } -int journal_file_move_to_object(JournalFile *f, int type, uint64_t offset, Object **ret) { +int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset, Object **ret) { int r; void *t; Object *o; uint64_t s; - unsigned context; assert(f); assert(ret); - /* One context for each type, plus one catch-all for the rest */ - context = type > 0 && type < _OBJECT_TYPE_MAX ? 
type : 0; + /* Objects may only be located at multiple of 64 bit */ + if (!VALID64(offset)) + return -EFAULT; - r = journal_file_move_to(f, context, offset, sizeof(ObjectHeader), &t); + r = journal_file_move_to(f, type, false, offset, sizeof(ObjectHeader), &t); if (r < 0) return r; @@ -370,11 +496,11 @@ int journal_file_move_to_object(JournalFile *f, int type, uint64_t offset, Objec if (s < minimum_header_size(o)) return -EBADMSG; - if (type >= 0 && o->object.type != type) + if (type > OBJECT_UNUSED && o->object.type != type) return -EBADMSG; if (s > sizeof(ObjectHeader)) { - r = journal_file_move_to(f, o->object.type, offset, s, &t); + r = journal_file_move_to(f, type, false, offset, s, &t); if (r < 0) return r; @@ -411,23 +537,27 @@ static uint64_t journal_file_entry_seqnum(JournalFile *f, uint64_t *seqnum) { return r; } -int journal_file_append_object(JournalFile *f, int type, uint64_t size, Object **ret, uint64_t *offset) { +int journal_file_append_object(JournalFile *f, ObjectType type, uint64_t size, Object **ret, uint64_t *offset) { int r; uint64_t p; Object *tail, *o; void *t; assert(f); - assert(type > 0 && type < _OBJECT_TYPE_MAX); + assert(type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX); assert(size >= sizeof(ObjectHeader)); assert(offset); assert(ret); + r = journal_file_set_online(f); + if (r < 0) + return r; + p = le64toh(f->header->tail_object_offset); if (p == 0) p = le64toh(f->header->header_size); else { - r = journal_file_move_to_object(f, -1, p, &tail); + r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &tail); if (r < 0) return r; @@ -438,7 +568,7 @@ int journal_file_append_object(JournalFile *f, int type, uint64_t size, Object * if (r < 0) return r; - r = journal_file_move_to(f, type, p, size, &t); + r = journal_file_move_to(f, type, false, p, size, &t); if (r < 0) return r; @@ -473,7 +603,7 @@ static int journal_file_setup_data_hash_table(JournalFile *f) { if (s < DEFAULT_DATA_HASH_TABLE_SIZE) s = DEFAULT_DATA_HASH_TABLE_SIZE; - log_info("Reserving %llu entries in hash table.", (unsigned long long) (s / sizeof(HashItem))); + log_debug("Reserving %"PRIu64" entries in hash table.", s / sizeof(HashItem)); r = journal_file_append_object(f, OBJECT_DATA_HASH_TABLE, @@ -482,7 +612,7 @@ static int journal_file_setup_data_hash_table(JournalFile *f) { if (r < 0) return r; - memset(o->hash_table.items, 0, s); + memzero(o->hash_table.items, s); f->header->data_hash_table_offset = htole64(p + offsetof(Object, hash_table.items)); f->header->data_hash_table_size = htole64(s); @@ -497,6 +627,9 @@ static int journal_file_setup_field_hash_table(JournalFile *f) { assert(f); + /* We use a fixed size hash table for the fields as this + * number should grow very slowly only */ + s = DEFAULT_FIELD_HASH_TABLE_SIZE; r = journal_file_append_object(f, OBJECT_FIELD_HASH_TABLE, @@ -505,7 +638,7 @@ static int journal_file_setup_field_hash_table(JournalFile *f) { if (r < 0) return r; - memset(o->hash_table.items, 0, s); + memzero(o->hash_table.items, s); f->header->field_hash_table_offset = htole64(p + offsetof(Object, hash_table.items)); f->header->field_hash_table_size = htole64(s); @@ -525,6 +658,7 @@ static int journal_file_map_data_hash_table(JournalFile *f) { r = journal_file_move_to(f, OBJECT_DATA_HASH_TABLE, + true, p, s, &t); if (r < 0) @@ -546,6 +680,7 @@ static int journal_file_map_field_hash_table(JournalFile *f) { r = journal_file_move_to(f, OBJECT_FIELD_HASH_TABLE, + true, p, s, &t); if (r < 0) @@ -555,27 +690,80 @@ static int journal_file_map_field_hash_table(JournalFile *f) { 
return 0; } -static int journal_file_link_data(JournalFile *f, Object *o, uint64_t offset, uint64_t hash) { - uint64_t p, h; +static int journal_file_link_field( + JournalFile *f, + Object *o, + uint64_t offset, + uint64_t hash) { + + uint64_t p, h, m; int r; assert(f); assert(o); assert(offset > 0); - assert(o->object.type == OBJECT_DATA); + + if (o->object.type != OBJECT_FIELD) + return -EINVAL; + + m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem); + if (m <= 0) + return -EBADMSG; /* This might alter the window we are looking at */ + o->field.next_hash_offset = o->field.head_data_offset = 0; + + h = hash % m; + p = le64toh(f->field_hash_table[h].tail_hash_offset); + if (p == 0) + f->field_hash_table[h].head_hash_offset = htole64(offset); + else { + r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o); + if (r < 0) + return r; + + o->field.next_hash_offset = htole64(offset); + } + + f->field_hash_table[h].tail_hash_offset = htole64(offset); + if (JOURNAL_HEADER_CONTAINS(f->header, n_fields)) + f->header->n_fields = htole64(le64toh(f->header->n_fields) + 1); + + return 0; +} + +static int journal_file_link_data( + JournalFile *f, + Object *o, + uint64_t offset, + uint64_t hash) { + + uint64_t p, h, m; + int r; + + assert(f); + assert(o); + assert(offset > 0); + + if (o->object.type != OBJECT_DATA) + return -EINVAL; + + m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem); + if (m <= 0) + return -EBADMSG; + + /* This might alter the window we are looking at */ o->data.next_hash_offset = o->data.next_field_offset = 0; o->data.entry_offset = o->data.entry_array_offset = 0; o->data.n_entries = 0; - h = hash % (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)); + h = hash % m; p = le64toh(f->data_hash_table[h].tail_hash_offset); - if (p == 0) { + if (p == 0) /* Only entry in the hash table is easy */ f->data_hash_table[h].head_hash_offset = htole64(offset); - } else { + else { /* Move back to the previous data object, to patch in * pointer */ @@ -594,12 +782,75 @@ static int journal_file_link_data(JournalFile *f, Object *o, uint64_t offset, ui return 0; } +int journal_file_find_field_object_with_hash( + JournalFile *f, + const void *field, uint64_t size, uint64_t hash, + Object **ret, uint64_t *offset) { + + uint64_t p, osize, h, m; + int r; + + assert(f); + assert(field && size > 0); + + osize = offsetof(Object, field.payload) + size; + + m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem); + + if (m <= 0) + return -EBADMSG; + + h = hash % m; + p = le64toh(f->field_hash_table[h].head_hash_offset); + + while (p > 0) { + Object *o; + + r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o); + if (r < 0) + return r; + + if (le64toh(o->field.hash) == hash && + le64toh(o->object.size) == osize && + memcmp(o->field.payload, field, size) == 0) { + + if (ret) + *ret = o; + if (offset) + *offset = p; + + return 1; + } + + p = le64toh(o->field.next_hash_offset); + } + + return 0; +} + +int journal_file_find_field_object( + JournalFile *f, + const void *field, uint64_t size, + Object **ret, uint64_t *offset) { + + uint64_t hash; + + assert(f); + assert(field && size > 0); + + hash = hash64(field, size); + + return journal_file_find_field_object_with_hash(f, + field, size, hash, + ret, offset); +} + int journal_file_find_data_object_with_hash( JournalFile *f, const void *data, uint64_t size, uint64_t hash, Object **ret, uint64_t *offset) { - uint64_t p, osize, h; + uint64_t p, osize, h, m; int r; assert(f); @@ -607,10 +858,11 @@ int 
journal_file_find_data_object_with_hash( osize = offsetof(Object, data.payload) + size; - if (f->header->data_hash_table_size == 0) + m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem); + if (m <= 0) return -EBADMSG; - h = hash % (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)); + h = hash % m; p = le64toh(f->data_hash_table[h].head_hash_offset); while (p > 0) { @@ -623,9 +875,10 @@ int journal_file_find_data_object_with_hash( if (le64toh(o->data.hash) != hash) goto next; - if (o->object.flags & OBJECT_COMPRESSED) { -#ifdef HAVE_XZ - uint64_t l, rsize; + if (o->object.flags & OBJECT_COMPRESSION_MASK) { +#if defined(HAVE_XZ) || defined(HAVE_LZ4) + uint64_t l; + size_t rsize; l = le64toh(o->object.size); if (l <= offsetof(Object, data.payload)) @@ -633,8 +886,10 @@ int journal_file_find_data_object_with_hash( l -= offsetof(Object, data.payload); - if (!uncompress_blob(o->data.payload, l, &f->compress_buffer, &f->compress_buffer_size, &rsize)) - return -EBADMSG; + r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK, + o->data.payload, l, &f->compress_buffer, &f->compress_buffer_size, &rsize, 0); + if (r < 0) + return r; if (rsize == size && memcmp(f->compress_buffer, data, size) == 0) { @@ -650,7 +905,6 @@ int journal_file_find_data_object_with_hash( #else return -EPROTONOSUPPORT; #endif - } else if (le64toh(o->object.size) == osize && memcmp(o->data.payload, data, size) == 0) { @@ -687,6 +941,68 @@ int journal_file_find_data_object( ret, offset); } +static int journal_file_append_field( + JournalFile *f, + const void *field, uint64_t size, + Object **ret, uint64_t *offset) { + + uint64_t hash, p; + uint64_t osize; + Object *o; + int r; + + assert(f); + assert(field && size > 0); + + hash = hash64(field, size); + + r = journal_file_find_field_object_with_hash(f, field, size, hash, &o, &p); + if (r < 0) + return r; + else if (r > 0) { + + if (ret) + *ret = o; + + if (offset) + *offset = p; + + return 0; + } + + osize = offsetof(Object, field.payload) + size; + r = journal_file_append_object(f, OBJECT_FIELD, osize, &o, &p); + if (r < 0) + return r; + + o->field.hash = htole64(hash); + memcpy(o->field.payload, field, size); + + r = journal_file_link_field(f, o, p, hash); + if (r < 0) + return r; + + /* The linking might have altered the window, so let's + * refresh our pointer */ + r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o); + if (r < 0) + return r; + +#ifdef HAVE_GCRYPT + r = journal_file_hmac_put_object(f, OBJECT_FIELD, o, p); + if (r < 0) + return r; +#endif + + if (ret) + *ret = o; + + if (offset) + *offset = p; + + return 0; +} + static int journal_file_append_data( JournalFile *f, const void *data, uint64_t size, @@ -695,8 +1011,8 @@ static int journal_file_append_data( uint64_t hash, p; uint64_t osize; Object *o; - int r; - bool compressed = false; + int r, compression = 0; + const void *eq; assert(f); assert(data || size == 0); @@ -724,39 +1040,60 @@ static int journal_file_append_data( o->data.hash = htole64(hash); -#ifdef HAVE_XZ - if (f->compress && +#if defined(HAVE_XZ) || defined(HAVE_LZ4) + if (f->compress_xz && size >= COMPRESSION_SIZE_THRESHOLD) { - uint64_t rsize; + size_t rsize; - compressed = compress_blob(data, size, o->data.payload, &rsize); + compression = compress_blob(data, size, o->data.payload, &rsize); - if (compressed) { + if (compression) { o->object.size = htole64(offsetof(Object, data.payload) + rsize); - o->object.flags |= OBJECT_COMPRESSED; + o->object.flags |= compression; - log_debug("Compressed data object %lu 
-> %lu", (unsigned long) size, (unsigned long) rsize); + log_debug("Compressed data object %"PRIu64" -> %zu using %s", + size, rsize, object_compressed_to_string(compression)); } } #endif - if (!compressed && size > 0) + if (!compression && size > 0) memcpy(o->data.payload, data, size); r = journal_file_link_data(f, o, p, hash); if (r < 0) return r; - r = journal_file_hmac_put_object(f, OBJECT_DATA, p); - if (r < 0) - return r; - /* The linking might have altered the window, so let's * refresh our pointer */ r = journal_file_move_to_object(f, OBJECT_DATA, p, &o); if (r < 0) return r; + if (!data) + eq = NULL; + else + eq = memchr(data, '=', size); + if (eq && eq > data) { + Object *fo = NULL; + uint64_t fp; + + /* Create field object ... */ + r = journal_file_append_field(f, data, (uint8_t*) eq - (uint8_t*) data, &fo, &fp); + if (r < 0) + return r; + + /* ... and link it in. */ + o->data.next_field_offset = fo->field.head_data_offset; + fo->field.head_data_offset = le64toh(p); + } + +#ifdef HAVE_GCRYPT + r = journal_file_hmac_put_object(f, OBJECT_DATA, o, p); + if (r < 0) + return r; +#endif + if (ret) *ret = o; @@ -768,18 +1105,32 @@ static int journal_file_append_data( uint64_t journal_file_entry_n_items(Object *o) { assert(o); - assert(o->object.type == OBJECT_ENTRY); + + if (o->object.type != OBJECT_ENTRY) + return 0; return (le64toh(o->object.size) - offsetof(Object, entry.items)) / sizeof(EntryItem); } uint64_t journal_file_entry_array_n_items(Object *o) { assert(o); - assert(o->object.type == OBJECT_ENTRY_ARRAY); + + if (o->object.type != OBJECT_ENTRY_ARRAY) + return 0; return (le64toh(o->object.size) - offsetof(Object, entry_array.items)) / sizeof(uint64_t); } +uint64_t journal_file_hash_table_n_items(Object *o) { + assert(o); + + if (o->object.type != OBJECT_DATA_HASH_TABLE && + o->object.type != OBJECT_FIELD_HASH_TABLE) + return 0; + + return (le64toh(o->object.size) - offsetof(Object, hash_table.items)) / sizeof(HashItem); +} + static int link_entry_into_array(JournalFile *f, le64_t *first, le64_t *idx, @@ -827,9 +1178,11 @@ static int link_entry_into_array(JournalFile *f, if (r < 0) return r; - r = journal_file_hmac_put_object(f, OBJECT_ENTRY_ARRAY, q); +#ifdef HAVE_GCRYPT + r = journal_file_hmac_put_object(f, OBJECT_ENTRY_ARRAY, o, q); if (r < 0) return r; +#endif o->entry_array.items[i] = htole64(p); @@ -909,7 +1262,9 @@ static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) { assert(f); assert(o); assert(offset > 0); - assert(o->object.type == OBJECT_ENTRY); + + if (o->object.type != OBJECT_ENTRY) + return -EINVAL; __sync_synchronize(); @@ -921,7 +1276,7 @@ static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) { if (r < 0) return r; - /* log_debug("=> %s seqnr=%lu n_entries=%lu", f->path, (unsigned long) o->entry.seqnum, (unsigned long) f->header->n_entries); */ + /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */ if (f->header->head_entry_realtime == 0) f->header->head_entry_realtime = o->entry.realtime; @@ -971,9 +1326,11 @@ static int journal_file_append_entry_internal( o->entry.xor_hash = htole64(xor_hash); o->entry.boot_id = f->header->boot_id; - r = journal_file_hmac_put_object(f, OBJECT_ENTRY, np); +#ifdef HAVE_GCRYPT + r = journal_file_hmac_put_object(f, OBJECT_ENTRY, o, np); if (r < 0) return r; +#endif r = journal_file_link_entry(f, o, np); if (r < 0) @@ -999,7 +1356,17 @@ void journal_file_post_change(JournalFile *f) { __sync_synchronize(); if 
(ftruncate(f->fd, f->last_stat.st_size) < 0) - log_error("Failed to to truncate file to its own size: %m"); + log_error_errno(errno, "Failed to truncate file to its own size: %m"); +} + +static int entry_item_cmp(const void *_a, const void *_b) { + const EntryItem *a = _a, *b = _b; + + if (le64toh(a->object_offset) < le64toh(b->object_offset)) + return -1; + if (le64toh(a->object_offset) > le64toh(b->object_offset)) + return 1; + return 0; } int journal_file_append_entry(JournalFile *f, const dual_timestamp *ts, const struct iovec iovec[], unsigned n_iovec, uint64_t *seqnum, Object **ret, uint64_t *offset) { @@ -1012,9 +1379,6 @@ int journal_file_append_entry(JournalFile *f, const dual_timestamp *ts, const st assert(f); assert(iovec || n_iovec == 0); - if (!f->writable) - return -EPERM; - if (!ts) { dual_timestamp_get(&_ts); ts = &_ts; @@ -1024,12 +1388,14 @@ int journal_file_append_entry(JournalFile *f, const dual_timestamp *ts, const st ts->monotonic < le64toh(f->header->tail_entry_monotonic)) return -EINVAL; +#ifdef HAVE_GCRYPT r = journal_file_maybe_append_tag(f, ts->realtime); if (r < 0) return r; +#endif /* alloca() can't take 0, hence let's allocate at least one */ - items = alloca(sizeof(EntryItem) * MAX(1, n_iovec)); + items = alloca(sizeof(EntryItem) * MAX(1u, n_iovec)); for (i = 0; i < n_iovec; i++) { uint64_t p; @@ -1044,44 +1410,118 @@ int journal_file_append_entry(JournalFile *f, const dual_timestamp *ts, const st items[i].hash = o->data.hash; } + /* Order by the position on disk, in order to improve seek + * times for rotating media. */ + qsort_safe(items, n_iovec, sizeof(EntryItem), entry_item_cmp); + r = journal_file_append_entry_internal(f, ts, xor_hash, items, n_iovec, seqnum, ret, offset); + /* If the memory mapping triggered a SIGBUS then we return an + * IO error and ignore the error code passed down to us, since + * it is very likely just an effect of a nullified replacement + * mapping page */ + + if (mmap_cache_got_sigbus(f->mmap, f->fd)) + r = -EIO; + journal_file_post_change(f); return r; } -static int generic_array_get(JournalFile *f, - uint64_t first, - uint64_t i, - Object **ret, uint64_t *offset) { +typedef struct ChainCacheItem { + uint64_t first; /* the array at the beginning of the chain */ + uint64_t array; /* the cached array */ + uint64_t begin; /* the first item in the cached array */ + uint64_t total; /* the total number of items in all arrays before this one in the chain */ + uint64_t last_index; /* the last index we looked at, to optimize locality when bisecting */ +} ChainCacheItem; + +static void chain_cache_put( + OrderedHashmap *h, + ChainCacheItem *ci, + uint64_t first, + uint64_t array, + uint64_t begin, + uint64_t total, + uint64_t last_index) { + + if (!ci) { + /* If the chain item to cache for this chain is the + * first one it's not worth caching anything */ + if (array == first) + return; + + if (ordered_hashmap_size(h) >= CHAIN_CACHE_MAX) { + ci = ordered_hashmap_steal_first(h); + assert(ci); + } else { + ci = new(ChainCacheItem, 1); + if (!ci) + return; + } + + ci->first = first; + + if (ordered_hashmap_put(h, &ci->first, ci) < 0) { + free(ci); + return; + } + } else + assert(ci->first == first); + + ci->array = array; + ci->begin = begin; + ci->total = total; + ci->last_index = last_index; +} + +static int generic_array_get( + JournalFile *f, + uint64_t first, + uint64_t i, + Object **ret, uint64_t *offset) { Object *o; - uint64_t p = 0, a; + uint64_t p = 0, a, t = 0; int r; + ChainCacheItem *ci; assert(f); a = first; + + /* Try the 
chain cache first */ + ci = ordered_hashmap_get(f->chain_cache, &first); + if (ci && i > ci->total) { + a = ci->array; + i -= ci->total; + t = ci->total; + } + while (a > 0) { - uint64_t n; + uint64_t k; r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o); if (r < 0) return r; - n = journal_file_entry_array_n_items(o); - if (i < n) { + k = journal_file_entry_array_n_items(o); + if (i < k) { p = le64toh(o->entry_array.items[i]); - break; + goto found; } - i -= n; + i -= k; + t += k; a = le64toh(o->entry_array.next_entry_array_offset); } - if (a <= 0 || p <= 0) - return 0; + return 0; + +found: + /* Let's cache this item for the next invocation */ + chain_cache_put(f->chain_cache, ci, first, a, le64toh(o->entry_array.items[0]), t, i); r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o); if (r < 0) @@ -1096,11 +1536,12 @@ static int generic_array_get(JournalFile *f, return 1; } -static int generic_array_get_plus_one(JournalFile *f, - uint64_t extra, - uint64_t first, - uint64_t i, - Object **ret, uint64_t *offset) { +static int generic_array_get_plus_one( + JournalFile *f, + uint64_t extra, + uint64_t first, + uint64_t i, + Object **ret, uint64_t *offset) { Object *o; @@ -1131,25 +1572,54 @@ enum { TEST_RIGHT }; -static int generic_array_bisect(JournalFile *f, - uint64_t first, - uint64_t n, - uint64_t needle, - int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle), - direction_t direction, - Object **ret, - uint64_t *offset, - uint64_t *idx) { - - uint64_t a, p, t = 0, i = 0, last_p = 0; +static int generic_array_bisect( + JournalFile *f, + uint64_t first, + uint64_t n, + uint64_t needle, + int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle), + direction_t direction, + Object **ret, + uint64_t *offset, + uint64_t *idx) { + + uint64_t a, p, t = 0, i = 0, last_p = 0, last_index = (uint64_t) -1; bool subtract_one = false; Object *o, *array = NULL; int r; + ChainCacheItem *ci; assert(f); assert(test_object); + /* Start with the first array in the chain */ a = first; + + ci = ordered_hashmap_get(f->chain_cache, &first); + if (ci && n > ci->total) { + /* Ah, we have iterated this bisection array chain + * previously! Let's see if we can skip ahead in the + * chain, as far as the last time. But we can't jump + * backwards in the chain, so let's check that + * first. */ + + r = test_object(f, ci->begin, needle); + if (r < 0) + return r; + + if (r == TEST_LEFT) { + /* OK, what we are looking for is right of the + * begin of this EntryArray, so let's jump + * straight to previously cached array in the + * chain */ + + a = ci->array; + n -= ci->total; + t = ci->total; + last_index = ci->last_index; + } + } + while (a > 0) { uint64_t left, right, k, lp; @@ -1177,6 +1647,58 @@ static int generic_array_bisect(JournalFile *f, if (r == TEST_RIGHT) { left = 0; right -= 1; + + if (last_index != (uint64_t) -1) { + assert(last_index <= right); + + /* If we cached the last index we + * looked at, let's try to not to jump + * too wildly around and see if we can + * limit the range to look at early to + * the immediate neighbors of the last + * index we looked at. */ + + if (last_index > 0) { + uint64_t x = last_index - 1; + + p = le64toh(array->entry_array.items[x]); + if (p <= 0) + return -EBADMSG; + + r = test_object(f, p, needle); + if (r < 0) + return r; + + if (r == TEST_FOUND) + r = direction == DIRECTION_DOWN ? 
TEST_RIGHT : TEST_LEFT; + + if (r == TEST_RIGHT) + right = x; + else + left = x + 1; + } + + if (last_index < right) { + uint64_t y = last_index + 1; + + p = le64toh(array->entry_array.items[y]); + if (p <= 0) + return -EBADMSG; + + r = test_object(f, p, needle); + if (r < 0) + return r; + + if (r == TEST_FOUND) + r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT; + + if (r == TEST_RIGHT) + right = y; + else + left = y + 1; + } + } + for (;;) { if (left == right) { if (direction == DIRECTION_UP) @@ -1187,8 +1709,8 @@ static int generic_array_bisect(JournalFile *f, } assert(left < right); - i = (left + right) / 2; + p = le64toh(array->entry_array.items[i]); if (p <= 0) return -EBADMSG; @@ -1207,7 +1729,7 @@ static int generic_array_bisect(JournalFile *f, } } - if (k > n) { + if (k >= n) { if (direction == DIRECTION_UP) { i = n; subtract_one = true; @@ -1221,6 +1743,7 @@ static int generic_array_bisect(JournalFile *f, n -= k; t += k; + last_index = (uint64_t) -1; a = le64toh(array->entry_array.next_entry_array_offset); } @@ -1230,6 +1753,9 @@ found: if (subtract_one && t == 0 && i == 0) return 0; + /* Let's cache this item for the next invocation */ + chain_cache_put(f->chain_cache, ci, first, a, le64toh(array->entry_array.items[0]), t, subtract_one ? (i > 0 ? i-1 : (uint64_t) -1) : i); + if (subtract_one && i == 0) p = last_p; else if (subtract_one) @@ -1253,16 +1779,17 @@ found: return 1; } -static int generic_array_bisect_plus_one(JournalFile *f, - uint64_t extra, - uint64_t first, - uint64_t n, - uint64_t needle, - int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle), - direction_t direction, - Object **ret, - uint64_t *offset, - uint64_t *idx) { +static int generic_array_bisect_plus_one( + JournalFile *f, + uint64_t extra, + uint64_t first, + uint64_t n, + uint64_t needle, + int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle), + direction_t direction, + Object **ret, + uint64_t *offset, + uint64_t *idx) { int r; bool step_back = false; @@ -1325,7 +1852,7 @@ found: return 1; } -static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) { +_pure_ static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) { assert(f); assert(p > 0); @@ -1337,23 +1864,6 @@ static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) { return TEST_RIGHT; } -int journal_file_move_to_entry_by_offset( - JournalFile *f, - uint64_t p, - direction_t direction, - Object **ret, - uint64_t *offset) { - - return generic_array_bisect(f, - le64toh(f->header->entry_array_offset), - le64toh(f->header->n_entries), - p, - test_object_offset, - direction, - ret, offset, NULL); -} - - static int test_object_seqnum(JournalFile *f, uint64_t p, uint64_t needle) { Object *o; int r; @@ -1443,6 +1953,17 @@ static int test_object_monotonic(JournalFile *f, uint64_t p, uint64_t needle) { return TEST_RIGHT; } +static inline int find_data_object_by_boot_id( + JournalFile *f, + sd_id128_t boot_id, + Object **o, + uint64_t *b) { + char t[sizeof("_BOOT_ID=")-1 + 32 + 1] = "_BOOT_ID="; + + sd_id128_to_string(boot_id, t + 9); + return journal_file_find_data_object(f, t, sizeof(t) - 1, o, b); +} + int journal_file_move_to_entry_by_monotonic( JournalFile *f, sd_id128_t boot_id, @@ -1451,14 +1972,12 @@ int journal_file_move_to_entry_by_monotonic( Object **ret, uint64_t *offset) { - char t[9+32+1] = "_BOOT_ID="; Object *o; int r; assert(f); - sd_id128_to_string(boot_id, t + 9); - r = journal_file_find_data_object(f, t, strlen(t), &o, NULL); + r = 
find_data_object_by_boot_id(f, boot_id, &o, NULL); if (r < 0) return r; if (r == 0) @@ -1474,28 +1993,97 @@ int journal_file_move_to_entry_by_monotonic( ret, offset, NULL); } +void journal_file_reset_location(JournalFile *f) { + f->location_type = LOCATION_HEAD; + f->current_offset = 0; + f->current_seqnum = 0; + f->current_realtime = 0; + f->current_monotonic = 0; + zero(f->current_boot_id); + f->current_xor_hash = 0; +} + +void journal_file_save_location(JournalFile *f, direction_t direction, Object *o, uint64_t offset) { + f->last_direction = direction; + f->location_type = LOCATION_SEEK; + f->current_offset = offset; + f->current_seqnum = le64toh(o->entry.seqnum); + f->current_realtime = le64toh(o->entry.realtime); + f->current_monotonic = le64toh(o->entry.monotonic); + f->current_boot_id = o->entry.boot_id; + f->current_xor_hash = le64toh(o->entry.xor_hash); +} + +int journal_file_compare_locations(JournalFile *af, JournalFile *bf) { + assert(af); + assert(bf); + assert(af->location_type == LOCATION_SEEK); + assert(bf->location_type == LOCATION_SEEK); + + /* If contents and timestamps match, these entries are + * identical, even if the seqnum does not match */ + if (sd_id128_equal(af->current_boot_id, bf->current_boot_id) && + af->current_monotonic == bf->current_monotonic && + af->current_realtime == bf->current_realtime && + af->current_xor_hash == bf->current_xor_hash) + return 0; + + if (sd_id128_equal(af->header->seqnum_id, bf->header->seqnum_id)) { + + /* If this is from the same seqnum source, compare + * seqnums */ + if (af->current_seqnum < bf->current_seqnum) + return -1; + if (af->current_seqnum > bf->current_seqnum) + return 1; + + /* Wow! This is weird, different data but the same + * seqnums? Something is borked, but let's make the + * best of it and compare by time. */ + } + + if (sd_id128_equal(af->current_boot_id, bf->current_boot_id)) { + + /* If the boot id matches, compare monotonic time */ + if (af->current_monotonic < bf->current_monotonic) + return -1; + if (af->current_monotonic > bf->current_monotonic) + return 1; + } + + /* Otherwise, compare UTC time */ + if (af->current_realtime < bf->current_realtime) + return -1; + if (af->current_realtime > bf->current_realtime) + return 1; + + /* Finally, compare by contents */ + if (af->current_xor_hash < bf->current_xor_hash) + return -1; + if (af->current_xor_hash > bf->current_xor_hash) + return 1; + + return 0; +} + int journal_file_next_entry( JournalFile *f, - Object *o, uint64_t p, + uint64_t p, direction_t direction, Object **ret, uint64_t *offset) { - uint64_t i, n; + uint64_t i, n, ofs; int r; assert(f); - assert(p > 0 || !o); n = le64toh(f->header->n_entries); if (n <= 0) return 0; - if (!o) + if (p == 0) i = direction == DIRECTION_DOWN ? 
0 : n - 1; else { - if (o->object.type != OBJECT_ENTRY) - return -EINVAL; - r = generic_array_bisect(f, le64toh(f->header->entry_array_offset), le64toh(f->header->n_entries), @@ -1521,59 +2109,24 @@ int journal_file_next_entry( } /* And jump to it */ - return generic_array_get(f, - le64toh(f->header->entry_array_offset), - i, - ret, offset); -} - -int journal_file_skip_entry( - JournalFile *f, - Object *o, uint64_t p, - int64_t skip, - Object **ret, uint64_t *offset) { - - uint64_t i, n; - int r; - - assert(f); - assert(o); - assert(p > 0); - - if (o->object.type != OBJECT_ENTRY) - return -EINVAL; - - r = generic_array_bisect(f, - le64toh(f->header->entry_array_offset), - le64toh(f->header->n_entries), - p, - test_object_offset, - DIRECTION_DOWN, - NULL, NULL, - &i); + r = generic_array_get(f, + le64toh(f->header->entry_array_offset), + i, + ret, &ofs); if (r <= 0) return r; - /* Calculate new index */ - if (skip < 0) { - if ((uint64_t) -skip >= i) - i = 0; - else - i = i - (uint64_t) -skip; - } else - i += (uint64_t) skip; - - n = le64toh(f->header->n_entries); - if (n <= 0) + if (p > 0 && + (direction == DIRECTION_DOWN ? ofs <= p : ofs >= p)) { + log_debug("%s: entry array corrupted at entry %"PRIu64, + f->path, i); return -EBADMSG; + } - if (i >= n) - i = n-1; + if (offset) + *offset = ofs; - return generic_array_get(f, - le64toh(f->header->entry_array_offset), - i, - ret, offset); + return 1; } int journal_file_next_entry_for_data( @@ -1672,7 +2225,6 @@ int journal_file_move_to_entry_by_monotonic_for_data( direction_t direction, Object **ret, uint64_t *offset) { - char t[9+32+1] = "_BOOT_ID="; Object *o, *d; int r; uint64_t b, z; @@ -1680,8 +2232,7 @@ int journal_file_move_to_entry_by_monotonic_for_data( assert(f); /* First, seek by time */ - sd_id128_to_string(boot_id, t + 9); - r = journal_file_find_data_object(f, t, strlen(t), &o, &b); + r = find_data_object_by_boot_id(f, boot_id, &o, &b); if (r < 0) return r; if (r == 0) @@ -1747,8 +2298,6 @@ int journal_file_move_to_entry_by_monotonic_for_data( z = q; } - - return 0; } int journal_file_move_to_entry_by_seqnum_for_data( @@ -1814,7 +2363,7 @@ void journal_file_dump(JournalFile *f) { p = le64toh(f->header->header_size); while (p != 0) { - r = journal_file_move_to_object(f, -1, p, &o); + r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o); if (r < 0) goto fail; @@ -1828,11 +2377,15 @@ void journal_file_dump(JournalFile *f) { printf("Type: OBJECT_DATA\n"); break; + case OBJECT_FIELD: + printf("Type: OBJECT_FIELD\n"); + break; + case OBJECT_ENTRY: - printf("Type: OBJECT_ENTRY %llu %llu %llu\n", - (unsigned long long) le64toh(o->entry.seqnum), - (unsigned long long) le64toh(o->entry.monotonic), - (unsigned long long) le64toh(o->entry.realtime)); + printf("Type: OBJECT_ENTRY seqnum=%"PRIu64" monotonic=%"PRIu64" realtime=%"PRIu64"\n", + le64toh(o->entry.seqnum), + le64toh(o->entry.monotonic), + le64toh(o->entry.realtime)); break; case OBJECT_FIELD_HASH_TABLE: @@ -1848,13 +2401,19 @@ void journal_file_dump(JournalFile *f) { break; case OBJECT_TAG: - printf("Type: OBJECT_TAG %llu\n", - (unsigned long long) le64toh(o->tag.seqnum)); + printf("Type: OBJECT_TAG seqnum=%"PRIu64" epoch=%"PRIu64"\n", + le64toh(o->tag.seqnum), + le64toh(o->tag.epoch)); + break; + + default: + printf("Type: unknown (%u)\n", o->object.type); break; } - if (o->object.flags & OBJECT_COMPRESSED) - printf("Flags: COMPRESSED\n"); + if (o->object.flags & OBJECT_COMPRESSION_MASK) + printf("Flags: %s\n", + object_compressed_to_string(o->object.flags & 
OBJECT_COMPRESSION_MASK)); if (p == le64toh(f->header->tail_object_offset)) p = 0; @@ -1867,9 +2426,20 @@ fail: log_error("File corrupt"); } +static const char* format_timestamp_safe(char *buf, size_t l, usec_t t) { + const char *x; + + x = format_timestamp(buf, l, t); + if (x) + return x; + return " --- "; +} + void journal_file_print_header(JournalFile *f) { - char a[33], b[33], c[33]; - char x[FORMAT_TIMESTAMP_MAX], y[FORMAT_TIMESTAMP_MAX]; + char a[33], b[33], c[33], d[33]; + char x[FORMAT_TIMESTAMP_MAX], y[FORMAT_TIMESTAMP_MAX], z[FORMAT_TIMESTAMP_MAX]; + struct stat st; + char bytes[FORMAT_BYTES_MAX]; assert(f); @@ -1880,53 +2450,66 @@ void journal_file_print_header(JournalFile *f) { "Sequential Number ID: %s\n" "State: %s\n" "Compatible Flags:%s%s\n" - "Incompatible Flags:%s%s\n" - "Header size: %llu\n" - "Arena size: %llu\n" - "Data Hash Table Size: %llu\n" - "Field Hash Table Size: %llu\n" - "Objects: %llu\n" - "Entry Objects: %llu\n" + "Incompatible Flags:%s%s%s\n" + "Header size: %"PRIu64"\n" + "Arena size: %"PRIu64"\n" + "Data Hash Table Size: %"PRIu64"\n" + "Field Hash Table Size: %"PRIu64"\n" "Rotate Suggested: %s\n" - "Head Sequential Number: %llu\n" - "Tail Sequential Number: %llu\n" + "Head Sequential Number: %"PRIu64"\n" + "Tail Sequential Number: %"PRIu64"\n" "Head Realtime Timestamp: %s\n" - "Tail Realtime Timestamp: %s\n", + "Tail Realtime Timestamp: %s\n" + "Tail Monotonic Timestamp: %s\n" + "Objects: %"PRIu64"\n" + "Entry Objects: %"PRIu64"\n", f->path, sd_id128_to_string(f->header->file_id, a), sd_id128_to_string(f->header->machine_id, b), sd_id128_to_string(f->header->boot_id, c), - sd_id128_to_string(f->header->seqnum_id, c), - f->header->state == STATE_OFFLINE ? "offline" : - f->header->state == STATE_ONLINE ? "online" : - f->header->state == STATE_ARCHIVED ? "archived" : "unknown", - (f->header->compatible_flags & HEADER_COMPATIBLE_AUTHENTICATED) ? " AUTHENTICATED" : "", - (f->header->compatible_flags & ~HEADER_COMPATIBLE_AUTHENTICATED) ? " ???" : "", - (f->header->incompatible_flags & HEADER_INCOMPATIBLE_COMPRESSED) ? " COMPRESSED" : "", - (f->header->incompatible_flags & ~HEADER_INCOMPATIBLE_COMPRESSED) ? " ???" : "", - (unsigned long long) le64toh(f->header->header_size), - (unsigned long long) le64toh(f->header->arena_size), - (unsigned long long) le64toh(f->header->data_hash_table_size) / sizeof(HashItem), - (unsigned long long) le64toh(f->header->field_hash_table_size) / sizeof(HashItem), - (unsigned long long) le64toh(f->header->n_objects), - (unsigned long long) le64toh(f->header->n_entries), - yes_no(journal_file_rotate_suggested(f)), - (unsigned long long) le64toh(f->header->head_entry_seqnum), - (unsigned long long) le64toh(f->header->tail_entry_seqnum), - format_timestamp(x, sizeof(x), le64toh(f->header->head_entry_realtime)), - format_timestamp(y, sizeof(y), le64toh(f->header->tail_entry_realtime))); + sd_id128_to_string(f->header->seqnum_id, d), + f->header->state == STATE_OFFLINE ? "OFFLINE" : + f->header->state == STATE_ONLINE ? "ONLINE" : + f->header->state == STATE_ARCHIVED ? "ARCHIVED" : "UNKNOWN", + JOURNAL_HEADER_SEALED(f->header) ? " SEALED" : "", + (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_ANY) ? " ???" : "", + JOURNAL_HEADER_COMPRESSED_XZ(f->header) ? " COMPRESSED-XZ" : "", + JOURNAL_HEADER_COMPRESSED_LZ4(f->header) ? " COMPRESSED-LZ4" : "", + (le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_ANY) ? " ???" 
: "", + le64toh(f->header->header_size), + le64toh(f->header->arena_size), + le64toh(f->header->data_hash_table_size) / sizeof(HashItem), + le64toh(f->header->field_hash_table_size) / sizeof(HashItem), + yes_no(journal_file_rotate_suggested(f, 0)), + le64toh(f->header->head_entry_seqnum), + le64toh(f->header->tail_entry_seqnum), + format_timestamp_safe(x, sizeof(x), le64toh(f->header->head_entry_realtime)), + format_timestamp_safe(y, sizeof(y), le64toh(f->header->tail_entry_realtime)), + format_timespan(z, sizeof(z), le64toh(f->header->tail_entry_monotonic), USEC_PER_MSEC), + le64toh(f->header->n_objects), + le64toh(f->header->n_entries)); if (JOURNAL_HEADER_CONTAINS(f->header, n_data)) - printf("Data Objects: %llu\n" + printf("Data Objects: %"PRIu64"\n" "Data Hash Table Fill: %.1f%%\n", - (unsigned long long) le64toh(f->header->n_data), + le64toh(f->header->n_data), 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)))); if (JOURNAL_HEADER_CONTAINS(f->header, n_fields)) - printf("Field Objects: %llu\n" + printf("Field Objects: %"PRIu64"\n" "Field Hash Table Fill: %.1f%%\n", - (unsigned long long) le64toh(f->header->n_fields), + le64toh(f->header->n_fields), 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)))); + + if (JOURNAL_HEADER_CONTAINS(f->header, n_tags)) + printf("Tag Objects: %"PRIu64"\n", + le64toh(f->header->n_tags)); + if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays)) + printf("Entry Array Objects: %"PRIu64"\n", + le64toh(f->header->n_entry_arrays)); + + if (fstat(f->fd, &st) >= 0) + printf("Disk usage: %s\n", format_bytes(bytes, sizeof(bytes), (off_t) st.st_blocks * 512ULL)); } int journal_file_open( @@ -1934,23 +2517,26 @@ int journal_file_open( int flags, mode_t mode, bool compress, - bool authenticate, + bool seal, JournalMetrics *metrics, MMapCache *mmap_cache, JournalFile *template, JournalFile **ret) { + bool newly_created = false; JournalFile *f; + void *h; int r; - bool newly_created = false; assert(fname); + assert(ret); if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR) return -EINVAL; - if (!endswith(fname, ".journal")) + if (!endswith(fname, ".journal") && + !endswith(fname, ".journal~")) return -EINVAL; f = new0(JournalFile, 1); @@ -1963,16 +2549,19 @@ int journal_file_open( f->flags = flags; f->prot = prot_from_flags(flags); f->writable = (flags & O_ACCMODE) != O_RDONLY; - f->compress = compress; - f->authenticate = authenticate; +#if defined(HAVE_LZ4) + f->compress_lz4 = compress; +#elif defined(HAVE_XZ) + f->compress_xz = compress; +#endif +#ifdef HAVE_GCRYPT + f->seal = seal; +#endif if (mmap_cache) f->mmap = mmap_cache_ref(mmap_cache); else { - /* One context for each type, plus the zeroth catchall - * context. 
One fd for the file plus one for each type - * (which we need during verification */ - f->mmap = mmap_cache_new(_OBJECT_TYPE_MAX, 1 + _OBJECT_TYPE_MAX); + f->mmap = mmap_cache_new(); if (!f->mmap) { r = -ENOMEM; goto fail; @@ -1985,34 +2574,54 @@ int journal_file_open( goto fail; } + f->chain_cache = ordered_hashmap_new(&uint64_hash_ops); + if (!f->chain_cache) { + r = -ENOMEM; + goto fail; + } + f->fd = open(f->path, f->flags|O_CLOEXEC, f->mode); if (f->fd < 0) { r = -errno; goto fail; } - if (fstat(f->fd, &f->last_stat) < 0) { - r = -errno; + r = journal_file_fstat(f); + if (r < 0) goto fail; - } if (f->last_stat.st_size == 0 && f->writable) { - newly_created = true; + /* Let's attach the creation time to the journal file, + * so that the vacuuming code knows the age of this + * file even if the file might end up corrupted one + * day... Ideally we'd just use the creation time many + * file systems maintain for each file, but there is + * currently no usable API to query this, hence let's + * emulate this via extended attributes. If extended + * attributes are not supported we'll just skip this, + * and rely solely on mtime/atime/ctime of the file. */ + + fd_setcrtime(f->fd, now(CLOCK_REALTIME)); +#ifdef HAVE_GCRYPT /* Try to load the FSPRG state, and if we can't, then - * just don't do authentication */ - r = journal_file_load_fsprg(f); - if (r < 0) - f->authenticate = false; + * just don't do sealing */ + if (f->seal) { + r = journal_file_fss_load(f); + if (r < 0) + f->seal = false; + } +#endif r = journal_file_init_header(f, template); if (r < 0) goto fail; - if (fstat(f->fd, &f->last_stat) < 0) { - r = -errno; + r = journal_file_fstat(f); + if (r < 0) goto fail; - } + + newly_created = true; } if (f->last_stat.st_size < (off_t) HEADER_SIZE_MIN) { @@ -2020,24 +2629,27 @@ int journal_file_open( goto fail; } - f->header = mmap(NULL, PAGE_ALIGN(sizeof(Header)), prot_from_flags(flags), MAP_SHARED, f->fd, 0); - if (f->header == MAP_FAILED) { - f->header = NULL; + r = mmap_cache_get(f->mmap, f->fd, f->prot, CONTEXT_HEADER, true, 0, PAGE_ALIGN(sizeof(Header)), &f->last_stat, &h); + if (r < 0) { r = -errno; goto fail; } + f->header = h; + if (!newly_created) { r = journal_file_verify_header(f); if (r < 0) goto fail; } +#ifdef HAVE_GCRYPT if (!newly_created && f->writable) { - r = journal_file_load_fsprg(f); + r = journal_file_fss_load(f); if (r < 0) goto fail; } +#endif if (f->writable) { if (metrics) { @@ -2051,9 +2663,11 @@ int journal_file_open( goto fail; } - r = journal_file_setup_hmac(f); +#ifdef HAVE_GCRYPT + r = journal_file_hmac_setup(f); if (r < 0) goto fail; +#endif if (newly_created) { r = journal_file_setup_field_hash_table(f); @@ -2064,9 +2678,11 @@ int journal_file_open( if (r < 0) goto fail; +#ifdef HAVE_GCRYPT r = journal_file_append_first_tag(f); if (r < 0) goto fail; +#endif } r = journal_file_map_field_hash_table(f); @@ -2077,19 +2693,25 @@ int journal_file_open( if (r < 0) goto fail; - if (ret) - *ret = f; + if (mmap_cache_got_sigbus(f->mmap, f->fd)) { + r = -EIO; + goto fail; + } + *ret = f; return 0; fail: + if (f->fd >= 0 && mmap_cache_got_sigbus(f->mmap, f->fd)) + r = -EIO; + journal_file_close(f); return r; } -int journal_file_rotate(JournalFile **f, bool compress, bool authenticate) { - char *p; +int journal_file_rotate(JournalFile **f, bool compress, bool seal) { + _cleanup_free_ char *p = NULL; size_t l; JournalFile *old_file, *new_file = NULL; int r; @@ -2106,28 +2728,29 @@ int journal_file_rotate(JournalFile **f, bool compress, bool authenticate) { return 
-EINVAL; l = strlen(old_file->path); - - p = new(char, l + 1 + 32 + 1 + 16 + 1 + 16 + 1); - if (!p) + r = asprintf(&p, "%.*s@" SD_ID128_FORMAT_STR "-%016"PRIx64"-%016"PRIx64".journal", + (int) l - 8, old_file->path, + SD_ID128_FORMAT_VAL(old_file->header->seqnum_id), + le64toh((*f)->header->head_entry_seqnum), + le64toh((*f)->header->head_entry_realtime)); + if (r < 0) return -ENOMEM; - memcpy(p, old_file->path, l - 8); - p[l-8] = '@'; - sd_id128_to_string(old_file->header->seqnum_id, p + l - 8 + 1); - snprintf(p + l - 8 + 1 + 32, 1 + 16 + 1 + 16 + 8 + 1, - "-%016llx-%016llx.journal", - (unsigned long long) le64toh((*f)->header->tail_entry_seqnum), - (unsigned long long) le64toh((*f)->header->tail_entry_realtime)); - + /* Try to rename the file to the archived version. If the file + * already was deleted, we'll get ENOENT, let's ignore that + * case. */ r = rename(old_file->path, p); - free(p); - - if (r < 0) + if (r < 0 && errno != ENOENT) return -errno; old_file->header->state = STATE_ARCHIVED; - r = journal_file_open(old_file->path, old_file->flags, old_file->mode, compress, authenticate, NULL, old_file->mmap, old_file, &new_file); + /* Currently, btrfs is not very good with out write patterns + * and fragments heavily. Let's defrag our journal files when + * we archive them */ + old_file->defrag_on_close = true; + + r = journal_file_open(old_file->path, old_file->flags, old_file->mode, compress, seal, NULL, old_file->mmap, old_file, &new_file); journal_file_close(old_file); *f = new_file; @@ -2139,7 +2762,7 @@ int journal_file_open_reliably( int flags, mode_t mode, bool compress, - bool authenticate, + bool seal, JournalMetrics *metrics, MMapCache *mmap_cache, JournalFile *template, @@ -2147,16 +2770,18 @@ int journal_file_open_reliably( int r; size_t l; - char *p; + _cleanup_free_ char *p = NULL; - r = journal_file_open(fname, flags, mode, compress, authenticate, + r = journal_file_open(fname, flags, mode, compress, seal, metrics, mmap_cache, template, ret); if (r != -EBADMSG && /* corrupted */ r != -ENODATA && /* truncated */ r != -EHOSTDOWN && /* other machine */ r != -EPROTONOSUPPORT && /* incompatible feature */ r != -EBUSY && /* unclean shutdown */ - r != -ESHUTDOWN /* already archived */) + r != -ESHUTDOWN && /* already archived */ + r != -EIO && /* IO error, including SIGBUS on mmap */ + r != -EIDRM /* File has been deleted */) return r; if ((flags & O_ACCMODE) == O_RDONLY) @@ -2171,24 +2796,26 @@ int journal_file_open_reliably( /* The file is corrupted. Rotate it away and try it again (but only once) */ l = strlen(fname); - if (asprintf(&p, "%.*s@%016llx-%016llx.journal~", - (int) (l-8), fname, + if (asprintf(&p, "%.*s@%016llx-%016" PRIx64 ".journal~", + (int) l - 8, fname, (unsigned long long) now(CLOCK_REALTIME), - random_ull()) < 0) + random_u64()) < 0) return -ENOMEM; r = rename(fname, p); - free(p); if (r < 0) return -errno; + /* btrfs doesn't cope well with our write pattern and + * fragments heavily. 
Let's defrag all files we rotate */ + (void) btrfs_defrag(p); + log_warning("File %s corrupted or uncleanly shut down, renaming and replacing.", fname); - return journal_file_open(fname, flags, mode, compress, authenticate, + return journal_file_open(fname, flags, mode, compress, seal, metrics, mmap_cache, template, ret); } - int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint64_t p, uint64_t *seqnum, Object **ret, uint64_t *offset) { uint64_t i, n; uint64_t q, xor_hash = 0; @@ -2207,12 +2834,9 @@ int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint6 ts.monotonic = le64toh(o->entry.monotonic); ts.realtime = le64toh(o->entry.realtime); - if (to->tail_entry_monotonic_valid && - ts.monotonic < le64toh(to->header->tail_entry_monotonic)) - return -EINVAL; - n = journal_file_entry_n_items(o); - items = alloca(sizeof(EntryItem) * n); + /* alloca() can't take 0, hence let's allocate at least one */ + items = alloca(sizeof(EntryItem) * MAX(1u, n)); for (i = 0; i < n; i++) { uint64_t l, h; @@ -2238,12 +2862,14 @@ int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint6 if ((uint64_t) t != l) return -E2BIG; - if (o->object.flags & OBJECT_COMPRESSED) { -#ifdef HAVE_XZ - uint64_t rsize; + if (o->object.flags & OBJECT_COMPRESSION_MASK) { +#if defined(HAVE_XZ) || defined(HAVE_LZ4) + size_t rsize; - if (!uncompress_blob(o->data.payload, l, &from->compress_buffer, &from->compress_buffer_size, &rsize)) - return -EBADMSG; + r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK, + o->data.payload, l, &from->compress_buffer, &from->compress_buffer_size, &rsize, 0); + if (r < 0) + return r; data = from->compress_buffer; l = rsize; @@ -2266,7 +2892,12 @@ int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint6 return r; } - return journal_file_append_entry_internal(to, &ts, xor_hash, items, n, seqnum, ret, offset); + r = journal_file_append_entry_internal(to, &ts, xor_hash, items, n, seqnum, ret, offset); + + if (mmap_cache_got_sigbus(to->mmap, to->fd)) + return -EIO; + + return r; } void journal_default_metrics(JournalMetrics *m, int fd) { @@ -2328,7 +2959,7 @@ void journal_default_metrics(JournalMetrics *m, int fd) { if (m->keep_free == (uint64_t) -1) { if (fs_size > 0) { - m->keep_free = PAGE_ALIGN(fs_size / 20); /* 5% of file system size */ + m->keep_free = PAGE_ALIGN(fs_size * 3 / 20); /* 15% of file system size */ if (m->keep_free > DEFAULT_KEEP_FREE_UPPER) m->keep_free = DEFAULT_KEEP_FREE_UPPER; @@ -2337,11 +2968,11 @@ void journal_default_metrics(JournalMetrics *m, int fd) { m->keep_free = DEFAULT_KEEP_FREE; } - log_info("Fixed max_use=%s max_size=%s min_size=%s keep_free=%s", - format_bytes(a, sizeof(a), m->max_use), - format_bytes(b, sizeof(b), m->max_size), - format_bytes(c, sizeof(c), m->min_size), - format_bytes(d, sizeof(d), m->keep_free)); + log_debug("Fixed max_use=%s max_size=%s min_size=%s keep_free=%s", + format_bytes(a, sizeof(a), m->max_use), + format_bytes(b, sizeof(b), m->max_size), + format_bytes(c, sizeof(c), m->min_size), + format_bytes(d, sizeof(d), m->keep_free)); } int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *from, usec_t *to) { @@ -2366,7 +2997,6 @@ int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *from, usec_t * } int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, usec_t *from, usec_t *to) { - char t[9+32+1] = "_BOOT_ID="; Object *o; uint64_t p; int r; @@ -2374,9 +3004,7 @@ int 
journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, u assert(f); assert(from || to); - sd_id128_to_string(boot_id, t + 9); - - r = journal_file_find_data_object(f, t, strlen(t), &o, &p); + r = find_data_object_by_boot_id(f, boot_id, &o, &p); if (r <= 0) return r; @@ -2410,7 +3038,7 @@ int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, u return 1; } -bool journal_file_rotate_suggested(JournalFile *f) { +bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec) { assert(f); /* If we gained new header fields we gained new features, @@ -2428,25 +3056,42 @@ bool journal_file_rotate_suggested(JournalFile *f) { if (JOURNAL_HEADER_CONTAINS(f->header, n_data)) if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) { - log_debug("Data hash table of %s has a fill level at %.1f (%llu of %llu items, %llu file size, %llu bytes per hash table item), suggesting rotation.", + log_debug("Data hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items, %llu file size, %"PRIu64" bytes per hash table item), suggesting rotation.", f->path, 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))), - (unsigned long long) le64toh(f->header->n_data), - (unsigned long long) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)), - (unsigned long long) (f->last_stat.st_size), - (unsigned long long) (f->last_stat.st_size / le64toh(f->header->n_data))); + le64toh(f->header->n_data), + le64toh(f->header->data_hash_table_size) / sizeof(HashItem), + (unsigned long long) f->last_stat.st_size, + f->last_stat.st_size / le64toh(f->header->n_data)); return true; } if (JOURNAL_HEADER_CONTAINS(f->header, n_fields)) if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) { - log_debug("Field hash table of %s has a fill level at %.1f (%llu of %llu items), suggesting rotation.", + log_debug("Field hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items), suggesting rotation.", f->path, 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))), - (unsigned long long) le64toh(f->header->n_fields), - (unsigned long long) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))); + le64toh(f->header->n_fields), + le64toh(f->header->field_hash_table_size) / sizeof(HashItem)); return true; } + /* Are the data objects properly indexed by field objects? */ + if (JOURNAL_HEADER_CONTAINS(f->header, n_data) && + JOURNAL_HEADER_CONTAINS(f->header, n_fields) && + le64toh(f->header->n_data) > 0 && + le64toh(f->header->n_fields) == 0) + return true; + + if (max_file_usec > 0) { + usec_t t, h; + + h = le64toh(f->header->head_entry_realtime); + t = now(CLOCK_REALTIME); + + if (h > 0 && t > h + max_file_usec) + return true; + } + return false; }
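
The patch above is raw diff output; for orientation, what follows is a minimal, hypothetical sketch of how the writer-side API looks after these changes (journal_file_open() takes a "seal" flag in place of "authenticate" and now requires a non-NULL return pointer, journal_file_rotate_suggested() gained a max_file_usec argument, and journal_file_close() performs the offline transition itself). The sketch is not part of the patch, assumes it is compiled inside the elogind/systemd source tree where "journal-file.h" and the usual time/util helpers are available, abbreviates error handling, and uses a made-up path and mode purely for illustration.

/* Hypothetical in-tree caller, not part of this diff. */
#include <fcntl.h>
#include <string.h>
#include <sys/uio.h>

#include "journal-file.h"

static int write_one_entry(const char *path) {
        JournalFile *f = NULL;
        struct iovec iovec[2];
        uint64_t seqnum = 0;
        int r;

        /* "path" must end in ".journal". compress=true picks XZ or LZ4,
         * whichever was built in; seal=false; no shared metrics, mmap
         * cache or template file. */
        r = journal_file_open(path, O_CREAT|O_RDWR, 0640,
                              true, false, NULL, NULL, NULL, &f);
        if (r < 0)
                return r;

        /* Each iovec is one KEY=VALUE field. With this patch, appending
         * the data object also creates and links an OBJECT_FIELD for the
         * part of the payload before '='. */
        iovec[0].iov_base = (char*) "MESSAGE=hello";
        iovec[0].iov_len = strlen("MESSAGE=hello");
        iovec[1].iov_base = (char*) "PRIORITY=6";
        iovec[1].iov_len = strlen("PRIORITY=6");

        /* ts=NULL lets the function take the current dual timestamp. */
        r = journal_file_append_entry(f, NULL, iovec, 2, &seqnum, NULL, NULL);

        /* New second argument: also suggest rotation once the file is
         * older than the given age (0 disables the age check). */
        if (journal_file_rotate_suggested(f, 24 * USEC_PER_HOUR))
                (void) journal_file_rotate(&f, true, false);

        /* journal_file_close() now appends the final tag when sealing,
         * syncs and marks the header STATE_OFFLINE, and defragments the
         * file on btrfs if defrag_on_close was set. */
        journal_file_close(f);
        return r;
}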