X-Git-Url: http://www.chiark.greenend.org.uk/ucgi/~ianmdlvl/git?p=elogind.git;a=blobdiff_plain;f=src%2Fjournal%2Fjournal-file.c;h=2845e05ce0adcb7333e71c3c85cd5433a6727d12;hp=43fd4453a44accfde468c66c37f1e236072f43e2;hb=2a560338c471f47ca0caf6f1ec8c54a61e005d7f;hpb=3c1668da6202f1ead3d4d3981b89e9da1a0e98e3 diff --git a/src/journal/journal-file.c b/src/journal/journal-file.c index 43fd4453a..2845e05ce 100644 --- a/src/journal/journal-file.c +++ b/src/journal/journal-file.c @@ -26,11 +26,9 @@ #include #include #include +#include -#ifdef HAVE_XATTR -#include -#endif - +#include "btrfs-util.h" #include "journal-def.h" #include "journal-file.h" #include "journal-authenticate.h" @@ -44,7 +42,7 @@ #define COMPRESSION_SIZE_THRESHOLD (512ULL) /* This is the minimum journal file size */ -#define JOURNAL_FILE_SIZE_MIN (64ULL*1024ULL) /* 64 KiB */ +#define JOURNAL_FILE_SIZE_MIN (4ULL*1024ULL*1024ULL) /* 4 MiB */ /* These are the lower and upper bounds if we deduce the max_use value * from the file system size */ @@ -65,6 +63,71 @@ /* n_data was the first entry we added after the initial file format design */ #define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data)) +/* How many entries to keep in the entry array chain cache at max */ +#define CHAIN_CACHE_MAX 20 + +/* How much to increase the journal file size at once each time we allocate something new. */ +#define FILE_SIZE_INCREASE (8ULL*1024ULL*1024ULL) /* 8MB */ + +/* Reread fstat() of the file for detecting deletions at least this often */ +#define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC) + +/* The mmap context to use for the header we pick as one above the last defined typed */ +#define CONTEXT_HEADER _OBJECT_TYPE_MAX + +static int journal_file_set_online(JournalFile *f) { + assert(f); + + if (!f->writable) + return -EPERM; + + if (!(f->fd >= 0 && f->header)) + return -EINVAL; + + if (mmap_cache_got_sigbus(f->mmap, f->fd)) + return -EIO; + + switch(f->header->state) { + case STATE_ONLINE: + return 0; + + case STATE_OFFLINE: + f->header->state = STATE_ONLINE; + fsync(f->fd); + return 0; + + default: + return -EINVAL; + } +} + +int journal_file_set_offline(JournalFile *f) { + assert(f); + + if (!f->writable) + return -EPERM; + + if (!(f->fd >= 0 && f->header)) + return -EINVAL; + + if (f->header->state != STATE_ONLINE) + return 0; + + fsync(f->fd); + + if (mmap_cache_got_sigbus(f->mmap, f->fd)) + return -EIO; + + f->header->state = STATE_OFFLINE; + + if (mmap_cache_got_sigbus(f->mmap, f->fd)) + return -EIO; + + fsync(f->fd); + + return 0; +} + void journal_file_close(JournalFile *f) { assert(f); @@ -74,30 +137,32 @@ void journal_file_close(JournalFile *f) { journal_file_append_tag(f); #endif - /* Sync everything to disk, before we mark the file offline */ + journal_file_set_offline(f); + if (f->mmap && f->fd >= 0) mmap_cache_close_fd(f->mmap, f->fd); - if (f->writable && f->fd >= 0) - fdatasync(f->fd); + if (f->fd >= 0 && f->defrag_on_close) { - if (f->header) { - /* Mark the file offline. Don't override the archived state if it already is set */ - if (f->writable && f->header->state == STATE_ONLINE) - f->header->state = STATE_OFFLINE; + /* Be friendly to btrfs: turn COW back on again now, + * and defragment the file. We won't write to the file + * ever again, hence remove all fragmentation, and + * reenable all the good bits COW usually provides + * (such as data checksumming). 
*/ - munmap(f->header, PAGE_ALIGN(sizeof(Header))); + (void) chattr_fd(f->fd, false, FS_NOCOW_FL); + (void) btrfs_defrag_fd(f->fd); } - if (f->fd >= 0) - close_nointr_nofail(f->fd); - + safe_close(f->fd); free(f->path); if (f->mmap) mmap_cache_unref(f->mmap); -#ifdef HAVE_XZ + ordered_hashmap_free_free(f->chain_cache); + +#if defined(HAVE_XZ) || defined(HAVE_LZ4) free(f->compress_buffer); #endif @@ -117,21 +182,21 @@ void journal_file_close(JournalFile *f) { } static int journal_file_init_header(JournalFile *f, JournalFile *template) { - Header h; + Header h = {}; ssize_t k; int r; assert(f); - zero(h); memcpy(h.signature, HEADER_SIGNATURE, 8); h.header_size = htole64(ALIGN64(sizeof(h))); - h.incompatible_flags = - htole32(f->compress ? HEADER_INCOMPATIBLE_COMPRESSED : 0); + h.incompatible_flags |= htole32( + f->compress_xz * HEADER_INCOMPATIBLE_COMPRESSED_XZ | + f->compress_lz4 * HEADER_INCOMPATIBLE_COMPRESSED_LZ4); - h.compatible_flags = - htole32(f->seal ? HEADER_COMPATIBLE_SEALED : 0); + h.compatible_flags = htole32( + f->seal * HEADER_COMPATIBLE_SEALED); r = sd_id128_randomize(&h.file_id); if (r < 0) @@ -154,8 +219,8 @@ static int journal_file_init_header(JournalFile *f, JournalFile *template) { } static int journal_file_refresh_header(JournalFile *f) { - int r; sd_id128_t boot_id; + int r; assert(f); @@ -172,16 +237,17 @@ static int journal_file_refresh_header(JournalFile *f) { f->header->boot_id = boot_id; - f->header->state = STATE_ONLINE; + r = journal_file_set_online(f); /* Sync the online state to disk */ - msync(f->header, PAGE_ALIGN(sizeof(Header)), MS_SYNC); - fdatasync(f->fd); + fsync(f->fd); - return 0; + return r; } static int journal_file_verify_header(JournalFile *f) { + uint32_t flags; + assert(f); if (memcmp(f->header->signature, HEADER_SIGNATURE, 8)) @@ -189,24 +255,30 @@ static int journal_file_verify_header(JournalFile *f) { /* In both read and write mode we refuse to open files with * incompatible flags we don't know */ -#ifdef HAVE_XZ - if ((le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_COMPRESSED) != 0) + flags = le32toh(f->header->incompatible_flags); + if (flags & ~HEADER_INCOMPATIBLE_SUPPORTED) { + if (flags & ~HEADER_INCOMPATIBLE_ANY) + log_debug("Journal file %s has unknown incompatible flags %"PRIx32, + f->path, flags & ~HEADER_INCOMPATIBLE_ANY); + flags = (flags & HEADER_INCOMPATIBLE_ANY) & ~HEADER_INCOMPATIBLE_SUPPORTED; + if (flags) + log_debug("Journal file %s uses incompatible flags %"PRIx32 + " disabled at compilation time.", f->path, flags); return -EPROTONOSUPPORT; -#else - if (f->header->incompatible_flags != 0) - return -EPROTONOSUPPORT; -#endif + } /* When open for writing we refuse to open files with * compatible flags, too */ - if (f->writable) { -#ifdef HAVE_GCRYPT - if ((le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_SEALED) != 0) - return -EPROTONOSUPPORT; -#else - if (f->header->compatible_flags != 0) - return -EPROTONOSUPPORT; -#endif + flags = le32toh(f->header->compatible_flags); + if (f->writable && (flags & ~HEADER_COMPATIBLE_SUPPORTED)) { + if (flags & ~HEADER_COMPATIBLE_ANY) + log_debug("Journal file %s has unknown compatible flags %"PRIx32, + f->path, flags & ~HEADER_COMPATIBLE_ANY); + flags = (flags & HEADER_COMPATIBLE_ANY) & ~HEADER_COMPATIBLE_SUPPORTED; + if (flags) + log_debug("Journal file %s uses compatible flags %"PRIx32 + " disabled at compilation time.", f->path, flags); + return -EPROTONOSUPPORT; } if (f->header->state >= _STATE_MAX) @@ -231,12 +303,6 @@ static int 
journal_file_verify_header(JournalFile *f) { !VALID64(le64toh(f->header->entry_array_offset))) return -ENODATA; - if (le64toh(f->header->data_hash_table_offset) < le64toh(f->header->header_size) || - le64toh(f->header->field_hash_table_offset) < le64toh(f->header->header_size) || - le64toh(f->header->tail_object_offset) < le64toh(f->header->header_size) || - le64toh(f->header->entry_array_offset) < le64toh(f->header->header_size)) - return -ENODATA; - if (f->writable) { uint8_t state; sd_id128_t machine_id; @@ -257,18 +323,35 @@ static int journal_file_verify_header(JournalFile *f) { } else if (state == STATE_ARCHIVED) return -ESHUTDOWN; else if (state != STATE_OFFLINE) { - log_debug("Journal file %s has unknown state %u.", f->path, state); + log_debug("Journal file %s has unknown state %i.", f->path, state); return -EBUSY; } } - f->compress = JOURNAL_HEADER_COMPRESSED(f->header); + f->compress_xz = JOURNAL_HEADER_COMPRESSED_XZ(f->header); + f->compress_lz4 = JOURNAL_HEADER_COMPRESSED_LZ4(f->header); f->seal = JOURNAL_HEADER_SEALED(f->header); return 0; } +static int journal_file_fstat(JournalFile *f) { + assert(f); + assert(f->fd >= 0); + + if (fstat(f->fd, &f->last_stat) < 0) + return -errno; + + f->last_stat_usec = now(CLOCK_MONOTONIC); + + /* Refuse appending to files that are already deleted */ + if (f->last_stat.st_nlink <= 0) + return -EIDRM; + + return 0; +} + static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) { uint64_t old_size, new_size; int r; @@ -279,6 +362,9 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) * for sure, since we always call posix_fallocate() * ourselves */ + if (mmap_cache_got_sigbus(f->mmap, f->fd)) + return -EIO; + old_size = le64toh(f->header->header_size) + le64toh(f->header->arena_size); @@ -287,15 +373,26 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) if (new_size < le64toh(f->header->header_size)) new_size = le64toh(f->header->header_size); - if (new_size <= old_size) - return 0; + if (new_size <= old_size) { + + /* We already pre-allocated enough space, but before + * we write to it, let's check with fstat() if the + * file got deleted, in order make sure we don't throw + * away the data immediately. Don't check fstat() for + * all writes though, but only once ever 10s. */ - if (f->metrics.max_size > 0 && - new_size > f->metrics.max_size) + if (f->last_stat_usec + LAST_STAT_REFRESH_USEC > now(CLOCK_MONOTONIC)) + return 0; + + return journal_file_fstat(f); + } + + /* Allocate more space. */ + + if (f->metrics.max_size > 0 && new_size > f->metrics.max_size) return -E2BIG; - if (new_size > f->metrics.min_size && - f->metrics.keep_free > 0) { + if (new_size > f->metrics.min_size && f->metrics.keep_free > 0) { struct statvfs svfs; if (fstatvfs(f->fd, &svfs) >= 0) { @@ -313,6 +410,11 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) } } + /* Increase by larger blocks at once */ + new_size = ((new_size+FILE_SIZE_INCREASE-1) / FILE_SIZE_INCREASE) * FILE_SIZE_INCREASE; + if (f->metrics.max_size > 0 && new_size > f->metrics.max_size) + new_size = f->metrics.max_size; + /* Note that the glibc fallocate() fallback is very inefficient, hence we try to minimize the allocation area as we can. 
*/ @@ -320,15 +422,21 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) if (r != 0) return -r; - if (fstat(f->fd, &f->last_stat) < 0) - return -errno; - f->header->arena_size = htole64(new_size - le64toh(f->header->header_size)); - return 0; + return journal_file_fstat(f); } -static int journal_file_move_to(JournalFile *f, int context, bool keep_always, uint64_t offset, uint64_t size, void **ret) { +static unsigned type_to_context(ObjectType type) { + /* One context for each type, plus one catch-all for the rest */ + assert_cc(_OBJECT_TYPE_MAX <= MMAP_CACHE_MAX_CONTEXTS); + assert_cc(CONTEXT_HEADER < MMAP_CACHE_MAX_CONTEXTS); + return type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX ? type : 0; +} + +static int journal_file_move_to(JournalFile *f, ObjectType type, bool keep_always, uint64_t offset, uint64_t size, void **ret) { + int r; + assert(f); assert(ret); @@ -340,17 +448,20 @@ static int journal_file_move_to(JournalFile *f, int context, bool keep_always, u /* Hmm, out of range? Let's refresh the fstat() data * first, before we trust that check. */ - if (fstat(f->fd, &f->last_stat) < 0 || - offset + size > (uint64_t) f->last_stat.st_size) + r = journal_file_fstat(f); + if (r < 0) + return r; + + if (offset + size > (uint64_t) f->last_stat.st_size) return -EADDRNOTAVAIL; } - return mmap_cache_get(f->mmap, f->fd, f->prot, context, keep_always, offset, size, &f->last_stat, ret); + return mmap_cache_get(f->mmap, f->fd, f->prot, type_to_context(type), keep_always, offset, size, &f->last_stat, ret); } static uint64_t minimum_header_size(Object *o) { - static uint64_t table[] = { + static const uint64_t table[] = { [OBJECT_DATA] = sizeof(DataObject), [OBJECT_FIELD] = sizeof(FieldObject), [OBJECT_ENTRY] = sizeof(EntryObject), @@ -366,12 +477,11 @@ static uint64_t minimum_header_size(Object *o) { return table[o->object.type]; } -int journal_file_move_to_object(JournalFile *f, int type, uint64_t offset, Object **ret) { +int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset, Object **ret) { int r; void *t; Object *o; uint64_t s; - unsigned context; assert(f); assert(ret); @@ -380,10 +490,7 @@ int journal_file_move_to_object(JournalFile *f, int type, uint64_t offset, Objec if (!VALID64(offset)) return -EFAULT; - /* One context for each type, plus one catch-all for the rest */ - context = type > 0 && type < _OBJECT_TYPE_MAX ? 
type : 0; - - r = journal_file_move_to(f, context, false, offset, sizeof(ObjectHeader), &t); + r = journal_file_move_to(f, type, false, offset, sizeof(ObjectHeader), &t); if (r < 0) return r; @@ -399,11 +506,11 @@ int journal_file_move_to_object(JournalFile *f, int type, uint64_t offset, Objec if (s < minimum_header_size(o)) return -EBADMSG; - if (type > 0 && o->object.type != type) + if (type > OBJECT_UNUSED && o->object.type != type) return -EBADMSG; if (s > sizeof(ObjectHeader)) { - r = journal_file_move_to(f, o->object.type, false, offset, s, &t); + r = journal_file_move_to(f, type, false, offset, s, &t); if (r < 0) return r; @@ -440,23 +547,27 @@ static uint64_t journal_file_entry_seqnum(JournalFile *f, uint64_t *seqnum) { return r; } -int journal_file_append_object(JournalFile *f, int type, uint64_t size, Object **ret, uint64_t *offset) { +int journal_file_append_object(JournalFile *f, ObjectType type, uint64_t size, Object **ret, uint64_t *offset) { int r; uint64_t p; Object *tail, *o; void *t; assert(f); - assert(type > 0 && type < _OBJECT_TYPE_MAX); + assert(type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX); assert(size >= sizeof(ObjectHeader)); assert(offset); assert(ret); + r = journal_file_set_online(f); + if (r < 0) + return r; + p = le64toh(f->header->tail_object_offset); if (p == 0) p = le64toh(f->header->header_size); else { - r = journal_file_move_to_object(f, -1, p, &tail); + r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &tail); if (r < 0) return r; @@ -502,7 +613,7 @@ static int journal_file_setup_data_hash_table(JournalFile *f) { if (s < DEFAULT_DATA_HASH_TABLE_SIZE) s = DEFAULT_DATA_HASH_TABLE_SIZE; - log_debug("Reserving %llu entries in hash table.", (unsigned long long) (s / sizeof(HashItem))); + log_debug("Reserving %"PRIu64" entries in hash table.", s / sizeof(HashItem)); r = journal_file_append_object(f, OBJECT_DATA_HASH_TABLE, @@ -511,7 +622,7 @@ static int journal_file_setup_data_hash_table(JournalFile *f) { if (r < 0) return r; - memset(o->hash_table.items, 0, s); + memzero(o->hash_table.items, s); f->header->data_hash_table_offset = htole64(p + offsetof(Object, hash_table.items)); f->header->data_hash_table_size = htole64(s); @@ -537,7 +648,7 @@ static int journal_file_setup_field_hash_table(JournalFile *f) { if (r < 0) return r; - memset(o->hash_table.items, 0, s); + memzero(o->hash_table.items, s); f->header->field_hash_table_offset = htole64(p + offsetof(Object, hash_table.items)); f->header->field_hash_table_size = htole64(s); @@ -595,7 +706,7 @@ static int journal_file_link_field( uint64_t offset, uint64_t hash) { - uint64_t p, h; + uint64_t p, h, m; int r; assert(f); @@ -605,11 +716,14 @@ static int journal_file_link_field( if (o->object.type != OBJECT_FIELD) return -EINVAL; - /* This might alter the window we are looking at */ + m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem); + if (m <= 0) + return -EBADMSG; + /* This might alter the window we are looking at */ o->field.next_hash_offset = o->field.head_data_offset = 0; - h = hash % (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)); + h = hash % m; p = le64toh(f->field_hash_table[h].tail_hash_offset); if (p == 0) f->field_hash_table[h].head_hash_offset = htole64(offset); @@ -635,7 +749,7 @@ static int journal_file_link_data( uint64_t offset, uint64_t hash) { - uint64_t p, h; + uint64_t p, h, m; int r; assert(f); @@ -645,13 +759,16 @@ static int journal_file_link_data( if (o->object.type != OBJECT_DATA) return -EINVAL; - /* This might alter the window we are looking at 
*/ + m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem); + if (m <= 0) + return -EBADMSG; + /* This might alter the window we are looking at */ o->data.next_hash_offset = o->data.next_field_offset = 0; o->data.entry_offset = o->data.entry_array_offset = 0; o->data.n_entries = 0; - h = hash % (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)); + h = hash % m; p = le64toh(f->data_hash_table[h].tail_hash_offset); if (p == 0) /* Only entry in the hash table is easy */ @@ -680,7 +797,7 @@ int journal_file_find_field_object_with_hash( const void *field, uint64_t size, uint64_t hash, Object **ret, uint64_t *offset) { - uint64_t p, osize, h; + uint64_t p, osize, h, m; int r; assert(f); @@ -688,10 +805,12 @@ int journal_file_find_field_object_with_hash( osize = offsetof(Object, field.payload) + size; - if (f->header->field_hash_table_size == 0) + m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem); + + if (m <= 0) return -EBADMSG; - h = hash % (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)); + h = hash % m; p = le64toh(f->field_hash_table[h].head_hash_offset); while (p > 0) { @@ -741,7 +860,7 @@ int journal_file_find_data_object_with_hash( const void *data, uint64_t size, uint64_t hash, Object **ret, uint64_t *offset) { - uint64_t p, osize, h; + uint64_t p, osize, h, m; int r; assert(f); @@ -749,10 +868,11 @@ int journal_file_find_data_object_with_hash( osize = offsetof(Object, data.payload) + size; - if (f->header->data_hash_table_size == 0) + m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem); + if (m <= 0) return -EBADMSG; - h = hash % (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)); + h = hash % m; p = le64toh(f->data_hash_table[h].head_hash_offset); while (p > 0) { @@ -765,9 +885,10 @@ int journal_file_find_data_object_with_hash( if (le64toh(o->data.hash) != hash) goto next; - if (o->object.flags & OBJECT_COMPRESSED) { -#ifdef HAVE_XZ - uint64_t l, rsize; + if (o->object.flags & OBJECT_COMPRESSION_MASK) { +#if defined(HAVE_XZ) || defined(HAVE_LZ4) + uint64_t l; + size_t rsize; l = le64toh(o->object.size); if (l <= offsetof(Object, data.payload)) @@ -775,8 +896,10 @@ int journal_file_find_data_object_with_hash( l -= offsetof(Object, data.payload); - if (!uncompress_blob(o->data.payload, l, &f->compress_buffer, &f->compress_buffer_size, &rsize)) - return -EBADMSG; + r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK, + o->data.payload, l, &f->compress_buffer, &f->compress_buffer_size, &rsize, 0); + if (r < 0) + return r; if (rsize == size && memcmp(f->compress_buffer, data, size) == 0) { @@ -792,7 +915,6 @@ int journal_file_find_data_object_with_hash( #else return -EPROTONOSUPPORT; #endif - } else if (le64toh(o->object.size) == osize && memcmp(o->data.payload, data, size) == 0) { @@ -860,6 +982,8 @@ static int journal_file_append_field( osize = offsetof(Object, field.payload) + size; r = journal_file_append_object(f, OBJECT_FIELD, osize, &o, &p); + if (r < 0) + return r; o->field.hash = htole64(hash); memcpy(o->field.payload, field, size); @@ -897,8 +1021,7 @@ static int journal_file_append_data( uint64_t hash, p; uint64_t osize; Object *o; - int r; - bool compressed = false; + int r, compression = 0; const void *eq; assert(f); @@ -927,23 +1050,24 @@ static int journal_file_append_data( o->data.hash = htole64(hash); -#ifdef HAVE_XZ - if (f->compress && +#if defined(HAVE_XZ) || defined(HAVE_LZ4) + if (f->compress_xz && size >= COMPRESSION_SIZE_THRESHOLD) { - uint64_t rsize; + size_t rsize; - compressed = 
compress_blob(data, size, o->data.payload, &rsize); + compression = compress_blob(data, size, o->data.payload, &rsize); - if (compressed) { + if (compression) { o->object.size = htole64(offsetof(Object, data.payload) + rsize); - o->object.flags |= OBJECT_COMPRESSED; + o->object.flags |= compression; - log_debug("Compressed data object %lu -> %lu", (unsigned long) size, (unsigned long) rsize); + log_debug("Compressed data object %"PRIu64" -> %zu using %s", + size, rsize, object_compressed_to_string(compression)); } } #endif - if (!compressed && size > 0) + if (!compression && size > 0) memcpy(o->data.payload, data, size); r = journal_file_link_data(f, o, p, hash); @@ -956,10 +1080,13 @@ static int journal_file_append_data( if (r < 0) return r; - eq = memchr(data, '=', size); + if (!data) + eq = NULL; + else + eq = memchr(data, '=', size); if (eq && eq > data) { + Object *fo = NULL; uint64_t fp; - Object *fo; /* Create field object ... */ r = journal_file_append_field(f, data, (uint8_t*) eq - (uint8_t*) data, &fo, &fp); @@ -1159,7 +1286,7 @@ static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) { if (r < 0) return r; - /* log_debug("=> %s seqnr=%lu n_entries=%lu", f->path, (unsigned long) o->entry.seqnum, (unsigned long) f->header->n_entries); */ + /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */ if (f->header->head_entry_realtime == 0) f->header->head_entry_realtime = o->entry.realtime; @@ -1239,7 +1366,7 @@ void journal_file_post_change(JournalFile *f) { __sync_synchronize(); if (ftruncate(f->fd, f->last_stat.st_size) < 0) - log_error("Failed to truncate file to its own size: %m"); + log_error_errno(errno, "Failed to truncate file to its own size: %m"); } static int entry_item_cmp(const void *_a, const void *_b) { @@ -1262,9 +1389,6 @@ int journal_file_append_entry(JournalFile *f, const dual_timestamp *ts, const st assert(f); assert(iovec || n_iovec == 0); - if (!f->writable) - return -EPERM; - if (!ts) { dual_timestamp_get(&_ts); ts = &_ts; @@ -1281,7 +1405,7 @@ int journal_file_append_entry(JournalFile *f, const dual_timestamp *ts, const st #endif /* alloca() can't take 0, hence let's allocate at least one */ - items = alloca(sizeof(EntryItem) * MAX(1, n_iovec)); + items = alloca(sizeof(EntryItem) * MAX(1u, n_iovec)); for (i = 0; i < n_iovec; i++) { uint64_t p; @@ -1298,46 +1422,116 @@ int journal_file_append_entry(JournalFile *f, const dual_timestamp *ts, const st /* Order by the position on disk, in order to improve seek * times for rotating media. 
*/ - qsort(items, n_iovec, sizeof(EntryItem), entry_item_cmp); + qsort_safe(items, n_iovec, sizeof(EntryItem), entry_item_cmp); r = journal_file_append_entry_internal(f, ts, xor_hash, items, n_iovec, seqnum, ret, offset); + /* If the memory mapping triggered a SIGBUS then we return an + * IO error and ignore the error code passed down to us, since + * it is very likely just an effect of a nullified replacement + * mapping page */ + + if (mmap_cache_got_sigbus(f->mmap, f->fd)) + r = -EIO; + journal_file_post_change(f); return r; } -static int generic_array_get(JournalFile *f, - uint64_t first, - uint64_t i, - Object **ret, uint64_t *offset) { +typedef struct ChainCacheItem { + uint64_t first; /* the array at the beginning of the chain */ + uint64_t array; /* the cached array */ + uint64_t begin; /* the first item in the cached array */ + uint64_t total; /* the total number of items in all arrays before this one in the chain */ + uint64_t last_index; /* the last index we looked at, to optimize locality when bisecting */ +} ChainCacheItem; + +static void chain_cache_put( + OrderedHashmap *h, + ChainCacheItem *ci, + uint64_t first, + uint64_t array, + uint64_t begin, + uint64_t total, + uint64_t last_index) { + + if (!ci) { + /* If the chain item to cache for this chain is the + * first one it's not worth caching anything */ + if (array == first) + return; + + if (ordered_hashmap_size(h) >= CHAIN_CACHE_MAX) { + ci = ordered_hashmap_steal_first(h); + assert(ci); + } else { + ci = new(ChainCacheItem, 1); + if (!ci) + return; + } + + ci->first = first; + + if (ordered_hashmap_put(h, &ci->first, ci) < 0) { + free(ci); + return; + } + } else + assert(ci->first == first); + + ci->array = array; + ci->begin = begin; + ci->total = total; + ci->last_index = last_index; +} + +static int generic_array_get( + JournalFile *f, + uint64_t first, + uint64_t i, + Object **ret, uint64_t *offset) { Object *o; - uint64_t p = 0, a; + uint64_t p = 0, a, t = 0; int r; + ChainCacheItem *ci; assert(f); a = first; + + /* Try the chain cache first */ + ci = ordered_hashmap_get(f->chain_cache, &first); + if (ci && i > ci->total) { + a = ci->array; + i -= ci->total; + t = ci->total; + } + while (a > 0) { - uint64_t n; + uint64_t k; r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o); if (r < 0) return r; - n = journal_file_entry_array_n_items(o); - if (i < n) { + k = journal_file_entry_array_n_items(o); + if (i < k) { p = le64toh(o->entry_array.items[i]); - break; + goto found; } - i -= n; + i -= k; + t += k; a = le64toh(o->entry_array.next_entry_array_offset); } - if (a <= 0 || p <= 0) - return 0; + return 0; + +found: + /* Let's cache this item for the next invocation */ + chain_cache_put(f->chain_cache, ci, first, a, le64toh(o->entry_array.items[0]), t, i); r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o); if (r < 0) @@ -1352,11 +1546,12 @@ static int generic_array_get(JournalFile *f, return 1; } -static int generic_array_get_plus_one(JournalFile *f, - uint64_t extra, - uint64_t first, - uint64_t i, - Object **ret, uint64_t *offset) { +static int generic_array_get_plus_one( + JournalFile *f, + uint64_t extra, + uint64_t first, + uint64_t i, + Object **ret, uint64_t *offset) { Object *o; @@ -1387,25 +1582,54 @@ enum { TEST_RIGHT }; -static int generic_array_bisect(JournalFile *f, - uint64_t first, - uint64_t n, - uint64_t needle, - int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle), - direction_t direction, - Object **ret, - uint64_t *offset, - uint64_t *idx) { - - uint64_t a, p, t = 0, i = 
0, last_p = 0; +static int generic_array_bisect( + JournalFile *f, + uint64_t first, + uint64_t n, + uint64_t needle, + int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle), + direction_t direction, + Object **ret, + uint64_t *offset, + uint64_t *idx) { + + uint64_t a, p, t = 0, i = 0, last_p = 0, last_index = (uint64_t) -1; bool subtract_one = false; Object *o, *array = NULL; int r; + ChainCacheItem *ci; assert(f); assert(test_object); + /* Start with the first array in the chain */ a = first; + + ci = ordered_hashmap_get(f->chain_cache, &first); + if (ci && n > ci->total) { + /* Ah, we have iterated this bisection array chain + * previously! Let's see if we can skip ahead in the + * chain, as far as the last time. But we can't jump + * backwards in the chain, so let's check that + * first. */ + + r = test_object(f, ci->begin, needle); + if (r < 0) + return r; + + if (r == TEST_LEFT) { + /* OK, what we are looking for is right of the + * begin of this EntryArray, so let's jump + * straight to previously cached array in the + * chain */ + + a = ci->array; + n -= ci->total; + t = ci->total; + last_index = ci->last_index; + } + } + while (a > 0) { uint64_t left, right, k, lp; @@ -1433,6 +1657,58 @@ static int generic_array_bisect(JournalFile *f, if (r == TEST_RIGHT) { left = 0; right -= 1; + + if (last_index != (uint64_t) -1) { + assert(last_index <= right); + + /* If we cached the last index we + * looked at, let's try to not to jump + * too wildly around and see if we can + * limit the range to look at early to + * the immediate neighbors of the last + * index we looked at. */ + + if (last_index > 0) { + uint64_t x = last_index - 1; + + p = le64toh(array->entry_array.items[x]); + if (p <= 0) + return -EBADMSG; + + r = test_object(f, p, needle); + if (r < 0) + return r; + + if (r == TEST_FOUND) + r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT; + + if (r == TEST_RIGHT) + right = x; + else + left = x + 1; + } + + if (last_index < right) { + uint64_t y = last_index + 1; + + p = le64toh(array->entry_array.items[y]); + if (p <= 0) + return -EBADMSG; + + r = test_object(f, p, needle); + if (r < 0) + return r; + + if (r == TEST_FOUND) + r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT; + + if (r == TEST_RIGHT) + right = y; + else + left = y + 1; + } + } + for (;;) { if (left == right) { if (direction == DIRECTION_UP) @@ -1443,8 +1719,8 @@ static int generic_array_bisect(JournalFile *f, } assert(left < right); - i = (left + right) / 2; + p = le64toh(array->entry_array.items[i]); if (p <= 0) return -EBADMSG; @@ -1463,7 +1739,7 @@ static int generic_array_bisect(JournalFile *f, } } - if (k > n) { + if (k >= n) { if (direction == DIRECTION_UP) { i = n; subtract_one = true; @@ -1477,6 +1753,7 @@ static int generic_array_bisect(JournalFile *f, n -= k; t += k; + last_index = (uint64_t) -1; a = le64toh(array->entry_array.next_entry_array_offset); } @@ -1486,6 +1763,9 @@ found: if (subtract_one && t == 0 && i == 0) return 0; + /* Let's cache this item for the next invocation */ + chain_cache_put(f->chain_cache, ci, first, a, le64toh(array->entry_array.items[0]), t, subtract_one ? (i > 0 ? 
i-1 : (uint64_t) -1) : i); + if (subtract_one && i == 0) p = last_p; else if (subtract_one) @@ -1509,16 +1789,17 @@ found: return 1; } -static int generic_array_bisect_plus_one(JournalFile *f, - uint64_t extra, - uint64_t first, - uint64_t n, - uint64_t needle, - int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle), - direction_t direction, - Object **ret, - uint64_t *offset, - uint64_t *idx) { +static int generic_array_bisect_plus_one( + JournalFile *f, + uint64_t extra, + uint64_t first, + uint64_t n, + uint64_t needle, + int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle), + direction_t direction, + Object **ret, + uint64_t *offset, + uint64_t *idx) { int r; bool step_back = false; @@ -1581,7 +1862,7 @@ found: return 1; } -static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) { +_pure_ static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) { assert(f); assert(p > 0); @@ -1593,23 +1874,6 @@ static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) { return TEST_RIGHT; } -int journal_file_move_to_entry_by_offset( - JournalFile *f, - uint64_t p, - direction_t direction, - Object **ret, - uint64_t *offset) { - - return generic_array_bisect(f, - le64toh(f->header->entry_array_offset), - le64toh(f->header->n_entries), - p, - test_object_offset, - direction, - ret, offset, NULL); -} - - static int test_object_seqnum(JournalFile *f, uint64_t p, uint64_t needle) { Object *o; int r; @@ -1699,6 +1963,18 @@ static int test_object_monotonic(JournalFile *f, uint64_t p, uint64_t needle) { return TEST_RIGHT; } +static int find_data_object_by_boot_id( + JournalFile *f, + sd_id128_t boot_id, + Object **o, + uint64_t *b) { + + char t[sizeof("_BOOT_ID=")-1 + 32 + 1] = "_BOOT_ID="; + + sd_id128_to_string(boot_id, t + 9); + return journal_file_find_data_object(f, t, sizeof(t) - 1, o, b); +} + int journal_file_move_to_entry_by_monotonic( JournalFile *f, sd_id128_t boot_id, @@ -1707,14 +1983,12 @@ int journal_file_move_to_entry_by_monotonic( Object **ret, uint64_t *offset) { - char t[9+32+1] = "_BOOT_ID="; Object *o; int r; assert(f); - sd_id128_to_string(boot_id, t + 9); - r = journal_file_find_data_object(f, t, strlen(t), &o, NULL); + r = find_data_object_by_boot_id(f, boot_id, &o, NULL); if (r < 0) return r; if (r == 0) @@ -1730,28 +2004,97 @@ int journal_file_move_to_entry_by_monotonic( ret, offset, NULL); } +void journal_file_reset_location(JournalFile *f) { + f->location_type = LOCATION_HEAD; + f->current_offset = 0; + f->current_seqnum = 0; + f->current_realtime = 0; + f->current_monotonic = 0; + zero(f->current_boot_id); + f->current_xor_hash = 0; +} + +void journal_file_save_location(JournalFile *f, direction_t direction, Object *o, uint64_t offset) { + f->last_direction = direction; + f->location_type = LOCATION_SEEK; + f->current_offset = offset; + f->current_seqnum = le64toh(o->entry.seqnum); + f->current_realtime = le64toh(o->entry.realtime); + f->current_monotonic = le64toh(o->entry.monotonic); + f->current_boot_id = o->entry.boot_id; + f->current_xor_hash = le64toh(o->entry.xor_hash); +} + +int journal_file_compare_locations(JournalFile *af, JournalFile *bf) { + assert(af); + assert(bf); + assert(af->location_type == LOCATION_SEEK); + assert(bf->location_type == LOCATION_SEEK); + + /* If contents and timestamps match, these entries are + * identical, even if the seqnum does not match */ + if (sd_id128_equal(af->current_boot_id, bf->current_boot_id) && + af->current_monotonic == bf->current_monotonic && + 
af->current_realtime == bf->current_realtime && + af->current_xor_hash == bf->current_xor_hash) + return 0; + + if (sd_id128_equal(af->header->seqnum_id, bf->header->seqnum_id)) { + + /* If this is from the same seqnum source, compare + * seqnums */ + if (af->current_seqnum < bf->current_seqnum) + return -1; + if (af->current_seqnum > bf->current_seqnum) + return 1; + + /* Wow! This is weird, different data but the same + * seqnums? Something is borked, but let's make the + * best of it and compare by time. */ + } + + if (sd_id128_equal(af->current_boot_id, bf->current_boot_id)) { + + /* If the boot id matches, compare monotonic time */ + if (af->current_monotonic < bf->current_monotonic) + return -1; + if (af->current_monotonic > bf->current_monotonic) + return 1; + } + + /* Otherwise, compare UTC time */ + if (af->current_realtime < bf->current_realtime) + return -1; + if (af->current_realtime > bf->current_realtime) + return 1; + + /* Finally, compare by contents */ + if (af->current_xor_hash < bf->current_xor_hash) + return -1; + if (af->current_xor_hash > bf->current_xor_hash) + return 1; + + return 0; +} + int journal_file_next_entry( JournalFile *f, - Object *o, uint64_t p, + uint64_t p, direction_t direction, Object **ret, uint64_t *offset) { - uint64_t i, n; + uint64_t i, n, ofs; int r; assert(f); - assert(p > 0 || !o); n = le64toh(f->header->n_entries); if (n <= 0) return 0; - if (!o) + if (p == 0) i = direction == DIRECTION_DOWN ? 0 : n - 1; else { - if (o->object.type != OBJECT_ENTRY) - return -EINVAL; - r = generic_array_bisect(f, le64toh(f->header->entry_array_offset), le64toh(f->header->n_entries), @@ -1777,59 +2120,24 @@ int journal_file_next_entry( } /* And jump to it */ - return generic_array_get(f, - le64toh(f->header->entry_array_offset), - i, - ret, offset); -} - -int journal_file_skip_entry( - JournalFile *f, - Object *o, uint64_t p, - int64_t skip, - Object **ret, uint64_t *offset) { - - uint64_t i, n; - int r; - - assert(f); - assert(o); - assert(p > 0); - - if (o->object.type != OBJECT_ENTRY) - return -EINVAL; - - r = generic_array_bisect(f, - le64toh(f->header->entry_array_offset), - le64toh(f->header->n_entries), - p, - test_object_offset, - DIRECTION_DOWN, - NULL, NULL, - &i); + r = generic_array_get(f, + le64toh(f->header->entry_array_offset), + i, + ret, &ofs); if (r <= 0) return r; - /* Calculate new index */ - if (skip < 0) { - if ((uint64_t) -skip >= i) - i = 0; - else - i = i - (uint64_t) -skip; - } else - i += (uint64_t) skip; - - n = le64toh(f->header->n_entries); - if (n <= 0) + if (p > 0 && + (direction == DIRECTION_DOWN ? 
ofs <= p : ofs >= p)) { + log_debug("%s: entry array corrupted at entry %"PRIu64, + f->path, i); return -EBADMSG; + } - if (i >= n) - i = n-1; + if (offset) + *offset = ofs; - return generic_array_get(f, - le64toh(f->header->entry_array_offset), - i, - ret, offset); + return 1; } int journal_file_next_entry_for_data( @@ -1928,7 +2236,6 @@ int journal_file_move_to_entry_by_monotonic_for_data( direction_t direction, Object **ret, uint64_t *offset) { - char t[9+32+1] = "_BOOT_ID="; Object *o, *d; int r; uint64_t b, z; @@ -1936,8 +2243,7 @@ int journal_file_move_to_entry_by_monotonic_for_data( assert(f); /* First, seek by time */ - sd_id128_to_string(boot_id, t + 9); - r = journal_file_find_data_object(f, t, strlen(t), &o, &b); + r = find_data_object_by_boot_id(f, boot_id, &o, &b); if (r < 0) return r; if (r == 0) @@ -2003,8 +2309,6 @@ int journal_file_move_to_entry_by_monotonic_for_data( z = q; } - - return 0; } int journal_file_move_to_entry_by_seqnum_for_data( @@ -2070,7 +2374,7 @@ void journal_file_dump(JournalFile *f) { p = le64toh(f->header->header_size); while (p != 0) { - r = journal_file_move_to_object(f, -1, p, &o); + r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o); if (r < 0) goto fail; @@ -2089,10 +2393,10 @@ void journal_file_dump(JournalFile *f) { break; case OBJECT_ENTRY: - printf("Type: OBJECT_ENTRY seqnum=%llu monotonic=%llu realtime=%llu\n", - (unsigned long long) le64toh(o->entry.seqnum), - (unsigned long long) le64toh(o->entry.monotonic), - (unsigned long long) le64toh(o->entry.realtime)); + printf("Type: OBJECT_ENTRY seqnum=%"PRIu64" monotonic=%"PRIu64" realtime=%"PRIu64"\n", + le64toh(o->entry.seqnum), + le64toh(o->entry.monotonic), + le64toh(o->entry.realtime)); break; case OBJECT_FIELD_HASH_TABLE: @@ -2108,18 +2412,19 @@ void journal_file_dump(JournalFile *f) { break; case OBJECT_TAG: - printf("Type: OBJECT_TAG seqnum=%llu epoch=%llu\n", - (unsigned long long) le64toh(o->tag.seqnum), - (unsigned long long) le64toh(o->tag.epoch)); + printf("Type: OBJECT_TAG seqnum=%"PRIu64" epoch=%"PRIu64"\n", + le64toh(o->tag.seqnum), + le64toh(o->tag.epoch)); break; default: - printf("Type: unknown (%u)\n", o->object.type); + printf("Type: unknown (%i)\n", o->object.type); break; } - if (o->object.flags & OBJECT_COMPRESSED) - printf("Flags: COMPRESSED\n"); + if (o->object.flags & OBJECT_COMPRESSION_MASK) + printf("Flags: %s\n", + object_compressed_to_string(o->object.flags & OBJECT_COMPRESSION_MASK)); if (p == le64toh(f->header->tail_object_offset)) p = 0; @@ -2132,9 +2437,18 @@ fail: log_error("File corrupt"); } +static const char* format_timestamp_safe(char *buf, size_t l, usec_t t) { + const char *x; + + x = format_timestamp(buf, l, t); + if (x) + return x; + return " --- "; +} + void journal_file_print_header(JournalFile *f) { - char a[33], b[33], c[33]; - char x[FORMAT_TIMESTAMP_MAX], y[FORMAT_TIMESTAMP_MAX]; + char a[33], b[33], c[33], d[33]; + char x[FORMAT_TIMESTAMP_MAX], y[FORMAT_TIMESTAMP_MAX], z[FORMAT_TIMESTAMP_MAX]; struct stat st; char bytes[FORMAT_BYTES_MAX]; @@ -2147,60 +2461,63 @@ void journal_file_print_header(JournalFile *f) { "Sequential Number ID: %s\n" "State: %s\n" "Compatible Flags:%s%s\n" - "Incompatible Flags:%s%s\n" - "Header size: %llu\n" - "Arena size: %llu\n" - "Data Hash Table Size: %llu\n" - "Field Hash Table Size: %llu\n" + "Incompatible Flags:%s%s%s\n" + "Header size: %"PRIu64"\n" + "Arena size: %"PRIu64"\n" + "Data Hash Table Size: %"PRIu64"\n" + "Field Hash Table Size: %"PRIu64"\n" "Rotate Suggested: %s\n" - "Head Sequential Number: %llu\n" 
- "Tail Sequential Number: %llu\n" + "Head Sequential Number: %"PRIu64"\n" + "Tail Sequential Number: %"PRIu64"\n" "Head Realtime Timestamp: %s\n" "Tail Realtime Timestamp: %s\n" - "Objects: %llu\n" - "Entry Objects: %llu\n", + "Tail Monotonic Timestamp: %s\n" + "Objects: %"PRIu64"\n" + "Entry Objects: %"PRIu64"\n", f->path, sd_id128_to_string(f->header->file_id, a), sd_id128_to_string(f->header->machine_id, b), sd_id128_to_string(f->header->boot_id, c), - sd_id128_to_string(f->header->seqnum_id, c), + sd_id128_to_string(f->header->seqnum_id, d), f->header->state == STATE_OFFLINE ? "OFFLINE" : f->header->state == STATE_ONLINE ? "ONLINE" : f->header->state == STATE_ARCHIVED ? "ARCHIVED" : "UNKNOWN", JOURNAL_HEADER_SEALED(f->header) ? " SEALED" : "", - (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_SEALED) ? " ???" : "", - JOURNAL_HEADER_COMPRESSED(f->header) ? " COMPRESSED" : "", - (le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_COMPRESSED) ? " ???" : "", - (unsigned long long) le64toh(f->header->header_size), - (unsigned long long) le64toh(f->header->arena_size), - (unsigned long long) le64toh(f->header->data_hash_table_size) / sizeof(HashItem), - (unsigned long long) le64toh(f->header->field_hash_table_size) / sizeof(HashItem), + (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_ANY) ? " ???" : "", + JOURNAL_HEADER_COMPRESSED_XZ(f->header) ? " COMPRESSED-XZ" : "", + JOURNAL_HEADER_COMPRESSED_LZ4(f->header) ? " COMPRESSED-LZ4" : "", + (le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_ANY) ? " ???" : "", + le64toh(f->header->header_size), + le64toh(f->header->arena_size), + le64toh(f->header->data_hash_table_size) / sizeof(HashItem), + le64toh(f->header->field_hash_table_size) / sizeof(HashItem), yes_no(journal_file_rotate_suggested(f, 0)), - (unsigned long long) le64toh(f->header->head_entry_seqnum), - (unsigned long long) le64toh(f->header->tail_entry_seqnum), - format_timestamp(x, sizeof(x), le64toh(f->header->head_entry_realtime)), - format_timestamp(y, sizeof(y), le64toh(f->header->tail_entry_realtime)), - (unsigned long long) le64toh(f->header->n_objects), - (unsigned long long) le64toh(f->header->n_entries)); + le64toh(f->header->head_entry_seqnum), + le64toh(f->header->tail_entry_seqnum), + format_timestamp_safe(x, sizeof(x), le64toh(f->header->head_entry_realtime)), + format_timestamp_safe(y, sizeof(y), le64toh(f->header->tail_entry_realtime)), + format_timespan(z, sizeof(z), le64toh(f->header->tail_entry_monotonic), USEC_PER_MSEC), + le64toh(f->header->n_objects), + le64toh(f->header->n_entries)); if (JOURNAL_HEADER_CONTAINS(f->header, n_data)) - printf("Data Objects: %llu\n" + printf("Data Objects: %"PRIu64"\n" "Data Hash Table Fill: %.1f%%\n", - (unsigned long long) le64toh(f->header->n_data), + le64toh(f->header->n_data), 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)))); if (JOURNAL_HEADER_CONTAINS(f->header, n_fields)) - printf("Field Objects: %llu\n" + printf("Field Objects: %"PRIu64"\n" "Field Hash Table Fill: %.1f%%\n", - (unsigned long long) le64toh(f->header->n_fields), + le64toh(f->header->n_fields), 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)))); if (JOURNAL_HEADER_CONTAINS(f->header, n_tags)) - printf("Tag Objects: %llu\n", - (unsigned long long) le64toh(f->header->n_tags)); + printf("Tag Objects: %"PRIu64"\n", + le64toh(f->header->n_tags)); if 
(JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays)) - printf("Entry Array Objects: %llu\n", - (unsigned long long) le64toh(f->header->n_entry_arrays)); + printf("Entry Array Objects: %"PRIu64"\n", + le64toh(f->header->n_entry_arrays)); if (fstat(f->fd, &st) >= 0) printf("Disk usage: %s\n", format_bytes(bytes, sizeof(bytes), (off_t) st.st_blocks * 512ULL)); @@ -2217,9 +2534,10 @@ int journal_file_open( JournalFile *template, JournalFile **ret) { + bool newly_created = false; JournalFile *f; + void *h; int r; - bool newly_created = false; assert(fname); assert(ret); @@ -2242,8 +2560,10 @@ int journal_file_open( f->flags = flags; f->prot = prot_from_flags(flags); f->writable = (flags & O_ACCMODE) != O_RDONLY; -#ifdef HAVE_XZ - f->compress = compress; +#if defined(HAVE_LZ4) + f->compress_lz4 = compress; +#elif defined(HAVE_XZ) + f->compress_xz = compress; #endif #ifdef HAVE_GCRYPT f->seal = seal; @@ -2265,20 +2585,34 @@ int journal_file_open( goto fail; } + f->chain_cache = ordered_hashmap_new(&uint64_hash_ops); + if (!f->chain_cache) { + r = -ENOMEM; + goto fail; + } + f->fd = open(f->path, f->flags|O_CLOEXEC, f->mode); if (f->fd < 0) { r = -errno; goto fail; } - if (fstat(f->fd, &f->last_stat) < 0) { - r = -errno; + r = journal_file_fstat(f); + if (r < 0) goto fail; - } if (f->last_stat.st_size == 0 && f->writable) { -#ifdef HAVE_XATTR - uint64_t crtime; + + /* Before we write anything, turn off COW logic. Given + * our write pattern that is quite unfriendly to COW + * file systems this should greatly improve + * performance on COW file systems, such as btrfs, at + * the expense of data integrity features (which + * shouldn't be too bad, given that we do our own + * checksumming). */ + r = chattr_fd(f->fd, true, FS_NOCOW_FL); + if (r < 0) + log_warning_errno(errno, "Failed to set file attributes: %m"); /* Let's attach the creation time to the journal file, * so that the vacuuming code knows the age of this @@ -2288,11 +2622,9 @@ int journal_file_open( * currently no usable API to query this, hence let's * emulate this via extended attributes. If extended * attributes are not supported we'll just skip this, - * and rely solely on mtime/atime/ctime of the file.*/ + * and rely solely on mtime/atime/ctime of the file. 
*/ - crtime = htole64((uint64_t) now(CLOCK_REALTIME)); - fsetxattr(f->fd, "user.crtime_usec", &crtime, sizeof(crtime), XATTR_CREATE); -#endif + fd_setcrtime(f->fd, 0); #ifdef HAVE_GCRYPT /* Try to load the FSPRG state, and if we can't, then @@ -2308,10 +2640,9 @@ int journal_file_open( if (r < 0) goto fail; - if (fstat(f->fd, &f->last_stat) < 0) { - r = -errno; + r = journal_file_fstat(f); + if (r < 0) goto fail; - } newly_created = true; } @@ -2321,13 +2652,14 @@ int journal_file_open( goto fail; } - f->header = mmap(NULL, PAGE_ALIGN(sizeof(Header)), prot_from_flags(flags), MAP_SHARED, f->fd, 0); - if (f->header == MAP_FAILED) { - f->header = NULL; + r = mmap_cache_get(f->mmap, f->fd, f->prot, CONTEXT_HEADER, true, 0, PAGE_ALIGN(sizeof(Header)), &f->last_stat, &h); + if (r < 0) { r = -errno; goto fail; } + f->header = h; + if (!newly_created) { r = journal_file_verify_header(f); if (r < 0) @@ -2384,17 +2716,25 @@ int journal_file_open( if (r < 0) goto fail; + if (mmap_cache_got_sigbus(f->mmap, f->fd)) { + r = -EIO; + goto fail; + } + *ret = f; return 0; fail: + if (f->fd >= 0 && mmap_cache_got_sigbus(f->mmap, f->fd)) + r = -EIO; + journal_file_close(f); return r; } int journal_file_rotate(JournalFile **f, bool compress, bool seal) { - char *p; + _cleanup_free_ char *p = NULL; size_t l; JournalFile *old_file, *new_file = NULL; int r; @@ -2411,27 +2751,28 @@ int journal_file_rotate(JournalFile **f, bool compress, bool seal) { return -EINVAL; l = strlen(old_file->path); - - p = new(char, l + 1 + 32 + 1 + 16 + 1 + 16 + 1); - if (!p) + r = asprintf(&p, "%.*s@" SD_ID128_FORMAT_STR "-%016"PRIx64"-%016"PRIx64".journal", + (int) l - 8, old_file->path, + SD_ID128_FORMAT_VAL(old_file->header->seqnum_id), + le64toh((*f)->header->head_entry_seqnum), + le64toh((*f)->header->head_entry_realtime)); + if (r < 0) return -ENOMEM; - memcpy(p, old_file->path, l - 8); - p[l-8] = '@'; - sd_id128_to_string(old_file->header->seqnum_id, p + l - 8 + 1); - snprintf(p + l - 8 + 1 + 32, 1 + 16 + 1 + 16 + 8 + 1, - "-%016llx-%016llx.journal", - (unsigned long long) le64toh((*f)->header->head_entry_seqnum), - (unsigned long long) le64toh((*f)->header->head_entry_realtime)); - + /* Try to rename the file to the archived version. If the file + * already was deleted, we'll get ENOENT, let's ignore that + * case. */ r = rename(old_file->path, p); - free(p); - - if (r < 0) + if (r < 0 && errno != ENOENT) return -errno; old_file->header->state = STATE_ARCHIVED; + /* Currently, btrfs is not very good with out write patterns + * and fragments heavily. Let's defrag our journal files when + * we archive them */ + old_file->defrag_on_close = true; + r = journal_file_open(old_file->path, old_file->flags, old_file->mode, compress, seal, NULL, old_file->mmap, old_file, &new_file); journal_file_close(old_file); @@ -2452,7 +2793,7 @@ int journal_file_open_reliably( int r; size_t l; - char *p; + _cleanup_free_ char *p = NULL; r = journal_file_open(fname, flags, mode, compress, seal, metrics, mmap_cache, template, ret); @@ -2461,7 +2802,9 @@ int journal_file_open_reliably( r != -EHOSTDOWN && /* other machine */ r != -EPROTONOSUPPORT && /* incompatible feature */ r != -EBUSY && /* unclean shutdown */ - r != -ESHUTDOWN /* already archived */) + r != -ESHUTDOWN && /* already archived */ + r != -EIO && /* IO error, including SIGBUS on mmap */ + r != -EIDRM /* File has been deleted */) return r; if ((flags & O_ACCMODE) == O_RDONLY) @@ -2476,24 +2819,28 @@ int journal_file_open_reliably( /* The file is corrupted. 
Rotate it away and try it again (but only once) */ l = strlen(fname); - if (asprintf(&p, "%.*s@%016llx-%016llx.journal~", - (int) (l-8), fname, + if (asprintf(&p, "%.*s@%016llx-%016" PRIx64 ".journal~", + (int) l - 8, fname, (unsigned long long) now(CLOCK_REALTIME), - random_ull()) < 0) + random_u64()) < 0) return -ENOMEM; r = rename(fname, p); - free(p); if (r < 0) return -errno; + /* btrfs doesn't cope well with our write pattern and + * fragments heavily. Let's defrag all files we rotate */ + + (void) chattr_path(p, false, FS_NOCOW_FL); + (void) btrfs_defrag(p); + log_warning("File %s corrupted or uncleanly shut down, renaming and replacing.", fname); return journal_file_open(fname, flags, mode, compress, seal, metrics, mmap_cache, template, ret); } - int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint64_t p, uint64_t *seqnum, Object **ret, uint64_t *offset) { uint64_t i, n; uint64_t q, xor_hash = 0; @@ -2512,12 +2859,9 @@ int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint6 ts.monotonic = le64toh(o->entry.monotonic); ts.realtime = le64toh(o->entry.realtime); - if (to->tail_entry_monotonic_valid && - ts.monotonic < le64toh(to->header->tail_entry_monotonic)) - return -EINVAL; - n = journal_file_entry_n_items(o); - items = alloca(sizeof(EntryItem) * n); + /* alloca() can't take 0, hence let's allocate at least one */ + items = alloca(sizeof(EntryItem) * MAX(1u, n)); for (i = 0; i < n; i++) { uint64_t l, h; @@ -2543,12 +2887,14 @@ int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint6 if ((uint64_t) t != l) return -E2BIG; - if (o->object.flags & OBJECT_COMPRESSED) { -#ifdef HAVE_XZ - uint64_t rsize; + if (o->object.flags & OBJECT_COMPRESSION_MASK) { +#if defined(HAVE_XZ) || defined(HAVE_LZ4) + size_t rsize; - if (!uncompress_blob(o->data.payload, l, &from->compress_buffer, &from->compress_buffer_size, &rsize)) - return -EBADMSG; + r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK, + o->data.payload, l, &from->compress_buffer, &from->compress_buffer_size, &rsize, 0); + if (r < 0) + return r; data = from->compress_buffer; l = rsize; @@ -2571,7 +2917,12 @@ int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint6 return r; } - return journal_file_append_entry_internal(to, &ts, xor_hash, items, n, seqnum, ret, offset); + r = journal_file_append_entry_internal(to, &ts, xor_hash, items, n, seqnum, ret, offset); + + if (mmap_cache_got_sigbus(to->mmap, to->fd)) + return -EIO; + + return r; } void journal_default_metrics(JournalMetrics *m, int fd) { @@ -2633,7 +2984,7 @@ void journal_default_metrics(JournalMetrics *m, int fd) { if (m->keep_free == (uint64_t) -1) { if (fs_size > 0) { - m->keep_free = PAGE_ALIGN(fs_size / 20); /* 5% of file system size */ + m->keep_free = PAGE_ALIGN(fs_size * 3 / 20); /* 15% of file system size */ if (m->keep_free > DEFAULT_KEEP_FREE_UPPER) m->keep_free = DEFAULT_KEEP_FREE_UPPER; @@ -2671,7 +3022,6 @@ int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *from, usec_t * } int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, usec_t *from, usec_t *to) { - char t[9+32+1] = "_BOOT_ID="; Object *o; uint64_t p; int r; @@ -2679,9 +3029,7 @@ int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, u assert(f); assert(from || to); - sd_id128_to_string(boot_id, t + 9); - - r = journal_file_find_data_object(f, t, strlen(t), &o, &p); + r = find_data_object_by_boot_id(f, boot_id, &o, &p); if (r <= 0) return 
r; @@ -2733,26 +3081,33 @@ bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec) { if (JOURNAL_HEADER_CONTAINS(f->header, n_data)) if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) { - log_debug("Data hash table of %s has a fill level at %.1f (%llu of %llu items, %llu file size, %llu bytes per hash table item), suggesting rotation.", + log_debug("Data hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items, %llu file size, %"PRIu64" bytes per hash table item), suggesting rotation.", f->path, 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))), - (unsigned long long) le64toh(f->header->n_data), - (unsigned long long) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)), - (unsigned long long) (f->last_stat.st_size), - (unsigned long long) (f->last_stat.st_size / le64toh(f->header->n_data))); + le64toh(f->header->n_data), + le64toh(f->header->data_hash_table_size) / sizeof(HashItem), + (unsigned long long) f->last_stat.st_size, + f->last_stat.st_size / le64toh(f->header->n_data)); return true; } if (JOURNAL_HEADER_CONTAINS(f->header, n_fields)) if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) { - log_debug("Field hash table of %s has a fill level at %.1f (%llu of %llu items), suggesting rotation.", + log_debug("Field hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items), suggesting rotation.", f->path, 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))), - (unsigned long long) le64toh(f->header->n_fields), - (unsigned long long) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))); + le64toh(f->header->n_fields), + le64toh(f->header->field_hash_table_size) / sizeof(HashItem)); return true; } + /* Are the data objects properly indexed by field objects? */ + if (JOURNAL_HEADER_CONTAINS(f->header, n_data) && + JOURNAL_HEADER_CONTAINS(f->header, n_fields) && + le64toh(f->header->n_data) > 0 && + le64toh(f->header->n_fields) == 0) + return true; + if (max_file_usec > 0) { usec_t t, h;
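
Illustration (not part of the patch above): one of the behavioural changes in this diff is the new growth strategy in journal_file_allocate(). Instead of extending the arena by exactly the amount requested, the new size is first rounded up to whole FILE_SIZE_INCREASE (8 MiB) blocks and then clamped to metrics.max_size, and the fstat() refresh that detects deleted files is rate-limited via LAST_STAT_REFRESH_USEC rather than performed on every append. The standalone sketch below isolates just the rounding arithmetic from that hunk; the helper name round_up_to_blocks() and the sample sizes in main() are invented for this example, while the rounding expression and the 8 MiB constant are taken from the patch itself.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define FILE_SIZE_INCREASE (8ULL*1024ULL*1024ULL)   /* 8 MiB, as defined in the patch */

/* Round a requested file size up to the next multiple of FILE_SIZE_INCREASE,
 * then clamp it to max_size (0 meaning "no limit"), mirroring the logic the
 * patch adds to journal_file_allocate(). */
static uint64_t round_up_to_blocks(uint64_t new_size, uint64_t max_size) {
        new_size = ((new_size + FILE_SIZE_INCREASE - 1) / FILE_SIZE_INCREASE) * FILE_SIZE_INCREASE;
        if (max_size > 0 && new_size > max_size)
                new_size = max_size;
        return new_size;
}

int main(void) {
        /* A 5 MiB request grows the file to 8 MiB; a 9 MiB request would grow
         * it to 16 MiB, except that a configured maximum of 10 MiB caps it. */
        printf("%" PRIu64 "\n", round_up_to_blocks(5ULL*1024*1024, 0));
        printf("%" PRIu64 "\n", round_up_to_blocks(9ULL*1024*1024, 10ULL*1024*1024));
        return 0;
}

Growing in large blocks keeps the number of posix_fallocate() calls down (the glibc fallback is noted in the diff as very inefficient) and leaves the file less fragmented, which matters once the patch also disables COW on btrfs and defragments journal files when they are archived or closed.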