X-Git-Url: https://www.chiark.greenend.org.uk/ucgi/~ianmdlvl/git?p=elogind.git;a=blobdiff_plain;f=src%2Fjournal%2Fjournal-file.c;h=f01f12496cd533e7b96d0a5a5c92d0c239e951c3;hp=180b43a9c53c4f068c83e5fba43d9876ce646f74;hb=feb12d3ed2c7f9132c64773c7c41b9e3a608a814;hpb=dc36ac673356534b8b32c31e1892e55b8f891381 diff --git a/src/journal/journal-file.c b/src/journal/journal-file.c index 180b43a9c..f01f12496 100644 --- a/src/journal/journal-file.c +++ b/src/journal/journal-file.c @@ -29,14 +29,14 @@ #include "journal-def.h" #include "journal-file.h" +#include "journal-authenticate.h" #include "lookup3.h" #include "compress.h" +#include "fsprg.h" #define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem)) #define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem)) -#define DEFAULT_WINDOW_SIZE (8ULL*1024ULL*1024ULL) - #define COMPRESSION_SIZE_THRESHOLD (512ULL) /* This is the minimum journal file size */ @@ -61,38 +61,54 @@ /* n_data was the first entry we added after the initial file format design */ #define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data)) -#define ALIGN64(x) (((x) + 7ULL) & ~7ULL) - -#define JOURNAL_HEADER_CONTAINS(h, field) \ - (le64toh((h)->header_size) >= offsetof(Header, field) + sizeof((h)->field)) +void journal_file_close(JournalFile *f) { + assert(f); -static const char signature[] = { 'L', 'P', 'K', 'S', 'H', 'H', 'R', 'H' }; +#ifdef HAVE_GCRYPT + /* Write the final tag */ + if (f->seal && f->writable) + journal_file_append_tag(f); +#endif -void journal_file_close(JournalFile *f) { - int t; + /* Sync everything to disk, before we mark the file offline */ + if (f->mmap && f->fd >= 0) + mmap_cache_close_fd(f->mmap, f->fd); - assert(f); + if (f->writable && f->fd >= 0) + fdatasync(f->fd); if (f->header) { - if (f->writable) + /* Mark the file offline. Don't override the archived state if it already is set */ + if (f->writable && f->header->state == STATE_ONLINE) f->header->state = STATE_OFFLINE; munmap(f->header, PAGE_ALIGN(sizeof(Header))); } - for (t = 0; t < _WINDOW_MAX; t++) - if (f->windows[t].ptr) - munmap(f->windows[t].ptr, f->windows[t].size); - if (f->fd >= 0) close_nointr_nofail(f->fd); free(f->path); + if (f->mmap) + mmap_cache_unref(f->mmap); + #ifdef HAVE_XZ free(f->compress_buffer); #endif +#ifdef HAVE_GCRYPT + if (f->fss_file) + munmap(f->fss_file, PAGE_ALIGN(f->fss_file_size)); + else if (f->fsprg_state) + free(f->fsprg_state); + + free(f->fsprg_seed); + + if (f->hmac) + gcry_md_close(f->hmac); +#endif + free(f); } @@ -104,16 +120,22 @@ static int journal_file_init_header(JournalFile *f, JournalFile *template) { assert(f); zero(h); - memcpy(h.signature, signature, 8); + memcpy(h.signature, HEADER_SIGNATURE, 8); h.header_size = htole64(ALIGN64(sizeof(h))); + h.incompatible_flags = + htole32(f->compress ? HEADER_INCOMPATIBLE_COMPRESSED : 0); + + h.compatible_flags = + htole32(f->seal ? 
HEADER_COMPATIBLE_SEALED : 0); + r = sd_id128_randomize(&h.file_id); if (r < 0) return r; if (template) { h.seqnum_id = template->header->seqnum_id; - h.tail_seqnum = template->header->tail_seqnum; + h.tail_entry_seqnum = template->header->tail_entry_seqnum; } else h.seqnum_id = h.file_id; @@ -148,7 +170,9 @@ static int journal_file_refresh_header(JournalFile *f) { f->header->state = STATE_ONLINE; - __sync_synchronize(); + /* Sync the online state to disk */ + msync(f->header, PAGE_ALIGN(sizeof(Header)), MS_SYNC); + fdatasync(f->fd); return 0; } @@ -156,22 +180,51 @@ static int journal_file_refresh_header(JournalFile *f) { static int journal_file_verify_header(JournalFile *f) { assert(f); - if (memcmp(f->header, signature, 8)) + if (memcmp(f->header->signature, HEADER_SIGNATURE, 8)) return -EBADMSG; + /* In both read and write mode we refuse to open files with + * incompatible flags we don't know */ #ifdef HAVE_XZ - if ((le64toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_COMPRESSED) != 0) + if ((le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_COMPRESSED) != 0) return -EPROTONOSUPPORT; #else if (f->header->incompatible_flags != 0) return -EPROTONOSUPPORT; #endif + /* When open for writing we refuse to open files with + * compatible flags, too */ + if (f->writable) { +#ifdef HAVE_GCRYPT + if ((le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_SEALED) != 0) + return -EPROTONOSUPPORT; +#else + if (f->header->compatible_flags != 0) + return -EPROTONOSUPPORT; +#endif + } + + if (f->header->state >= _STATE_MAX) + return -EBADMSG; + /* The first addition was n_data, so check that we are at least this large */ if (le64toh(f->header->header_size) < HEADER_SIZE_MIN) return -EBADMSG; - if ((uint64_t) f->last_stat.st_size < (le64toh(f->header->header_size) + le64toh(f->header->arena_size))) + if (JOURNAL_HEADER_SEALED(f->header) && !JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays)) + return -EBADMSG; + + if ((le64toh(f->header->header_size) + le64toh(f->header->arena_size)) > (uint64_t) f->last_stat.st_size) + return -ENODATA; + + if (le64toh(f->header->tail_object_offset) > (le64toh(f->header->header_size) + le64toh(f->header->arena_size))) + return -ENODATA; + + if (!VALID64(f->header->data_hash_table_offset) || + !VALID64(f->header->field_hash_table_offset) || + !VALID64(f->header->tail_object_offset) || + !VALID64(f->header->entry_array_offset)) return -ENODATA; if (f->writable) { @@ -199,6 +252,11 @@ static int journal_file_verify_header(JournalFile *f) { } } + f->compress = JOURNAL_HEADER_COMPRESSED(f->header); + + if (f->writable) + f->seal = JOURNAL_HEADER_SEALED(f->header); + return 0; } @@ -253,6 +311,8 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) if (r != 0) return -r; + mmap_cache_close_fd_range(f->mmap, f->fd, old_size); + if (fstat(f->fd, &f->last_stat) < 0) return -errno; @@ -261,59 +321,11 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) return 0; } -static int journal_file_map( - JournalFile *f, - uint64_t offset, - uint64_t size, - void **_window, - uint64_t *_woffset, - uint64_t *_wsize, - void **ret) { - - uint64_t woffset, wsize; - void *window; - +static int journal_file_move_to(JournalFile *f, int context, uint64_t offset, uint64_t size, void **ret) { assert(f); - assert(size > 0); assert(ret); - woffset = offset & ~((uint64_t) page_size() - 1ULL); - wsize = size + (offset - woffset); - wsize = PAGE_ALIGN(wsize); - /* Avoid SIGBUS on invalid accesses */ - if (woffset + 
wsize > (uint64_t) PAGE_ALIGN(f->last_stat.st_size)) - return -EADDRNOTAVAIL; - - window = mmap(NULL, wsize, f->prot, MAP_SHARED, f->fd, woffset); - if (window == MAP_FAILED) - return -errno; - - if (_window) - *_window = window; - - if (_woffset) - *_woffset = woffset; - - if (_wsize) - *_wsize = wsize; - - *ret = (uint8_t*) window + (offset - woffset); - - return 0; -} - -static int journal_file_move_to(JournalFile *f, int wt, uint64_t offset, uint64_t size, void **ret) { - void *p = NULL; - uint64_t delta; - int r; - Window *w; - - assert(f); - assert(ret); - assert(wt >= 0); - assert(wt < _WINDOW_MAX); - if (offset + size > (uint64_t) f->last_stat.st_size) { /* Hmm, out of range? Let's refresh the fstat() data * first, before we trust that check. */ @@ -323,74 +335,25 @@ static int journal_file_move_to(JournalFile *f, int wt, uint64_t offset, uint64_ return -EADDRNOTAVAIL; } - w = f->windows + wt; - - if (_likely_(w->ptr && - w->offset <= offset && - w->offset + w->size >= offset + size)) { - - *ret = (uint8_t*) w->ptr + (offset - w->offset); - return 0; - } - - if (w->ptr) { - if (munmap(w->ptr, w->size) < 0) - return -errno; - - w->ptr = NULL; - w->size = w->offset = 0; - } - - if (size < DEFAULT_WINDOW_SIZE) { - /* If the default window size is larger then what was - * asked for extend the mapping a bit in the hope to - * minimize needed remappings later on. We add half - * the window space before and half behind the - * requested mapping */ - - delta = (DEFAULT_WINDOW_SIZE - size) / 2; - - if (delta > offset) - delta = offset; - - offset -= delta; - size = DEFAULT_WINDOW_SIZE; - } else - delta = 0; - - if (offset + size > (uint64_t) f->last_stat.st_size) - size = (uint64_t) f->last_stat.st_size - offset; - - if (size <= 0) - return -EADDRNOTAVAIL; - - r = journal_file_map(f, - offset, size, - &w->ptr, &w->offset, &w->size, - &p); - - if (r < 0) - return r; - - *ret = (uint8_t*) p + delta; - return 0; + return mmap_cache_get(f->mmap, f->fd, f->prot, context, offset, size, ret); } -static bool verify_hash(Object *o) { - uint64_t h1, h2; +static uint64_t minimum_header_size(Object *o) { - assert(o); + static uint64_t table[] = { + [OBJECT_DATA] = sizeof(DataObject), + [OBJECT_FIELD] = sizeof(FieldObject), + [OBJECT_ENTRY] = sizeof(EntryObject), + [OBJECT_DATA_HASH_TABLE] = sizeof(HashTableObject), + [OBJECT_FIELD_HASH_TABLE] = sizeof(HashTableObject), + [OBJECT_ENTRY_ARRAY] = sizeof(EntryArrayObject), + [OBJECT_TAG] = sizeof(TagObject), + }; - if (o->object.type == OBJECT_DATA && !(o->object.flags & OBJECT_COMPRESSED)) { - h1 = le64toh(o->data.hash); - h2 = hash64(o->data.payload, le64toh(o->object.size) - offsetof(Object, data.payload)); - } else if (o->object.type == OBJECT_FIELD) { - h1 = le64toh(o->field.hash); - h2 = hash64(o->field.payload, le64toh(o->object.size) - offsetof(Object, field.payload)); - } else - return true; + if (o->object.type >= ELEMENTSOF(table) || table[o->object.type] <= 0) + return sizeof(ObjectHeader); - return h1 == h2; + return table[o->object.type]; } int journal_file_move_to_object(JournalFile *f, int type, uint64_t offset, Object **ret) { @@ -398,12 +361,19 @@ int journal_file_move_to_object(JournalFile *f, int type, uint64_t offset, Objec void *t; Object *o; uint64_t s; + unsigned context; assert(f); assert(ret); - assert(type < _OBJECT_TYPE_MAX); - r = journal_file_move_to(f, type >= 0 ? 
type : WINDOW_UNKNOWN, offset, sizeof(ObjectHeader), &t); + /* Objects may only be located at multiple of 64 bit */ + if (!VALID64(offset)) + return -EFAULT; + + /* One context for each type, plus one catch-all for the rest */ + context = type > 0 && type < _OBJECT_TYPE_MAX ? type : 0; + + r = journal_file_move_to(f, context, offset, sizeof(ObjectHeader), &t); if (r < 0) return r; @@ -413,6 +383,12 @@ int journal_file_move_to_object(JournalFile *f, int type, uint64_t offset, Objec if (s < sizeof(ObjectHeader)) return -EBADMSG; + if (o->object.type <= OBJECT_UNUSED) + return -EBADMSG; + + if (s < minimum_header_size(o)) + return -EBADMSG; + if (type >= 0 && o->object.type != type) return -EBADMSG; @@ -424,19 +400,16 @@ int journal_file_move_to_object(JournalFile *f, int type, uint64_t offset, Objec o = (Object*) t; } - if (!verify_hash(o)) - return -EBADMSG; - *ret = o; return 0; } -static uint64_t journal_file_seqnum(JournalFile *f, uint64_t *seqnum) { +static uint64_t journal_file_entry_seqnum(JournalFile *f, uint64_t *seqnum) { uint64_t r; assert(f); - r = le64toh(f->header->tail_seqnum) + 1; + r = le64toh(f->header->tail_entry_seqnum) + 1; if (seqnum) { /* If an external seqnum counter was passed, we update @@ -449,21 +422,22 @@ static uint64_t journal_file_seqnum(JournalFile *f, uint64_t *seqnum) { *seqnum = r; } - f->header->tail_seqnum = htole64(r); + f->header->tail_entry_seqnum = htole64(r); - if (f->header->head_seqnum == 0) - f->header->head_seqnum = htole64(r); + if (f->header->head_entry_seqnum == 0) + f->header->head_entry_seqnum = htole64(r); return r; } -static int journal_file_append_object(JournalFile *f, int type, uint64_t size, Object **ret, uint64_t *offset) { +int journal_file_append_object(JournalFile *f, int type, uint64_t size, Object **ret, uint64_t *offset) { int r; uint64_t p; Object *tail, *o; void *t; assert(f); + assert(type > 0 && type < _OBJECT_TYPE_MAX); assert(size >= sizeof(ObjectHeader)); assert(offset); assert(ret); @@ -509,16 +483,16 @@ static int journal_file_setup_data_hash_table(JournalFile *f) { assert(f); - /* We estimate that we need 1 hash table entry per 2K of + /* We estimate that we need 1 hash table entry per 768 of journal file and we want to make sure we never get beyond 75% fill level. Calculate the hash table size for the maximum file size based on these metrics. 
*/ - s = (f->metrics.max_size * 4 / 2048 / 3) * sizeof(HashItem); + s = (f->metrics.max_size * 4 / 768 / 3) * sizeof(HashItem); if (s < DEFAULT_DATA_HASH_TABLE_SIZE) s = DEFAULT_DATA_HASH_TABLE_SIZE; - log_info("Reserving %llu entries in hash table.", (unsigned long long) s); + log_info("Reserving %llu entries in hash table.", (unsigned long long) (s / sizeof(HashItem))); r = journal_file_append_object(f, OBJECT_DATA_HASH_TABLE, @@ -569,7 +543,7 @@ static int journal_file_map_data_hash_table(JournalFile *f) { s = le64toh(f->header->data_hash_table_size); r = journal_file_move_to(f, - WINDOW_DATA_HASH_TABLE, + OBJECT_DATA_HASH_TABLE, p, s, &t); if (r < 0) @@ -590,7 +564,7 @@ static int journal_file_map_field_hash_table(JournalFile *f) { s = le64toh(f->header->field_hash_table_size); r = journal_file_move_to(f, - WINDOW_FIELD_HASH_TABLE, + OBJECT_FIELD_HASH_TABLE, p, s, &t); if (r < 0) @@ -780,20 +754,24 @@ static int journal_file_append_data( o->object.size = htole64(offsetof(Object, data.payload) + rsize); o->object.flags |= OBJECT_COMPRESSED; - f->header->incompatible_flags = htole32(le32toh(f->header->incompatible_flags) | HEADER_INCOMPATIBLE_COMPRESSED); - log_debug("Compressed data object %lu -> %lu", (unsigned long) size, (unsigned long) rsize); } } #endif - if (!compressed) + if (!compressed && size > 0) memcpy(o->data.payload, data, size); r = journal_file_link_data(f, o, p, hash); if (r < 0) return r; +#ifdef HAVE_GCRYPT + r = journal_file_hmac_put_object(f, OBJECT_DATA, p); + if (r < 0) + return r; +#endif + /* The linking might have altered the window, so let's * refresh our pointer */ r = journal_file_move_to_object(f, OBJECT_DATA, p, &o); @@ -816,13 +794,21 @@ uint64_t journal_file_entry_n_items(Object *o) { return (le64toh(o->object.size) - offsetof(Object, entry.items)) / sizeof(EntryItem); } -static uint64_t journal_file_entry_array_n_items(Object *o) { +uint64_t journal_file_entry_array_n_items(Object *o) { assert(o); assert(o->object.type == OBJECT_ENTRY_ARRAY); return (le64toh(o->object.size) - offsetof(Object, entry_array.items)) / sizeof(uint64_t); } +uint64_t journal_file_hash_table_n_items(Object *o) { + assert(o); + assert(o->object.type == OBJECT_DATA_HASH_TABLE || + o->object.type == OBJECT_FIELD_HASH_TABLE); + + return (le64toh(o->object.size) - offsetof(Object, hash_table.items)) / sizeof(HashItem); +} + static int link_entry_into_array(JournalFile *f, le64_t *first, le64_t *idx, @@ -870,6 +856,12 @@ static int link_entry_into_array(JournalFile *f, if (r < 0) return r; +#ifdef HAVE_GCRYPT + r = journal_file_hmac_put_object(f, OBJECT_ENTRY_ARRAY, q); + if (r < 0) + return r; +#endif + o->entry_array.items[i] = htole64(p); if (ap == 0) @@ -882,6 +874,9 @@ static int link_entry_into_array(JournalFile *f, o->entry_array.next_entry_array_offset = htole64(q); } + if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays)) + f->header->n_entry_arrays = htole64(le64toh(f->header->n_entry_arrays) + 1); + *idx = htole64(hidx + 1); return 0; @@ -1000,13 +995,19 @@ static int journal_file_append_entry_internal( if (r < 0) return r; - o->entry.seqnum = htole64(journal_file_seqnum(f, seqnum)); + o->entry.seqnum = htole64(journal_file_entry_seqnum(f, seqnum)); memcpy(o->entry.items, items, n_items * sizeof(EntryItem)); o->entry.realtime = htole64(ts->realtime); o->entry.monotonic = htole64(ts->monotonic); o->entry.xor_hash = htole64(xor_hash); o->entry.boot_id = f->header->boot_id; +#ifdef HAVE_GCRYPT + r = journal_file_hmac_put_object(f, OBJECT_ENTRY, np); + if (r < 0) + return r; 
+#endif + r = journal_file_link_entry(f, o, np); if (r < 0) return r; @@ -1056,7 +1057,14 @@ int journal_file_append_entry(JournalFile *f, const dual_timestamp *ts, const st ts->monotonic < le64toh(f->header->tail_entry_monotonic)) return -EINVAL; - items = alloca(sizeof(EntryItem) * n_iovec); +#ifdef HAVE_GCRYPT + r = journal_file_maybe_append_tag(f, ts->realtime); + if (r < 0) + return r; +#endif + + /* alloca() can't take 0, hence let's allocate at least one */ + items = alloca(sizeof(EntryItem) * MAX(1, n_iovec)); for (i = 0; i < n_iovec; i++) { uint64_t p; @@ -1856,7 +1864,7 @@ void journal_file_dump(JournalFile *f) { break; case OBJECT_ENTRY: - printf("Type: OBJECT_ENTRY %llu %llu %llu\n", + printf("Type: OBJECT_ENTRY seqnum=%llu monotonic=%llu realtime=%llu\n", (unsigned long long) le64toh(o->entry.seqnum), (unsigned long long) le64toh(o->entry.monotonic), (unsigned long long) le64toh(o->entry.realtime)); @@ -1874,8 +1882,10 @@ void journal_file_dump(JournalFile *f) { printf("Type: OBJECT_ENTRY_ARRAY\n"); break; - case OBJECT_SIGNATURE: - printf("Type: OBJECT_SIGNATURE\n"); + case OBJECT_TAG: + printf("Type: OBJECT_TAG seqnum=%llu epoch=%llu\n", + (unsigned long long) le64toh(o->tag.seqnum), + (unsigned long long) le64toh(o->tag.epoch)); break; } @@ -1911,36 +1921,36 @@ void journal_file_print_header(JournalFile *f) { "Arena size: %llu\n" "Data Hash Table Size: %llu\n" "Field Hash Table Size: %llu\n" - "Objects: %llu\n" - "Entry Objects: %llu\n" "Rotate Suggested: %s\n" "Head Sequential Number: %llu\n" "Tail Sequential Number: %llu\n" "Head Realtime Timestamp: %s\n" - "Tail Realtime Timestamp: %s\n", + "Tail Realtime Timestamp: %s\n" + "Objects: %llu\n" + "Entry Objects: %llu\n", f->path, sd_id128_to_string(f->header->file_id, a), sd_id128_to_string(f->header->machine_id, b), sd_id128_to_string(f->header->boot_id, c), sd_id128_to_string(f->header->seqnum_id, c), - f->header->state == STATE_OFFLINE ? "offline" : - f->header->state == STATE_ONLINE ? "online" : - f->header->state == STATE_ARCHIVED ? "archived" : "unknown", - (f->header->compatible_flags & HEADER_COMPATIBLE_SIGNED) ? " SIGNED" : "", - (f->header->compatible_flags & ~HEADER_COMPATIBLE_SIGNED) ? " ???" : "", - (f->header->incompatible_flags & HEADER_INCOMPATIBLE_COMPRESSED) ? " COMPRESSED" : "", - (f->header->incompatible_flags & ~HEADER_INCOMPATIBLE_COMPRESSED) ? " ???" : "", + f->header->state == STATE_OFFLINE ? "OFFLINE" : + f->header->state == STATE_ONLINE ? "ONLINE" : + f->header->state == STATE_ARCHIVED ? "ARCHIVED" : "UNKNOWN", + JOURNAL_HEADER_SEALED(f->header) ? " SEALED" : "", + (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_SEALED) ? " ???" : "", + JOURNAL_HEADER_COMPRESSED(f->header) ? " COMPRESSED" : "", + (le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_COMPRESSED) ? " ???" 
: "", (unsigned long long) le64toh(f->header->header_size), (unsigned long long) le64toh(f->header->arena_size), (unsigned long long) le64toh(f->header->data_hash_table_size) / sizeof(HashItem), (unsigned long long) le64toh(f->header->field_hash_table_size) / sizeof(HashItem), - (unsigned long long) le64toh(f->header->n_objects), - (unsigned long long) le64toh(f->header->n_entries), yes_no(journal_file_rotate_suggested(f)), - (unsigned long long) le64toh(f->header->head_seqnum), - (unsigned long long) le64toh(f->header->tail_seqnum), + (unsigned long long) le64toh(f->header->head_entry_seqnum), + (unsigned long long) le64toh(f->header->tail_entry_seqnum), format_timestamp(x, sizeof(x), le64toh(f->header->head_entry_realtime)), - format_timestamp(y, sizeof(y), le64toh(f->header->tail_entry_realtime))); + format_timestamp(y, sizeof(y), le64toh(f->header->tail_entry_realtime)), + (unsigned long long) le64toh(f->header->n_objects), + (unsigned long long) le64toh(f->header->n_entries)); if (JOURNAL_HEADER_CONTAINS(f->header, n_data)) printf("Data Objects: %llu\n" @@ -1953,13 +1963,23 @@ void journal_file_print_header(JournalFile *f) { "Field Hash Table Fill: %.1f%%\n", (unsigned long long) le64toh(f->header->n_fields), 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)))); + + if (JOURNAL_HEADER_CONTAINS(f->header, n_tags)) + printf("Tag Objects: %llu\n", + (unsigned long long) le64toh(f->header->n_tags)); + if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays)) + printf("Entry Array Objects: %llu\n", + (unsigned long long) le64toh(f->header->n_entry_arrays)); } int journal_file_open( const char *fname, int flags, mode_t mode, + bool compress, + bool seal, JournalMetrics *metrics, + MMapCache *mmap_cache, JournalFile *template, JournalFile **ret) { @@ -1973,7 +1993,8 @@ int journal_file_open( (flags & O_ACCMODE) != O_RDWR) return -EINVAL; - if (!endswith(fname, ".journal")) + if (!endswith(fname, ".journal") && + !endswith(fname, ".journal~")) return -EINVAL; f = new0(JournalFile, 1); @@ -1981,13 +2002,23 @@ int journal_file_open( return -ENOMEM; f->fd = -1; - f->flags = flags; f->mode = mode; - f->writable = (flags & O_ACCMODE) != O_RDONLY; + + f->flags = flags; f->prot = prot_from_flags(flags); + f->writable = (flags & O_ACCMODE) != O_RDONLY; + f->compress = compress; + f->seal = seal; - if (template) - f->compress = template->compress; + if (mmap_cache) + f->mmap = mmap_cache_ref(mmap_cache); + else { + f->mmap = mmap_cache_new(); + if (!f->mmap) { + r = -ENOMEM; + goto fail; + } + } f->path = strdup(fname); if (!f->path) { @@ -2009,6 +2040,14 @@ int journal_file_open( if (f->last_stat.st_size == 0 && f->writable) { newly_created = true; +#ifdef HAVE_GCRYPT + /* Try to load the FSPRG state, and if we can't, then + * just don't do sealing */ + r = journal_file_fss_load(f); + if (r < 0) + f->seal = false; +#endif + r = journal_file_init_header(f, template); if (r < 0) goto fail; @@ -2037,6 +2076,14 @@ int journal_file_open( goto fail; } +#ifdef HAVE_GCRYPT + if (!newly_created && f->writable) { + r = journal_file_fss_load(f); + if (r < 0) + goto fail; + } +#endif + if (f->writable) { if (metrics) { journal_default_metrics(metrics, f->fd); @@ -2049,8 +2096,13 @@ int journal_file_open( goto fail; } - if (newly_created) { +#ifdef HAVE_GCRYPT + r = journal_file_hmac_setup(f); + if (r < 0) + goto fail; +#endif + if (newly_created) { r = journal_file_setup_field_hash_table(f); if (r < 0) goto fail; @@ -2058,6 +2110,12 @@ int 
journal_file_open( r = journal_file_setup_data_hash_table(f); if (r < 0) goto fail; + +#ifdef HAVE_GCRYPT + r = journal_file_append_first_tag(f); + if (r < 0) + goto fail; +#endif } r = journal_file_map_field_hash_table(f); @@ -2079,7 +2137,7 @@ fail: return r; } -int journal_file_rotate(JournalFile **f) { +int journal_file_rotate(JournalFile **f, bool compress, bool seal) { char *p; size_t l; JournalFile *old_file, *new_file = NULL; @@ -2107,7 +2165,7 @@ int journal_file_rotate(JournalFile **f) { sd_id128_to_string(old_file->header->seqnum_id, p + l - 8 + 1); snprintf(p + l - 8 + 1 + 32, 1 + 16 + 1 + 16 + 8 + 1, "-%016llx-%016llx.journal", - (unsigned long long) le64toh((*f)->header->tail_seqnum), + (unsigned long long) le64toh((*f)->header->tail_entry_seqnum), (unsigned long long) le64toh((*f)->header->tail_entry_realtime)); r = rename(old_file->path, p); @@ -2118,7 +2176,7 @@ int journal_file_rotate(JournalFile **f) { old_file->header->state = STATE_ARCHIVED; - r = journal_file_open(old_file->path, old_file->flags, old_file->mode, NULL, old_file, &new_file); + r = journal_file_open(old_file->path, old_file->flags, old_file->mode, compress, seal, NULL, old_file->mmap, old_file, &new_file); journal_file_close(old_file); *f = new_file; @@ -2129,7 +2187,10 @@ int journal_file_open_reliably( const char *fname, int flags, mode_t mode, + bool compress, + bool seal, JournalMetrics *metrics, + MMapCache *mmap_cache, JournalFile *template, JournalFile **ret) { @@ -2137,11 +2198,14 @@ int journal_file_open_reliably( size_t l; char *p; - r = journal_file_open(fname, flags, mode, metrics, template, ret); + r = journal_file_open(fname, flags, mode, compress, seal, + metrics, mmap_cache, template, ret); if (r != -EBADMSG && /* corrupted */ r != -ENODATA && /* truncated */ r != -EHOSTDOWN && /* other machine */ - r != -EPROTONOSUPPORT) /* incompatible feature */ + r != -EPROTONOSUPPORT && /* incompatible feature */ + r != -EBUSY && /* unclean shutdown */ + r != -ESHUTDOWN /* already archived */) return r; if ((flags & O_ACCMODE) == O_RDONLY) @@ -2150,6 +2214,9 @@ int journal_file_open_reliably( if (!(flags & O_CREAT)) return r; + if (!endswith(fname, ".journal")) + return r; + /* The file is corrupted. 
Rotate it away and try it again (but only once) */ l = strlen(fname); @@ -2164,207 +2231,12 @@ int journal_file_open_reliably( if (r < 0) return -errno; - log_warning("File %s corrupted, renaming and replacing.", fname); + log_warning("File %s corrupted or uncleanly shut down, renaming and replacing.", fname); - return journal_file_open(fname, flags, mode, metrics, template, ret); + return journal_file_open(fname, flags, mode, compress, seal, + metrics, mmap_cache, template, ret); } -struct vacuum_info { - off_t usage; - char *filename; - - uint64_t realtime; - sd_id128_t seqnum_id; - uint64_t seqnum; - - bool have_seqnum; -}; - -static int vacuum_compare(const void *_a, const void *_b) { - const struct vacuum_info *a, *b; - - a = _a; - b = _b; - - if (a->have_seqnum && b->have_seqnum && - sd_id128_equal(a->seqnum_id, b->seqnum_id)) { - if (a->seqnum < b->seqnum) - return -1; - else if (a->seqnum > b->seqnum) - return 1; - else - return 0; - } - - if (a->realtime < b->realtime) - return -1; - else if (a->realtime > b->realtime) - return 1; - else if (a->have_seqnum && b->have_seqnum) - return memcmp(&a->seqnum_id, &b->seqnum_id, 16); - else - return strcmp(a->filename, b->filename); -} - -int journal_directory_vacuum(const char *directory, uint64_t max_use, uint64_t min_free) { - DIR *d; - int r = 0; - struct vacuum_info *list = NULL; - unsigned n_list = 0, n_allocated = 0, i; - uint64_t sum = 0; - - assert(directory); - - if (max_use <= 0) - return 0; - - d = opendir(directory); - if (!d) - return -errno; - - for (;;) { - int k; - struct dirent buf, *de; - size_t q; - struct stat st; - char *p; - unsigned long long seqnum = 0, realtime; - sd_id128_t seqnum_id; - bool have_seqnum; - - k = readdir_r(d, &buf, &de); - if (k != 0) { - r = -k; - goto finish; - } - - if (!de) - break; - - if (fstatat(dirfd(d), de->d_name, &st, AT_SYMLINK_NOFOLLOW) < 0) - continue; - - if (!S_ISREG(st.st_mode)) - continue; - - q = strlen(de->d_name); - - if (endswith(de->d_name, ".journal")) { - - /* Vacuum archived files */ - - if (q < 1 + 32 + 1 + 16 + 1 + 16 + 8) - continue; - - if (de->d_name[q-8-16-1] != '-' || - de->d_name[q-8-16-1-16-1] != '-' || - de->d_name[q-8-16-1-16-1-32-1] != '@') - continue; - - p = strdup(de->d_name); - if (!p) { - r = -ENOMEM; - goto finish; - } - - de->d_name[q-8-16-1-16-1] = 0; - if (sd_id128_from_string(de->d_name + q-8-16-1-16-1-32, &seqnum_id) < 0) { - free(p); - continue; - } - - if (sscanf(de->d_name + q-8-16-1-16, "%16llx-%16llx.journal", &seqnum, &realtime) != 2) { - free(p); - continue; - } - - have_seqnum = true; - - } else if (endswith(de->d_name, ".journal~")) { - unsigned long long tmp; - - /* Vacuum corrupted files */ - - if (q < 1 + 16 + 1 + 16 + 8 + 1) - continue; - - if (de->d_name[q-1-8-16-1] != '-' || - de->d_name[q-1-8-16-1-16-1] != '@') - continue; - - p = strdup(de->d_name); - if (!p) { - r = -ENOMEM; - goto finish; - } - - if (sscanf(de->d_name + q-1-8-16-1-16, "%16llx-%16llx.journal~", &realtime, &tmp) != 2) { - free(p); - continue; - } - - have_seqnum = false; - } else - continue; - - if (n_list >= n_allocated) { - struct vacuum_info *j; - - n_allocated = MAX(n_allocated * 2U, 8U); - j = realloc(list, n_allocated * sizeof(struct vacuum_info)); - if (!j) { - free(p); - r = -ENOMEM; - goto finish; - } - - list = j; - } - - list[n_list].filename = p; - list[n_list].usage = 512UL * (uint64_t) st.st_blocks; - list[n_list].seqnum = seqnum; - list[n_list].realtime = realtime; - list[n_list].seqnum_id = seqnum_id; - list[n_list].have_seqnum = have_seqnum; - - sum 
+= list[n_list].usage; - - n_list ++; - } - - qsort(list, n_list, sizeof(struct vacuum_info), vacuum_compare); - - for(i = 0; i < n_list; i++) { - struct statvfs ss; - - if (fstatvfs(dirfd(d), &ss) < 0) { - r = -errno; - goto finish; - } - - if (sum <= max_use && - (uint64_t) ss.f_bavail * (uint64_t) ss.f_bsize >= min_free) - break; - - if (unlinkat(dirfd(d), list[i].filename, 0) >= 0) { - log_info("Deleted archived journal %s/%s.", directory, list[i].filename); - sum -= list[i].usage; - } else if (errno != ENOENT) - log_warning("Failed to delete %s/%s: %m", directory, list[i].filename); - } - -finish: - for (i = 0; i < n_list; i++) - free(list[i].filename); - - free(list); - - if (d) - closedir(d); - - return r; -} int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint64_t p, uint64_t *seqnum, Object **ret, uint64_t *offset) { uint64_t i, n; @@ -2592,8 +2464,10 @@ bool journal_file_rotate_suggested(JournalFile *f) { /* If we gained new header fields we gained new features, * hence suggest a rotation */ - if (le64toh(f->header->header_size) < sizeof(Header)) + if (le64toh(f->header->header_size) < sizeof(Header)) { + log_debug("%s uses an outdated header, suggesting rotation.", f->path); return true; + } /* Let's check if the hash tables grew over a certain fill * level (75%, borrowing this value from Java's hash table @@ -2602,12 +2476,26 @@ bool journal_file_rotate_suggested(JournalFile *f) { * in newer versions. */ if (JOURNAL_HEADER_CONTAINS(f->header, n_data)) - if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) + if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) { + log_debug("Data hash table of %s has a fill level at %.1f (%llu of %llu items, %llu file size, %llu bytes per hash table item), suggesting rotation.", + f->path, + 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))), + (unsigned long long) le64toh(f->header->n_data), + (unsigned long long) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)), + (unsigned long long) (f->last_stat.st_size), + (unsigned long long) (f->last_stat.st_size / le64toh(f->header->n_data))); return true; + } if (JOURNAL_HEADER_CONTAINS(f->header, n_fields)) - if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) + if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) { + log_debug("Field hash table of %s has a fill level at %.1f (%llu of %llu items), suggesting rotation.", + f->path, + 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))), + (unsigned long long) le64toh(f->header->n_fields), + (unsigned long long) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))); return true; + } return false; }
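
Note on the revised data hash table sizing in journal_file_setup_data_hash_table(): the estimate changes from one hash table entry per 2 KiB of journal file to one per 768 bytes, scaled by 4/3 so the table stays below a 75% fill level, i.e. s = (max_size * 4 / 768 / 3) * sizeof(HashItem). A minimal standalone sketch of that arithmetic follows; the 16-byte HashItem stand-in mirrors the two little-endian 64-bit offsets defined in journal-def.h, and the 128 MiB maximum file size is purely illustrative (the real function additionally enforces DEFAULT_DATA_HASH_TABLE_SIZE as a floor).

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for journal-def.h's HashItem: two 64-bit offsets, 16 bytes. */
    typedef struct HashItem {
            uint64_t head_hash_offset;
            uint64_t tail_hash_offset;
    } HashItem;

    /* Same arithmetic as the patched journal_file_setup_data_hash_table():
     * one item per 768 bytes of maximum file size, scaled by 4/3 so the
     * table never exceeds a 75% fill level. */
    static uint64_t data_hash_table_size(uint64_t max_file_size) {
            return (max_file_size * 4 / 768 / 3) * sizeof(HashItem);
    }

    int main(void) {
            uint64_t max_size = 128ULL * 1024ULL * 1024ULL; /* hypothetical 128 MiB journal */
            uint64_t s = data_hash_table_size(max_size);

            printf("hash table: %" PRIu64 " bytes, %" PRIu64 " items\n",
                   s, s / sizeof(HashItem));
            return 0;
    }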
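
The patch also widens the signatures of journal_file_open(), journal_file_open_reliably() and journal_file_rotate() with explicit compress/seal flags and a shared MMapCache. A hypothetical caller, sketched against the headers in this tree (journal-file.h, mmap-cache.h); the path, mode and flag choices are illustrative only, not taken from this commit.

    #include <errno.h>
    #include <fcntl.h>
    #include <stdbool.h>

    #include "journal-file.h"
    #include "mmap-cache.h"

    /* Open (or create) a journal file with compression and sealing requested,
     * sharing one MMapCache across all files opened by the caller. Sealing is
     * silently dropped by journal_file_open() if no FSPRG state can be loaded. */
    static int open_journal(const char *path, MMapCache *cache, JournalFile **ret) {
            /* compress=true, seal=true; default metrics, no template */
            return journal_file_open_reliably(path, O_RDWR|O_CREAT, 0640,
                                              true, true,
                                              NULL, cache, NULL, ret);
    }

    /* Usage (error handling elided):
     *
     *     MMapCache *m = mmap_cache_new();
     *     JournalFile *f;
     *     open_journal("/var/log/journal/system.journal", m, &f);
     *     ...
     *     journal_file_close(f);
     *     mmap_cache_unref(m);
     */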