X-Git-Url: https://www.chiark.greenend.org.uk/ucgi/~ianmdlvl/git?p=elogind.git;a=blobdiff_plain;f=src%2Fjournal%2Fjournal-file.c;h=4de1daf6a0b177bed7105fc32ea493cc8da4a95c;hp=427631d30a4730b49770b34106acb6218d5f3f5f;hb=fd8ee359a014916ac62ae2b58f6736ccb48c6d4e;hpb=1e2579fdeb7608719cb28da0b5061f48ba0efc34

diff --git a/src/journal/journal-file.c b/src/journal/journal-file.c
index 427631d30..4de1daf6a 100644
--- a/src/journal/journal-file.c
+++ b/src/journal/journal-file.c
@@ -30,18 +30,34 @@
 #include "journal-def.h"
 #include "journal-file.h"
 #include "lookup3.h"
-
-#define DEFAULT_ARENA_MAX_SIZE (16ULL*1024ULL*1024ULL*1024ULL)
-#define DEFAULT_ARENA_MIN_SIZE (256ULL*1024ULL)
-#define DEFAULT_ARENA_KEEP_FREE (1ULL*1024ULL*1024ULL)
-
-#define DEFAULT_MAX_USE (16ULL*1024ULL*1024ULL*16ULL)
+#include "compress.h"
 
 #define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*16ULL)
 #define DEFAULT_FIELD_HASH_TABLE_SIZE (2047ULL*16ULL)
 
 #define DEFAULT_WINDOW_SIZE (128ULL*1024ULL*1024ULL)
 
+#define COMPRESSION_SIZE_THRESHOLD (64ULL)
+
+/* This is the minimum journal file size */
+#define JOURNAL_FILE_SIZE_MIN (64ULL*1024ULL)
+
+/* These are the lower and upper bounds if we deduce the max_use value
+ * from the file system size */
+#define DEFAULT_MAX_USE_LOWER (1ULL*1024ULL*1024ULL)            /* 1 MiB */
+#define DEFAULT_MAX_USE_UPPER (4ULL*1024ULL*1024ULL*1024ULL)    /* 4 GiB */
+
+/* This is the upper bound if we deduce max_size from max_use */
+#define DEFAULT_MAX_SIZE_UPPER (16ULL*1024ULL*1024ULL)          /* 16 MiB */
+
+/* This is the upper bound if we deduce the keep_free value from the
+ * file system size */
+#define DEFAULT_KEEP_FREE_UPPER (4ULL*1024ULL*1024ULL*1024ULL)  /* 4 GiB */
+
+/* This is the keep_free value when we can't determine the system
+ * size */
+#define DEFAULT_KEEP_FREE (1024ULL*1024ULL)                     /* 1 MB */
+
 static const char signature[] = { 'L', 'P', 'K', 'S', 'H', 'H', 'R', 'H' };
 
 #define ALIGN64(x) (((x) + 7ULL) & ~7ULL)
@@ -63,6 +79,11 @@ void journal_file_close(JournalFile *f) {
                 close_nointr_nofail(f->fd);
 
         free(f->path);
+
+#ifdef HAVE_XZ
+        free(f->compress_buffer);
+#endif
+
         free(f);
 }
 
@@ -76,9 +97,6 @@ static int journal_file_init_header(JournalFile *f, JournalFile *template) {
         zero(h);
         memcpy(h.signature, signature, 8);
         h.arena_offset = htole64(ALIGN64(sizeof(h)));
-        h.arena_max_size = htole64(DEFAULT_ARENA_MAX_SIZE);
-        h.arena_min_size = htole64(DEFAULT_ARENA_MIN_SIZE);
-        h.arena_keep_free = htole64(DEFAULT_ARENA_KEEP_FREE);
 
         r = sd_id128_randomize(&h.file_id);
         if (r < 0)
@@ -120,6 +138,9 @@ static int journal_file_refresh_header(JournalFile *f) {
         f->header->boot_id = boot_id;
 
         f->header->state = STATE_ONLINE;
+
+        __sync_synchronize();
+
         return 0;
 }
 
@@ -129,8 +150,13 @@ static int journal_file_verify_header(JournalFile *f) {
         if (memcmp(f->header, signature, 8))
                 return -EBADMSG;
 
+#ifdef HAVE_XZ
+        if ((le64toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_COMPRESSED) != 0)
+                return -EPROTONOSUPPORT;
+#else
         if (f->header->incompatible_flags != 0)
                 return -EPROTONOSUPPORT;
+#endif
 
         if ((uint64_t) f->last_stat.st_size < (le64toh(f->header->arena_offset) + le64toh(f->header->arena_size)))
                 return -ENODATA;
@@ -161,16 +187,10 @@
 }
 
 static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) {
-        uint64_t asize;
         uint64_t old_size, new_size;
 
         assert(f);
 
-        if (offset < le64toh(f->header->arena_offset))
-                return -EINVAL;
-
-        new_size = PAGE_ALIGN(offset + size);
-
         /* We assume that this file is not sparse, and we know that
          * for sure, since we always call posix_fallocate()
          * ourselves */
@@ -179,12 +199,19 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size)
                 le64toh(f->header->arena_offset) +
                 le64toh(f->header->arena_size);
 
-        if (old_size >= new_size)
+        new_size = PAGE_ALIGN(offset + size);
+        if (new_size < le64toh(f->header->arena_offset))
+                new_size = le64toh(f->header->arena_offset);
+
+        if (new_size <= old_size)
                 return 0;
 
-        asize = new_size - le64toh(f->header->arena_offset);
+        if (f->metrics.max_size > 0 &&
+            new_size > f->metrics.max_size)
+                return -E2BIG;
 
-        if (asize > le64toh(f->header->arena_min_size)) {
+        if (new_size > f->metrics.min_size &&
+            f->metrics.keep_free > 0) {
                 struct statvfs svfs;
 
                 if (fstatvfs(f->fd, &svfs) >= 0) {
@@ -192,8 +219,8 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size)
 
                         available = svfs.f_bfree * svfs.f_bsize;
 
-                        if (available >= f->header->arena_keep_free)
-                                available -= f->header->arena_keep_free;
+                        if (available >= f->metrics.keep_free)
+                                available -= f->metrics.keep_free;
                         else
                                 available = 0;
 
@@ -202,16 +229,16 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size)
                 }
         }
 
-        if (asize > le64toh(f->header->arena_max_size))
-                return -E2BIG;
-
+        /* Note that the glibc fallocate() fallback is very
+           inefficient, hence we try to minimize the allocation area
+           as we can. */
         if (posix_fallocate(f->fd, old_size, new_size - old_size) < 0)
                 return -errno;
 
         if (fstat(f->fd, &f->last_stat) < 0)
                 return -errno;
 
-        f->header->arena_size = htole64(asize);
+        f->header->arena_size = new_size - htole64(f->header->arena_offset);
 
         return 0;
 }
@@ -236,6 +263,10 @@ static int journal_file_map(
         wsize = size + (offset - woffset);
         wsize = PAGE_ALIGN(wsize);
 
+        /* Avoid SIGBUS on invalid accesses */
+        if (woffset + wsize > (uint64_t) PAGE_ALIGN(f->last_stat.st_size))
+                return -EADDRNOTAVAIL;
+
         window = mmap(NULL, wsize, f->prot, MAP_SHARED, f->fd, woffset);
         if (window == MAP_FAILED)
                 return -errno;
@@ -255,7 +286,7 @@
 }
 
 static int journal_file_move_to(JournalFile *f, int wt, uint64_t offset, uint64_t size, void **ret) {
-        void *p;
+        void *p = NULL;
         uint64_t delta;
         int r;
         Window *w;
@@ -265,6 +296,15 @@ static int journal_file_move_to(JournalFile *f, int wt, uint64_t offset, uint64_
         assert(wt >= 0);
         assert(wt < _WINDOW_MAX);
 
+        if (offset + size > (uint64_t) f->last_stat.st_size) {
+                /* Hmm, out of range? Let's refresh the fstat() data
+                 * first, before we trust that check. */
+
+                if (fstat(f->fd, &f->last_stat) < 0 ||
+                    offset + size > (uint64_t) f->last_stat.st_size)
+                        return -EADDRNOTAVAIL;
+        }
+
         w = f->windows + wt;
 
         if (_likely_(w->ptr &&
@@ -292,14 +332,20 @@ static int journal_file_move_to(JournalFile *f, int wt, uint64_t offset, uint64_
 
                 delta = PAGE_ALIGN((DEFAULT_WINDOW_SIZE - size) / 2);
 
-                if (offset < delta)
+                if (delta > offset)
                         delta = offset;
 
                 offset -= delta;
-                size += (DEFAULT_WINDOW_SIZE - delta);
+                size = DEFAULT_WINDOW_SIZE;
         } else
                 delta = 0;
 
+        if (offset + size > (uint64_t) f->last_stat.st_size)
+                size = PAGE_ALIGN((uint64_t) f->last_stat.st_size - offset);
+
+        if (size <= 0)
+                return -EADDRNOTAVAIL;
+
         r = journal_file_map(f,
                              offset, size,
                              &w->ptr, &w->offset, &w->size,
@@ -317,7 +363,7 @@ static bool verify_hash(Object *o) {
 
         assert(o);
 
-        if (o->object.type == OBJECT_DATA) {
+        if (o->object.type == OBJECT_DATA && !(o->object.flags & OBJECT_COMPRESSED)) {
                 h1 = le64toh(o->data.hash);
                 h2 = hash64(o->data.payload, le64toh(o->object.size) - offsetof(Object, data.payload));
         } else if (o->object.type == OBJECT_FIELD) {
@@ -576,6 +622,9 @@ int journal_file_find_data_object_with_hash(
 
         osize = offsetof(Object, data.payload) + size;
 
+        if (f->header->data_hash_table_size == 0)
+                return -EBADMSG;
+
         h = hash % (le64toh(f->header->data_hash_table_size) / sizeof(HashItem));
         p = le64toh(f->data_hash_table[h].head_hash_offset);
 
@@ -586,12 +635,40 @@ int journal_file_find_data_object_with_hash(
                 if (r < 0)
                         return r;
 
-                if (le64toh(o->object.size) == osize &&
-                    memcmp(o->data.payload, data, size) == 0) {
+                if (le64toh(o->data.hash) != hash)
+                        goto next;
+
+                if (o->object.flags & OBJECT_COMPRESSED) {
+#ifdef HAVE_XZ
+                        uint64_t l, rsize;
+
+                        l = le64toh(o->object.size);
+                        if (l <= offsetof(Object, data.payload))
+                                return -EBADMSG;
+
+                        l -= offsetof(Object, data.payload);
 
-                        if (le64toh(o->data.hash) != hash)
+                        if (!uncompress_blob(o->data.payload, l, &f->compress_buffer, &f->compress_buffer_size, &rsize))
                                 return -EBADMSG;
 
+                        if (rsize == size &&
+                            memcmp(f->compress_buffer, data, size) == 0) {
+
+                                if (ret)
+                                        *ret = o;
+
+                                if (offset)
+                                        *offset = p;
+
+                                return 1;
+                        }
+#else
+                        return -EPROTONOSUPPORT;
+#endif
+
+                } else if (le64toh(o->object.size) == osize &&
+                           memcmp(o->data.payload, data, size) == 0) {
+
                         if (ret)
                                 *ret = o;
 
@@ -601,6 +678,7 @@ int journal_file_find_data_object_with_hash(
                         return 1;
                 }
 
+        next:
                 p = le64toh(o->data.next_hash_offset);
         }
 
@@ -629,6 +707,7 @@ static int journal_file_append_data(JournalFile *f, const void *data, uint64_t s
         uint64_t osize;
         Object *o;
         int r;
+        bool compressed = false;
 
         assert(f);
         assert(data || size == 0);
@@ -655,7 +734,27 @@ static int journal_file_append_data(JournalFile *f, const void *data, uint64_t s
                 return r;
 
         o->data.hash = htole64(hash);
-        memcpy(o->data.payload, data, size);
+
+#ifdef HAVE_XZ
+        if (f->compress &&
+            size >= COMPRESSION_SIZE_THRESHOLD) {
+                uint64_t rsize;
+
+                compressed = compress_blob(data, size, o->data.payload, &rsize);
+
+                if (compressed) {
+                        o->object.size = htole64(offsetof(Object, data.payload) + rsize);
+                        o->object.flags |= OBJECT_COMPRESSED;
+
+                        f->header->incompatible_flags = htole32(le32toh(f->header->incompatible_flags) | HEADER_INCOMPATIBLE_COMPRESSED);
+
+                        log_debug("Compressed data object %lu -> %lu", (unsigned long) size, (unsigned long) rsize);
+                }
+        }
+#endif
+
+        if (!compressed)
+                memcpy(o->data.payload, data, size);
 
         r = journal_file_link_data(f, o, p, hash);
         if (r < 0)
@@ -808,6 +907,8 @@ static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) {
         assert(offset > 0);
         assert(o->object.type == OBJECT_ENTRY);
 
+        __sync_synchronize();
+
         /* Link up the entry itself */
         r = link_entry_into_array(f,
                                   &f->header->entry_array_offset,
@@ -816,7 +917,7 @@ static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) {
         if (r < 0)
                 return r;
 
-        log_error("%s %lu", f->path, (unsigned long) f->header->n_entries);
+        log_error("=> %s seqnr=%lu n_entries=%lu", f->path, (unsigned long) o->entry.seqnum, (unsigned long) f->header->n_entries);
 
         if (f->header->head_entry_realtime == 0)
                 f->header->head_entry_realtime = o->entry.realtime;
@@ -879,6 +980,20 @@ static int journal_file_append_entry_internal(
         return 0;
 }
 
+void journal_file_post_change(JournalFile *f) {
+        assert(f);
+
+        /* inotify() does not receive IN_MODIFY events from file
+         * accesses done via mmap(). After each access we hence
+         * trigger IN_MODIFY by truncating the journal file to its
+         * current size which triggers IN_MODIFY. */
+
+        __sync_synchronize();
+
+        if (ftruncate(f->fd, f->last_stat.st_size) < 0)
+                log_error("Failed to to truncate file to its own size: %m");
+}
+
 int journal_file_append_entry(JournalFile *f, const dual_timestamp *ts, const struct iovec iovec[], unsigned n_iovec, uint64_t *seqnum, Object **ret, uint64_t *offset) {
         unsigned i;
         EntryItem *items;
@@ -901,12 +1016,7 @@ int journal_file_append_entry(JournalFile *f, const dual_timestamp *ts, const st
             ts->monotonic < le64toh(f->header->tail_entry_monotonic))
                 return -EINVAL;
 
-        if (ts->realtime < le64toh(f->header->tail_entry_realtime))
-                return -EINVAL;
-
-        items = new(EntryItem, n_iovec);
-        if (!items)
-                return -ENOMEM;
+        items = alloca(sizeof(EntryItem) * n_iovec);
 
         for (i = 0; i < n_iovec; i++) {
                 uint64_t p;
@@ -914,7 +1024,7 @@ int journal_file_append_entry(JournalFile *f, const dual_timestamp *ts, const st
 
                 r = journal_file_append_data(f, iovec[i].iov_base, iovec[i].iov_len, &o, &p);
                 if (r < 0)
-                        goto finish;
+                        return r;
 
                 xor_hash ^= le64toh(o->data.hash);
                 items[i].object_offset = htole64(p);
@@ -923,8 +1033,7 @@ int journal_file_append_entry(JournalFile *f, const dual_timestamp *ts, const st
 
         r = journal_file_append_entry_internal(f, ts, xor_hash, items, n_iovec, seqnum, ret, offset);
 
-finish:
-        free(items);
+        journal_file_post_change(f);
 
         return r;
 }
@@ -935,7 +1044,7 @@ static int generic_array_get(JournalFile *f,
                              Object **ret, uint64_t *offset) {
 
         Object *o;
-        uint64_t p, a;
+        uint64_t p = 0, a;
         int r;
 
         assert(f);
@@ -1145,7 +1254,6 @@ static int generic_array_bisect_plus_one(JournalFile *f,
 
         /* This bisects the array in object 'first', but first checks
          * an extra */
-
         r = test_object(f, extra, needle);
         if (r < 0)
                 return r;
@@ -1161,6 +1269,11 @@ static int generic_array_bisect_plus_one(JournalFile *f,
 
                 if (offset)
                         *offset = extra;
+
+                if (idx)
+                        *idx = 0;
+
+                return 1;
         } else if (r == TEST_RIGHT)
                 return 0;
 
@@ -1420,7 +1533,7 @@ int journal_file_next_entry_for_data(
         assert(p > 0 || !o);
 
         r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
-        if (r <= 0)
+        if (r < 0)
                 return r;
 
         n = le64toh(d->data.n_entries);
@@ -1574,6 +1687,9 @@ void journal_file_dump(JournalFile *f) {
                         break;
                 }
 
+                if (o->object.flags & OBJECT_COMPRESSED)
+                        printf("Flags: COMPRESSED\n");
+
                 if (p == le64toh(f->header->tail_object_offset))
                         p = 0;
                 else
@@ -1784,7 +1900,7 @@ int journal_directory_vacuum(const char *directory, uint64_t max_use, uint64_t m
         assert(directory);
 
         if (max_use <= 0)
-                max_use = DEFAULT_MAX_USE;
+                return 0;
 
         d = opendir(directory);
         if (!d)
@@ -1901,3 +2017,160 @@ finish:
 
         return r;
 }
+
+int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint64_t p, uint64_t *seqnum, Object **ret, uint64_t *offset) {
+        uint64_t i, n;
+        uint64_t q, xor_hash = 0;
+        int r;
+        EntryItem *items;
+        dual_timestamp ts;
+
+        assert(from);
+        assert(to);
+        assert(o);
+        assert(p);
+
+        if (!to->writable)
+                return -EPERM;
+
+        ts.monotonic = le64toh(o->entry.monotonic);
+        ts.realtime = le64toh(o->entry.realtime);
+
+        if (to->tail_entry_monotonic_valid &&
+            ts.monotonic < le64toh(to->header->tail_entry_monotonic))
+                return -EINVAL;
+
+        if (ts.realtime < le64toh(to->header->tail_entry_realtime))
+                return -EINVAL;
+
+        n = journal_file_entry_n_items(o);
+        items = alloca(sizeof(EntryItem) * n);
+
+        for (i = 0; i < n; i++) {
+                uint64_t le_hash, l, h;
+                size_t t;
+                void *data;
+                Object *u;
+
+                q = le64toh(o->entry.items[i].object_offset);
+                le_hash = o->entry.items[i].hash;
+
+                r = journal_file_move_to_object(from, OBJECT_DATA, q, &o);
+                if (r < 0)
+                        return r;
+
+                if (le_hash != o->data.hash)
+                        return -EBADMSG;
+
+                l = le64toh(o->object.size) - offsetof(Object, data.payload);
+                t = (size_t) l;
+
+                /* We hit the limit on 32bit machines */
+                if ((uint64_t) t != l)
+                        return -E2BIG;
+
+                if (o->object.flags & OBJECT_COMPRESSED) {
+#ifdef HAVE_XZ
+                        uint64_t rsize;
+
+                        if (!uncompress_blob(o->data.payload, l, &from->compress_buffer, &from->compress_buffer_size, &rsize))
+                                return -EBADMSG;
+
+                        data = from->compress_buffer;
+                        l = rsize;
+#else
+                        return -EPROTONOSUPPORT;
+#endif
+                } else
+                        data = o->data.payload;
+
+                r = journal_file_append_data(to, data, l, &u, &h);
+                if (r < 0)
+                        return r;
+
+                xor_hash ^= le64toh(u->data.hash);
+                items[i].object_offset = htole64(h);
+                items[i].hash = u->data.hash;
+
+                r = journal_file_move_to_object(from, OBJECT_ENTRY, p, &o);
+                if (r < 0)
+                        return r;
+        }
+
+        return journal_file_append_entry_internal(to, &ts, xor_hash, items, n, seqnum, ret, offset);
+}
+
+void journal_default_metrics(JournalMetrics *m, int fd) {
+        uint64_t fs_size = 0;
+        struct statvfs ss;
+        char a[64], b[64], c[64], d[64];
+
+        assert(m);
+        assert(fd >= 0);
+
+        if (fstatvfs(fd, &ss) >= 0)
+                fs_size = ss.f_frsize * ss.f_blocks;
+
+        if (m->max_use == (uint64_t) -1) {
+
+                if (fs_size > 0) {
+                        m->max_use = PAGE_ALIGN(fs_size / 10); /* 10% of file system size */
+
+                        if (m->max_use > DEFAULT_MAX_USE_UPPER)
+                                m->max_use = DEFAULT_MAX_USE_UPPER;
+
+                        if (m->max_use < DEFAULT_MAX_USE_LOWER)
+                                m->max_use = DEFAULT_MAX_USE_LOWER;
+                } else
+                        m->max_use = DEFAULT_MAX_USE_LOWER;
+        } else {
+                m->max_use = PAGE_ALIGN(m->max_use);
+
+                if (m->max_use < JOURNAL_FILE_SIZE_MIN*2)
+                        m->max_use = JOURNAL_FILE_SIZE_MIN*2;
+        }
+
+        if (m->max_size == (uint64_t) -1) {
+                m->max_size = PAGE_ALIGN(m->max_use / 8); /* 8 chunks */
+
+                if (m->max_size > DEFAULT_MAX_SIZE_UPPER)
+                        m->max_size = DEFAULT_MAX_SIZE_UPPER;
+        } else
+                m->max_size = PAGE_ALIGN(m->max_size);
+
+        if (m->max_size < JOURNAL_FILE_SIZE_MIN)
+                m->max_size = JOURNAL_FILE_SIZE_MIN;
+
+        if (m->max_size*2 > m->max_use)
+                m->max_use = m->max_size*2;
+
+        if (m->min_size == (uint64_t) -1)
+                m->min_size = JOURNAL_FILE_SIZE_MIN;
+        else {
+                m->min_size = PAGE_ALIGN(m->min_size);
+
+                if (m->min_size < JOURNAL_FILE_SIZE_MIN)
+                        m->min_size = JOURNAL_FILE_SIZE_MIN;
+
+                if (m->min_size > m->max_size)
+                        m->max_size = m->min_size;
+        }
+
+        if (m->keep_free == (uint64_t) -1) {
+
+                if (fs_size > 0) {
+                        m->keep_free = PAGE_ALIGN(fs_size / 20); /* 5% of file system size */
+
+                        if (m->keep_free > DEFAULT_KEEP_FREE_UPPER)
+                                m->keep_free = DEFAULT_KEEP_FREE_UPPER;
+
+                } else
+                        m->keep_free = DEFAULT_KEEP_FREE;
+        }
+
+        log_debug("Fixed max_use=%s max_size=%s min_size=%s keep_free=%s",
+                  format_bytes(a, sizeof(a), m->max_use),
+                  format_bytes(b, sizeof(b), m->max_size),
+                  format_bytes(c, sizeof(c), m->min_size),
+                  format_bytes(d, sizeof(d), m->keep_free));
+}
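
The sizing policy that journal_default_metrics() introduces above is easier to follow without the diff markers. The sketch below restates only those clamping rules as a self-contained C program: struct metrics, default_metrics() and page_align() are illustrative stand-ins for the patch's JournalMetrics, journal_default_metrics() and PAGE_ALIGN(), and the 4 KiB page size and 8 GiB file system are assumptions made for this example, not values taken from the repository.

#include <stdint.h>
#include <stdio.h>

/* Limits as defined at the top of the patch. */
#define JOURNAL_FILE_SIZE_MIN   (64ULL*1024ULL)                  /* 64 KiB */
#define DEFAULT_MAX_USE_LOWER   (1ULL*1024ULL*1024ULL)           /* 1 MiB */
#define DEFAULT_MAX_USE_UPPER   (4ULL*1024ULL*1024ULL*1024ULL)   /* 4 GiB */
#define DEFAULT_MAX_SIZE_UPPER  (16ULL*1024ULL*1024ULL)          /* 16 MiB */
#define DEFAULT_KEEP_FREE_UPPER (4ULL*1024ULL*1024ULL*1024ULL)   /* 4 GiB */
#define DEFAULT_KEEP_FREE       (1024ULL*1024ULL)                /* 1 MB */

/* Stand-in for PAGE_ALIGN(); a 4 KiB page size is assumed here. */
static uint64_t page_align(uint64_t v) {
        return (v + 4095ULL) & ~4095ULL;
}

/* Local mirror of the JournalMetrics fields used by the patch;
 * (uint64_t) -1 means "not configured", as in journal_default_metrics(). */
struct metrics {
        uint64_t max_use, max_size, min_size, keep_free;
};

/* Restates the defaulting rules: fs_size stands in for the fstatvfs()
 * result so the arithmetic can be followed in isolation. */
static void default_metrics(struct metrics *m, uint64_t fs_size) {

        if (m->max_use == (uint64_t) -1) {
                if (fs_size > 0) {
                        m->max_use = page_align(fs_size / 10);   /* 10% of the file system */
                        if (m->max_use > DEFAULT_MAX_USE_UPPER)
                                m->max_use = DEFAULT_MAX_USE_UPPER;
                        if (m->max_use < DEFAULT_MAX_USE_LOWER)
                                m->max_use = DEFAULT_MAX_USE_LOWER;
                } else
                        m->max_use = DEFAULT_MAX_USE_LOWER;
        } else {
                m->max_use = page_align(m->max_use);
                if (m->max_use < JOURNAL_FILE_SIZE_MIN*2)
                        m->max_use = JOURNAL_FILE_SIZE_MIN*2;
        }

        if (m->max_size == (uint64_t) -1) {
                m->max_size = page_align(m->max_use / 8);        /* 8 rotated chunks */
                if (m->max_size > DEFAULT_MAX_SIZE_UPPER)
                        m->max_size = DEFAULT_MAX_SIZE_UPPER;
        } else
                m->max_size = page_align(m->max_size);

        if (m->max_size < JOURNAL_FILE_SIZE_MIN)
                m->max_size = JOURNAL_FILE_SIZE_MIN;
        if (m->max_size*2 > m->max_use)
                m->max_use = m->max_size*2;

        if (m->min_size == (uint64_t) -1)
                m->min_size = JOURNAL_FILE_SIZE_MIN;
        else {
                m->min_size = page_align(m->min_size);
                if (m->min_size < JOURNAL_FILE_SIZE_MIN)
                        m->min_size = JOURNAL_FILE_SIZE_MIN;
                if (m->min_size > m->max_size)
                        m->max_size = m->min_size;
        }

        if (m->keep_free == (uint64_t) -1) {
                if (fs_size > 0) {
                        m->keep_free = page_align(fs_size / 20); /* 5% of the file system */
                        if (m->keep_free > DEFAULT_KEEP_FREE_UPPER)
                                m->keep_free = DEFAULT_KEEP_FREE_UPPER;
                } else
                        m->keep_free = DEFAULT_KEEP_FREE;
        }
}

int main(void) {
        /* Nothing configured, hypothetical 8 GiB file system: max_use
         * becomes 10% of it (about 819 MiB, inside the 1 MiB - 4 GiB
         * bounds), max_size is max_use/8 capped at 16 MiB, min_size
         * stays at the 64 KiB minimum, keep_free becomes 5% (about
         * 410 MiB, below the 4 GiB cap). */
        struct metrics m = { (uint64_t) -1, (uint64_t) -1, (uint64_t) -1, (uint64_t) -1 };

        default_metrics(&m, 8ULL*1024ULL*1024ULL*1024ULL);

        printf("max_use=%llu max_size=%llu min_size=%llu keep_free=%llu\n",
               (unsigned long long) m.max_use,
               (unsigned long long) m.max_size,
               (unsigned long long) m.min_size,
               (unsigned long long) m.keep_free);
        return 0;
}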