X-Git-Url: http://www.chiark.greenend.org.uk/ucgi/~ianmdlvl/git?p=elogind.git;a=blobdiff_plain;f=src%2Fjournal%2Fjournal-file.c;h=9d8c9e686d3ec186e29d859a5a435afb85725d16;hp=a60a896c2f3ad7370f43abf8ba97514d0c9f20d2;hb=71fa6f006f92831c0c02e844b4c35e4b7197c6d6;hpb=7ea07dcddafe573c699fc48171b57b912897e7e2 diff --git a/src/journal/journal-file.c b/src/journal/journal-file.c index a60a896c2..9d8c9e686 100644 --- a/src/journal/journal-file.c +++ b/src/journal/journal-file.c @@ -58,18 +58,27 @@ * size */ #define DEFAULT_KEEP_FREE (1024ULL*1024ULL) /* 1 MB */ -static const char signature[] = { 'L', 'P', 'K', 'S', 'H', 'H', 'R', 'H' }; +/* n_data was the first entry we added after the initial file format design */ +#define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data)) #define ALIGN64(x) (((x) + 7ULL) & ~7ULL) +#define JOURNAL_HEADER_CONTAINS(h, field) \ + (le64toh((h)->header_size) >= offsetof(Header, field) + sizeof((h)->field)) + +static const char signature[] = { 'L', 'P', 'K', 'S', 'H', 'H', 'R', 'H' }; + void journal_file_close(JournalFile *f) { int t; assert(f); - if (f->header && f->writable) - f->header->state = STATE_OFFLINE; + if (f->header) { + if (f->writable) + f->header->state = STATE_OFFLINE; + munmap(f->header, PAGE_ALIGN(sizeof(Header))); + } for (t = 0; t < _WINDOW_MAX; t++) if (f->windows[t].ptr) @@ -96,7 +105,7 @@ static int journal_file_init_header(JournalFile *f, JournalFile *template) { zero(h); memcpy(h.signature, signature, 8); - h.arena_offset = htole64(ALIGN64(sizeof(h))); + h.header_size = htole64(ALIGN64(sizeof(h))); r = sd_id128_randomize(&h.file_id); if (r < 0) @@ -104,7 +113,7 @@ static int journal_file_init_header(JournalFile *f, JournalFile *template) { if (template) { h.seqnum_id = template->header->seqnum_id; - h.seqnum = template->header->seqnum; + h.tail_seqnum = template->header->tail_seqnum; } else h.seqnum_id = h.file_id; @@ -158,7 +167,11 @@ static int journal_file_verify_header(JournalFile *f) { return -EPROTONOSUPPORT; #endif - if ((uint64_t) f->last_stat.st_size < (le64toh(f->header->arena_offset) + le64toh(f->header->arena_size))) + /* The first addition was n_data, so check that we are at least this large */ + if (le64toh(f->header->header_size) < HEADER_SIZE_MIN) + return -EBADMSG; + + if ((uint64_t) f->last_stat.st_size < (le64toh(f->header->header_size) + le64toh(f->header->arena_size))) return -ENODATA; if (f->writable) { @@ -175,12 +188,15 @@ static int journal_file_verify_header(JournalFile *f) { state = f->header->state; - if (state == STATE_ONLINE) - log_debug("Journal file %s is already online. Assuming unclean closing. Ignoring.", f->path); - else if (state == STATE_ARCHIVED) + if (state == STATE_ONLINE) { + log_debug("Journal file %s is already online. Assuming unclean closing.", f->path); + return -EBUSY; + } else if (state == STATE_ARCHIVED) return -ESHUTDOWN; - else if (state != STATE_OFFLINE) - log_debug("Journal file %s has unknown state %u. 
Ignoring.", f->path, state); + else if (state != STATE_OFFLINE) { + log_debug("Journal file %s has unknown state %u.", f->path, state); + return -EBUSY; + } } return 0; @@ -188,6 +204,7 @@ static int journal_file_verify_header(JournalFile *f) { static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) { uint64_t old_size, new_size; + int r; assert(f); @@ -196,12 +213,12 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) * ourselves */ old_size = - le64toh(f->header->arena_offset) + + le64toh(f->header->header_size) + le64toh(f->header->arena_size); new_size = PAGE_ALIGN(offset + size); - if (new_size < le64toh(f->header->arena_offset)) - new_size = le64toh(f->header->arena_offset); + if (new_size < le64toh(f->header->header_size)) + new_size = le64toh(f->header->header_size); if (new_size <= old_size) return 0; @@ -232,13 +249,14 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) /* Note that the glibc fallocate() fallback is very inefficient, hence we try to minimize the allocation area as we can. */ - if (posix_fallocate(f->fd, old_size, new_size - old_size) < 0) - return -errno; + r = posix_fallocate(f->fd, old_size, new_size - old_size); + if (r != 0) + return -r; if (fstat(f->fd, &f->last_stat) < 0) return -errno; - f->header->arena_size = htole64(new_size - le64toh(f->header->arena_offset)); + f->header->arena_size = htole64(new_size - le64toh(f->header->header_size)); return 0; } @@ -418,7 +436,7 @@ static uint64_t journal_file_seqnum(JournalFile *f, uint64_t *seqnum) { assert(f); - r = le64toh(f->header->seqnum) + 1; + r = le64toh(f->header->tail_seqnum) + 1; if (seqnum) { /* If an external seqnum counter was passed, we update @@ -431,10 +449,10 @@ static uint64_t journal_file_seqnum(JournalFile *f, uint64_t *seqnum) { *seqnum = r; } - f->header->seqnum = htole64(r); + f->header->tail_seqnum = htole64(r); - if (f->header->first_seqnum == 0) - f->header->first_seqnum = htole64(r); + if (f->header->head_seqnum == 0) + f->header->head_seqnum = htole64(r); return r; } @@ -452,7 +470,7 @@ static int journal_file_append_object(JournalFile *f, int type, uint64_t size, O p = le64toh(f->header->tail_object_offset); if (p == 0) - p = le64toh(f->header->arena_offset); + p = le64toh(f->header->header_size); else { r = journal_file_move_to_object(f, -1, p, &tail); if (r < 0) @@ -588,7 +606,7 @@ static int journal_file_link_data(JournalFile *f, Object *o, uint64_t offset, ui o->data.n_entries = 0; h = hash % (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)); - p = le64toh(f->data_hash_table[h].head_hash_offset); + p = le64toh(f->data_hash_table[h].tail_hash_offset); if (p == 0) { /* Only entry in the hash table is easy */ f->data_hash_table[h].head_hash_offset = htole64(offset); @@ -605,6 +623,9 @@ static int journal_file_link_data(JournalFile *f, Object *o, uint64_t offset, ui f->data_hash_table[h].tail_hash_offset = htole64(offset); + if (JOURNAL_HEADER_CONTAINS(f->header, n_data)) + f->header->n_data = htole64(le64toh(f->header->n_data) + 1); + return 0; } @@ -1203,8 +1224,15 @@ static int generic_array_bisect(JournalFile *f, } } - if (k > n) + if (k > n) { + if (direction == DIRECTION_UP) { + i = n; + subtract_one = true; + goto found; + } + return 0; + } last_p = lp; @@ -1237,7 +1265,7 @@ found: *offset = p; if (idx) - *idx = t + i - (subtract_one ? 1 : 0); + *idx = t + i + (subtract_one ? 
-1 : 0); return 1; } @@ -1254,6 +1282,8 @@ static int generic_array_bisect_plus_one(JournalFile *f, uint64_t *idx) { int r; + bool step_back = false; + Object *o; assert(f); assert(test_object); @@ -1266,34 +1296,81 @@ static int generic_array_bisect_plus_one(JournalFile *f, r = test_object(f, extra, needle); if (r < 0) return r; - else if (r == TEST_FOUND) { - Object *o; - r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o); - if (r < 0) - return r; + if (r == TEST_FOUND) + r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT; - if (ret) - *ret = o; + /* if we are looking with DIRECTION_UP then we need to first + see if in the actual array there is a matching entry, and + return the last one of that. But if there isn't any we need + to return this one. Hence remember this, and return it + below. */ + if (r == TEST_LEFT) + step_back = direction == DIRECTION_UP; - if (offset) - *offset = extra; - - if (idx) - *idx = 0; - - return 1; - } else if (r == TEST_RIGHT) - return 0; + if (r == TEST_RIGHT) { + if (direction == DIRECTION_DOWN) + goto found; + else + return 0; + } r = generic_array_bisect(f, first, n-1, needle, test_object, direction, ret, offset, idx); - if (r > 0) + if (r == 0 && step_back) + goto found; + + if (r > 0 && idx) (*idx) ++; return r; + +found: + r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o); + if (r < 0) + return r; + + if (ret) + *ret = o; + + if (offset) + *offset = extra; + + if (idx) + *idx = 0; + + return 1; } +static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) { + assert(f); + assert(p > 0); + + if (p == needle) + return TEST_FOUND; + else if (p < needle) + return TEST_LEFT; + else + return TEST_RIGHT; +} + +int journal_file_move_to_entry_by_offset( + JournalFile *f, + uint64_t p, + direction_t direction, + Object **ret, + uint64_t *offset) { + + return generic_array_bisect(f, + le64toh(f->header->entry_array_offset), + le64toh(f->header->n_entries), + p, + test_object_offset, + direction, + ret, offset, NULL); +} + + static int test_object_seqnum(JournalFile *f, uint64_t p, uint64_t needle) { Object *o; int r; @@ -1391,16 +1468,17 @@ int journal_file_move_to_entry_by_monotonic( Object **ret, uint64_t *offset) { - char t[8+32+1] = "_BOOT_ID="; + char t[9+32+1] = "_BOOT_ID="; Object *o; int r; - sd_id128_to_string(boot_id, t + 8); + assert(f); + sd_id128_to_string(boot_id, t + 9); r = journal_file_find_data_object(f, t, strlen(t), &o, NULL); if (r < 0) return r; - else if (r == 0) + if (r == 0) return -ENOENT; return generic_array_bisect_plus_one(f, @@ -1413,18 +1491,6 @@ int journal_file_move_to_entry_by_monotonic( ret, offset, NULL); } -static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) { - assert(f); - assert(p > 0); - - if (p == needle) - return TEST_FOUND; - else if (p < needle) - return TEST_LEFT; - else - return TEST_RIGHT; -} - int journal_file_next_entry( JournalFile *f, Object *o, uint64_t p, @@ -1589,6 +1655,119 @@ int journal_file_next_entry_for_data( ret, offset); } +int journal_file_move_to_entry_by_offset_for_data( + JournalFile *f, + uint64_t data_offset, + uint64_t p, + direction_t direction, + Object **ret, uint64_t *offset) { + + int r; + Object *d; + + assert(f); + + r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d); + if (r < 0) + return r; + + return generic_array_bisect_plus_one(f, + le64toh(d->data.entry_offset), + le64toh(d->data.entry_array_offset), + le64toh(d->data.n_entries), + p, + test_object_offset, + direction, + ret, offset, NULL); +} + +int 
journal_file_move_to_entry_by_monotonic_for_data( + JournalFile *f, + uint64_t data_offset, + sd_id128_t boot_id, + uint64_t monotonic, + direction_t direction, + Object **ret, uint64_t *offset) { + + char t[9+32+1] = "_BOOT_ID="; + Object *o, *d; + int r; + uint64_t b, z; + + assert(f); + + /* First, seek by time */ + sd_id128_to_string(boot_id, t + 9); + r = journal_file_find_data_object(f, t, strlen(t), &o, &b); + if (r < 0) + return r; + if (r == 0) + return -ENOENT; + + r = generic_array_bisect_plus_one(f, + le64toh(o->data.entry_offset), + le64toh(o->data.entry_array_offset), + le64toh(o->data.n_entries), + monotonic, + test_object_monotonic, + direction, + NULL, &z, NULL); + if (r <= 0) + return r; + + /* And now, continue seeking until we find an entry that + * exists in both bisection arrays */ + + for (;;) { + Object *qo; + uint64_t p, q; + + r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d); + if (r < 0) + return r; + + r = generic_array_bisect_plus_one(f, + le64toh(d->data.entry_offset), + le64toh(d->data.entry_array_offset), + le64toh(d->data.n_entries), + z, + test_object_offset, + direction, + NULL, &p, NULL); + if (r <= 0) + return r; + + r = journal_file_move_to_object(f, OBJECT_DATA, b, &o); + if (r < 0) + return r; + + r = generic_array_bisect_plus_one(f, + le64toh(o->data.entry_offset), + le64toh(o->data.entry_array_offset), + le64toh(o->data.n_entries), + p, + test_object_offset, + direction, + &qo, &q, NULL); + + if (r <= 0) + return r; + + if (p == q) { + if (ret) + *ret = qo; + if (offset) + *offset = q; + + return 1; + } + + z = q; + } + + return 0; +} + int journal_file_move_to_entry_by_seqnum_for_data( JournalFile *f, uint64_t data_offset, @@ -1599,8 +1778,10 @@ int journal_file_move_to_entry_by_seqnum_for_data( Object *d; int r; + assert(f); + r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d); - if (r <= 0) + if (r < 0) return r; return generic_array_bisect_plus_one(f, @@ -1623,8 +1804,10 @@ int journal_file_move_to_entry_by_realtime_for_data( Object *d; int r; + assert(f); + r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d); - if (r <= 0) + if (r < 0) return r; return generic_array_bisect_plus_one(f, @@ -1638,29 +1821,15 @@ int journal_file_move_to_entry_by_realtime_for_data( } void journal_file_dump(JournalFile *f) { - char a[33], b[33], c[33]; Object *o; int r; uint64_t p; assert(f); - printf("File Path: %s\n" - "File ID: %s\n" - "Machine ID: %s\n" - "Boot ID: %s\n" - "Arena size: %llu\n" - "Objects: %lu\n" - "Entries: %lu\n", - f->path, - sd_id128_to_string(f->header->file_id, a), - sd_id128_to_string(f->header->machine_id, b), - sd_id128_to_string(f->header->boot_id, c), - (unsigned long long) le64toh(f->header->arena_size), - (unsigned long) le64toh(f->header->n_objects), - (unsigned long) le64toh(f->header->n_entries)); + journal_file_print_header(f); - p = le64toh(f->header->arena_offset); + p = le64toh(f->header->header_size); while (p != 0) { r = journal_file_move_to_object(f, -1, p, &o); if (r < 0) @@ -1694,6 +1863,10 @@ void journal_file_dump(JournalFile *f) { case OBJECT_ENTRY_ARRAY: printf("Type: OBJECT_ENTRY_ARRAY\n"); break; + + case OBJECT_SIGNATURE: + printf("Type: OBJECT_SIGNATURE\n"); + break; } if (o->object.flags & OBJECT_COMPRESSED) @@ -1710,6 +1883,58 @@ fail: log_error("File corrupt"); } +void journal_file_print_header(JournalFile *f) { + char a[33], b[33], c[33]; + char x[FORMAT_TIMESTAMP_MAX], y[FORMAT_TIMESTAMP_MAX]; + + assert(f); + + printf("File Path: %s\n" + "File ID: %s\n" + "Machine 
ID: %s\n" + "Boot ID: %s\n" + "Sequential Number ID: %s\n" + "Header size: %llu\n" + "Arena size: %llu\n" + "Data Hash Table Size: %llu\n" + "Field Hash Table Size: %llu\n" + "Objects: %llu\n" + "Entry Objects: %llu\n" + "Rotate Suggested: %s\n" + "Head Sequential Number: %llu\n" + "Tail Sequential Number: %llu\n" + "Head Realtime Timestamp: %s\n" + "Tail Realtime Timestamp: %s\n", + f->path, + sd_id128_to_string(f->header->file_id, a), + sd_id128_to_string(f->header->machine_id, b), + sd_id128_to_string(f->header->boot_id, c), + sd_id128_to_string(f->header->seqnum_id, c), + (unsigned long long) le64toh(f->header->header_size), + (unsigned long long) le64toh(f->header->arena_size), + (unsigned long long) le64toh(f->header->data_hash_table_size) / sizeof(HashItem), + (unsigned long long) le64toh(f->header->field_hash_table_size) / sizeof(HashItem), + (unsigned long long) le64toh(f->header->n_objects), + (unsigned long long) le64toh(f->header->n_entries), + yes_no(journal_file_rotate_suggested(f)), + (unsigned long long) le64toh(f->header->head_seqnum), + (unsigned long long) le64toh(f->header->tail_seqnum), + format_timestamp(x, sizeof(x), le64toh(f->header->head_entry_realtime)), + format_timestamp(y, sizeof(y), le64toh(f->header->tail_entry_realtime))); + + if (JOURNAL_HEADER_CONTAINS(f->header, n_data)) + printf("Data Objects: %llu\n" + "Data Hash Table Fill: %.1f%%\n", + (unsigned long long) le64toh(f->header->n_data), + 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)))); + + if (JOURNAL_HEADER_CONTAINS(f->header, n_fields)) + printf("Field Objects: %llu\n" + "Field Hash Table Fill: %.1f%%\n", + (unsigned long long) le64toh(f->header->n_fields), + 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)))); +} + int journal_file_open( const char *fname, int flags, @@ -1775,7 +2000,7 @@ int journal_file_open( } } - if (f->last_stat.st_size < (off_t) sizeof(Header)) { + if (f->last_stat.st_size < (off_t) HEADER_SIZE_MIN) { r = -EIO; goto fail; } @@ -1857,7 +2082,7 @@ int journal_file_rotate(JournalFile **f) { sd_id128_to_string(old_file->header->seqnum_id, p + l - 8 + 1); snprintf(p + l - 8 + 1 + 32, 1 + 16 + 1 + 16 + 8 + 1, "-%016llx-%016llx.journal", - (unsigned long long) le64toh((*f)->header->seqnum), + (unsigned long long) le64toh((*f)->header->tail_seqnum), (unsigned long long) le64toh((*f)->header->tail_entry_realtime)); r = rename(old_file->path, p); @@ -2137,9 +2362,6 @@ int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint6 ts.monotonic < le64toh(to->header->tail_entry_monotonic)) return -EINVAL; - if (ts.realtime < le64toh(to->header->tail_entry_realtime)) - return -EINVAL; - n = journal_file_entry_n_items(o); items = alloca(sizeof(EntryItem) * n); @@ -2272,3 +2494,94 @@ void journal_default_metrics(JournalMetrics *m, int fd) { format_bytes(c, sizeof(c), m->min_size), format_bytes(d, sizeof(d), m->keep_free)); } + +int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *from, usec_t *to) { + assert(f); + assert(from || to); + + if (from) { + if (f->header->head_entry_realtime == 0) + return -ENOENT; + + *from = le64toh(f->header->head_entry_realtime); + } + + if (to) { + if (f->header->tail_entry_realtime == 0) + return -ENOENT; + + *to = le64toh(f->header->tail_entry_realtime); + } + + return 1; +} + +int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, usec_t *from, usec_t *to) { + 
char t[9+32+1] = "_BOOT_ID="; + Object *o; + uint64_t p; + int r; + + assert(f); + assert(from || to); + + sd_id128_to_string(boot_id, t + 9); + + r = journal_file_find_data_object(f, t, strlen(t), &o, &p); + if (r <= 0) + return r; + + if (le64toh(o->data.n_entries) <= 0) + return 0; + + if (from) { + r = journal_file_move_to_object(f, OBJECT_ENTRY, le64toh(o->data.entry_offset), &o); + if (r < 0) + return r; + + *from = le64toh(o->entry.monotonic); + } + + if (to) { + r = journal_file_move_to_object(f, OBJECT_DATA, p, &o); + if (r < 0) + return r; + + r = generic_array_get_plus_one(f, + le64toh(o->data.entry_offset), + le64toh(o->data.entry_array_offset), + le64toh(o->data.n_entries)-1, + &o, NULL); + if (r <= 0) + return r; + + *to = le64toh(o->entry.monotonic); + } + + return 1; +} + +bool journal_file_rotate_suggested(JournalFile *f) { + assert(f); + + /* If we gained new header fields we gained new features, + * hence suggest a rotation */ + if (le64toh(f->header->header_size) < sizeof(Header)) + return true; + + /* Let's check if the hash tables grew over a certain fill + * level (75%, borrowing this value from Java's hash table + * implementation), and if so suggest a rotation. To calculate + * the fill level we need the n_data field, which only exists + * in newer versions. */ + + if (JOURNAL_HEADER_CONTAINS(f->header, n_data)) + if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) + return true; + + if (JOURNAL_HEADER_CONTAINS(f->header, n_fields)) + if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) + return true; + + return false; +}
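
Notes on the patch:

The compatibility scheme above works because the header records its own size: newer optional fields such as n_data and n_fields are probed with offsetof() via JOURNAL_HEADER_CONTAINS() before being read, and journal_file_rotate_suggested() applies the 75% hash-table fill heuristic only to files new enough to carry those counters. Below is a minimal, stand-alone sketch of that pattern; ExampleHeader, ExampleHashItem and example_rotate_suggested() are illustrative names, not part of the actual code, and endianness conversion is omitted.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-ins for the on-disk structures. */
typedef struct ExampleHashItem {
        uint64_t head_hash_offset;
        uint64_t tail_hash_offset;
} ExampleHashItem;

typedef struct ExampleHeader {
        uint64_t header_size;           /* size of the header written to disk */
        uint64_t data_hash_table_size;  /* in bytes */
        uint64_t n_data;                /* added later; older files end before it */
} ExampleHeader;

/* A field may be read only if the stored header is large enough to cover it. */
#define EXAMPLE_HEADER_CONTAINS(h, field) \
        ((h)->header_size >= offsetof(ExampleHeader, field) + sizeof((h)->field))

/* Suggest rotation when the file predates the current header layout, or when
 * the data hash table is more than 75% full (n_data * 4 > buckets * 3). */
static bool example_rotate_suggested(const ExampleHeader *h) {

        if (h->header_size < sizeof(ExampleHeader))
                return true;

        if (EXAMPLE_HEADER_CONTAINS(h, n_data) &&
            h->n_data * 4ULL > (h->data_hash_table_size / sizeof(ExampleHashItem)) * 3ULL)
                return true;

        return false;
}

In the real Header there are further fields beyond n_data (the patch also checks n_fields), so the JOURNAL_HEADER_CONTAINS() test is not redundant the way it may look in this three-field sketch.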
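
The allocation change in journal_file_allocate() reflects posix_fallocate()'s unusual error convention: it returns 0 on success or a positive error number directly, without setting errno, which is why the patch now checks the return value itself and returns -r. A small sketch of that convention, using a hypothetical grow_file() helper:

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <sys/stat.h>

/* Hypothetical helper: extend an open file up to new_size bytes. */
static int grow_file(int fd, uint64_t old_size, uint64_t new_size) {
        struct stat st;
        int r;

        if (new_size <= old_size)
                return 0;

        /* posix_fallocate() returns the error number itself, not -1/errno. */
        r = posix_fallocate(fd, (off_t) old_size, (off_t) (new_size - old_size));
        if (r != 0)
                return -r;

        /* fstat() uses the usual -1/errno style. */
        if (fstat(fd, &st) < 0)
                return -errno;

        return 0;
}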
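
The switch from t[8+32+1] to t[9+32+1] is an off-by-one fix in the "_BOOT_ID=" match string: the prefix is nine characters long, so writing the formatted boot ID at t + 8 overwrote the '=' separator; the fix writes it at t + 9 and sizes the buffer accordingly. A tiny illustrative helper (make_boot_id_match() is not part of the patch), assuming the in-tree sd-id128 API:

#include <string.h>
#include "sd-id128.h"

/* strlen("_BOOT_ID=") is 9, an sd_id128_t formats to 32 hex digits,
 * and one byte is needed for the trailing NUL: hence 9 + 32 + 1. */
static void make_boot_id_match(sd_id128_t boot_id, char t[9 + 32 + 1]) {
        strcpy(t, "_BOOT_ID=");
        sd_id128_to_string(boot_id, t + 9);
}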