X-Git-Url: http://www.chiark.greenend.org.uk/ucgi/~ianmdlvl/git?a=blobdiff_plain;f=src%2Fjournal%2Fjournal-file.c;h=b840124c9fe8815329ac643ef991286a2ee94c62;hb=4a842cadb8d6b30fa9fdc8ff183633c14e02cf96;hp=9128f0d642f20dab496de16f4e7d9d66fdf4b81d;hpb=dca6219e04505e9fa10b32e71059ce2abfae1dad;p=elogind.git
diff --git a/src/journal/journal-file.c b/src/journal/journal-file.c
index 9128f0d64..b840124c9 100644
--- a/src/journal/journal-file.c
+++ b/src/journal/journal-file.c
@@ -32,8 +32,8 @@
 #include "lookup3.h"
 #include "compress.h"
 
-#define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*16ULL)
-#define DEFAULT_FIELD_HASH_TABLE_SIZE (2047ULL*16ULL)
+#define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem))
+#define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem))
 
 #define DEFAULT_WINDOW_SIZE (8ULL*1024ULL*1024ULL)
 
@@ -74,7 +74,8 @@ void journal_file_close(JournalFile *f) {
         assert(f);
 
         if (f->header) {
-                if (f->writable)
+                /* Mark the file offline. Don't override the archived state if it already is set */
+                if (f->writable && f->header->state == STATE_ONLINE)
                         f->header->state = STATE_OFFLINE;
 
                 munmap(f->header, PAGE_ALIGN(sizeof(Header)));
@@ -188,13 +189,15 @@ static int journal_file_verify_header(JournalFile *f) {
 
                 state = f->header->state;
 
-                if (state == STATE_ONLINE)
-                        log_debug("Journal file %s is already online. Assuming unclean closing. Ignoring.", f->path);
-                        /* FIXME: immediately rotate */
-                else if (state == STATE_ARCHIVED)
+                if (state == STATE_ONLINE) {
+                        log_debug("Journal file %s is already online. Assuming unclean closing.", f->path);
+                        return -EBUSY;
+                } else if (state == STATE_ARCHIVED)
                         return -ESHUTDOWN;
-                else if (state != STATE_OFFLINE)
-                        log_debug("Journal file %s has unknown state %u. Ignoring.", f->path, state);
+                else if (state != STATE_OFFLINE) {
+                        log_debug("Journal file %s has unknown state %u.", f->path, state);
+                        return -EBUSY;
+                }
         }
 
         return 0;
@@ -507,7 +510,17 @@ static int journal_file_setup_data_hash_table(JournalFile *f) {
 
         assert(f);
 
-        s = DEFAULT_DATA_HASH_TABLE_SIZE;
+        /* We estimate that we need 1 hash table entry per 768 of
+           journal file and we want to make sure we never get beyond
+           75% fill level. Calculate the hash table size for the
+           maximum file size based on these metrics. */
+
+        s = (f->metrics.max_size * 4 / 768 / 3) * sizeof(HashItem);
+        if (s < DEFAULT_DATA_HASH_TABLE_SIZE)
+                s = DEFAULT_DATA_HASH_TABLE_SIZE;
+
+        log_info("Reserving %llu entries in hash table.", (unsigned long long) (s / sizeof(HashItem)));
+
         r = journal_file_append_object(f,
                                        OBJECT_DATA_HASH_TABLE,
                                        offsetof(Object, hash_table.items) + s,
@@ -1892,6 +1905,9 @@ void journal_file_print_header(JournalFile *f) {
                "Machine ID: %s\n"
                "Boot ID: %s\n"
                "Sequential Number ID: %s\n"
+               "State: %s\n"
+               "Compatible Flags:%s%s\n"
+               "Incompatible Flags:%s%s\n"
                "Header size: %llu\n"
                "Arena size: %llu\n"
                "Data Hash Table Size: %llu\n"
@@ -1908,6 +1924,13 @@ void journal_file_print_header(JournalFile *f) {
                sd_id128_to_string(f->header->machine_id, b),
                sd_id128_to_string(f->header->boot_id, c),
                sd_id128_to_string(f->header->seqnum_id, c),
+               f->header->state == STATE_OFFLINE ? "offline" :
+               f->header->state == STATE_ONLINE ? "online" :
+               f->header->state == STATE_ARCHIVED ? "archived" : "unknown",
+               (f->header->compatible_flags & HEADER_COMPATIBLE_SIGNED) ? " SIGNED" : "",
+               (f->header->compatible_flags & ~HEADER_COMPATIBLE_SIGNED) ? " ???" : "",
+               (f->header->incompatible_flags & HEADER_INCOMPATIBLE_COMPRESSED) ? " COMPRESSED" : "",
+               (f->header->incompatible_flags & ~HEADER_INCOMPATIBLE_COMPRESSED) ? " ???" : "",
                (unsigned long long) le64toh(f->header->header_size),
                (unsigned long long) le64toh(f->header->arena_size),
                (unsigned long long) le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
@@ -1937,6 +1960,7 @@ int journal_file_open(
                 const char *fname,
                 int flags,
                 mode_t mode,
+                JournalMetrics *metrics,
                 JournalFile *template,
                 JournalFile **ret) {
 
@@ -1963,10 +1987,8 @@ int journal_file_open(
         f->writable = (flags & O_ACCMODE) != O_RDONLY;
         f->prot = prot_from_flags(flags);
 
-        if (template) {
-                f->metrics = template->metrics;
+        if (template)
                 f->compress = template->compress;
-        }
 
         f->path = strdup(fname);
         if (!f->path) {
@@ -2017,6 +2039,12 @@ int journal_file_open(
         }
 
         if (f->writable) {
+                if (metrics) {
+                        journal_default_metrics(metrics, f->fd);
+                        f->metrics = *metrics;
+                } else if (template)
+                        f->metrics = template->metrics;
+
                 r = journal_file_refresh_header(f);
                 if (r < 0)
                         goto fail;
@@ -2091,7 +2119,7 @@ int journal_file_rotate(JournalFile **f) {
 
         old_file->header->state = STATE_ARCHIVED;
 
-        r = journal_file_open(old_file->path, old_file->flags, old_file->mode, old_file, &new_file);
+        r = journal_file_open(old_file->path, old_file->flags, old_file->mode, NULL, old_file, &new_file);
         journal_file_close(old_file);
         *f = new_file;
 
@@ -2102,6 +2130,7 @@ int journal_file_open_reliably(
                 const char *fname,
                 int flags,
                 mode_t mode,
+                JournalMetrics *metrics,
                 JournalFile *template,
                 JournalFile **ret) {
 
@@ -2109,11 +2138,13 @@ int journal_file_open_reliably(
         size_t l;
         char *p;
 
-        r = journal_file_open(fname, flags, mode, template, ret);
+        r = journal_file_open(fname, flags, mode, metrics, template, ret);
         if (r != -EBADMSG && /* corrupted */
             r != -ENODATA && /* truncated */
             r != -EHOSTDOWN && /* other machine */
-            r != -EPROTONOSUPPORT) /* incompatible feature */
+            r != -EPROTONOSUPPORT && /* incompatible feature */
+            r != -EBUSY && /* unclean shutdown */
+            r != -ESHUTDOWN /* already archived */)
                 return r;
 
         if ((flags & O_ACCMODE) == O_RDONLY)
@@ -2136,9 +2167,9 @@ int journal_file_open_reliably(
         if (r < 0)
                 return -errno;
 
-        log_warning("File %s corrupted, renaming and replacing.", fname);
+        log_warning("File %s corrupted or uncleanly shut down, renaming and replacing.", fname);
 
-        return journal_file_open(fname, flags, mode, template, ret);
+        return journal_file_open(fname, flags, mode, metrics, template, ret);
 }
 
 struct vacuum_info {
@@ -2564,8 +2595,10 @@ bool journal_file_rotate_suggested(JournalFile *f) {
 
         /* If we gained new header fields we gained new features,
          * hence suggest a rotation */
-        if (le64toh(f->header->header_size) < sizeof(Header))
+        if (le64toh(f->header->header_size) < sizeof(Header)) {
+                log_debug("%s uses an outdated header, suggesting rotation.", f->path);
                 return true;
+        }
 
         /* Let's check if the hash tables grew over a certain fill
          * level (75%, borrowing this value from Java's hash table
@@ -2574,12 +2607,26 @@ bool journal_file_rotate_suggested(JournalFile *f) {
          * in newer versions. */
 
         if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
-                if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL)
+                if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) {
+                        log_debug("Data hash table of %s has a fill level at %.1f (%llu of %llu items, %llu file size, %llu bytes per hash table item), suggesting rotation.",
+                                  f->path,
+                                  100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))),
+                                  (unsigned long long) le64toh(f->header->n_data),
+                                  (unsigned long long) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)),
+                                  (unsigned long long) (f->last_stat.st_size),
+                                  (unsigned long long) (f->last_stat.st_size / le64toh(f->header->n_data)));
                         return true;
+                }
 
         if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
-                if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL)
+                if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) {
+                        log_debug("Field hash table of %s has a fill level at %.1f (%llu of %llu items), suggesting rotation.",
+                                  f->path,
+                                  100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))),
+                                  (unsigned long long) le64toh(f->header->n_fields),
+                                  (unsigned long long) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)));
                         return true;
+                }
 
         return false;
 }
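
For readers of the patch, a minimal standalone sketch of the arithmetic follows: it reproduces the sizing rule from the new journal_file_setup_data_hash_table() code (one hash table entry per 768 bytes of maximum file size, scaled so the expected fill level stays under 75%) and the fill-level check used by journal_file_rotate_suggested(). The HashItem size, the 128 MiB max_size and the n_data count are assumptions chosen purely for illustration, not values taken from the commit.

/*
 * Illustrative sketch only, not part of the patch above.
 * EXAMPLE_HASH_ITEM_SIZE, max_size and n_data are assumed example values.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_HASH_ITEM_SIZE                16ULL  /* assumed sizeof(HashItem) */
#define EXAMPLE_DEFAULT_DATA_HASH_TABLE_SIZE  (2047ULL * EXAMPLE_HASH_ITEM_SIZE)

int main(void) {
        uint64_t max_size = 128ULL * 1024ULL * 1024ULL;  /* assumed maximum journal file size */

        /* One hash table entry per 768 bytes of file, scaled by 4/3 so the
         * expected fill level stays below 75%, with the old default as floor. */
        uint64_t s = (max_size * 4 / 768 / 3) * EXAMPLE_HASH_ITEM_SIZE;
        if (s < EXAMPLE_DEFAULT_DATA_HASH_TABLE_SIZE)
                s = EXAMPLE_DEFAULT_DATA_HASH_TABLE_SIZE;

        uint64_t n_buckets = s / EXAMPLE_HASH_ITEM_SIZE;
        printf("Reserving %llu hash table entries (%llu bytes) for a %llu byte file.\n",
               (unsigned long long) n_buckets,
               (unsigned long long) s,
               (unsigned long long) max_size);

        /* Rotation heuristic: suggest rotation once more than 75% of the
         * buckets are in use, i.e. n_data * 4 > n_buckets * 3. */
        uint64_t n_data = 180000;  /* assumed number of data objects stored so far */
        printf("Fill level %.1f%%: rotation %s suggested.\n",
               100.0 * (double) n_data / (double) n_buckets,
               n_data * 4ULL > n_buckets * 3ULL ? "would be" : "would not yet be");

        return 0;
}

With these example values the sketch reserves 233016 entries (3728256 bytes) for the 128 MiB file and reports a fill level of 77.2%, which is above the 75% threshold and would therefore trigger a rotation suggestion.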