X-Git-Url: http://www.chiark.greenend.org.uk/ucgi/~ianmdlvl/git?p=elogind.git;a=blobdiff_plain;f=src%2Fjournal%2Fjournal-file.c;h=718dc5d6eaa13f73e77ce8bb5f2106d9a1aa19ed;hp=d8ce495d86d0146be274bec804d8d4b98082e0b8;hb=64825d3c589cd8742887f30acde8c57eceac2001;hpb=cd96b3b86abb4a88cac2722bdfb6e5d4413f6831

diff --git a/src/journal/journal-file.c b/src/journal/journal-file.c
index d8ce495d8..718dc5d6e 100644
--- a/src/journal/journal-file.c
+++ b/src/journal/journal-file.c
@@ -510,16 +510,16 @@ static int journal_file_setup_data_hash_table(JournalFile *f) {
 
         assert(f);
 
-        /* We estimate that we need 1 hash table entry per 2K of
+        /* We estimate that we need 1 hash table entry per 768 bytes of
            journal file and we want to make sure we never get beyond
            75% fill level. Calculate the hash table size for the
            maximum file size based on these metrics. */
 
-        s = (f->metrics.max_size * 4 / 2048 / 3) * sizeof(HashItem);
+        s = (f->metrics.max_size * 4 / 768 / 3) * sizeof(HashItem);
         if (s < DEFAULT_DATA_HASH_TABLE_SIZE)
                 s = DEFAULT_DATA_HASH_TABLE_SIZE;
 
-        log_info("Reserving %llu entries in hash table.", (unsigned long long) s);
+        log_info("Reserving %llu entries in hash table.", (unsigned long long) (s / sizeof(HashItem)));
 
         r = journal_file_append_object(f,
                                        OBJECT_DATA_HASH_TABLE,
@@ -788,7 +788,7 @@ static int journal_file_append_data(
         }
 #endif
 
-        if (!compressed)
+        if (!compressed && size > 0)
                 memcpy(o->data.payload, data, size);
 
         r = journal_file_link_data(f, o, p, hash);
@@ -1057,7 +1057,8 @@ int journal_file_append_entry(JournalFile *f, const dual_timestamp *ts, const st
             ts->monotonic < le64toh(f->header->tail_entry_monotonic))
                 return -EINVAL;
 
-        items = alloca(sizeof(EntryItem) * n_iovec);
+        /* alloca() can't take 0, hence let's allocate at least one */
+        items = alloca(sizeof(EntryItem) * MAX(1, n_iovec));
 
         for (i = 0; i < n_iovec; i++) {
                 uint64_t p;
@@ -2142,7 +2143,9 @@ int journal_file_open_reliably(
         if (r != -EBADMSG && /* corrupted */
             r != -ENODATA && /* truncated */
             r != -EHOSTDOWN && /* other machine */
-            r != -EPROTONOSUPPORT) /* incompatible feature */
+            r != -EPROTONOSUPPORT && /* incompatible feature */
+            r != -EBUSY && /* unclean shutdown */
+            r != -ESHUTDOWN /* already archived */)
                 return r;
 
         if ((flags & O_ACCMODE) == O_RDONLY)
@@ -2165,7 +2168,7 @@
         if (r < 0)
                 return -errno;
 
-        log_warning("File %s corrupted, renaming and replacing.", fname);
+        log_warning("File %s corrupted or uncleanly shut down, renaming and replacing.", fname);
 
         return journal_file_open(fname, flags, mode, metrics, template, ret);
 }
@@ -2334,7 +2337,8 @@ int journal_directory_vacuum(const char *directory, uint64_t max_use, uint64_t m
                 n_list ++;
         }
 
-        qsort(list, n_list, sizeof(struct vacuum_info), vacuum_compare);
+        if (n_list > 0)
+                qsort(list, n_list, sizeof(struct vacuum_info), vacuum_compare);
 
         for(i = 0; i < n_list; i++) {
                 struct statvfs ss;
@@ -2593,8 +2597,10 @@ bool journal_file_rotate_suggested(JournalFile *f) {
 
         /* If we gained new header fields we gained new features,
          * hence suggest a rotation */
-        if (le64toh(f->header->header_size) < sizeof(Header))
+        if (le64toh(f->header->header_size) < sizeof(Header)) {
+                log_debug("%s uses an outdated header, suggesting rotation.", f->path);
                 return true;
+        }
 
         /* Let's check if the hash tables grew over a certain fill
          * level (75%, borrowing this value from Java's hash table
@@ -2603,12 +2609,26 @@ bool journal_file_rotate_suggested(JournalFile *f) {
          * in newer versions. */
 
         if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
-                if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL)
+                if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) {
+                        log_debug("Data hash table of %s has a fill level at %.1f (%llu of %llu items, %llu file size, %llu bytes per hash table item), suggesting rotation.",
+                                  f->path,
+                                  100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))),
+                                  (unsigned long long) le64toh(f->header->n_data),
+                                  (unsigned long long) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)),
+                                  (unsigned long long) (f->last_stat.st_size),
+                                  (unsigned long long) (f->last_stat.st_size / le64toh(f->header->n_data)));
                         return true;
+                }
 
         if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
-                if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL)
+                if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) {
+                        log_debug("Field hash table of %s has a fill level at %.1f (%llu of %llu items), suggesting rotation.",
+                                  f->path,
+                                  100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))),
+                                  (unsigned long long) le64toh(f->header->n_fields),
+                                  (unsigned long long) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)));
                         return true;
+                }
 
         return false;
 }
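
The two hash-table computations in this patch are easy to misread, so here is a
minimal standalone sketch of the arithmetic. It is illustrative only, not code
from journal-file.c: the struct mirrors the 16-byte HashItem bucket from
journal-def.h, and hash_table_bytes()/over_fill_level() are hypothetical names
for the expressions in the first and last hunks.

    /* Minimal sketch (not part of the patch) of the sizing and
     * fill-level arithmetic above. HashItem is assumed to match the
     * 16-byte on-disk bucket from journal-def.h. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct HashItem {
            uint64_t head_hash_offset;
            uint64_t tail_hash_offset;
    } HashItem;                             /* 16 bytes per bucket */

    /* First hunk: estimate one data item per 768 bytes of journal file,
     * then scale by 4/3 so the expected load stays below 75% fill. */
    static uint64_t hash_table_bytes(uint64_t max_file_size) {
            return (max_file_size * 4 / 768 / 3) * sizeof(HashItem);
    }

    /* Last hunk: suggest rotation once items/buckets exceeds 3/4, using
     * integer math (items * 4 > buckets * 3) to avoid floating point. */
    static bool over_fill_level(uint64_t n_items, uint64_t table_bytes) {
            return n_items * 4ULL > (table_bytes / sizeof(HashItem)) * 3ULL;
    }

    int main(void) {
            uint64_t max_size = 128ULL * 1024 * 1024; /* hypothetical 128 MiB cap */
            uint64_t table = hash_table_bytes(max_size);
            uint64_t buckets = table / sizeof(HashItem);

            printf("%llu-byte table, %llu buckets\n",
                   (unsigned long long) table, (unsigned long long) buckets);
            /* One item past the 75% mark trips the rotation check. */
            printf("rotate at %llu items: %s\n",
                   (unsigned long long) (buckets * 3 / 4 + 1),
                   over_fill_level(buckets * 3 / 4 + 1, table) ? "yes" : "no");
            return 0;
    }

The asymmetry is deliberate: the journal's on-disk hash tables are sized once
at file creation and cannot be rehashed in place, so when the fill level
crosses 75% journal_file_rotate_suggested() asks for a fresh, bigger file
instead of growing the table.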