}
#endif
- if (!compressed)
+ if (!compressed && size > 0)
memcpy(o->data.payload, data, size);
r = journal_file_link_data(f, o, p, hash);
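The extra `size > 0` guard matters because memcpy() with a NULL source is undefined behavior even when the length is zero, and a caller appending an empty field may well pass data == NULL together with size == 0. A minimal sketch of the guard pattern (copy_payload is a hypothetical name, not from the patch):

    #include <string.h>

    /* Skip the copy entirely when there is nothing to copy; handing
     * memcpy() a NULL source is undefined even for a zero length. */
    static void copy_payload(void *dst, const void *src, size_t size) {
            if (size > 0)
                    memcpy(dst, src, size);
    }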
ts->monotonic < le64toh(f->header->tail_entry_monotonic))
return -EINVAL;
- items = alloca(sizeof(EntryItem) * n_iovec);
+ /* alloca() can't take 0, hence let's allocate at least one */
+ items = alloca(sizeof(EntryItem) * MAX(1, n_iovec));
for (i = 0; i < n_iovec; i++) {
uint64_t p;
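The MAX(1, n_iovec) dance exists because, as the new comment says, alloca() cannot be asked for zero bytes: the behavior is unspecified, so an entry with no items would otherwise make an unportable call. A sketch of the idiom, assuming a MAX() macro like systemd's and a simplified stand-in for EntryItem:

    #include <alloca.h>
    #include <stddef.h>
    #include <stdint.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    typedef struct EntryItem {        /* simplified for illustration */
            uint64_t object_offset;
            uint64_t hash;
    } EntryItem;

    static void append_entry_sketch(size_t n_iovec) {
            /* Reserve stack space for at least one element, even when
             * n_iovec == 0, keeping the alloca() call well-defined. */
            EntryItem *items = alloca(sizeof(EntryItem) * MAX(1, n_iovec));
            (void) items;
    }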
if (r != -EBADMSG && /* corrupted */
r != -ENODATA && /* truncated */
r != -EHOSTDOWN && /* other machine */
- r != -EPROTONOSUPPORT) /* incompatible feature */
+ r != -EPROTONOSUPPORT && /* incompatible feature */
+ r != -EBUSY && /* unclean shutdown */
+ r != -ESHUTDOWN /* already archived */)
return r;
if ((flags & O_ACCMODE) == O_RDONLY)
if (r < 0)
return -errno;
- log_warning("File %s corrupted, renaming and replacing.", fname);
+ log_warning("File %s corrupted or uncleanly shut down, renaming and replacing.", fname);
return journal_file_open(fname, flags, mode, metrics, template, ret);
}
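Taken together, the whitelist turns journal_file_open_reliably() into a salvage path: hard errors such as -ENOENT or -EACCES propagate unchanged, while anything that smells like damage gets the file renamed aside and reopened from scratch. Per the inline comments, -EBUSY and -ESHUTDOWN are the two new cases, corresponding to a file left in its online state by an unclean shutdown and to an already-archived file. A sketch of the classification as a predicate (open_error_is_recoverable is a hypothetical helper, not upstream API):

    #include <errno.h>
    #include <stdbool.h>

    /* Negative errno values that mean "rename the file aside and start
     * fresh" rather than "give up". */
    static bool open_error_is_recoverable(int r) {
            return r == -EBADMSG ||          /* corrupted */
                   r == -ENODATA ||          /* truncated */
                   r == -EHOSTDOWN ||        /* other machine */
                   r == -EPROTONOSUPPORT ||  /* incompatible feature */
                   r == -EBUSY ||            /* unclean shutdown */
                   r == -ESHUTDOWN;          /* already archived */
    }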
n_list ++;
}
- qsort(list, n_list, sizeof(struct vacuum_info), vacuum_compare);
+ if (n_list > 0)
+ qsort(list, n_list, sizeof(struct vacuum_info), vacuum_compare);
for (i = 0; i < n_list; i++) {
struct statvfs ss;
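The qsort() guard is the same defensive move as the alloca() one above: if the directory scan found no journal files, list is still NULL, and passing a NULL base to qsort() is undefined behavior even with an element count of zero. A sketch of the wrapper this pattern naturally factors into (qsort_safe here is an illustrative helper, not part of the patch):

    #include <assert.h>
    #include <stdlib.h>

    /* Only call qsort() when there is actually something to sort;
     * with 0 or 1 elements the base pointer may legitimately be NULL. */
    static void qsort_safe(void *base, size_t nmemb, size_t size,
                           int (*compar)(const void *, const void *)) {
            if (nmemb <= 1)
                    return;
            assert(base);
            qsort(base, nmemb, size, compar);
    }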
/* If we gained new header fields we gained new features,
* hence suggest a rotation */
- if (le64toh(f->header->header_size) < sizeof(Header))
+ if (le64toh(f->header->header_size) < sizeof(Header)) {
+ log_debug("%s uses an outdated header, suggesting rotation.", f->path);
return true;
+ }
/* Let's check if the hash tables grew over a certain fill
* level (75%, borrowing this value from Java's hash table
* in newer versions). */
if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
- if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL)
+ if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) {
+ log_debug("Data hash table of %s has a fill level at %.1f (%llu of %llu items, %llu file size, %llu bytes per hash table item), suggesting rotation.",
+ f->path,
+ 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))),
+ (unsigned long long) le64toh(f->header->n_data),
+ (unsigned long long) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)),
+ (unsigned long long) (f->last_stat.st_size),
+ (unsigned long long) (f->last_stat.st_size / le64toh(f->header->n_data)));
return true;
+ }
if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
- if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL)
+ if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) {
+ log_debug("Field hash table of %s has a fill level at %.1f (%llu of %llu items), suggesting rotation.",
+ f->path,
+ 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))),
+ (unsigned long long) le64toh(f->header->n_fields),
+ (unsigned long long) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)));
return true;
+ }
return false;
}
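The fill-level test itself is worth spelling out: n * 4ULL > buckets * 3ULL is the 75% threshold computed purely in integer arithmetic, so the hot-path comparison never touches floating point; the doubles above exist only to feed the debug messages. A worked example with hypothetical numbers:

    #include <stdbool.h>
    #include <stdint.h>

    /* n / buckets > 3/4  <=>  n * 4 > buckets * 3, kept in integers. */
    static bool over_three_quarters(uint64_t n, uint64_t buckets) {
            return n * 4ULL > buckets * 3ULL;
    }

    /* With buckets == 2048: 1536 items is exactly 75%, so no rotation
     * is suggested; 1537 items crosses the threshold and triggers one. */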