X-Git-Url: https://www.chiark.greenend.org.uk/ucgi/~ianmdlvl/git?p=elogind.git;a=blobdiff_plain;f=src%2Fjournal%2Fjournal-file.c;h=c9030c56addb5e7dac5f6d19c23b9b70d9fae234;hp=44a96928e027dade0b915054fecaf8c0c1d63b1b;hb=b9a1617d75c16a48cccf4ff135013dca9af94e7d;hpb=fa6ac76083b8ffc1309876459f54f9f0e2843731

diff --git a/src/journal/journal-file.c b/src/journal/journal-file.c
index 44a96928e..c9030c56a 100644
--- a/src/journal/journal-file.c
+++ b/src/journal/journal-file.c
@@ -27,6 +27,7 @@
 #include 
 #include 
 
+#include "btrfs-util.h"
 #include "journal-def.h"
 #include "journal-file.h"
 #include "journal-authenticate.h"
@@ -67,6 +68,9 @@
 /* How much to increase the journal file size at once each time we allocate something new. */
 #define FILE_SIZE_INCREASE (8ULL*1024ULL*1024ULL)        /* 8MB */
 
+/* Re-run fstat() on the file at least this often, to detect deletions */
+#define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)
+
 /* The mmap context to use for the header we pick as one above the last defined typed */
 #define CONTEXT_HEADER _OBJECT_TYPE_MAX
@@ -137,6 +141,9 @@ void journal_file_close(JournalFile *f) {
         if (f->mmap && f->fd >= 0)
                 mmap_cache_close_fd(f->mmap, f->fd);
 
+        if (f->fd >= 0 && f->defrag_on_close)
+                btrfs_defrag_fd(f->fd);
+
         safe_close(f->fd);
         free(f->path);
@@ -319,6 +326,22 @@ static int journal_file_verify_header(JournalFile *f) {
         return 0;
 }
 
+static int journal_file_fstat(JournalFile *f) {
+        assert(f);
+        assert(f->fd >= 0);
+
+        if (fstat(f->fd, &f->last_stat) < 0)
+                return -errno;
+
+        f->last_stat_usec = now(CLOCK_MONOTONIC);
+
+        /* Refuse appending to files that are already deleted */
+        if (f->last_stat.st_nlink <= 0)
+                return -EIDRM;
+
+        return 0;
+}
+
 static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) {
         uint64_t old_size, new_size;
         int r;
@@ -340,8 +363,21 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size)
         if (new_size < le64toh(f->header->header_size))
                 new_size = le64toh(f->header->header_size);
 
-        if (new_size <= old_size)
-                return 0;
+        if (new_size <= old_size) {
+
+                /* We already pre-allocated enough space, but before we
+                 * write to it, let's check with fstat() whether the file
+                 * got deleted, so that we don't write data that would be
+                 * thrown away immediately. Don't fstat() on every write
+                 * though, but at most once per LAST_STAT_REFRESH_USEC. */
+
+                if (f->last_stat_usec + LAST_STAT_REFRESH_USEC > now(CLOCK_MONOTONIC))
+                        return 0;
+
+                return journal_file_fstat(f);
+        }
+
+        /* Allocate more space. */
 
         if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
                 return -E2BIG;
@@ -376,12 +412,9 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size)
         if (r != 0)
                 return -r;
 
-        if (fstat(f->fd, &f->last_stat) < 0)
-                return -errno;
-
         f->header->arena_size = htole64(new_size - le64toh(f->header->header_size));
 
-        return 0;
+        return journal_file_fstat(f);
 }
 
 static unsigned type_to_context(ObjectType type) {
@@ -392,6 +425,8 @@ static int journal_file_move_to(JournalFile *f, ObjectType type, bool keep_alway
+        int r;
+
         assert(f);
         assert(ret);
 
@@ -403,8 +438,11 @@ static int journal_file_move_to(JournalFile *f, ObjectType type, bool keep_alway
                 /* Hmm, out of range? Let's refresh the fstat() data
                  * first, before we trust that check. */
-                if (fstat(f->fd, &f->last_stat) < 0 ||
-                    offset + size > (uint64_t) f->last_stat.st_size)
+                r = journal_file_fstat(f);
+                if (r < 0)
+                        return r;
+
+                if (offset + size > (uint64_t) f->last_stat.st_size)
                         return -EADDRNOTAVAIL;
         }
 
@@ -658,7 +696,7 @@ static int journal_file_link_field(
                 uint64_t offset,
                 uint64_t hash) {
 
-        uint64_t p, h;
+        uint64_t p, h, m;
         int r;
 
         assert(f);
@@ -668,11 +706,14 @@ static int journal_file_link_field(
         if (o->object.type != OBJECT_FIELD)
                 return -EINVAL;
 
-        /* This might alter the window we are looking at */
+        m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem);
+        if (m <= 0)
+                return -EBADMSG;
 
+        /* This might alter the window we are looking at */
         o->field.next_hash_offset = o->field.head_data_offset = 0;
 
-        h = hash % (le64toh(f->header->field_hash_table_size) / sizeof(HashItem));
+        h = hash % m;
         p = le64toh(f->field_hash_table[h].tail_hash_offset);
         if (p == 0)
                 f->field_hash_table[h].head_hash_offset = htole64(offset);
@@ -698,7 +739,7 @@ static int journal_file_link_data(
                 uint64_t offset,
                 uint64_t hash) {
 
-        uint64_t p, h;
+        uint64_t p, h, m;
         int r;
 
         assert(f);
@@ -708,13 +749,16 @@ static int journal_file_link_data(
         if (o->object.type != OBJECT_DATA)
                 return -EINVAL;
 
-        /* This might alter the window we are looking at */
+        m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);
+        if (m <= 0)
+                return -EBADMSG;
 
+        /* This might alter the window we are looking at */
         o->data.next_hash_offset = o->data.next_field_offset = 0;
         o->data.entry_offset = o->data.entry_array_offset = 0;
         o->data.n_entries = 0;
 
-        h = hash % (le64toh(f->header->data_hash_table_size) / sizeof(HashItem));
+        h = hash % m;
         p = le64toh(f->data_hash_table[h].tail_hash_offset);
         if (p == 0)
                 /* Only entry in the hash table is easy */
@@ -743,7 +787,7 @@ int journal_file_find_field_object_with_hash(
                 const void *field, uint64_t size, uint64_t hash,
                 Object **ret, uint64_t *offset) {
 
-        uint64_t p, osize, h;
+        uint64_t p, osize, h, m;
         int r;
 
         assert(f);
@@ -751,10 +795,12 @@ int journal_file_find_field_object_with_hash(
         osize = offsetof(Object, field.payload) + size;
 
-        if (f->header->field_hash_table_size == 0)
+        m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem);
+
+        if (m <= 0)
                 return -EBADMSG;
 
-        h = hash % (le64toh(f->header->field_hash_table_size) / sizeof(HashItem));
+        h = hash % m;
         p = le64toh(f->field_hash_table[h].head_hash_offset);
 
         while (p > 0) {
@@ -804,7 +850,7 @@ int journal_file_find_data_object_with_hash(
                 const void *data, uint64_t size, uint64_t hash,
                 Object **ret, uint64_t *offset) {
 
-        uint64_t p, osize, h;
+        uint64_t p, osize, h, m;
         int r;
 
         assert(f);
@@ -812,10 +858,11 @@ int journal_file_find_data_object_with_hash(
         osize = offsetof(Object, data.payload) + size;
 
-        if (f->header->data_hash_table_size == 0)
+        m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);
+        if (m <= 0)
                 return -EBADMSG;
 
-        h = hash % (le64toh(f->header->data_hash_table_size) / sizeof(HashItem));
+        h = hash % m;
         p = le64toh(f->data_hash_table[h].head_hash_offset);
 
         while (p > 0) {
@@ -2539,10 +2586,9 @@ int journal_file_open(
                 goto fail;
         }
 
-        if (fstat(f->fd, &f->last_stat) < 0) {
-                r = -errno;
+        r = journal_file_fstat(f);
+        if (r < 0)
                 goto fail;
-        }
 
         if (f->last_stat.st_size == 0 && f->writable) {
                 /* Let's attach the creation time to the journal file,
@@ -2571,10 +2617,9 @@ int journal_file_open(
                 if (r < 0)
                         goto fail;
 
-                if (fstat(f->fd, &f->last_stat) < 0) {
-                        r = -errno;
+                r = journal_file_fstat(f);
+                if (r < 0)
                         goto fail;
-                }
 
                 newly_created = true;
         }
@@ -2691,12 +2736,20 @@ int journal_file_rotate(JournalFile **f, bool compress, bool seal) {
         if (r < 0)
                 return -ENOMEM;
 
+        /* Try to rename the file to the archived version. If the file
+         * was already deleted, we'll get ENOENT; let's ignore that
+         * case. */
         r = rename(old_file->path, p);
-        if (r < 0)
+        if (r < 0 && errno != ENOENT)
                 return -errno;
 
         old_file->header->state = STATE_ARCHIVED;
 
+        /* Currently, btrfs does not cope well with our write pattern
+         * and fragments heavily. Let's defrag our journal files when
+         * we archive them. */
+        old_file->defrag_on_close = true;
+
         r = journal_file_open(old_file->path, old_file->flags, old_file->mode, compress, seal, NULL, old_file->mmap, old_file, &new_file);
         journal_file_close(old_file);
 
@@ -2727,7 +2780,8 @@ int journal_file_open_reliably(
             r != -EPROTONOSUPPORT && /* incompatible feature */
             r != -EBUSY &&           /* unclean shutdown */
             r != -ESHUTDOWN &&       /* already archived */
-            r != -EIO                /* IO error, including SIGBUS on mmap */)
+            r != -EIO &&             /* IO error, including SIGBUS on mmap */
+            r != -EIDRM              /* file has been deleted */)
                 return r;
 
         if ((flags & O_ACCMODE) == O_RDONLY)
@@ -2752,6 +2806,10 @@ int journal_file_open_reliably(
         if (r < 0)
                 return -errno;
 
+        /* btrfs doesn't cope well with our write pattern and
+         * fragments heavily. Let's defrag all files we rotate. */
+        (void) btrfs_defrag(p);
+
         log_warning("File %s corrupted or uncleanly shut down, renaming and replacing.", fname);
 
         return journal_file_open(fname, flags, mode, compress, seal,
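
The deletion handling in this patch reduces to a simple pattern: fstat() the already-open descriptor, treat st_nlink == 0 as "the file was deleted", and surface that as -EIDRM so callers such as journal_file_open_reliably() can recover by rotating to a fresh file. The standalone sketch below illustrates just that pattern; the helper name fd_check_not_deleted() and the temporary path are invented for the example and are not part of the patch.

/* Minimal sketch of the deletion check that journal_file_fstat() performs:
 * fstat() on the open fd plus an st_nlink test, mapped to -EIDRM.
 * Helper name and test path are illustrative only. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static int fd_check_not_deleted(int fd) {
        struct stat st;

        if (fstat(fd, &st) < 0)
                return -errno;

        /* All hard links are gone: whatever we write now is discarded
         * as soon as the last descriptor is closed. */
        if (st.st_nlink <= 0)
                return -EIDRM;

        return 0;
}

int main(void) {
        const char *path = "/tmp/eidrm-demo.tmp";
        int fd;

        fd = open(path, O_RDWR|O_CREAT|O_CLOEXEC, 0644);
        if (fd < 0)
                return 1;

        printf("before unlink: %d\n", fd_check_not_deleted(fd)); /* prints 0 */
        unlink(path);
        printf("after unlink:  %d\n", fd_check_not_deleted(fd)); /* prints -EIDRM (-43 on Linux) */

        close(fd);
        return 0;
}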