/* How much to increase the journal file size at once each time we allocate something new. */
#define FILE_SIZE_INCREASE (8ULL*1024ULL*1024ULL) /* 8MB */
+/* Re-run fstat() on the file, to detect deletions, at least this often */
+#define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)
+
/* The mmap context to use for the header we pick as the one above the last defined type */
#define CONTEXT_HEADER _OBJECT_TYPE_MAX
return 0;
}
+static int journal_file_fstat(JournalFile *f) {
+ assert(f);
+ assert(f->fd >= 0);
+
+ if (fstat(f->fd, &f->last_stat) < 0)
+ return -errno;
+
+ f->last_stat_usec = now(CLOCK_MONOTONIC);
+
+ /* Refuse appending to files that are already deleted */
+ if (f->last_stat.st_nlink <= 0)
+ return -EIDRM;
+
+ return 0;
+}
+
static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) {
uint64_t old_size, new_size;
int r;
if (new_size < le64toh(f->header->header_size))
new_size = le64toh(f->header->header_size);
- if (new_size <= old_size)
- return 0;
+ if (new_size <= old_size) {
+
+ /* We already pre-allocated enough space, but before
+ * we write to it, let's check with fstat() if the
+ * file got deleted, in order to make sure we don't
+ * throw away the data immediately. Don't check fstat()
+ * for all writes though, but only once every 5s. */
+
+ if (f->last_stat_usec + LAST_STAT_REFRESH_USEC > now(CLOCK_MONOTONIC))
+ return 0;
+
+ return journal_file_fstat(f);
+ }
+
+ /* Allocate more space. */
if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
return -E2BIG;
if (r != 0)
return -r;
- if (fstat(f->fd, &f->last_stat) < 0)
- return -errno;
-
f->header->arena_size = htole64(new_size - le64toh(f->header->header_size));
- return 0;
+ return journal_file_fstat(f);
}
static unsigned type_to_context(ObjectType type) {
}
static int journal_file_move_to(JournalFile *f, ObjectType type, bool keep_always, uint64_t offset, uint64_t size, void **ret) {
+ int r;
+
assert(f);
assert(ret);
/* Hmm, out of range? Let's refresh the fstat() data
* first, before we trust that check. */
- if (fstat(f->fd, &f->last_stat) < 0 ||
- offset + size > (uint64_t) f->last_stat.st_size)
+ r = journal_file_fstat(f);
+ if (r < 0)
+ return r;
+
+ if (offset + size > (uint64_t) f->last_stat.st_size)
return -EADDRNOTAVAIL;
}
uint64_t offset,
uint64_t hash) {
- uint64_t p, h;
+ uint64_t p, h, m;
int r;
assert(f);
if (o->object.type != OBJECT_FIELD)
return -EINVAL;
- /* This might alter the window we are looking at */
+ m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem);
+ if (m <= 0)
+ return -EBADMSG;
+ /* This might alter the window we are looking at */
o->field.next_hash_offset = o->field.head_data_offset = 0;
- h = hash % (le64toh(f->header->field_hash_table_size) / sizeof(HashItem));
+ h = hash % m;
p = le64toh(f->field_hash_table[h].tail_hash_offset);
if (p == 0)
f->field_hash_table[h].head_hash_offset = htole64(offset);
uint64_t offset,
uint64_t hash) {
- uint64_t p, h;
+ uint64_t p, h, m;
int r;
assert(f);
if (o->object.type != OBJECT_DATA)
return -EINVAL;
- /* This might alter the window we are looking at */
+ m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);
+ if (m <= 0)
+ return -EBADMSG;
+ /* This might alter the window we are looking at */
o->data.next_hash_offset = o->data.next_field_offset = 0;
o->data.entry_offset = o->data.entry_array_offset = 0;
o->data.n_entries = 0;
- h = hash % (le64toh(f->header->data_hash_table_size) / sizeof(HashItem));
+ h = hash % m;
p = le64toh(f->data_hash_table[h].tail_hash_offset);
if (p == 0)
/* Only entry in the hash table is easy */
const void *field, uint64_t size, uint64_t hash,
Object **ret, uint64_t *offset) {
- uint64_t p, osize, h;
+ uint64_t p, osize, h, m;
int r;
assert(f);
osize = offsetof(Object, field.payload) + size;
- if (f->header->field_hash_table_size == 0)
+ m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem);
+
+ if (m <= 0)
return -EBADMSG;
- h = hash % (le64toh(f->header->field_hash_table_size) / sizeof(HashItem));
+ h = hash % m;
p = le64toh(f->field_hash_table[h].head_hash_offset);
while (p > 0) {
const void *data, uint64_t size, uint64_t hash,
Object **ret, uint64_t *offset) {
- uint64_t p, osize, h;
+ uint64_t p, osize, h, m;
int r;
assert(f);
osize = offsetof(Object, data.payload) + size;
- if (f->header->data_hash_table_size == 0)
+ m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);
+ if (m <= 0)
return -EBADMSG;
- h = hash % (le64toh(f->header->data_hash_table_size) / sizeof(HashItem));
+ h = hash % m;
p = le64toh(f->data_hash_table[h].head_hash_offset);
while (p > 0) {
goto fail;
}
- if (fstat(f->fd, &f->last_stat) < 0) {
- r = -errno;
+ r = journal_file_fstat(f);
+ if (r < 0)
goto fail;
- }
if (f->last_stat.st_size == 0 && f->writable) {
/* Let's attach the creation time to the journal file,
if (r < 0)
goto fail;
- if (fstat(f->fd, &f->last_stat) < 0) {
- r = -errno;
+ r = journal_file_fstat(f);
+ if (r < 0)
goto fail;
- }
newly_created = true;
}
if (r < 0)
return -ENOMEM;
+ /* Try to rename the file to the archived version. If the
+ * file was already deleted, rename() will fail with ENOENT;
+ * let's ignore that case. */
r = rename(old_file->path, p);
- if (r < 0)
+ if (r < 0 && errno != ENOENT)
return -errno;
old_file->header->state = STATE_ARCHIVED;