if (r != 0)
return -r;
- mmap_cache_close_fd_range(f->mmap, f->fd, old_size);
-
if (fstat(f->fd, &f->last_stat) < 0)
return -errno;
return 0;
}
-static int journal_file_move_to(JournalFile *f, int context, uint64_t offset, uint64_t size, void **ret) {
+static int journal_file_move_to(JournalFile *f, int context, bool keep_always, uint64_t offset, uint64_t size, void **ret) {
assert(f);
assert(ret);
return -EADDRNOTAVAIL;
}
- return mmap_cache_get(f->mmap, f->fd, f->prot, context, offset, size, ret);
+ return mmap_cache_get(f->mmap, f->fd, f->prot, context, keep_always, offset, size, &f->last_stat, ret);
}
static uint64_t minimum_header_size(Object *o) {
/* One context for each type, plus one catch-all for the rest */
context = type > 0 && type < _OBJECT_TYPE_MAX ? type : 0;
- r = journal_file_move_to(f, context, offset, sizeof(ObjectHeader), &t);
+ r = journal_file_move_to(f, context, false, offset, sizeof(ObjectHeader), &t);
if (r < 0)
return r;
return -EBADMSG;
if (s > sizeof(ObjectHeader)) {
- r = journal_file_move_to(f, o->object.type, offset, s, &t);
+ r = journal_file_move_to(f, o->object.type, false, offset, s, &t);
if (r < 0)
return r;
if (r < 0)
return r;
- r = journal_file_move_to(f, type, p, size, &t);
+ r = journal_file_move_to(f, type, false, p, size, &t);
if (r < 0)
return r;
r = journal_file_move_to(f,
OBJECT_DATA_HASH_TABLE,
+ true,
p, s,
&t);
if (r < 0)
r = journal_file_move_to(f,
OBJECT_FIELD_HASH_TABLE,
+ true,
p, s,
&t);
if (r < 0)
c = (a + b) / 2;
- r = mmap_cache_get(m, fd, PROT_READ|PROT_WRITE, 0, c * sizeof(uint64_t), sizeof(uint64_t), (void **) &z);
+ r = mmap_cache_get(m, fd, PROT_READ|PROT_WRITE, 0, false, c * sizeof(uint64_t), sizeof(uint64_t), NULL, (void **) &z);
if (r < 0)
return r;
if (p < *z)
b = c;
- else {
+ else
a = c;
- }
}
return 0;
Window *v;
v = m->windows + w;
+ assert(v->n_ref == 0);
if (v->ptr) {
mmap_cache_window_unmap(m, w);
unsigned fd_index,
int prot,
unsigned context,
+ bool keep_always,
uint64_t offset,
uint64_t size,
+ struct stat *st,
void **ret) {
unsigned w;
wsize = WINDOW_SIZE;
}
+ if (st) {
+ /* Memory maps that are larger than the files
+ underneath have undefined behaviour. Hence, clamp
+ things to the file size if we know it */
+
+ if (woffset >= (uint64_t) st->st_size)
+ return -EADDRNOTAVAIL;
+
+ if (woffset + wsize > (uint64_t) st->st_size)
+ wsize = PAGE_ALIGN(st->st_size - woffset);
+ }
+
for (;;) {
d = mmap(NULL, wsize, prot, MAP_SHARED, fd, woffset);
if (d != MAP_FAILED)
v->offset = woffset;
v->size = wsize;
- v->n_ref = 0;
- mmap_cache_window_add_lru(m, w);
+ if (keep_always)
+ v->n_ref = 1;
+ else {
+ v->n_ref = 0;
+ mmap_cache_window_add_lru(m, w);
+ }
+
mmap_cache_fd_add(m, fd_index, w);
mmap_cache_context_set(m, context, w);
int fd,
int prot,
unsigned context,
+ bool keep_always,
uint64_t offset,
uint64_t size,
+ struct stat *st,
void **ret) {
unsigned fd_index;
return r;
/* Not found? Then, let's add it */
- return mmap_cache_put(m, fd, fd_index, prot, context, offset, size, ret);
+ return mmap_cache_put(m, fd, fd_index, prot, context, keep_always, offset, size, st, ret);
}
void mmap_cache_close_fd(MMapCache *m, int fd) {
m->n_fds --;
}
-void mmap_cache_close_fd_range(MMapCache *m, int fd, uint64_t p) {
- unsigned fd_index, c, w;
- int r;
-
- assert(m);
- assert(fd > 0);
-
- /* This drops all windows that include space right of the
- * specified offset. This is useful to ensure that after the
- * file size is extended we drop our mappings of the end and
- * create it anew, since otherwise it is undefined whether
- * mapping will continue to work as intended. */
-
- r = mmap_cache_peek_fd_index(m, fd, &fd_index);
- if (r <= 0)
- return;
-
- for (c = 0; c < m->contexts_max; c++) {
- w = m->by_context[c];
-
- if (w != (unsigned) -1 && m->windows[w].fd == fd)
- mmap_cache_context_unset(m, c);
- }
-
- w = m->by_fd[fd_index].windows;
- while (w != (unsigned) -1) {
- Window *v;
-
- v = m->windows + w;
- assert(v->fd == fd);
- assert(v->by_fd_next == (unsigned) -1 ||
- m->windows[v->by_fd_next].fd == fd);
-
- if (v->offset + v->size > p) {
-
- mmap_cache_window_unmap(m, w);
- mmap_cache_fd_remove(m, fd_index, w);
- v->fd = -1;
-
- w = m->by_fd[fd_index].windows;
- } else
- w = v->by_fd_next;
- }
-}
-
void mmap_cache_close_context(MMapCache *m, unsigned context) {
mmap_cache_context_unset(m, context);
}
MMapCache* mmap_cache_ref(MMapCache *m);
MMapCache* mmap_cache_unref(MMapCache *m);
-int mmap_cache_get(MMapCache *m, int fd, int prot, unsigned context, uint64_t offset, uint64_t size, void **ret);
+int mmap_cache_get(MMapCache *m, int fd, int prot, unsigned context, bool keep_always, uint64_t offset, uint64_t size, struct stat *st, void **ret);
void mmap_cache_close_fd(MMapCache *m, int fd);
-void mmap_cache_close_fd_range(MMapCache *m, int fd, uint64_t range);
void mmap_cache_close_context(MMapCache *m, unsigned context);