X-Git-Url: https://www.chiark.greenend.org.uk/ucgi/~ianmdlvl/git?a=blobdiff_plain;f=src%2Fjournal%2Fsd-journal.c;h=5d518a38709d07c2d1b9e7903f940a13838c90fb;hb=8f9b6cd9eb049b00b1e9e669d0e35aa415dc8fb0;hp=35f911f2ba8e8000b50b29473a6f2a9204571a3c;hpb=ae2cc8efc1659dcc6219dfcd07287288666aa303;p=elogind.git

diff --git a/src/journal/sd-journal.c b/src/journal/sd-journal.c
index 35f911f2b..5d518a387 100644
--- a/src/journal/sd-journal.c
+++ b/src/journal/sd-journal.c
@@ -28,13 +28,14 @@
 #include "journal-file.h"
 #include "hashmap.h"
 #include "list.h"
+#include "lookup3.h"
 
 typedef struct Match Match;
 
 struct Match {
         char *data;
         size_t size;
-        uint64_t hash;
+        uint64_t le_hash;
 
         LIST_FIELDS(Match, matches);
 };
@@ -43,34 +44,55 @@ struct sd_journal {
         Hashmap *files;
 
         JournalFile *current_file;
+        uint64_t current_field;
 
         LIST_HEAD(Match, matches);
+        unsigned n_matches;
 };
 
-int sd_journal_add_match(sd_journal *j, const char *field, const void *data, size_t size) {
+static void reset_location(sd_journal *j) {
+        Iterator i;
+        JournalFile *f;
+
+        assert(j);
+
+        j->current_file = NULL;
+        j->current_field = 0;
+
+        HASHMAP_FOREACH(f, j->files, i)
+                f->current_offset = 0;
+}
+
+int sd_journal_add_match(sd_journal *j, const void *data, size_t size) {
         Match *m;
-        char *e;
 
         assert(j);
-        assert(field);
-        assert(data || size == 0);
+
+        if (size <= 0)
+                return -EINVAL;
+
+        assert(data);
 
         m = new0(Match, 1);
         if (!m)
                 return -ENOMEM;
 
-        m->size = strlen(field) + 1 + size;
+        m->size = size;
+
         m->data = malloc(m->size);
         if (!m->data) {
                 free(m);
                 return -ENOMEM;
         }
 
-        e = stpcpy(m->data, field);
-        *(e++) = '=';
-        memcpy(e, data, size);
+        memcpy(m->data, data, size);
+        m->le_hash = hash64(m->data, size);
 
         LIST_PREPEND(Match, matches, j->matches, m);
+        j->n_matches ++;
+
+        reset_location(j);
+
         return 0;
 }
 
@@ -84,6 +106,10 @@ void sd_journal_flush_matches(sd_journal *j) {
                 free(m->data);
                 free(m);
         }
+
+        j->n_matches = 0;
+
+        reset_location(j);
 }
 
 static int compare_order(JournalFile *af, Object *ao, uint64_t ap,
@@ -92,7 +118,16 @@ static int compare_order(JournalFile *af, Object *ao, uint64_t ap,
         uint64_t a, b;
 
         /* We operate on two different files here, hence we can access
-         * two objects at the same time, which we normally can't */
+         * two objects at the same time, which we normally can't.
+         *
+         * If contents and timestamps match, these entries are
+         * identical, even if the seqnum does not match */
+
+        if (sd_id128_equal(ao->entry.boot_id, bo->entry.boot_id) &&
+            ao->entry.monotonic == bo->entry.monotonic &&
+            ao->entry.realtime == bo->entry.realtime &&
+            ao->entry.xor_hash == bo->entry.xor_hash)
+                return 0;
 
         if (sd_id128_equal(af->header->seqnum_id, bf->header->seqnum_id)) {
 
@@ -105,6 +140,10 @@ static int compare_order(JournalFile *af, Object *ao, uint64_t ap,
                         return -1;
                 if (a > b)
                         return 1;
+
+                /* Wow! This is weird, different data but the same
+                 * seqnums? Something is borked, but let's make the
+                 * best of it and compare by time. */
         }
 
         if (sd_id128_equal(ao->entry.boot_id, bo->entry.boot_id)) {
@@ -140,80 +179,113 @@ static int compare_order(JournalFile *af, Object *ao, uint64_t ap,
         return 0;
 }
 
-int sd_journal_next(sd_journal *j) {
-        JournalFile *f, *new_current = NULL;
-        Iterator i;
+static int move_to_next_with_matches(sd_journal *j, JournalFile *f, direction_t direction, Object **o, uint64_t *p) {
         int r;
-        uint64_t new_offset = 0;
-        Object *new_entry = NULL;
+        uint64_t cp;
+        Object *c;
 
         assert(j);
+        assert(f);
+        assert(o);
+        assert(p);
 
-        HASHMAP_FOREACH(f, j->files, i) {
-                Object *o;
-                uint64_t p;
+        if (!j->matches) {
+                /* No matches is easy, just go on to the next entry */
 
                 if (f->current_offset > 0) {
-                        r = journal_file_move_to_object(f, f->current_offset, OBJECT_ENTRY, &o);
+                        r = journal_file_move_to_object(f, f->current_offset, OBJECT_ENTRY, &c);
                         if (r < 0)
                                 return r;
                 } else
-                        o = NULL;
+                        c = NULL;
 
-                r = journal_file_next_entry(f, o, &o, &p);
-                if (r < 0)
+                return journal_file_next_entry(f, c, direction, o, p);
+        }
+
+        /* So there are matches we have to adhere to, let's find the
+         * first entry that matches all of them */
+
+        if (f->current_offset > 0)
+                cp = f->current_offset;
+        else {
+                r = journal_file_find_first_entry(f, j->matches->data, j->matches->size, direction, &c, &cp);
+                if (r <= 0)
                         return r;
-                else if (r == 0)
-                        continue;
 
-                if (!new_current ||
-                    compare_order(new_current, new_entry, new_offset, f, o, p) > 0) {
-                        new_current = f;
-                        new_entry = o;
-                        new_offset = p;
+                /* We can shortcut this if there's only one match */
+                if (j->n_matches == 1) {
+                        *o = c;
+                        *p = cp;
+                        return r;
                 }
         }
 
-        if (new_current) {
-                j->current_file = new_current;
-                j->current_file->current_offset = new_offset;
-                j->current_file->current_field = 0;
+        for (;;) {
+                uint64_t np, n;
+                bool found;
+                Match *m;
 
-                /* Skip over any identical entries in the other files too */
+                r = journal_file_move_to_object(f, cp, OBJECT_ENTRY, &c);
+                if (r < 0)
+                        return r;
 
-                HASHMAP_FOREACH(f, j->files, i) {
-                        Object *o;
-                        uint64_t p;
+                n = journal_file_entry_n_items(c);
 
-                        if (j->current_file == f)
-                                continue;
+                /* Make sure we don't match the entry we are starting
+                 * from. */
+                found = f->current_offset != cp;
 
-                        if (f->current_offset > 0) {
-                                r = journal_file_move_to_object(f, f->current_offset, OBJECT_ENTRY, &o);
-                                if (r < 0)
-                                        return r;
-                        } else
-                                o = NULL;
+                np = 0;
+                LIST_FOREACH(matches, m, j->matches) {
+                        uint64_t q, k;
 
-                        r = journal_file_next_entry(f, o, &o, &p);
-                        if (r < 0)
-                                return r;
-                        else if (r == 0)
+                        for (k = 0; k < n; k++)
+                                if (c->entry.items[k].hash == m->le_hash)
+                                        break;
+
+                        if (k >= n) {
+                                /* Hmm, didn't find any field that matched, so ignore
+                                 * this match. Go on with next match */
+
+                                found = false;
                                 continue;
+                        }
 
-                        if (compare_order(new_current, new_entry, new_offset, f, o, p) == 0) {
-                                f->current_offset = p;
-                                f->current_field = 0;
+                        /* Hmm, so, this field matched, let's remember
+                         * where we'd have to try next, in case the other
+                         * matches are not OK */
+
+                        if (direction == DIRECTION_DOWN) {
+                                q = le64toh(c->entry.items[k].next_entry_offset);
+
+                                if (q > np)
+                                        np = q;
+                        } else {
+                                q = le64toh(c->entry.items[k].prev_entry_offset);
+
+                                if (q != 0 && (np == 0 || q < np))
+                                        np = q;
                         }
                 }
 
-                return 1;
-        }
+                /* Did this entry match against all matches? */
+                if (found) {
+                        *o = c;
+                        *p = cp;
+                        return 1;
+                }
 
-        return 0;
+                /* Did we find a subsequent entry? */
+                if (np == 0)
+                        return 0;
+
+                /* Hmm, ok, this entry only matched partially, so
+                 * let's try another one */
+                cp = np;
+        }
 }
 
-int sd_journal_previous(sd_journal *j) {
+static int real_journal_next(sd_journal *j, direction_t direction) {
         JournalFile *f, *new_current = NULL;
         Iterator i;
         int r;
@@ -226,20 +298,14 @@ int sd_journal_previous(sd_journal *j) {
                 Object *o;
                 uint64_t p;
 
-                if (f->current_offset > 0) {
-                        r = journal_file_move_to_object(f, f->current_offset, OBJECT_ENTRY, &o);
-                        if (r < 0)
-                                return r;
-                } else
-                        o = NULL;
-
-                r = journal_file_prev_entry(f, o, &o, &p);
+                r = move_to_next_with_matches(j, f, direction, &o, &p);
                 if (r < 0)
                         return r;
                 else if (r == 0)
                         continue;
 
-                if (!new_current || compare_order(new_current, new_entry, new_offset, f, o, p) > 0) {
+                if (!new_current ||
+                    compare_order(new_current, new_entry, new_offset, f, o, p) > 0) {
                         new_current = f;
                         new_entry = o;
                         new_offset = p;
@@ -249,13 +315,41 @@ int sd_journal_previous(sd_journal *j) {
         if (new_current) {
                 j->current_file = new_current;
                 j->current_file->current_offset = new_offset;
-                j->current_file->current_field = 0;
+                j->current_field = 0;
+
+                /* Skip over any identical entries in the other files too */
+
+                HASHMAP_FOREACH(f, j->files, i) {
+                        Object *o;
+                        uint64_t p;
+
+                        if (j->current_file == f)
+                                continue;
+
+                        r = move_to_next_with_matches(j, f, direction, &o, &p);
+                        if (r < 0)
+                                return r;
+                        else if (r == 0)
+                                continue;
+
+                        if (compare_order(new_current, new_entry, new_offset, f, o, p) == 0)
+                                f->current_offset = p;
+                }
+
                 return 1;
         }
 
         return 0;
 }
 
+int sd_journal_next(sd_journal *j) {
+        return real_journal_next(j, DIRECTION_DOWN);
+}
+
+int sd_journal_previous(sd_journal *j) {
+        return real_journal_next(j, DIRECTION_UP);
+}
+
 int sd_journal_get_cursor(sd_journal *j, char **cursor) {
         Object *o;
         int r;
@@ -448,6 +542,8 @@ void sd_journal_close(sd_journal *j) {
                 hashmap_free(j->files);
         }
 
+        sd_journal_flush_matches(j);
+
         free(j);
 }
 
@@ -490,7 +586,7 @@ int sd_journal_get_monotonic_usec(sd_journal *j, uint64_t *ret) {
         if (f->current_offset <= 0)
                 return 0;
 
-        r = sd_id128_get_machine(&id);
+        r = sd_id128_get_boot(&id);
         if (r < 0)
                 return r;
 
@@ -506,7 +602,7 @@ int sd_journal_get_monotonic_usec(sd_journal *j, uint64_t *ret) {
 
 }
 
-int sd_journal_get_field(sd_journal *j, const char *field, const void **data, size_t *size) {
+int sd_journal_get_data(sd_journal *j, const char *field, const void **data, size_t *size) {
         JournalFile *f;
         uint64_t i, n;
         size_t field_length;
@@ -536,14 +632,18 @@ int sd_journal_get_field(sd_journal *j, const char *field, const void **data, si
         n = journal_file_entry_n_items(o);
 
         for (i = 0; i < n; i++) {
-                uint64_t p, l;
+                uint64_t p, l, h;
                 size_t t;
 
                 p = le64toh(o->entry.items[i].object_offset);
+                h = o->entry.items[j->current_field].hash;
                 r = journal_file_move_to_object(f, p, OBJECT_DATA, &o);
                 if (r < 0)
                         return r;
 
+                if (h != o->data.hash)
+                        return -EBADMSG;
+
                 l = le64toh(o->object.size) - offsetof(Object, data.payload);
 
                 if (l >= field_length+1 &&
@@ -569,9 +669,9 @@ int sd_journal_get_field(sd_journal *j, const char *field, const void **data, si
         return 0;
 }
 
-int sd_journal_iterate_fields(sd_journal *j, const void **data, size_t *size) {
+int sd_journal_enumerate_data(sd_journal *j, const void **data, size_t *size) {
         JournalFile *f;
-        uint64_t p, l, n;
+        uint64_t p, l, n, h;
         size_t t;
         int r;
         Object *o;
@@ -592,14 +692,18 @@ int sd_journal_iterate_fields(sd_journal *j, const void **data, size_t *size) {
                 return r;
 
         n = journal_file_entry_n_items(o);
-        if (f->current_field >= n)
+        if (j->current_field >= n)
                 return 0;
 
-        p = le64toh(o->entry.items[f->current_field].object_offset);
+        p = le64toh(o->entry.items[j->current_field].object_offset);
+        h = o->entry.items[j->current_field].hash;
         r = journal_file_move_to_object(f, p, OBJECT_DATA, &o);
         if (r < 0)
                 return r;
 
+        if (h != o->data.hash)
+                return -EBADMSG;
+
         l = le64toh(o->object.size) - offsetof(Object, data.payload);
         t = (size_t) l;
 
@@ -610,17 +714,29 @@ int sd_journal_iterate_fields(sd_journal *j, const void **data, size_t *size) {
         *data = o->data.payload;
         *size = t;
 
-        f->current_field ++;
+        j->current_field ++;
 
         return 1;
 }
 
+void sd_journal_start_data(sd_journal *j) {
+        assert(j);
+
+        j->current_field = 0;
+}
+
 int sd_journal_seek_head(sd_journal *j) {
         assert(j);
-        return -EINVAL;
+
+        reset_location(j);
+
+        return real_journal_next(j, DIRECTION_DOWN);
}
 
 int sd_journal_seek_tail(sd_journal *j) {
         assert(j);
-        return -EINVAL;
+
+        reset_location(j);
+
+        return real_journal_next(j, DIRECTION_UP);
 }
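
Usage note (not part of the patch above): the following is a minimal sketch of how the reworked match and data-iteration calls introduced by this change might be driven from client code. The sd_journal_open() call, the SD_JOURNAL_LOCAL_ONLY flag and the <systemd/sd-journal.h> include path follow the present-day public sd-journal API and are assumptions here, as is the illustrative "_PID=1" match string; this early revision may have exposed the open call and header differently.

/* Hypothetical usage sketch -- not part of the commit shown above. */
#include <stdio.h>
#include <string.h>
#include <systemd/sd-journal.h>   /* assumed install path; in-tree it is "sd-journal.h" */

int main(void) {
        sd_journal *j;
        const char *match = "_PID=1";   /* illustrative match string */
        const void *data;
        size_t size;
        int r;

        r = sd_journal_open(&j, SD_JOURNAL_LOCAL_ONLY);   /* modern open call, assumed */
        if (r < 0)
                return 1;

        /* After this patch the field name is no longer passed separately:
         * the whole "FIELD=value" blob is hashed and compared against the
         * entry items. */
        r = sd_journal_add_match(j, match, strlen(match));
        if (r < 0) {
                sd_journal_close(j);
                return 1;
        }

        /* real_journal_next() resets the field iterator for every new
         * entry, so sd_journal_enumerate_data() can simply be looped. */
        while (sd_journal_next(j) > 0)
                while (sd_journal_enumerate_data(j, &data, &size) > 0)
                        printf("%.*s\n", (int) size, (const char*) data);

        sd_journal_flush_matches(j);
        sd_journal_close(j);
        return 0;
}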