X-Git-Url: https://www.chiark.greenend.org.uk/ucgi/~ianmdlvl/git?p=elogind.git;a=blobdiff_plain;f=src%2Fjournal%2Fsd-journal.c;h=5a00cb0c9475699565124432f94706e7ce48ece4;hp=b55cf37e50d93c609b0726e065d5bab4d7a990c2;hb=1eb6332d557e6e510a9ce723296cb3b658d7e9a4;hpb=b6741478e7661c7e580e5dcfd6a6fccd1899c1d0 diff --git a/src/journal/sd-journal.c b/src/journal/sd-journal.c index b55cf37e5..5a00cb0c9 100644 --- a/src/journal/sd-journal.c +++ b/src/journal/sd-journal.c @@ -51,6 +51,8 @@ #define DEFAULT_DATA_THRESHOLD (64*1024) +static void remove_file_real(sd_journal *j, JournalFile *f); + static bool journal_pid_changed(sd_journal *j) { assert(j); @@ -68,7 +70,7 @@ static int set_put_error(sd_journal *j, int r) { if (r >= 0) return r; - k = set_ensure_allocated(&j->errors, trivial_hash_func, trivial_compare_func); + k = set_ensure_allocated(&j->errors, NULL); if (k < 0) return k; @@ -84,8 +86,8 @@ static void detach_location(sd_journal *j) { j->current_file = NULL; j->current_field = 0; - HASHMAP_FOREACH(f, j->files, i) - f->current_offset = 0; + ORDERED_HASHMAP_FOREACH(f, j->files, i) + journal_file_reset_location(f); } static void reset_location(sd_journal *j) { @@ -112,20 +114,19 @@ static void init_location(Location *l, LocationType type, JournalFile *f, Object l->seqnum_set = l->realtime_set = l->monotonic_set = l->xor_hash_set = true; } -static void set_location(sd_journal *j, LocationType type, JournalFile *f, Object *o, - direction_t direction, uint64_t offset) { +static void set_location(sd_journal *j, JournalFile *f, Object *o) { assert(j); - assert(type == LOCATION_DISCRETE || type == LOCATION_SEEK); assert(f); assert(o); - init_location(&j->current_location, type, f, o); + init_location(&j->current_location, LOCATION_DISCRETE, f, o); j->current_file = f; j->current_field = 0; - f->last_direction = direction; - f->current_offset = offset; + /* Let f know its candidate entry was picked. */ + assert(f->location_type == LOCATION_SEEK); + f->location_type = LOCATION_DISCRETE; } static int match_is_valid(const void *data, size_t size) { @@ -464,7 +465,7 @@ static int compare_entry_order(JournalFile *af, Object *_ao, if (sd_id128_equal(ao->entry.boot_id, bo->entry.boot_id)) { - /* If the boot id matches compare monotonic time */ + /* If the boot id matches, compare monotonic time */ a = le64toh(ao->entry.monotonic); b = le64toh(bo->entry.monotonic); @@ -474,7 +475,7 @@ static int compare_entry_order(JournalFile *af, Object *_ao, return 1; } - /* Otherwise compare UTC time */ + /* Otherwise, compare UTC time */ a = le64toh(ao->entry.realtime); b = le64toh(bo->entry.realtime); @@ -847,14 +848,14 @@ static int next_beyond_location(sd_journal *j, JournalFile *f, direction_t direc int k; k = compare_with_location(f, c, &j->current_location); - if (direction == DIRECTION_DOWN) - found = k > 0; - else - found = k < 0; + + found = direction == DIRECTION_DOWN ? 
k > 0 : k < 0; } else found = true; if (found) { + journal_file_save_location(f, direction, c, cp); + if (ret) *ret = c; if (offset) @@ -871,23 +872,26 @@ static int next_beyond_location(sd_journal *j, JournalFile *f, direction_t direc static int real_journal_next(sd_journal *j, direction_t direction) { JournalFile *f, *new_file = NULL; uint64_t new_offset = 0; - Object *o; - uint64_t p; + uint64_t p = 0; Iterator i; + Object *o; int r; assert_return(j, -EINVAL); assert_return(!journal_pid_changed(j), -ECHILD); - HASHMAP_FOREACH(f, j->files, i) { + ORDERED_HASHMAP_FOREACH(f, j->files, i) { bool found; r = next_beyond_location(j, f, direction, &o, &p); if (r < 0) { - log_debug("Can't iterate through %s, ignoring: %s", f->path, strerror(-r)); + log_debug_errno(r, "Can't iterate through %s, ignoring: %m", f->path); + remove_file_real(j, f); continue; - } else if (r == 0) + } else if (r == 0) { + f->location_type = LOCATION_TAIL; continue; + } if (!new_file) found = true; @@ -912,7 +916,7 @@ static int real_journal_next(sd_journal *j, direction_t direction) { if (r < 0) return r; - set_location(j, LOCATION_DISCRETE, new_file, o, direction, new_offset); + set_location(j, new_file, o); return 1; } @@ -994,7 +998,7 @@ _public_ int sd_journal_get_cursor(sd_journal *j, char **cursor) { } _public_ int sd_journal_seek_cursor(sd_journal *j, const char *cursor) { - char *w, *state; + const char *word, *state; size_t l; unsigned long long seqnum, monotonic, realtime, xor_hash; bool @@ -1010,18 +1014,18 @@ _public_ int sd_journal_seek_cursor(sd_journal *j, const char *cursor) { assert_return(!journal_pid_changed(j), -ECHILD); assert_return(!isempty(cursor), -EINVAL); - FOREACH_WORD_SEPARATOR(w, l, cursor, ";", state) { + FOREACH_WORD_SEPARATOR(word, l, cursor, ";", state) { char *item; int k = 0; - if (l < 2 || w[1] != '=') + if (l < 2 || word[1] != '=') return -EINVAL; - item = strndup(w, l); + item = strndup(word, l); if (!item) return -ENOMEM; - switch (w[0]) { + switch (word[0]) { case 's': seqnum_id_set = true; @@ -1100,7 +1104,7 @@ _public_ int sd_journal_seek_cursor(sd_journal *j, const char *cursor) { _public_ int sd_journal_test_cursor(sd_journal *j, const char *cursor) { int r; - char *w, *state; + const char *word, *state; size_t l; Object *o; @@ -1115,20 +1119,20 @@ _public_ int sd_journal_test_cursor(sd_journal *j, const char *cursor) { if (r < 0) return r; - FOREACH_WORD_SEPARATOR(w, l, cursor, ";", state) { + FOREACH_WORD_SEPARATOR(word, l, cursor, ";", state) { _cleanup_free_ char *item = NULL; sd_id128_t id; unsigned long long ll; int k = 0; - if (l < 2 || w[1] != '=') + if (l < 2 || word[1] != '=') return -EINVAL; - item = strndup(w, l); + item = strndup(word, l); if (!item) return -ENOMEM; - switch (w[0]) { + switch (word[0]) { case 's': k = sd_id128_from_string(item+2, &id); @@ -1270,7 +1274,7 @@ static bool file_type_wanted(int flags, const char *filename) { if (flags & SD_JOURNAL_CURRENT_USER) { char prefix[5 + DECIMAL_STR_MAX(uid_t) + 1]; - assert_se(snprintf(prefix, sizeof(prefix), "user-%lu", (unsigned long) getuid()) + assert_se(snprintf(prefix, sizeof(prefix), "user-"UID_FMT, getuid()) < (int) sizeof(prefix)); if (file_has_type_prefix(prefix, filename)) @@ -1281,16 +1285,16 @@ static bool file_type_wanted(int flags, const char *filename) { } static int add_any_file(sd_journal *j, const char *path) { - JournalFile *f; + JournalFile *f = NULL; int r; assert(j); assert(path); - if (hashmap_get(j->files, path)) + if (ordered_hashmap_get(j->files, path)) return 0; - if 
(hashmap_size(j->files) >= JOURNAL_FILES_MAX) { + if (ordered_hashmap_size(j->files) >= JOURNAL_FILES_MAX) { log_warning("Too many open journal files, not adding %s.", path); return set_put_error(j, -ETOOMANYREFS); } @@ -1301,7 +1305,7 @@ static int add_any_file(sd_journal *j, const char *path) { /* journal_file_dump(f); */ - r = hashmap_put(j->files, f->path, f); + r = ordered_hashmap_put(j->files, f->path, f); if (r < 0) { journal_file_close(f); return r; @@ -1339,7 +1343,7 @@ static int add_file(sd_journal *j, const char *prefix, const char *filename) { } static int remove_file(sd_journal *j, const char *prefix, const char *filename) { - char *path; + _cleanup_free_ char *path; JournalFile *f; assert(j); @@ -1350,12 +1354,19 @@ static int remove_file(sd_journal *j, const char *prefix, const char *filename) if (!path) return -ENOMEM; - f = hashmap_get(j->files, path); - free(path); + f = ordered_hashmap_get(j->files, path); if (!f) return 0; - hashmap_remove(j->files, f->path); + remove_file_real(j, f); + return 0; +} + +static void remove_file_real(sd_journal *j, JournalFile *f) { + assert(j); + assert(f); + + ordered_hashmap_remove(j->files, f->path); log_debug("File %s removed.", f->path); @@ -1365,15 +1376,16 @@ static int remove_file(sd_journal *j, const char *prefix, const char *filename) } if (j->unique_file == f) { - j->unique_file = NULL; + /* Jump to the next unique_file or NULL if that one was last */ + j->unique_file = ordered_hashmap_next(j->files, j->unique_file->path); j->unique_offset = 0; + if (!j->unique_file) + j->unique_file_lost = true; } journal_file_close(f); j->current_invalidate_counter ++; - - return 0; } static int add_directory(sd_journal *j, const char *prefix, const char *dirname) { @@ -1401,7 +1413,7 @@ static int add_directory(sd_journal *j, const char *prefix, const char *dirname) d = opendir(path); if (!d) { - log_debug("Failed to open %s: %m", path); + log_debug_errno(errno, "Failed to open %s: %m", path); if (errno == ENOENT) return 0; return -errno; @@ -1442,18 +1454,23 @@ static int add_directory(sd_journal *j, const char *prefix, const char *dirname) for (;;) { struct dirent *de; - union dirent_storage buf; - r = readdir_r(d, &buf.de, &de); - if (r != 0 || !de) + errno = 0; + de = readdir(d); + if (!de && errno != 0) { + r = -errno; + log_debug_errno(errno, "Failed to read directory %s: %m", m->path); + return r; + } + if (!de) break; if (dirent_is_file_with_suffix(de, ".journal") || dirent_is_file_with_suffix(de, ".journal~")) { r = add_file(j, m->path, de->d_name); if (r < 0) { - log_debug("Failed to add file %s/%s: %s", - m->path, de->d_name, strerror(-r)); + log_debug_errno(r, "Failed to add file %s/%s: %m", + m->path, de->d_name); r = set_put_error(j, r); if (r < 0) return r; @@ -1466,7 +1483,7 @@ static int add_directory(sd_journal *j, const char *prefix, const char *dirname) return 0; } -static int add_root_directory(sd_journal *j, const char *p, const char *prefix) { +static int add_root_directory(sd_journal *j, const char *p) { _cleanup_closedir_ DIR *d = NULL; Directory *m; int r; @@ -1478,8 +1495,8 @@ static int add_root_directory(sd_journal *j, const char *p, const char *prefix) !path_startswith(p, "/run")) return -EINVAL; - if (prefix) - p = strappenda(prefix, p); + if (j->prefix) + p = strappenda(j->prefix, p); d = opendir(p); if (!d) @@ -1526,19 +1543,24 @@ static int add_root_directory(sd_journal *j, const char *p, const char *prefix) for (;;) { struct dirent *de; - union dirent_storage buf; sd_id128_t id; - r = readdir_r(d, &buf.de, 
&de); - if (r != 0 || !de) + errno = 0; + de = readdir(d); + if (!de && errno != 0) { + r = -errno; + log_debug_errno(errno, "Failed to read directory %s: %m", m->path); + return r; + } + if (!de) break; if (dirent_is_file_with_suffix(de, ".journal") || dirent_is_file_with_suffix(de, ".journal~")) { r = add_file(j, m->path, de->d_name); if (r < 0) { - log_debug("Failed to add file %s/%s: %s", - m->path, de->d_name, strerror(-r)); + log_debug_errno(r, "Failed to add file %s/%s: %m", + m->path, de->d_name); r = set_put_error(j, r); if (r < 0) return r; @@ -1548,7 +1570,7 @@ static int add_root_directory(sd_journal *j, const char *p, const char *prefix) r = add_directory(j, m->path, de->d_name); if (r < 0) - log_debug("Failed to add directory %s/%s: %s", m->path, de->d_name, strerror(-r)); + log_debug_errno(r, "Failed to add directory %s/%s: %m", m->path, de->d_name); } } @@ -1580,7 +1602,7 @@ static int remove_directory(sd_journal *j, Directory *d) { return 0; } -static int add_search_paths(sd_journal *j, const char *prefix) { +static int add_search_paths(sd_journal *j) { int r; const char search_paths[] = "/run/log/journal\0" @@ -1593,7 +1615,7 @@ static int add_search_paths(sd_journal *j, const char *prefix) { * what's actually accessible, and ignore the rest. */ NULSTR_FOREACH(p, search_paths) { - r = add_root_directory(j, p, prefix); + r = add_root_directory(j, p); if (r < 0 && r != -ENOENT) { r = set_put_error(j, r); if (r < 0) @@ -1615,15 +1637,15 @@ static int add_current_paths(sd_journal *j) { * "root" directories. We don't expect errors here, so we * treat them as fatal. */ - HASHMAP_FOREACH(f, j->files, i) { - int r; + ORDERED_HASHMAP_FOREACH(f, j->files, i) { _cleanup_free_ char *dir; + int r; dir = dirname_malloc(f->path); if (!dir) return -ENOMEM; - r = add_root_directory(j, dir, NULL); + r = add_root_directory(j, dir); if (r < 0) { set_put_error(j, r); return r; @@ -1644,7 +1666,7 @@ static int allocate_inotify(sd_journal *j) { } if (!j->directories_by_wd) { - j->directories_by_wd = hashmap_new(trivial_hash_func, trivial_compare_func); + j->directories_by_wd = hashmap_new(NULL); if (!j->directories_by_wd) return -ENOMEM; } @@ -1670,8 +1692,8 @@ static sd_journal *journal_new(int flags, const char *path) { goto fail; } - j->files = hashmap_new(string_hash_func, string_compare_func); - j->directories_by_path = hashmap_new(string_hash_func, string_compare_func); + j->files = ordered_hashmap_new(&string_hash_ops); + j->directories_by_path = hashmap_new(&string_hash_ops); j->mmap = mmap_cache_new(); if (!j->files || !j->directories_by_path || !j->mmap) goto fail; @@ -1694,7 +1716,7 @@ _public_ int sd_journal_open(sd_journal **ret, int flags) { if (!j) return -ENOMEM; - r = add_search_paths(j, NULL); + r = add_search_paths(j); if (r < 0) goto fail; @@ -1716,7 +1738,7 @@ _public_ int sd_journal_open_container(sd_journal **ret, const char *machine, in assert_return(machine, -EINVAL); assert_return(ret, -EINVAL); assert_return((flags & ~(SD_JOURNAL_LOCAL_ONLY|SD_JOURNAL_SYSTEM)) == 0, -EINVAL); - assert_return(filename_is_safe(machine), -EINVAL); + assert_return(machine_name_is_valid(machine), -EINVAL); p = strappenda("/run/systemd/machines/", machine); r = parse_env_file(p, NEWLINE, "ROOT", &root, "CLASS", &class, NULL); @@ -1734,7 +1756,10 @@ _public_ int sd_journal_open_container(sd_journal **ret, const char *machine, in if (!j) return -ENOMEM; - r = add_search_paths(j, root); + j->prefix = root; + root = NULL; + + r = add_search_paths(j); if (r < 0) goto fail; @@ -1758,7 +1783,7 @@ 
_public_ int sd_journal_open_directory(sd_journal **ret, const char *path, int f if (!j) return -ENOMEM; - r = add_root_directory(j, path, NULL); + r = add_root_directory(j, path); if (r < 0) { set_put_error(j, r); goto fail; @@ -1788,7 +1813,7 @@ _public_ int sd_journal_open_files(sd_journal **ret, const char **paths, int fla STRV_FOREACH(path, paths) { r = add_any_file(j, *path); if (r < 0) { - log_error("Failed to open %s: %s", *path, strerror(-r)); + log_error_errno(r, "Failed to open %s: %m", *path); goto fail; } } @@ -1813,10 +1838,10 @@ _public_ void sd_journal_close(sd_journal *j) { sd_journal_flush_matches(j); - while ((f = hashmap_steal_first(j->files))) + while ((f = ordered_hashmap_steal_first(j->files))) journal_file_close(f); - hashmap_free(j->files); + ordered_hashmap_free(j->files); while ((d = hashmap_first(j->directories_by_path))) remove_directory(j, d); @@ -1827,8 +1852,7 @@ _public_ void sd_journal_close(sd_journal *j) { hashmap_free(j->directories_by_path); hashmap_free(j->directories_by_wd); - if (j->inotify_fd >= 0) - close_nointr_nofail(j->inotify_fd); + safe_close(j->inotify_fd); if (j->mmap) { log_debug("mmap cache statistics: %u hit, %u miss", mmap_cache_get_hit(j->mmap), mmap_cache_get_missed(j->mmap)); @@ -1836,6 +1860,7 @@ _public_ void sd_journal_close(sd_journal *j) { } free(j->path); + free(j->prefix); free(j->unique_field); set_free(j->errors); free(j); @@ -1962,6 +1987,7 @@ _public_ int sd_journal_get_data(sd_journal *j, const char *field, const void ** uint64_t p, l; le64_t le_hash; size_t t; + int compression; p = le64toh(o->entry.items[i].object_offset); le_hash = o->entry.items[i].hash; @@ -1974,19 +2000,22 @@ _public_ int sd_journal_get_data(sd_journal *j, const char *field, const void ** l = le64toh(o->object.size) - offsetof(Object, data.payload); - if (o->object.flags & OBJECT_COMPRESSED) { - -#ifdef HAVE_XZ - if (uncompress_startswith(o->data.payload, l, + compression = o->object.flags & OBJECT_COMPRESSION_MASK; + if (compression) { +#if defined(HAVE_XZ) || defined(HAVE_LZ4) + if (decompress_startswith(compression, + o->data.payload, l, &f->compress_buffer, &f->compress_buffer_size, field, field_length, '=')) { - uint64_t rsize; + size_t rsize; - if (!uncompress_blob(o->data.payload, l, - &f->compress_buffer, &f->compress_buffer_size, &rsize, - j->data_threshold)) - return -EBADMSG; + r = decompress_blob(compression, + o->data.payload, l, + &f->compress_buffer, &f->compress_buffer_size, &rsize, + j->data_threshold); + if (r < 0) + return r; *data = f->compress_buffer; *size = (size_t) rsize; @@ -1996,7 +2025,6 @@ _public_ int sd_journal_get_data(sd_journal *j, const char *field, const void ** #else return -EPROTONOSUPPORT; #endif - } else if (l >= field_length+1 && memcmp(o->data.payload, field, field_length) == 0 && o->data.payload[field_length] == '=') { @@ -2023,6 +2051,7 @@ _public_ int sd_journal_get_data(sd_journal *j, const char *field, const void ** static int return_data(sd_journal *j, JournalFile *f, Object *o, const void **data, size_t *size) { size_t t; uint64_t l; + int compression; l = le64toh(o->object.size) - offsetof(Object, data.payload); t = (size_t) l; @@ -2031,12 +2060,17 @@ static int return_data(sd_journal *j, JournalFile *f, Object *o, const void **da if ((uint64_t) t != l) return -E2BIG; - if (o->object.flags & OBJECT_COMPRESSED) { -#ifdef HAVE_XZ - uint64_t rsize; + compression = o->object.flags & OBJECT_COMPRESSION_MASK; + if (compression) { +#if defined(HAVE_XZ) || defined(HAVE_LZ4) + size_t rsize; + int r; - if 
(!uncompress_blob(o->data.payload, l, &f->compress_buffer, &f->compress_buffer_size, &rsize, j->data_threshold)) - return -EBADMSG; + r = decompress_blob(compression, + o->data.payload, l, &f->compress_buffer, + &f->compress_buffer_size, &rsize, j->data_threshold); + if (r < 0) + return r; *data = f->compress_buffer; *size = (size_t) rsize; @@ -2121,9 +2155,9 @@ _public_ int sd_journal_get_fd(sd_journal *j) { if (j->no_new_files) r = add_current_paths(j); else if (j->path) - r = add_root_directory(j, j->path, NULL); + r = add_root_directory(j, j->path); else - r = add_search_paths(j, NULL); + r = add_search_paths(j); if (r < 0) return r; @@ -2187,8 +2221,8 @@ static void process_inotify_event(sd_journal *j, struct inotify_event *e) { if (e->mask & (IN_CREATE|IN_MOVED_TO|IN_MODIFY|IN_ATTRIB)) { r = add_file(j, d->path, e->name); if (r < 0) { - log_debug("Failed to add file %s/%s: %s", - d->path, e->name, strerror(-r)); + log_debug_errno(r, "Failed to add file %s/%s: %m", + d->path, e->name); set_put_error(j, r); } @@ -2196,7 +2230,7 @@ static void process_inotify_event(sd_journal *j, struct inotify_event *e) { r = remove_file(j, d->path, e->name); if (r < 0) - log_debug("Failed to remove file %s/%s: %s", d->path, e->name, strerror(-r)); + log_debug_errno(r, "Failed to remove file %s/%s: %m", d->path, e->name); } } else if (!d->is_root && e->len == 0) { @@ -2206,7 +2240,7 @@ static void process_inotify_event(sd_journal *j, struct inotify_event *e) { if (e->mask & (IN_DELETE_SELF|IN_MOVE_SELF|IN_UNMOUNT)) { r = remove_directory(j, d); if (r < 0) - log_debug("Failed to remove directory %s: %s", d->path, strerror(-r)); + log_debug_errno(r, "Failed to remove directory %s: %m", d->path); } @@ -2217,7 +2251,7 @@ static void process_inotify_event(sd_journal *j, struct inotify_event *e) { if (e->mask & (IN_CREATE|IN_MOVED_TO|IN_MODIFY|IN_ATTRIB)) { r = add_directory(j, d->path, e->name); if (r < 0) - log_debug("Failed to add directory %s/%s: %s", d->path, e->name, strerror(-r)); + log_debug_errno(r, "Failed to add directory %s/%s: %m", d->path, e->name); } } @@ -2242,7 +2276,6 @@ static int determine_change(sd_journal *j) { } _public_ int sd_journal_process(sd_journal *j) { - uint8_t buffer[sizeof(struct inotify_event) + FILENAME_MAX] _alignas_(struct inotify_event); bool got_something = false; assert_return(j, -EINVAL); @@ -2251,6 +2284,7 @@ _public_ int sd_journal_process(sd_journal *j) { j->last_process_usec = now(CLOCK_MONOTONIC); for (;;) { + uint8_t buffer[INOTIFY_EVENT_MAX] _alignas_(struct inotify_event); struct inotify_event *e; ssize_t l; @@ -2264,21 +2298,9 @@ _public_ int sd_journal_process(sd_journal *j) { got_something = true; - e = (struct inotify_event*) buffer; - while (l > 0) { - size_t step; - + FOREACH_INOTIFY_EVENT(e, buffer, l) process_inotify_event(j, e); - - step = sizeof(struct inotify_event) + e->len; - assert(step <= (size_t) l); - - e = (struct inotify_event*) ((uint8_t*) e + step); - l -= step; - } } - - return determine_change(j); } _public_ int sd_journal_wait(sd_journal *j, uint64_t timeout_usec) { @@ -2331,6 +2353,7 @@ _public_ int sd_journal_get_cutoff_realtime_usec(sd_journal *j, uint64_t *from, Iterator i; JournalFile *f; bool first = true; + uint64_t fmin = 0, tmax = 0; int r; assert_return(j, -EINVAL); @@ -2338,7 +2361,7 @@ _public_ int sd_journal_get_cutoff_realtime_usec(sd_journal *j, uint64_t *from, assert_return(from || to, -EINVAL); assert_return(from != to, -EINVAL); - HASHMAP_FOREACH(f, j->files, i) { + ORDERED_HASHMAP_FOREACH(f, j->files, i) { usec_t fr, 
t; r = journal_file_get_cutoff_realtime_usec(f, &fr, &t); @@ -2350,26 +2373,27 @@ _public_ int sd_journal_get_cutoff_realtime_usec(sd_journal *j, uint64_t *from, continue; if (first) { - if (from) - *from = fr; - if (to) - *to = t; + fmin = fr; + tmax = t; first = false; } else { - if (from) - *from = MIN(fr, *from); - if (to) - *to = MAX(t, *to); + fmin = MIN(fr, fmin); + tmax = MAX(t, tmax); } } + if (from) + *from = fmin; + if (to) + *to = tmax; + return first ? 0 : 1; } _public_ int sd_journal_get_cutoff_monotonic_usec(sd_journal *j, sd_id128_t boot_id, uint64_t *from, uint64_t *to) { Iterator i; JournalFile *f; - bool first = true; + bool found = false; int r; assert_return(j, -EINVAL); @@ -2377,7 +2401,7 @@ _public_ int sd_journal_get_cutoff_monotonic_usec(sd_journal *j, sd_id128_t boot assert_return(from || to, -EINVAL); assert_return(from != to, -EINVAL); - HASHMAP_FOREACH(f, j->files, i) { + ORDERED_HASHMAP_FOREACH(f, j->files, i) { usec_t fr, t; r = journal_file_get_cutoff_monotonic_usec(f, boot_id, &fr, &t); @@ -2388,21 +2412,21 @@ _public_ int sd_journal_get_cutoff_monotonic_usec(sd_journal *j, sd_id128_t boot if (r == 0) continue; - if (first) { + if (found) { if (from) - *from = fr; + *from = MIN(fr, *from); if (to) - *to = t; - first = false; + *to = MAX(t, *to); } else { if (from) - *from = MIN(fr, *from); + *from = fr; if (to) - *to = MAX(t, *to); + *to = t; + found = true; } } - return first ? 0 : 1; + return found; } void journal_print_header(sd_journal *j) { @@ -2412,7 +2436,7 @@ void journal_print_header(sd_journal *j) { assert(j); - HASHMAP_FOREACH(f, j->files, i) { + ORDERED_HASHMAP_FOREACH(f, j->files, i) { if (newline) putchar('\n'); else @@ -2431,7 +2455,7 @@ _public_ int sd_journal_get_usage(sd_journal *j, uint64_t *bytes) { assert_return(!journal_pid_changed(j), -ECHILD); assert_return(bytes, -EINVAL); - HASHMAP_FOREACH(f, j->files, i) { + ORDERED_HASHMAP_FOREACH(f, j->files, i) { struct stat st; if (fstat(f->fd, &st) < 0) @@ -2460,14 +2484,13 @@ _public_ int sd_journal_query_unique(sd_journal *j, const char *field) { j->unique_field = f; j->unique_file = NULL; j->unique_offset = 0; + j->unique_file_lost = false; return 0; } _public_ int sd_journal_enumerate_unique(sd_journal *j, const void **data, size_t *l) { - Object *o; size_t k; - int r; assert_return(j, -EINVAL); assert_return(!journal_pid_changed(j), -ECHILD); @@ -2478,18 +2501,24 @@ _public_ int sd_journal_enumerate_unique(sd_journal *j, const void **data, size_ k = strlen(j->unique_field); if (!j->unique_file) { - j->unique_file = hashmap_first(j->files); + if (j->unique_file_lost) + return 0; + + j->unique_file = ordered_hashmap_first(j->files); if (!j->unique_file) return 0; + j->unique_offset = 0; } for (;;) { JournalFile *of; Iterator i; + Object *o; const void *odata; size_t ol; bool found; + int r; /* Proceed to next data object in the field's linked list */ if (j->unique_offset == 0) { @@ -2508,36 +2537,52 @@ _public_ int sd_journal_enumerate_unique(sd_journal *j, const void **data, size_ /* We reached the end of the list? 
Then start again, with the next file */ if (j->unique_offset == 0) { - JournalFile *n; - - n = hashmap_next(j->files, j->unique_file->path); - if (!n) + j->unique_file = ordered_hashmap_next(j->files, j->unique_file->path); + if (!j->unique_file) return 0; - j->unique_file = n; continue; } - /* We do not use the type context here, but 0 instead, - * so that we can look at this data object at the same + /* We do not use OBJECT_DATA context here, but OBJECT_UNUSED + * instead, so that we can look at this data object at the same * time as one on another file */ - r = journal_file_move_to_object(j->unique_file, 0, j->unique_offset, &o); + r = journal_file_move_to_object(j->unique_file, OBJECT_UNUSED, j->unique_offset, &o); if (r < 0) return r; /* Let's do the type check by hand, since we used 0 context above. */ - if (o->object.type != OBJECT_DATA) + if (o->object.type != OBJECT_DATA) { + log_debug("%s:offset " OFSfmt ": object has type %d, expected %d", + j->unique_file->path, j->unique_offset, + o->object.type, OBJECT_DATA); return -EBADMSG; + } r = return_data(j, j->unique_file, o, &odata, &ol); if (r < 0) return r; + /* Check if we have at least the field name and "=". */ + if (ol <= k) { + log_debug("%s:offset " OFSfmt ": object has size %zu, expected at least %zu", + j->unique_file->path, j->unique_offset, + ol, k + 1); + return -EBADMSG; + } + + if (memcmp(odata, j->unique_field, k) || ((const char*) odata)[k] != '=') { + log_debug("%s:offset " OFSfmt ": object does not start with \"%s=\"", + j->unique_file->path, j->unique_offset, + j->unique_field); + return -EBADMSG; + } + /* OK, now let's see if we already returned this data * object by checking if it exists in the earlier * traversed files. */ found = false; - HASHMAP_FOREACH(of, j->files, i) { + ORDERED_HASHMAP_FOREACH(of, j->files, i) { Object *oo; uint64_t op; @@ -2575,6 +2620,7 @@ _public_ void sd_journal_restart_unique(sd_journal *j) { j->unique_file = NULL; j->unique_offset = 0; + j->unique_file_lost = false; } _public_ int sd_journal_reliable_fd(sd_journal *j) {
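
Note on the readdir() changes above: both directory-scanning hunks drop the deprecated readdir_r()/union dirent_storage combination in favour of plain readdir() with explicit errno handling, so that a NULL return can be told apart as end-of-directory versus a genuine read error. Below is a minimal, self-contained sketch of that idiom for reference; it is not taken from the patch, and the helper name list_directory() and its path parameter are invented purely for illustration.

#include <dirent.h>
#include <errno.h>
#include <stdio.h>

/* Illustrative helper (hypothetical, not from sd-journal.c): scan a
 * directory with readdir(), distinguishing "end of directory" from a
 * read error by clearing errno before every call. */
static int list_directory(const char *path) {
        DIR *d;
        struct dirent *de;

        d = opendir(path);
        if (!d)
                return -errno;

        for (;;) {
                errno = 0;               /* must be cleared before readdir() */
                de = readdir(d);
                if (!de && errno != 0) { /* NULL with errno set: real error */
                        int r = -errno;
                        closedir(d);
                        return r;
                }
                if (!de)                 /* NULL with errno still 0: end of directory */
                        break;

                /* The patch filters on the ".journal"/".journal~" suffixes here;
                 * this sketch just prints every entry name. */
                printf("%s\n", de->d_name);
        }

        closedir(d);
        return 0;
}

The same loop structure appears in both add_directory() and add_root_directory() after this patch, with the error path additionally logging through log_debug_errno() before returning.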