journal: after verification output validated time range
[elogind.git] src/journal/journal-verify.c
index 7c99d44838b3608c8f80176dd65a050932cf104d..f9a930e42eac05d4f08cbf8549e7cf615e27220f 100644
 #include "journal-verify.h"
 #include "lookup3.h"
 #include "compress.h"
+#include "fsprg.h"
 
 /* FIXME:
  *
- * - verify FSPRG
+ * - write bit mucking test
+ * - evolve key even if nothing happened in regular intervals
+ *
  * - Allow building without libgcrypt
  * - check with sparse
  * - 64bit conversions
- * - verification should use MAP_PRIVATE
  *
  * */
 
@@ -285,7 +287,7 @@ static int entry_points_to_data(
                         if (le64toh(o->entry_array.items[j]) == entry_p)
                                 return 0;
 
-                a = le64toh(o->entry_array.next_entry_array_offset);;
+                a = le64toh(o->entry_array.next_entry_array_offset);
         }
 
         return 0;
@@ -316,16 +318,17 @@ static int verify_data(
         if (r < 0)
                 return r;
 
+        i = 1;
         while (i < n) {
                 uint64_t next, m, j;
 
                 if (a == 0) {
-                        log_error("Array chain too short at %llu.", (unsigned long long) p);
+                        log_error("Array chain too short at %llu", (unsigned long long) p);
                         return -EBADMSG;
                 }
 
                 if (!contains_uint64(f->mmap, entry_array_fd, n_entry_arrays, a)) {
-                        log_error("Invalid array at %llu.", (unsigned long long) p);
+                        log_error("Invalid array at %llu", (unsigned long long) p);
                         return -EBADMSG;
                 }
 
@@ -335,7 +338,7 @@ static int verify_data(
 
                 next = le64toh(o->entry_array.next_entry_array_offset);
                 if (next != 0 && next <= a) {
-                        log_error("Array chain has cycle at %llu.", (unsigned long long) p);
+                        log_error("Array chain has cycle at %llu", (unsigned long long) p);
                         return -EBADMSG;
                 }
 
@@ -344,7 +347,7 @@ static int verify_data(
 
                         q = le64toh(o->entry_array.items[j]);
                         if (q <= last) {
-                                log_error("Data object's entry array not sorted at %llu.", (unsigned long long) p);
+                                log_error("Data object's entry array not sorted at %llu", (unsigned long long) p);
                                 return -EBADMSG;
                         }
                         last = q;
@@ -393,7 +396,7 @@ static int verify_hash_table(
                         uint64_t next;
 
                         if (!contains_uint64(f->mmap, data_fd, n_data, p)) {
-                                log_error("Invalid data object at hash entry %llu of %llu.",
+                                log_error("Invalid data object at hash entry %llu of %llu",
                                           (unsigned long long) i, (unsigned long long) n);
                                 return -EBADMSG;
                         }
@@ -404,13 +407,13 @@ static int verify_hash_table(
 
                         next = le64toh(o->data.next_hash_offset);
                         if (next != 0 && next <= p) {
-                                log_error("Hash chain has a cycle in hash entry %llu of %llu.",
+                                log_error("Hash chain has a cycle in hash entry %llu of %llu",
                                           (unsigned long long) i, (unsigned long long) n);
                                 return -EBADMSG;
                         }
 
                         if (le64toh(o->data.hash) % n != i) {
-                                log_error("Hash value mismatch in hash entry %llu of %llu.",
+                                log_error("Hash value mismatch in hash entry %llu of %llu",
                                           (unsigned long long) i, (unsigned long long) n);
                                 return -EBADMSG;
                         }
@@ -424,7 +427,7 @@ static int verify_hash_table(
                 }
 
                 if (last != le64toh(f->data_hash_table[i].tail_hash_offset)) {
-                        log_error("Tail hash pointer mismatch in hash table.");
+                        log_error("Tail hash pointer mismatch in hash table");
                         return -EBADMSG;
                 }
         }
@@ -478,8 +481,8 @@ static int verify_entry(
                 h = le64toh(o->entry.items[i].hash);
 
                 if (!contains_uint64(f->mmap, data_fd, n_data, q)) {
-                        log_error("Invalid data object at entry %llu.",
-                                  (unsigned long long) o);
+                        log_error("Invalid data object at entry %llu",
+                                  (unsigned long long) p);
                                 return -EBADMSG;
                         }
 
@@ -488,7 +491,7 @@ static int verify_entry(
                         return r;
 
                 if (le64toh(u->data.hash) != h) {
-                        log_error("Hash mismatch for data object at entry %llu.",
+                        log_error("Hash mismatch for data object at entry %llu",
                                   (unsigned long long) p);
                         return -EBADMSG;
                 }
@@ -497,7 +500,7 @@ static int verify_entry(
                 if (r < 0)
                         return r;
                 if (r == 0) {
-                        log_error("Data object missing from hash at entry %llu.",
+                        log_error("Data object missing from hash at entry %llu",
                                   (unsigned long long) p);
                         return -EBADMSG;
                 }
@@ -531,13 +534,13 @@ static int verify_entry_array(
                 draw_progress(0x8000 + (0x3FFF * i / n), last_usec);
 
                 if (a == 0) {
-                        log_error("Array chain too short at %llu of %llu.",
+                        log_error("Array chain too short at %llu of %llu",
                                   (unsigned long long) i, (unsigned long long) n);
                         return -EBADMSG;
                 }
 
                 if (!contains_uint64(f->mmap, entry_array_fd, n_entry_arrays, a)) {
-                        log_error("Invalid array at %llu of %llu.",
+                        log_error("Invalid array at %llu of %llu",
                                   (unsigned long long) i, (unsigned long long) n);
                         return -EBADMSG;
                 }
@@ -548,7 +551,7 @@ static int verify_entry_array(
 
                 next = le64toh(o->entry_array.next_entry_array_offset);
                 if (next != 0 && next <= a) {
-                        log_error("Array chain has cycle at %llu of %llu.",
+                        log_error("Array chain has cycle at %llu of %llu",
                                   (unsigned long long) i, (unsigned long long) n);
                         return -EBADMSG;
                 }
@@ -559,14 +562,14 @@ static int verify_entry_array(
 
                         p = le64toh(o->entry_array.items[j]);
                         if (p <= last) {
-                                log_error("Entry array not sorted at %llu of %llu.",
+                                log_error("Entry array not sorted at %llu of %llu",
                                           (unsigned long long) i, (unsigned long long) n);
                                 return -EBADMSG;
                         }
                         last = p;
 
                         if (!contains_uint64(f->mmap, entry_fd, n_entries, p)) {
-                                log_error("Invalid array entry at %llu of %llu.",
+                                log_error("Invalid array entry at %llu of %llu",
                                           (unsigned long long) i, (unsigned long long) n);
                                 return -EBADMSG;
                         }
@@ -591,14 +594,73 @@ static int verify_entry_array(
         return 0;
 }
 
-int journal_file_verify(JournalFile *f, const char *key) {
+static int journal_file_parse_verification_key(JournalFile *f, const char *key) {
+        uint8_t *seed;
+        size_t seed_size, c;
+        const char *k;
+        int r;
+        unsigned long long start, interval;
+
+        seed_size = FSPRG_RECOMMENDED_SEEDLEN;
+        seed = malloc(seed_size);
+        if (!seed)
+                return -ENOMEM;
+
+        k = key;
+        for (c = 0; c < seed_size; c++) {
+                int x, y;
+
+                while (*k == '-')
+                        k++;
+
+                x = unhexchar(*k);
+                if (x < 0) {
+                        free(seed);
+                        return -EINVAL;
+                }
+                k++;
+                y = unhexchar(*k);
+                if (y < 0) {
+                        free(seed);
+                        return -EINVAL;
+                }
+                k++;
+
+                seed[c] = (uint8_t) (x * 16 + y);
+        }
+
+        if (*k != '/') {
+                free(seed);
+                return -EINVAL;
+        }
+        k++;
+
+        r = sscanf(k, "%llx-%llx", &start, &interval);
+        if (r != 2) {
+                free(seed);
+                return -EINVAL;
+        }
+
+        f->fsprg_seed = seed;
+        f->fsprg_seed_size = seed_size;
+
+        f->fss_start_usec = start * interval;
+        f->fss_interval_usec = interval;
+
+        return 0;
+}
+
+int journal_file_verify(
+                JournalFile *f,
+                const char *key,
+                usec_t *first_validated, usec_t *last_validated, usec_t *last_contained) {
         int r;
         Object *o;
-        uint64_t p = 0;
-        uint64_t tag_seqnum = 0, entry_seqnum = 0, entry_monotonic = 0, entry_realtime = 0;
+        uint64_t p = 0, last_tag = 0, last_epoch = 0, last_tag_realtime = 0;
+        uint64_t entry_seqnum = 0, entry_monotonic = 0, entry_realtime = 0;
         sd_id128_t entry_boot_id;
         bool entry_seqnum_set = false, entry_monotonic_set = false, entry_realtime_set = false, found_main_entry_array = false;
-        uint64_t n_weird = 0, n_objects = 0, n_entries = 0, n_data = 0, n_fields = 0, n_data_hash_tables = 0, n_field_hash_tables = 0, n_entry_arrays = 0;
+        uint64_t n_weird = 0, n_objects = 0, n_entries = 0, n_data = 0, n_fields = 0, n_data_hash_tables = 0, n_field_hash_tables = 0, n_entry_arrays = 0, n_tags = 0;
         usec_t last_usec = 0;
         int data_fd = -1, entry_fd = -1, entry_array_fd = -1;
         char data_path[] = "/var/tmp/journal-data-XXXXXX",
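The new journal_file_parse_verification_key() above accepts the sealing key in the textual form "hex seed / start-interval": hex-encoded seed bytes (any '-' separators are skipped), a '/', then the start epoch and the epoch interval as hex numbers, with the start timestamp reconstructed as start * interval. A minimal standalone sketch of the same parsing, using a fabricated key value and an assumed 12-byte (96-bit) seed length:

/* Illustrative parser for the "seed/start-interval" key format used above.
 * The key string is fabricated and SEEDLEN is assumed to match
 * FSPRG_RECOMMENDED_SEEDLEN (96 bits). */
#include <stdint.h>
#include <stdio.h>

#define SEEDLEN 12

static int unhex(char c) {
        if (c >= '0' && c <= '9') return c - '0';
        if (c >= 'a' && c <= 'f') return c - 'a' + 10;
        if (c >= 'A' && c <= 'F') return c - 'A' + 10;
        return -1;
}

int main(void) {
        const char *key = "a839fc-1c2d3e-4f5061-728394/18c7e4-35a4e900"; /* fabricated */
        uint8_t seed[SEEDLEN];
        const char *k = key;
        unsigned long long start, interval;
        size_t c;

        for (c = 0; c < SEEDLEN; c++) {
                int x, y;

                while (*k == '-')
                        k++;

                x = unhex(k[0]);
                y = unhex(k[1]);
                if (x < 0 || y < 0)
                        return 1;

                seed[c] = (uint8_t) (x * 16 + y);
                k += 2;
        }

        if (*k++ != '/' || sscanf(k, "%llx-%llx", &start, &interval) != 2)
                return 1;

        printf("seed =");
        for (c = 0; c < SEEDLEN; c++)
                printf(" %02x", seed[c]);
        printf("\n");

        /* As in the patch: the key stores an epoch counter, not an absolute
         * time, so the start timestamp is reconstructed as start * interval. */
        printf("fss_start_usec=%llu fss_interval_usec=%llu\n",
               start * interval, interval);
        return 0;
}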
@@ -607,9 +669,18 @@ int journal_file_verify(JournalFile *f, const char *key) {
 
         assert(f);
 
+        if (key) {
+                r = journal_file_parse_verification_key(f, key);
+                if (r < 0) {
+                        log_error("Failed to parse seed.");
+                        return r;
+                }
+        }
+
         data_fd = mkostemp(data_path, O_CLOEXEC);
         if (data_fd < 0) {
                 log_error("Failed to create data file: %m");
+                r = -errno;
                 goto fail;
         }
         unlink(data_path);
@@ -617,6 +688,7 @@ int journal_file_verify(JournalFile *f, const char *key) {
         entry_fd = mkostemp(entry_path, O_CLOEXEC);
         if (entry_fd < 0) {
                 log_error("Failed to create entry file: %m");
+                r = -errno;
                 goto fail;
         }
         unlink(entry_path);
@@ -624,6 +696,7 @@ int journal_file_verify(JournalFile *f, const char *key) {
         entry_array_fd = mkostemp(entry_array_path, O_CLOEXEC);
         if (entry_array_fd < 0) {
                 log_error("Failed to create entry array file: %m");
+                r = -errno;
                 goto fail;
         }
         unlink(entry_array_path);
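The three added "r = -errno;" lines fix the error path for the unlinked temporary offset files: before this change r was not set when mkostemp() failed, so the goto fail path could return an indeterminate or stale value. A minimal sketch of the intended pattern, using only standard libc calls and a hypothetical template name:

/* Sketch of the mkostemp()/unlink() idiom used above: capture errno right
 * away, then unlink the name so only the anonymous fd remains. */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static int open_anonymous_tmp(void) {
        char path[] = "/var/tmp/journal-offsets-XXXXXX"; /* hypothetical template */
        int fd;

        fd = mkostemp(path, O_CLOEXEC);
        if (fd < 0)
                return -errno;  /* report the real failure, not a leftover value */

        unlink(path);           /* keep the fd, drop the name */
        return fd;
}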
@@ -631,12 +704,6 @@ int journal_file_verify(JournalFile *f, const char *key) {
         /* First iteration: we go through all objects, verify the
          * superficial structure, headers, hashes. */
 
-        r = journal_file_hmac_put_header(f);
-        if (r < 0) {
-                log_error("Failed to calculate HMAC of header.");
-                goto fail;
-        }
-
         p = le64toh(f->header->header_size);
         while (p != 0) {
                 draw_progress(0x7FFF * p / le64toh(f->header->tail_object_offset), &last_usec);
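Removing journal_file_hmac_put_header() from this spot reflects the new verification flow: the HMAC is no longer computed up front but per tag window (see the OBJECT_TAG case further down). The first pass that starts here only records the offsets of data, entry and entry-array objects into the unlinked temp files via write_uint64(); later passes answer membership queries with contains_uint64(). Neither helper is shown in this diff, so the following is a simplified sketch of their assumed behavior (the real contains_uint64() works on the mmap'd temp file rather than an in-memory array):

/* Assumed behavior of the offset-list helpers referenced above; simplified
 * signatures, not the ones in journal-verify.c. */
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

static int write_uint64(int fd, uint64_t p) {
        ssize_t k = write(fd, &p, sizeof(p));
        if (k < 0)
                return -errno;
        if (k != (ssize_t) sizeof(p))
                return -EIO;
        return 0;
}

/* Offsets are appended in file order, hence ascending, so a binary search
 * over the n recorded values answers "is p the offset of a known object?". */
static int contains_uint64(const uint64_t *array, uint64_t n, uint64_t p) {
        uint64_t lo = 0, hi = n;

        while (lo < hi) {
                uint64_t mid = lo + (hi - lo) / 2;

                if (array[mid] == p)
                        return 1;
                if (array[mid] < p)
                        lo = mid + 1;
                else
                        hi = mid;
        }

        return 0;
}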
@@ -648,7 +715,7 @@ int journal_file_verify(JournalFile *f, const char *key) {
                 }
 
                 if (le64toh(f->header->tail_object_offset) < p) {
-                        log_error("Invalid tail object pointer.");
+                        log_error("Invalid tail object pointer");
                         r = -EBADMSG;
                         goto fail;
                 }
@@ -663,40 +730,45 @@ int journal_file_verify(JournalFile *f, const char *key) {
 
                 if (o->object.flags & OBJECT_COMPRESSED &&
                     !(le32toh(f->header->incompatible_flags) & HEADER_INCOMPATIBLE_COMPRESSED)) {
-                        log_error("Compressed object without compression at %llu", (unsigned long long) p);
+                        log_error("Compressed object in file without compression at %llu", (unsigned long long) p);
                         r = -EBADMSG;
                         goto fail;
                 }
 
-                r = journal_file_hmac_put_object(f, -1, p);
-                if (r < 0) {
-                        log_error("Failed to calculate HMAC at %llu", (unsigned long long) p);
-                        goto fail;
-                }
+                switch (o->object.type) {
 
-                if (o->object.type == OBJECT_TAG) {
-
-                        if (!(le32toh(f->header->compatible_flags) & HEADER_COMPATIBLE_AUTHENTICATED)) {
-                                log_error("Tag object without authentication at %llu", (unsigned long long) p);
-                                r = -EBADMSG;
+                case OBJECT_DATA:
+                        r = write_uint64(data_fd, p);
+                        if (r < 0)
                                 goto fail;
-                        }
 
-                        if (le64toh(o->tag.seqnum) != tag_seqnum) {
-                                log_error("Tag sequence number out of synchronization at %llu", (unsigned long long) p);
+                        n_data++;
+                        break;
+
+                case OBJECT_FIELD:
+                        n_fields++;
+                        break;
+
+                case OBJECT_ENTRY:
+                        if ((le32toh(f->header->compatible_flags) & HEADER_COMPATIBLE_SEALED) && n_tags <= 0) {
+                                log_error("First entry before first tag at %llu", (unsigned long long) p);
                                 r = -EBADMSG;
                                 goto fail;
                         }
 
-                } else if (o->object.type == OBJECT_ENTRY) {
-
                         r = write_uint64(entry_fd, p);
                         if (r < 0)
                                 goto fail;
 
+                        if (last_tag_realtime > le64toh(o->entry.realtime)) {
+                                log_error("Older entry after newer tag at %llu", (unsigned long long) p);
+                                r = -EBADMSG;
+                                goto fail;
+                        }
+
                         if (!entry_seqnum_set &&
                             le64toh(o->entry.seqnum) != le64toh(f->header->head_entry_seqnum)) {
-                                log_error("Head entry sequence number incorrect");
+                                log_error("Head entry sequence number incorrect at %llu", (unsigned long long) p);
                                 r = -EBADMSG;
                                 goto fail;
                         }
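For sealed files the OBJECT_ENTRY case now enforces two ordering invariants: an entry may not appear before the first tag, and an entry's realtime timestamp may not be older than the window already sealed by the most recent tag. Expressed as a standalone predicate (a sketch with hypothetical function and parameter names, mirroring the checks above):

/* Sketch of the sealing-order checks applied to each entry above;
 * the function and parameter names are hypothetical. */
#include <stdbool.h>
#include <stdint.h>

static bool entry_order_ok(bool file_is_sealed,
                           uint64_t n_tags_seen,       /* tags seen before this entry */
                           uint64_t last_tag_realtime, /* end of last sealed window, 0 if none */
                           uint64_t entry_realtime) {

        /* In a sealed file the first tag must precede the first entry. */
        if (file_is_sealed && n_tags_seen == 0)
                return false;

        /* An entry may not be older than data already sealed by a tag. */
        if (last_tag_realtime > entry_realtime)
                return false;

        return true;
}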
@@ -734,8 +806,43 @@ int journal_file_verify(JournalFile *f, const char *key) {
                         entry_realtime_set = true;
 
                         n_entries ++;
-                } else if (o->object.type == OBJECT_ENTRY_ARRAY) {
+                        break;
+
+                case OBJECT_DATA_HASH_TABLE:
+                        if (n_data_hash_tables > 1) {
+                                log_error("More than one data hash table at %llu", (unsigned long long) p);
+                                r = -EBADMSG;
+                                goto fail;
+                        }
+
+                        if (le64toh(f->header->data_hash_table_offset) != p + offsetof(HashTableObject, items) ||
+                            le64toh(f->header->data_hash_table_size) != le64toh(o->object.size) - offsetof(HashTableObject, items)) {
+                                log_error("Header fields for data hash table invalid");
+                                r = -EBADMSG;
+                                goto fail;
+                        }
+
+                        n_data_hash_tables++;
+                        break;
 
+                case OBJECT_FIELD_HASH_TABLE:
+                        if (n_field_hash_tables > 1) {
+                                log_error("More than one field hash table at %llu", (unsigned long long) p);
+                                r = -EBADMSG;
+                                goto fail;
+                        }
+
+                        if (le64toh(f->header->field_hash_table_offset) != p + offsetof(HashTableObject, items) ||
+                            le64toh(f->header->field_hash_table_size) != le64toh(o->object.size) - offsetof(HashTableObject, items)) {
+                                log_error("Header fields for field hash table invalid");
+                                r = -EBADMSG;
+                                goto fail;
+                        }
+
+                        n_field_hash_tables++;
+                        break;
+
+                case OBJECT_ENTRY_ARRAY:
                         r = write_uint64(entry_array_fd, p);
                         if (r < 0)
                                 goto fail;
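The hash table cases check that the header's (offset, size) pair points at the items[] payload of the single table object: the offset must equal the object offset plus offsetof(HashTableObject, items), and the size must equal the object size minus that same prefix. A toy version of the relationship (the structs below are stand-ins, not the real HashTableObject layout):

/* Stand-in illustration of the header/object consistency rule above. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct toy_object_header { uint8_t type, flags, reserved[6]; uint64_t size; };
struct toy_hash_table    { struct toy_object_header object; uint64_t items[]; };

static bool hash_table_header_ok(uint64_t header_offset, uint64_t header_size,
                                 uint64_t object_offset, uint64_t object_size) {
        /* The header points at the items[] array, and its size excludes the
         * object header that precedes items[]. */
        return header_offset == object_offset + offsetof(struct toy_hash_table, items) &&
               header_size   == object_size   - offsetof(struct toy_hash_table, items);
}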
@@ -751,49 +858,93 @@ int journal_file_verify(JournalFile *f, const char *key) {
                         }
 
                         n_entry_arrays++;
+                        break;
 
-                } else if (o->object.type == OBJECT_DATA) {
+                case OBJECT_TAG: {
+                        uint64_t q, rt;
 
-                        r = write_uint64(data_fd, p);
-                        if (r < 0)
+                        if (!(le32toh(f->header->compatible_flags) & HEADER_COMPATIBLE_SEALED)) {
+                                log_error("Tag object in file without sealing at %llu", (unsigned long long) p);
+                                r = -EBADMSG;
                                 goto fail;
+                        }
 
-                        n_data++;
-
-                } else if (o->object.type == OBJECT_FIELD)
-                        n_fields++;
-                else if (o->object.type == OBJECT_DATA_HASH_TABLE) {
-                        n_data_hash_tables++;
+                        log_debug("Checking tag %llu..", (unsigned long long) le64toh(o->tag.seqnum));
 
-                        if (n_data_hash_tables > 1) {
-                                log_error("More than one data hash table at %llu", (unsigned long long) p);
+                        if (le64toh(o->tag.seqnum) != n_tags + 1) {
+                                log_error("Tag sequence number out of synchronization at %llu", (unsigned long long) p);
                                 r = -EBADMSG;
                                 goto fail;
                         }
 
-                        if (le64toh(f->header->data_hash_table_offset) != p + offsetof(HashTableObject, items) ||
-                            le64toh(f->header->data_hash_table_size) != le64toh(o->object.size) - offsetof(HashTableObject, items)) {
-                                log_error("Header fields for data hash table invalid.");
+                        if (le64toh(o->tag.epoch) < last_epoch) {
+                                log_error("Epoch sequence out of synchronization at %llu", (unsigned long long) p);
                                 r = -EBADMSG;
                                 goto fail;
                         }
-                } else if (o->object.type == OBJECT_FIELD_HASH_TABLE) {
-                        n_field_hash_tables++;
 
-                        if (n_field_hash_tables > 1) {
-                                log_error("More than one field hash table at %llu", (unsigned long long) p);
+                        rt = (o->tag.epoch + 1) * f->fss_interval_usec + f->fss_start_usec;
+                        if (entry_realtime_set && entry_realtime >= rt) {
+                                log_error("Tag/entry realtime timestamp out of synchronization at %llu", (unsigned long long) p);
                                 r = -EBADMSG;
                                 goto fail;
                         }
 
-                        if (le64toh(f->header->field_hash_table_offset) != p + offsetof(HashTableObject, items) ||
-                            le64toh(f->header->field_hash_table_size) != le64toh(o->object.size) - offsetof(HashTableObject, items)) {
-                                log_error("Header fields for field hash table invalid.");
+                        /* OK, now we know the epoch. So let's now set
+                         * it, and calculate the HMAC for everything
+                         * since the last tag. */
+                        r = journal_file_fsprg_seek(f, le64toh(o->tag.epoch));
+                        if (r < 0)
+                                goto fail;
+
+                        r = journal_file_hmac_start(f);
+                        if (r < 0)
+                                goto fail;
+
+                        if (last_tag == 0) {
+                                r = journal_file_hmac_put_header(f);
+                                if (r < 0)
+                                        goto fail;
+
+                                q = le64toh(f->header->header_size);
+                        } else
+                                q = last_tag;
+
+                        while (q <= p) {
+                                r = journal_file_move_to_object(f, -1, q, &o);
+                                if (r < 0)
+                                        goto fail;
+
+                                r = journal_file_hmac_put_object(f, -1, q);
+                                if (r < 0)
+                                        goto fail;
+
+                                q = q + ALIGN64(le64toh(o->object.size));
+                        }
+
+                        /* Position might have changed, let's reposition things */
+                        r = journal_file_move_to_object(f, -1, p, &o);
+                        if (r < 0)
+                                goto fail;
+
+                        if (memcmp(o->tag.tag, gcry_md_read(f->hmac, 0), TAG_LENGTH) != 0) {
+                                log_error("Tag failed verification at %llu", (unsigned long long) p);
                                 r = -EBADMSG;
                                 goto fail;
                         }
-                } else if (o->object.type >= _OBJECT_TYPE_MAX)
+
+                        f->hmac_running = false;
+
+                        last_tag = p + ALIGN64(le64toh(o->object.size));
+                        last_tag_realtime = rt;
+
+                        n_tags ++;
+                        break;
+                }
+
+                default:
                         n_weird ++;
+                }
 
                 if (p == le64toh(f->header->tail_object_offset))
                         p = 0;
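The OBJECT_TAG case is the core of sealed-file verification: the tag's epoch is read, the FSPRG state is seeked to that epoch, and the HMAC is recomputed over every object from the previous tag (or from the file header for the first tag) up to and including the tag object itself, then compared against the stored tag. The epoch also bounds the wall-clock window the tag may seal: rt = (epoch + 1) * fss_interval_usec + fss_start_usec is the exclusive upper bound, and entries preceding the tag must have entry_realtime < rt. A small worked example of that arithmetic, with fabricated numbers:

/* Epoch arithmetic from the OBJECT_TAG case above; the start/interval
 * values are fabricated, purely for illustration. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
        uint64_t fss_start_usec    = 1342000000000000ULL; /* fabricated */
        uint64_t fss_interval_usec = 900000000ULL;        /* 15 min, fabricated */
        uint64_t epoch = 42;                              /* as read from o->tag.epoch */

        /* Exclusive upper bound of the window sealed by this tag: entries
         * before the tag must satisfy entry_realtime < rt. */
        uint64_t rt = (epoch + 1) * fss_interval_usec + fss_start_usec;

        /* Conversely, a wall-clock time t falls into epoch (t - start) / interval. */
        uint64_t t = fss_start_usec + 42 * fss_interval_usec + 1;
        uint64_t t_epoch = (t - fss_start_usec) / fss_interval_usec;

        printf("tag epoch %" PRIu64 " seals up to %" PRIu64 " usec\n", epoch, rt);
        printf("timestamp %" PRIu64 " falls into epoch %" PRIu64 "\n", t, t_epoch);
        return 0;
}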
@@ -828,12 +979,19 @@ int journal_file_verify(JournalFile *f, const char *key) {
         }
 
         if (JOURNAL_HEADER_CONTAINS(f->header, n_tags) &&
-            tag_seqnum != le64toh(f->header->n_tags)) {
+            n_tags != le64toh(f->header->n_tags)) {
                 log_error("Tag number mismatch");
                 r = -EBADMSG;
                 goto fail;
         }
 
+        if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays) &&
+            n_entry_arrays != le64toh(f->header->n_entry_arrays)) {
+                log_error("Entry array number mismatch");
+                r = -EBADMSG;
+                goto fail;
+        }
+
         if (n_data_hash_tables != 1) {
                 log_error("Missing data hash table");
                 r = -EBADMSG;
@@ -907,6 +1065,13 @@ int journal_file_verify(JournalFile *f, const char *key) {
         close_nointr_nofail(entry_fd);
         close_nointr_nofail(entry_array_fd);
 
+        if (first_validated)
+                *first_validated = le64toh(f->header->head_entry_realtime);
+        if (last_validated)
+                *last_validated = last_tag_realtime;
+        if (last_contained)
+                *last_contained = le64toh(f->header->tail_entry_realtime);
+
         return 0;
 
 fail:
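With the new out-parameters a caller can report which time range is actually covered by verified seals (head_entry_realtime up to the last verified tag) versus what the file merely contains (up to tail_entry_realtime). A hypothetical caller built on helpers from the same source tree (format_timestamp(), log_info()); the message wording is illustrative, not taken from this commit:

/* Hypothetical caller of the new journal_file_verify() signature; assumes
 * journal-file.h, journal-verify.h and util.h from the same tree. */
static int verify_and_report(JournalFile *f, const char *key) {
        usec_t first = 0, validated = 0, contained = 0;
        char a[FORMAT_TIMESTAMP_MAX], b[FORMAT_TIMESTAMP_MAX];
        int r;

        r = journal_file_verify(f, key, &first, &validated, &contained);
        if (r < 0)
                return r;

        if (validated > 0)
                log_info("Validated from %s to %s.",
                         format_timestamp(a, sizeof(a), first),
                         format_timestamp(b, sizeof(b), validated));

        if (contained > validated)
                log_info("File continues past the last seal, up to %s.",
                         format_timestamp(a, sizeof(a), contained));

        return 0;
}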