journal: don't write tag objects if nothing has been written since the last time
[elogind.git] / src / journal / journal-verify.c
index c68a22e81a1a988f9b7e41deb8ec16b77cd0b8f7..9907c5fe26617c89fd87551f096c08ac9fa25408 100644 (file)
 #include "journal-verify.h"
 #include "lookup3.h"
 #include "compress.h"
+#include "fsprg.h"
 
 /* FIXME:
  *
- * - verify FSPRG
+ * - write bit mucking test
+ * - tag timestamps should be between entry timestamps
+ * - output validated time ranges
+ * - add missing fields to journal header dump
+ *
  * - Allow building without libgcrypt
  * - check with sparse
  * - 64bit conversions
@@ -315,6 +320,7 @@ static int verify_data(
         if (r < 0)
                 return r;
 
+        i = 1;
         while (i < n) {
                 uint64_t next, m, j;
 
@@ -590,14 +596,70 @@ static int verify_entry_array(
         return 0;
 }
 
+static int journal_file_parse_verification_key(JournalFile *f, const char *key) {
+        uint8_t *seed;
+        size_t seed_size, c;
+        const char *k;
+        int r;
+        unsigned long long start, interval;
+
+        seed_size = FSPRG_RECOMMENDED_SEEDLEN;
+        seed = malloc(seed_size);
+        if (!seed)
+                return -ENOMEM;
+
+        k = key;
+        for (c = 0; c < seed_size; c++) {
+                int x, y;
+
+                while (*k == '-')
+                        k++;
+
+                x = unhexchar(*k);
+                if (x < 0) {
+                        free(seed);
+                        return -EINVAL;
+                }
+                k++;
+                y = unhexchar(*k);
+                if (y < 0) {
+                        free(seed);
+                        return -EINVAL;
+                }
+                k++;
+
+                seed[c] = (uint8_t) (x * 16 + y);
+        }
+
+        if (*k != '/') {
+                free(seed);
+                return -EINVAL;
+        }
+        k++;
+
+        r = sscanf(k, "%llx-%llx", &start, &interval);
+        if (r != 2) {
+                free(seed);
+                return -EINVAL;
+        }
+
+        f->fsprg_seed = seed;
+        f->fsprg_seed_size = seed_size;
+
+        f->fss_start_usec = start;
+        f->fss_interval_usec = interval;
+
+        return 0;
+}
+
 int journal_file_verify(JournalFile *f, const char *key) {
         int r;
         Object *o;
-        uint64_t p = 0;
-        uint64_t tag_seqnum = 0, entry_seqnum = 0, entry_monotonic = 0, entry_realtime = 0;
+        uint64_t p = 0, last_tag = 0, last_epoch = 0;
+        uint64_t entry_seqnum = 0, entry_monotonic = 0, entry_realtime = 0;
         sd_id128_t entry_boot_id;
         bool entry_seqnum_set = false, entry_monotonic_set = false, entry_realtime_set = false, found_main_entry_array = false;
-        uint64_t n_weird = 0, n_objects = 0, n_entries = 0, n_data = 0, n_fields = 0, n_data_hash_tables = 0, n_field_hash_tables = 0, n_entry_arrays = 0;
+        uint64_t n_weird = 0, n_objects = 0, n_entries = 0, n_data = 0, n_fields = 0, n_data_hash_tables = 0, n_field_hash_tables = 0, n_entry_arrays = 0, n_tags = 0;
         usec_t last_usec = 0;
         int data_fd = -1, entry_fd = -1, entry_array_fd = -1;
         char data_path[] = "/var/tmp/journal-data-XXXXXX",
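
A minimal standalone sketch of the key layout that the new journal_file_parse_verification_key() above accepts: hex seed bytes (FSPRG_RECOMMENDED_SEEDLEN of them in the real code) with optional '-' separators, then '/', then the sealing start time and epoch interval as hex microsecond values. The key value below is invented and far shorter than a real seed.

/* Standalone sketch of the "<hex seed>/<start>-<interval>" key layout;
 * the key value is invented and much shorter than a real FSPRG seed. */
#include <stdint.h>
#include <stdio.h>

static int unhex(char c) {
        if (c >= '0' && c <= '9') return c - '0';
        if (c >= 'a' && c <= 'f') return c - 'a' + 10;
        if (c >= 'A' && c <= 'F') return c - 'A' + 10;
        return -1;
}

int main(void) {
        const char *k = "dead-beef-0123-4567/16a4d13ae692-6d7c4d80"; /* hypothetical key */
        uint8_t seed[64];
        size_t n = 0;
        unsigned long long start, interval;

        /* Seed bytes as hex; '-' is only a visual separator. */
        while (*k && *k != '/') {
                int x, y;

                if (*k == '-') {
                        k++;
                        continue;
                }

                x = unhex(k[0]);
                y = unhex(k[1]);
                if (x < 0 || y < 0 || n >= sizeof(seed))
                        return 1;

                seed[n++] = (uint8_t) (x * 16 + y);
                k += 2;
        }

        if (*k++ != '/')
                return 1;

        /* Sealing start time and epoch interval, hex, in microseconds. */
        if (sscanf(k, "%llx-%llx", &start, &interval) != 2)
                return 1;

        printf("%zu seed bytes (first 0x%02x), start=%llu us, interval=%llu us\n",
               n, seed[0], start, interval);
        return 0;
}

The parser added by the patch is stricter: it reads exactly FSPRG_RECOMMENDED_SEEDLEN bytes into a malloc'ed buffer before insisting on the '/', and stores the results in f->fsprg_seed, f->fss_start_usec and f->fss_interval_usec.
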
@@ -606,9 +668,18 @@ int journal_file_verify(JournalFile *f, const char *key) {
 
         assert(f);
 
+        if (key) {
+                r = journal_file_parse_verification_key(f, key);
+                if (r < 0) {
+                        log_error("Failed to parse seed.");
+                        return r;
+                }
+        }
+
         data_fd = mkostemp(data_path, O_CLOEXEC);
         if (data_fd < 0) {
                 log_error("Failed to create data file: %m");
+                r = -errno;
                 goto fail;
         }
         unlink(data_path);
@@ -616,6 +687,7 @@ int journal_file_verify(JournalFile *f, const char *key) {
         entry_fd = mkostemp(entry_path, O_CLOEXEC);
         if (entry_fd < 0) {
                 log_error("Failed to create entry file: %m");
+                r = -errno;
                 goto fail;
         }
         unlink(entry_path);
@@ -623,6 +695,7 @@ int journal_file_verify(JournalFile *f, const char *key) {
         entry_array_fd = mkostemp(entry_array_path, O_CLOEXEC);
         if (entry_array_fd < 0) {
                 log_error("Failed to create entry array file: %m");
+                r = -errno;
                 goto fail;
         }
         unlink(entry_array_path);
@@ -630,12 +703,6 @@ int journal_file_verify(JournalFile *f, const char *key) {
         /* First iteration: we go through all objects, verify the
          * superficial structure, headers, hashes. */
 
-        r = journal_file_hmac_put_header(f);
-        if (r < 0) {
-                log_error("Failed to calculate HMAC of header.");
-                goto fail;
-        }
-
         p = le64toh(f->header->header_size);
         while (p != 0) {
                 draw_progress(0x7FFF * p / le64toh(f->header->tail_object_offset), &last_usec);
@@ -667,28 +734,21 @@ int journal_file_verify(JournalFile *f, const char *key) {
                         goto fail;
                 }
 
-                r = journal_file_hmac_put_object(f, -1, p);
-                if (r < 0) {
-                        log_error("Failed to calculate HMAC at %llu", (unsigned long long) p);
-                        goto fail;
-                }
-
-                if (o->object.type == OBJECT_TAG) {
+                switch (o->object.type) {
 
-                        if (!(le32toh(f->header->compatible_flags) & HEADER_COMPATIBLE_AUTHENTICATED)) {
-                                log_error("Tag object without authentication at %llu", (unsigned long long) p);
-                                r = -EBADMSG;
+                case OBJECT_DATA:
+                        r = write_uint64(data_fd, p);
+                        if (r < 0)
                                 goto fail;
-                        }
 
-                        if (le64toh(o->tag.seqnum) != tag_seqnum) {
-                                log_error("Tag sequence number out of synchronization at %llu", (unsigned long long) p);
-                                r = -EBADMSG;
-                                goto fail;
-                        }
+                        n_data++;
+                        break;
 
-                } else if (o->object.type == OBJECT_ENTRY) {
+                case OBJECT_FIELD:
+                        n_fields++;
+                        break;
 
+                case OBJECT_ENTRY:
                         r = write_uint64(entry_fd, p);
                         if (r < 0)
                                 goto fail;
@@ -733,8 +793,43 @@ int journal_file_verify(JournalFile *f, const char *key) {
                         entry_realtime_set = true;
 
                         n_entries ++;
-                } else if (o->object.type == OBJECT_ENTRY_ARRAY) {
+                        break;
+
+                case OBJECT_DATA_HASH_TABLE:
+                        if (n_data_hash_tables > 1) {
+                                log_error("More than one data hash table at %llu", (unsigned long long) p);
+                                r = -EBADMSG;
+                                goto fail;
+                        }
+
+                        if (le64toh(f->header->data_hash_table_offset) != p + offsetof(HashTableObject, items) ||
+                            le64toh(f->header->data_hash_table_size) != le64toh(o->object.size) - offsetof(HashTableObject, items)) {
+                                log_error("Header fields for data hash table invalid.");
+                                r = -EBADMSG;
+                                goto fail;
+                        }
+
+                        n_data_hash_tables++;
+                        break;
+
+                case OBJECT_FIELD_HASH_TABLE:
+                        if (n_field_hash_tables > 1) {
+                                log_error("More than one field hash table at %llu", (unsigned long long) p);
+                                r = -EBADMSG;
+                                goto fail;
+                        }
+
+                        if (le64toh(f->header->field_hash_table_offset) != p + offsetof(HashTableObject, items) ||
+                            le64toh(f->header->field_hash_table_size) != le64toh(o->object.size) - offsetof(HashTableObject, items)) {
+                                log_error("Header fields for field hash table invalid.");
+                                r = -EBADMSG;
+                                goto fail;
+                        }
 
+                        n_field_hash_tables++;
+                        break;
+
+                case OBJECT_ENTRY_ARRAY:
                         r = write_uint64(entry_array_fd, p);
                         if (r < 0)
                                 goto fail;
@@ -750,49 +845,84 @@ int journal_file_verify(JournalFile *f, const char *key) {
                         }
 
                         n_entry_arrays++;
+                        break;
 
-                } else if (o->object.type == OBJECT_DATA) {
+                case OBJECT_TAG: {
+                        uint64_t q;
 
-                        r = write_uint64(data_fd, p);
-                        if (r < 0)
+                        if (!(le32toh(f->header->compatible_flags) & HEADER_COMPATIBLE_SEALED)) {
+                                log_error("Tag object without sealing at %llu", (unsigned long long) p);
+                                r = -EBADMSG;
                                 goto fail;
+                        }
 
-                        n_data++;
-
-                } else if (o->object.type == OBJECT_FIELD)
-                        n_fields++;
-                else if (o->object.type == OBJECT_DATA_HASH_TABLE) {
-                        n_data_hash_tables++;
+                        log_debug("Checking tag %llu..", (unsigned long long) le64toh(o->tag.seqnum));
 
-                        if (n_data_hash_tables > 1) {
-                                log_error("More than one data hash table at %llu", (unsigned long long) p);
+                        if (le64toh(o->tag.seqnum) != n_tags + 1) {
+                                log_error("Tag sequence number out of synchronization at %llu", (unsigned long long) p);
                                 r = -EBADMSG;
                                 goto fail;
                         }
 
-                        if (le64toh(f->header->data_hash_table_offset) != p + offsetof(HashTableObject, items) ||
-                            le64toh(f->header->data_hash_table_size) != le64toh(o->object.size) - offsetof(HashTableObject, items)) {
-                                log_error("Header fields for data hash table invalid.");
+                        if (le64toh(o->tag.epoch) < last_epoch) {
+                                log_error("Epoch sequence out of synchronization at %llu", (unsigned long long) p);
                                 r = -EBADMSG;
                                 goto fail;
                         }
-                } else if (o->object.type == OBJECT_FIELD_HASH_TABLE) {
-                        n_field_hash_tables++;
 
-                        if (n_field_hash_tables > 1) {
-                                log_error("More than one field hash table at %llu", (unsigned long long) p);
-                                r = -EBADMSG;
+                        /* OK, now we know the epoch. So let's now set
+                         * it, and calculate the HMAC for everything
+                         * since the last tag. */
+                        r = journal_file_fsprg_seek(f, le64toh(o->tag.epoch));
+                        if (r < 0)
                                 goto fail;
+
+                        r = journal_file_hmac_start(f);
+                        if (r < 0)
+                                goto fail;
+
+                        if (last_tag == 0) {
+                                r = journal_file_hmac_put_header(f);
+                                if (r < 0)
+                                        goto fail;
+
+                                q = le64toh(f->header->header_size);
+                        } else
+                                q = last_tag;
+
+                        while (q <= p) {
+                                r = journal_file_move_to_object(f, -1, q, &o);
+                                if (r < 0)
+                                        goto fail;
+
+                                r = journal_file_hmac_put_object(f, -1, q);
+                                if (r < 0)
+                                        goto fail;
+
+                                q = q + ALIGN64(le64toh(o->object.size));
                         }
 
-                        if (le64toh(f->header->field_hash_table_offset) != p + offsetof(HashTableObject, items) ||
-                            le64toh(f->header->field_hash_table_size) != le64toh(o->object.size) - offsetof(HashTableObject, items)) {
-                                log_error("Header fields for field hash table invalid.");
+                        /* Position might have changed, let's reposition things */
+                        r = journal_file_move_to_object(f, -1, p, &o);
+                        if (r < 0)
+                                goto fail;
+
+                        if (memcmp(o->tag.tag, gcry_md_read(f->hmac, 0), TAG_LENGTH) != 0) {
+                                log_error("Tag failed verification at %llu", (unsigned long long) p);
                                 r = -EBADMSG;
                                 goto fail;
                         }
-                } else if (o->object.type >= _OBJECT_TYPE_MAX)
+
+                        f->hmac_running = false;
+
+                        last_tag = p + ALIGN64(le64toh(o->object.size));
+                        n_tags ++;
+                        break;
+                }
+
+                default:
                         n_weird ++;
+                }
 
                 if (p == le64toh(f->header->tail_object_offset))
                         p = 0;
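
The OBJECT_TAG case above is the core of sealed verification: seek the FSPRG state to the tag's epoch, start a fresh HMAC, feed it the header (for the first tag) and every object from the previous tag up to and including this one, then compare the digest with the TAG_LENGTH bytes stored in o->tag.tag. A stripped-down sketch of that pattern with libgcrypt follows; the key, buffers and helper names are invented, SHA-256 is assumed (it matches the 256-bit tag), and the real code derives the key from the FSPRG state and walks the mmap'ed objects instead of in-memory buffers.

/* Sketch: HMAC a run of objects and compare with a stored tag, as the
 * OBJECT_TAG case does for the byte range since the previous tag.
 * Key, buffers and helper are invented.  Link with -lgcrypt. */
#include <gcrypt.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TAG_LEN (256/8)

static int hmac_range(const uint8_t *key, size_t key_len,
                      const uint8_t *objects[], const size_t sizes[],
                      size_t n, uint8_t out[TAG_LEN]) {
        gcry_md_hd_t md;
        size_t i;

        if (gcry_md_open(&md, GCRY_MD_SHA256, GCRY_MD_FLAG_HMAC) != 0)
                return -1;
        if (gcry_md_setkey(md, key, key_len) != 0) {
                gcry_md_close(md);
                return -1;
        }

        /* Feed every object in the range, oldest first, like the q loop
         * above that walks from the previous tag up to the current one. */
        for (i = 0; i < n; i++)
                gcry_md_write(md, objects[i], sizes[i]);

        memcpy(out, gcry_md_read(md, 0), TAG_LEN);
        gcry_md_close(md);
        return 0;
}

int main(void) {
        static const uint8_t key[32] = { 0x01, 0x02 };   /* hypothetical per-epoch key */
        static const uint8_t hdr[] = "header", entry[] = "entry";
        const uint8_t *objs[] = { hdr, entry };
        const size_t sizes[] = { sizeof(hdr), sizeof(entry) };
        uint8_t sealed[TAG_LEN], recomputed[TAG_LEN];

        gcry_check_version(NULL);

        /* Sealing side: what would end up in o->tag.tag. */
        if (hmac_range(key, sizeof(key), objs, sizes, 2, sealed) < 0)
                return 1;

        /* Verification side: recompute over the same range and compare,
         * mirroring the memcmp() against o->tag.tag above. */
        if (hmac_range(key, sizeof(key), objs, sizes, 2, recomputed) < 0)
                return 1;

        printf("tag %s\n", memcmp(sealed, recomputed, TAG_LEN) == 0 ? "verified" : "FAILED");
        return 0;
}

The journal_file_fsprg_seek() call right before journal_file_hmac_start() is what makes the comparison meaningful: the FSPRG state, and with it the HMAC key, evolves from epoch to epoch, so the verifier has to advance to o->tag.epoch before it can recompute a comparable tag.
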
@@ -827,12 +957,19 @@ int journal_file_verify(JournalFile *f, const char *key) {
         }
 
         if (JOURNAL_HEADER_CONTAINS(f->header, n_tags) &&
-            tag_seqnum != le64toh(f->header->n_tags)) {
+            n_tags != le64toh(f->header->n_tags)) {
                 log_error("Tag number mismatch");
                 r = -EBADMSG;
                 goto fail;
         }
 
                 log_error("Tag number mismatch");
                 r = -EBADMSG;
                 goto fail;
         }
 
+        if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays) &&
+            n_entry_arrays != le64toh(f->header->n_entry_arrays)) {
+                log_error("Entry array number mismatch");
+                r = -EBADMSG;
+                goto fail;
+        }
+
         if (n_data_hash_tables != 1) {
                 log_error("Missing data hash table");
                 r = -EBADMSG;