chiark / gitweb /
doxygen: implemented result map prefix merging.
authorVladimír Vondruš <mosra@centrum.cz>
Sun, 4 Feb 2018 10:02:13 +0000 (11:02 +0100)
committerVladimír Vondruš <mosra@centrum.cz>
Sun, 4 Feb 2018 22:56:27 +0000 (23:56 +0100)
doxygen/dox2html5.py
doxygen/search.js
doxygen/test/js-test-data/searchdata.b85
doxygen/test/js-test-data/searchdata.bin
doxygen/test/test-search.js
doxygen/test/test_search.py

index fbc0d23830ce3e63b722feffd0ae788e344013d6..98f46e0a3d3ad23e493f2595fe6a50bf3b9fa27e 100755 (executable)
@@ -81,7 +81,6 @@ class Trie:
         self.children[char][1]._insert(path[1:], result, [b - 1 for b in lookahead_barriers])
 
     def insert(self, path: str, result, lookahead_barriers=[]):
-        assert not path.isupper() # to avoid unnecessary duplicates
         self._insert(path.encode('utf-8'), result, lookahead_barriers)
 
     # Returns offset of the serialized thing in `output`
@@ -128,6 +127,7 @@ class Trie:
 
 class ResultFlag(Flag):
     HAS_SUFFIX = 1 << 0
+    HAS_PREFIX = 1 << 3
     DEPRECATED = 1 << 1
     DELETED = 1 << 2
 
@@ -152,20 +152,34 @@ class ResultMap:
     #   + offset   |   + offset   | ... |   + offset   | size |  data  | ...
     #    8 + 24b   |    8 + 24b   |     |    8 + 24b   |  32b |        |
     #
-    # basic item (flags & 0x1 == 0):
+    # basic item (flags & 0b11 == 0b00):
     #
     # name | \0 | URL
     #      |    |
     #      | 8b |
     #
-    # function item (flags & 0x1 == 1):
+    # suffixed item (flags & 0b11 == 0b01):
     #
     # suffix | name | \0 | URL
     # length |      |    |
     #   8b   |      | 8b |
     #
+    # prefixed item (flags & 0b11 == 0b10):
+    #
+    #  prefix  |  name  | \0 |  URL
+    # id + len | suffix |    | suffix
+    # 16b + 8b |        | 8b |
+    #
+    # prefixed & suffixed item (flags & 0b11 == 0b11):
+    #
+    #  prefix  | suffix |  name  | \0 | URL
+    # id + len | length | suffix |    |
+    # 16b + 8b |   8b   |        | 8b |
+    #
     offset_struct = struct.Struct('<I')
     flags_struct = struct.Struct('<B')
+    prefix_struct = struct.Struct('<H')
+    prefix_length_struct = struct.Struct('<B')
     suffix_length_struct = struct.Struct('<B')
 
     def __init__(self):
@@ -178,14 +192,71 @@ class ResultMap:
         entry.name = name
         entry.url = url
         entry.flags = flags
+        entry.prefix = 0
+        entry.prefix_length = 0
         entry.suffix_length = suffix_length
 
         self.entries += [entry]
         return len(self.entries) - 1
 
-    def serialize(self) -> bytearray:
+    def serialize(self, merge_prefixes=True) -> bytearray:
         output = bytearray()
 
+        if merge_prefixes:
+            # Put all entry names into a trie to discover common prefixes
+            trie = Trie()
+            for index, e in enumerate(self.entries):
+                trie.insert(e.name, index)
+
+            # Create a new list with merged prefixes
+            merged = []
+            for e in self.entries:
+                # Search in the trie and get the longest shared name prefix
+                # that is already fully contained in some other entry
+                current = trie
+                longest_prefix = None
+                for c in e.name.encode('utf-8'):
+                    # If current node has results, save it as the longest prefix
+                    if current.results:
+                        # well, the prefix would have 0 bytes
+                        assert current is not trie
+                        longest_prefix = current
+
+                    for candidate, child in current.children.items():
+                        if c == candidate:
+                            current = child[1]
+                            break
+                    else: assert False
+
+                # Name prefix found, for all possible URLs find the one that
+                # shares the longest prefix
+                if longest_prefix:
+                    max_prefix = (0, -1)
+                    for index in longest_prefix.results:
+                        prefix_length = 0
+                        for i in range(min(len(e.url), len(self.entries[index].url))):
+                            if e.url[i] != self.entries[index].url[i]: break
+                            prefix_length += 1
+                        if max_prefix[1] < prefix_length:
+                            max_prefix = (index, prefix_length)
+
+                    # Save the entry with reference to the prefix
+                    entry = Empty()
+                    assert e.name.startswith(self.entries[longest_prefix.results[0]].name)
+                    entry.name = e.name[len(self.entries[longest_prefix.results[0]].name):]
+                    entry.url = e.url[max_prefix[1]:]
+                    entry.flags = e.flags|ResultFlag.HAS_PREFIX
+                    entry.prefix = max_prefix[0]
+                    entry.prefix_length = max_prefix[1]
+                    entry.suffix_length = e.suffix_length
+                    merged += [entry]
+
+                # No prefix found, copy the entry verbatim
+                else: merged += [e]
+
+            # Everything merged, replace the original list
+            self.entries = merged
+
         # Write the offset array. Starting offset for items is after the offset
         # array and the file size
         offset = (len(self.entries) + 1)*4
@@ -194,19 +265,25 @@ class ResultMap:
             output += self.offset_struct.pack(offset)
             self.flags_struct.pack_into(output, len(output) - 1, e.flags.value)
 
+            # Extra field for prefix index and length
+            if e.flags & ResultFlag.HAS_PREFIX:
+                offset += 3
+
             # Extra field for suffix length
             if e.flags & ResultFlag.HAS_SUFFIX:
-                offset += len(e.name.encode('utf-8')) + len(e.url.encode('utf-8')) + 2
+                offset += 1
 
-            # Just the 0-delimiter
-            else:
-                offset += len(e.name.encode('utf-8')) + len(e.url.encode('utf-8')) + 1
+            # Length of name, URL and 0-delimiter
+            offset += len(e.name.encode('utf-8')) + len(e.url.encode('utf-8')) + 1
 
         # Write file size
         output += self.offset_struct.pack(offset)
 
         # Write the entries themselves
         for e in self.entries:
+            if e.flags & ResultFlag.HAS_PREFIX:
+                output += self.prefix_struct.pack(e.prefix)
+                output += self.prefix_length_struct.pack(e.prefix_length)
             if e.flags & ResultFlag.HAS_SUFFIX:
                 output += self.suffix_length_struct.pack(e.suffix_length)
             output += e.name.encode('utf-8')
@@ -218,9 +295,9 @@ class ResultMap:
 
 search_data_header_struct = struct.Struct('<3sBI')
 
-def serialize_search_data(trie: Trie, map: ResultMap, merge_subtrees=True) -> bytearray:
+def serialize_search_data(trie: Trie, map: ResultMap, merge_subtrees=True, merge_prefixes=True) -> bytearray:
     serialized_trie = trie.serialize(merge_subtrees=merge_subtrees)
-    serialized_map = map.serialize()
+    serialized_map = map.serialize(merge_prefixes=merge_prefixes)
     # magic header, version, offset of result map
     return search_data_header_struct.pack(b'MCS', 0, len(serialized_trie) + 8) + serialized_trie + serialized_map
 
@@ -1628,7 +1705,7 @@ def _build_search_data(state: State, prefix, id: str, trie: Trie, map: ResultMap
         if i in state.compounds:
             _build_search_data(state, prefixed_name, i, trie, map, add_lookahead_barriers=add_lookahead_barriers)
 
-def build_search_data(state: State, merge_subtrees=True, add_lookahead_barriers=True) -> bytearray:
+def build_search_data(state: State, merge_subtrees=True, add_lookahead_barriers=True, merge_prefixes=True) -> bytearray:
     trie = Trie()
     map = ResultMap()
 
@@ -1670,7 +1747,7 @@ def build_search_data(state: State, merge_subtrees=True, add_lookahead_barriers=
                 name += html.unescape(j)
             trie.insert(name.lower(), index, lookahead_barriers=lookahead_barriers if add_lookahead_barriers else [])
 
-    return serialize_search_data(trie, map, merge_subtrees=merge_subtrees)
+    return serialize_search_data(trie, map, merge_subtrees=merge_subtrees, merge_prefixes=merge_prefixes)
 
 def base85encode_search_data(data: bytearray) -> bytearray:
     return (b"/* Generated by http://mcss.mosra.cz/doxygen/. Do not edit. */\n" +
@@ -2496,7 +2573,7 @@ default_index_pages = ['pages', 'files', 'namespaces', 'modules', 'annotated']
 default_wildcard = '*.xml'
 default_templates = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates/')
 
-def run(doxyfile, templates=default_templates, wildcard=default_wildcard, index_pages=default_index_pages, search_add_lookahead_barriers=True, search_merge_subtrees=True):
+def run(doxyfile, templates=default_templates, wildcard=default_wildcard, index_pages=default_index_pages, search_add_lookahead_barriers=True, search_merge_subtrees=True, search_merge_prefixes=True):
     state = State()
     state.basedir = os.path.dirname(doxyfile)
 
@@ -2580,7 +2657,7 @@ def run(doxyfile, templates=default_templates, wildcard=default_wildcard, index_
             f.write(rendered)
 
     if not state.doxyfile['M_SEARCH_DISABLED']:
-        data = build_search_data(state, add_lookahead_barriers=search_add_lookahead_barriers, merge_subtrees=search_merge_subtrees)
+        data = build_search_data(state, add_lookahead_barriers=search_add_lookahead_barriers, merge_subtrees=search_merge_subtrees, merge_prefixes=search_merge_prefixes)
 
         if state.doxyfile['M_SEARCH_DOWNLOAD_BINARY']:
             with open(os.path.join(html_output, "searchdata.bin"), 'wb') as f:
@@ -2614,6 +2691,7 @@ if __name__ == '__main__': # pragma: no cover
     parser.add_argument('--no-doxygen', help="don't run Doxygen before", action='store_true')
     parser.add_argument('--search-no-subtree-merging', help="don't merge search data subtrees", action='store_true')
     parser.add_argument('--search-no-lookahead-barriers', help="don't insert search lookahead barriers", action='store_true')
+    parser.add_argument('--search-no-prefix-merging', help="don't merge search result prefixes", action='store_true')
     parser.add_argument('--debug', help="verbose debug output", action='store_true')
     args = parser.parse_args()
 
@@ -2628,4 +2706,4 @@ if __name__ == '__main__': # pragma: no cover
     if not args.no_doxygen:
         subprocess.run(["doxygen", doxyfile], cwd=os.path.dirname(doxyfile))
 
-    run(doxyfile, os.path.abspath(args.templates), args.wildcard, args.index_pages, search_merge_subtrees=not args.search_no_subtree_merging, search_add_lookahead_barriers=not args.search_no_lookahead_barriers)
+    run(doxyfile, os.path.abspath(args.templates), args.wildcard, args.index_pages, search_merge_subtrees=not args.search_no_subtree_merging, search_add_lookahead_barriers=not args.search_no_lookahead_barriers, search_merge_prefixes=not args.search_no_prefix_merging)
index 43f6a2f2d07baea4eecdfb84a07eddee22141644..2a1fe78089a92a7b1821be827fe196efaa345e27 100644 (file)
@@ -235,42 +235,7 @@ var Search = {
             let resultCount = this.trie.getUint8(offset);
             for(let i = 0; i != resultCount; ++i) {
                 let index = this.trie.getUint16(offset + (i + 1)*2, true);
-                let flags = this.map.getUint8(index*4 + 3);
-                let resultOffset = this.map.getUint32(index*4, true) & 0x00ffffff;
-
-                /* The result has a suffix, extract its length */
-                let resultSuffixLength = 0;
-                if(flags & 0x01) {
-                    resultSuffixLength = this.map.getUint8(resultOffset);
-                    ++resultOffset;
-                }
-
-                let nextResultOffset = this.map.getUint32((index + 1)*4, true) & 0x00ffffff;
-
-                let name = '';
-                let j = resultOffset;
-                for(; j != nextResultOffset; ++j) {
-                    let c = this.map.getUint8(j);
-
-                    /* End of null-delimited name */
-                    if(!c) {
-                        ++j;
-                        break; /* null-delimited */
-                    }
-
-                    name += String.fromCharCode(c); /* eheh. IS THIS FAST?! */
-                }
-
-                let url = '';
-                for(; j != nextResultOffset; ++j) {
-                    url += String.fromCharCode(this.map.getUint8(j));
-                }
-
-                /* Keeping in UTF-8, as we need that for proper slicing */
-                results.push({name: name,
-                              url: url,
-                              flags: flags,
-                              suffixLength: suffixLength + resultSuffixLength});
+                results.push(this.gatherResult(index, suffixLength, 0xffffff)); /* should be enough haha */
 
                 /* 'nuff said. */
                 if(results.length >= this.maxResults) return results;
@@ -295,6 +260,60 @@ var Search = {
         return results;
     },
 
+    gatherResult: function(index, suffixLength, maxUrlPrefix) {
+        let flags = this.map.getUint8(index*4 + 3);
+        let resultOffset = this.map.getUint32(index*4, true) & 0x00ffffff;
+
+        /* The result has a prefix, parse that first, recursively */
+        let name = '';
+        let url = '';
+        if(flags & (1 << 3)) {
+            let prefixIndex = this.map.getUint16(resultOffset, true);
+            let prefixUrlPrefixLength = Math.min(this.map.getUint8(resultOffset + 2), maxUrlPrefix);
+
+            let prefix = this.gatherResult(prefixIndex, 0 /*ignored*/, prefixUrlPrefixLength);
+            name = prefix.name;
+            url = prefix.url;
+
+            resultOffset += 3;
+        }
+
+        /* The result has a suffix, extract its length */
+        let resultSuffixLength = 0;
+        if(flags & (1 << 0)) {
+            resultSuffixLength = this.map.getUint8(resultOffset);
+            ++resultOffset;
+        }
+
+        let nextResultOffset = this.map.getUint32((index + 1)*4, true) & 0x00ffffff;
+
+        /* Extract name */
+        let j = resultOffset;
+        for(; j != nextResultOffset; ++j) {
+            let c = this.map.getUint8(j);
+
+            /* End of null-delimited name */
+            if(!c) {
+                ++j;
+                break; /* null-delimited */
+            }
+
+            name += String.fromCharCode(c); /* eheh. IS THIS FAST?! */
+        }
+
+        /* Extract URL */
+        let max = Math.min(j + maxUrlPrefix, nextResultOffset);
+        for(; j != max; ++j) {
+            url += String.fromCharCode(this.map.getUint8(j));
+        }
+
+        /* Keeping in UTF-8, as we need that for proper slicing (and concatenating) */
+        return {name: name,
+                url: url,
+                flags: flags,
+                suffixLength: suffixLength + resultSuffixLength};
+    },
+
     escapeForRtl: function(name) {
         /* Besides the obvious escaping of HTML entities we also need
            to escape punctuation, because due to the RTL hack to cut
index a9f602a19b31905eb552383bd714b835448d6543..773bb0ea609737bf02af1973e26a1070481429ef 100644 (file)
@@ -1 +1 @@
-O+!-vL;(N*Dggih0RRC2009I504V?g2mk;m009mF0B!&Q6aWBe0RRI400AHX04V?gBme*?00Alh0B!&QFaQ8)00A}t0BryPJOBVX0RaL4LI8j|00Bq<0CE5UPyhgL00CA20CWHWTmS%L00CkE0A&FH1poj6ZU6u&00D9U04M+fcmM!y00Djg0BHaLga80-00D{s06GBy1OSi#fI0vHmH+@{00Eu=0A~OJqyPYJ00F810AT<F8UO%oXaE3qumAvZ00FiD06GBy006`QfI0vH$^Zap00Ghf0CWQY0RRI41poj6-T(k800HIz04M+f>;M3600P(m0Aca~0BHdL1^@s70s#PJ009O80A~OJ3;_UP009yK0B`^S7y$rc00ABW0CfNa_y7QHXaE3qumAvZBmn?(AOHXmHvj-(VgLXlhX4R!z5oCq;Q#<-76AajG64VpO<{Cs0B&JzWpi+0V`WWYbZ9PUbZu-1O<{CsIy!A>ZYXJPbSxlgZgeRCZeeX@b8ul}WldppXf9}UZEPcLX>LtnbZ9y{R%K&!Z*l-*Y+-YAO<{CsUol@XR%K&!Z*neZbZu+~O<{CsIyzQmV{~tFIy!A>ZYU`rV{dMAbO2*)VRLg$VRUF;F<&uOWn*-2axQ3eZEPcLX>LtnbZ9y{QekdqWdLJrVRLg$VRUF;F<&uKVQyz-E@*UZYz9qXbZ9y{QekdqWjZ=-X>KSfAY*TCb94Y>Y+-YAO<{CsUol@XQekdqWiDuRZEPcLX>L$qXJsJ5yC73_VsK$+WdL(^VsK$+WiDuRZEOGl
\ No newline at end of file
+O+!-vL;(N*Dggih0RRC2009I504V?g2mk;m009mF0B!&Q6aWBe0RRI400AHX04V?gBme*?00Alh0B!&QFaQ8)00A}t0BryPJOBVX0RaL4LI8j|00Bq<0CE5UPyhgL00CA20CWHWTmS%L00CkE0A&FH1poj6ZU6u&00D9U04M+fcmM!y00Djg0BHaLga80-00D{s06GBy1OSi#fI0vHmH+@{00Eu=0A~OJqyPYJ00F810AT<F8UO%oXaE3qumAvZ00FiD06GBy006`QfI0vH$^Zap00Ghf0CWQY0RRI41poj6-T(k800HIz04M+f>;M3600P(m0Aca~0BHdL1^@s70s#PJ009O80A~OJ3;_UP009yK0B`^S7y$rc00ABW0CfNa_y7QHXaE3qumAvZBmn?(AOHXmHvj->PXGWaa{vHoi2wj7s{jCP!2kfj-2eapO<{Cs0B&JzWpi+0V`WWYbZ9PUbZu+^01^l~I&EogC~0nVEFfuabSVHMZE0=*0025VR%K&!Z*l-*Y+-YAO<{CsUol@XR%K&!Z*neZbZu+`02l^3I&EogC@COgZ*FsR03&T_ZU6uPIyzEeZf9ixV{Bn_b4_7%XkRg3F;Zb}XJsyEbZu+|02c;2I&EogC@COgZ*FsR03&T_Zct%oWgx=4AX9Z>aA9X<0CRO>aA9X<E@*UZYybcN
\ No newline at end of file
index 3c44891c33b1fa3edb935a8e0ed2e00e8ffac0d3..6ab162d541b6fca34a57ea06f74f13b0e1ba5720 100644 (file)
Binary files a/doxygen/test/js-test-data/searchdata.bin and b/doxygen/test/js-test-data/searchdata.bin differ
index 129093aa7248adec1388c8658adbcfc4b07e83e5..d09c33c54d5375fb2745d22527b12eaaa2135e4a 100644 (file)
@@ -70,7 +70,7 @@ const { StringDecoder } = require('string_decoder');
 /* Verify that base85-decoded file is equivalent to the binary */
 {
     let binary = fs.readFileSync(path.join(__dirname, "js-test-data/searchdata.bin"));
-    assert.equal(binary.byteLength, 630);
+    assert.equal(binary.byteLength, 545);
     let b85 = fs.readFileSync(path.join(__dirname, "js-test-data/searchdata.b85"), {encoding: 'utf-8'});
     assert.deepEqual(new DataView(binary.buffer.slice(binary.byteOffset, binary.byteOffset + binary.byteLength)), new DataView(Search.base85decode(b85), 0, binary.byteLength));
 }
@@ -105,7 +105,7 @@ const { StringDecoder } = require('string_decoder');
 {
     let buffer = fs.readFileSync(path.join(__dirname, "js-test-data/searchdata.bin"));
     assert.ok(Search.init(buffer.buffer.slice(buffer.byteOffset, buffer.byteOffset + buffer.byteLength)));
-    assert.equal(Search.dataSize, 0.6);
+    assert.equal(Search.dataSize, 0.5);
     assert.equal(Search.symbolCount, 7);
     assert.equal(Search.maxResults, 100);
 
@@ -117,15 +117,15 @@ const { StringDecoder } = require('string_decoder');
           suffixLength: 3 },
         { name: 'Math::min(int, int)',
           url: 'namespaceMath.html#min',
-          flags: 97,
+          flags: 105,
           suffixLength: 12 },
         { name: 'Math::Vector::min() const',
           url: 'classMath_1_1Vector.html#min',
-          flags: 97,
+          flags: 105,
           suffixLength: 10 },
         { name: 'Math::Range::min() const',
           url: 'classMath_1_1Range.html#min',
-          flags: 101,
+          flags: 109,
           suffixLength: 10 }];
     assert.deepEqual(Search.search('m'), resultsForM);
 
@@ -133,15 +133,15 @@ const { StringDecoder } = require('string_decoder');
     assert.deepEqual(Search.search('min'), [
         { name: 'Math::min(int, int)',
           url: 'namespaceMath.html#min',
-          flags: 97,
+          flags: 105,
           suffixLength: 10 },
         { name: 'Math::Vector::min() const',
           url: 'classMath_1_1Vector.html#min',
-          flags: 97,
+          flags: 105,
           suffixLength: 8 },
         { name: 'Math::Range::min() const',
           url: 'classMath_1_1Range.html#min',
-          flags: 101,
+          flags: 109,
           suffixLength: 8 }]);
 
     /* Go back, get the same thing */
@@ -151,7 +151,7 @@ const { StringDecoder } = require('string_decoder');
     let resultsForVec = [
         { name: 'Math::Vector',
           url: 'classMath_1_1Vector.html',
-          flags: 32|2, /* Deprecated */
+          flags: 40|2, /* Deprecated */
           suffixLength: 3 }];
     assert.deepEqual(Search.search('vec'), resultsForVec);
 
@@ -173,7 +173,7 @@ const { StringDecoder } = require('string_decoder');
 {
     let buffer = fs.readFileSync(path.join(__dirname, "js-test-data/searchdata.bin"));
     assert.ok(Search.init(buffer.buffer.slice(buffer.byteOffset, buffer.byteOffset + buffer.byteLength), 3));
-    assert.equal(Search.dataSize, 0.6);
+    assert.equal(Search.dataSize, 0.5);
     assert.equal(Search.symbolCount, 7);
     assert.equal(Search.maxResults, 3);
     assert.deepEqual(Search.search('m'), [
@@ -183,11 +183,11 @@ const { StringDecoder } = require('string_decoder');
           suffixLength: 3 },
         { name: 'Math::min(int, int)',
           url: 'namespaceMath.html#min',
-          flags: 97,
+          flags: 105,
           suffixLength: 12 },
         { name: 'Math::Vector::min() const',
           url: 'classMath_1_1Vector.html#min',
-          flags: 97,
+          flags: 105,
           suffixLength: 10 }]);
 }
 
@@ -195,21 +195,21 @@ const { StringDecoder } = require('string_decoder');
 {
     let b85 = fs.readFileSync(path.join(__dirname, "js-test-data/searchdata.b85"), {encoding: 'utf-8'});
     assert.ok(Search.load(b85));
-    assert.equal(Search.dataSize, 0.6);
+    assert.equal(Search.dataSize, 0.5);
     assert.equal(Search.symbolCount, 7);
     assert.equal(Search.maxResults, 100);
     assert.deepEqual(Search.search('min'), [
         { name: 'Math::min(int, int)',
           url: 'namespaceMath.html#min',
-          flags: 97,
+          flags: 105,
           suffixLength: 10 },
         { name: 'Math::Vector::min() const',
           url: 'classMath_1_1Vector.html#min',
-          flags: 97,
+          flags: 105,
           suffixLength: 8 },
         { name: 'Math::Range::min() const',
           url: 'classMath_1_1Range.html#min',
-          flags: 101,
+          flags: 109,
           suffixLength: 8 }]);
 }
 
index a05e5ea1bafeace89361f53c567d46ecd96110c1..0137f54577faec9227cc585eb93b0aef7117ec5a 100755 (executable)
@@ -132,13 +132,16 @@ def pretty_print_map(serialized: bytes, colors=False):
         if i: out += '\n'
         flags = ResultFlag(ResultMap.flags_struct.unpack_from(serialized, i*4 + 3)[0])
         extra = []
+        if flags & ResultFlag.HAS_PREFIX:
+            extra += ['prefix={}[:{}]'.format(ResultMap.prefix_struct.unpack_from(serialized, offset)[0] & 0x00ffffff, ResultMap.prefix_length_struct.unpack_from(serialized, offset + 2)[0])]
+            offset += 3
+        if flags & ResultFlag.HAS_SUFFIX:
+            extra += ['suffix_length={}'.format(ResultMap.suffix_length_struct.unpack_from(serialized, offset)[0])]
+            offset += 1
         if flags & ResultFlag.DEPRECATED:
             extra += ['deprecated']
         if flags & ResultFlag.DELETED:
             extra += ['deleted']
-        if flags & ResultFlag.HAS_SUFFIX:
-            extra += ['suffix_length={}'.format(ResultMap.suffix_length_struct.unpack_from(serialized, offset)[0])]
-            offset += 1
         if flags & ResultFlag._TYPE:
             extra += ['type={}'.format((flags & ResultFlag._TYPE).name)]
         next_offset = ResultMap.offset_struct.unpack_from(serialized, (i + 1)*4)[0] & 0x00ffffff
@@ -306,17 +309,17 @@ class MapSerialization(unittest.TestCase):
         self.assertEqual(map.add("Math::Vector", "classMath_1_1Vector.html", flags=ResultFlag.CLASS), 1)
         self.assertEqual(map.add("Math::Range", "classMath_1_1Range.html", flags=ResultFlag.CLASS), 2)
         self.assertEqual(map.add("Math::min()", "namespaceMath.html#abcdef2875", flags=ResultFlag.FUNC), 3)
-        self.assertEqual(map.add("Math::max(int, int)", "namespaceMath.html#abcdef2875", suffix_length=8, flags=ResultFlag.FUNC|ResultFlag.DEPRECATED|ResultFlag.DELETED), 4)
+        self.assertEqual(map.add("Math::max(int, int)", "namespaceMath.html#abcdef1234", suffix_length=8, flags=ResultFlag.FUNC|ResultFlag.DEPRECATED|ResultFlag.DELETED), 4)
 
         serialized = map.serialize()
         self.compare(serialized, """
 0: Math [type=NAMESPACE] -> namespaceMath.html
-1: Math::Vector [type=CLASS] -> classMath_1_1Vector.html
-2: Math::Range [type=CLASS] -> classMath_1_1Range.html
-3: Math::min() [type=FUNC] -> namespaceMath.html#abcdef2875
-4: Math::max(int, int) [deprecated, deleted, suffix_length=8, type=FUNC] -> namespaceMath.html#abcdef2875
+1: ::Vector [prefix=0[:0], type=CLASS] -> classMath_1_1Vector.html
+2: ::Range [prefix=0[:0], type=CLASS] -> classMath_1_1Range.html
+3: ::min() [prefix=0[:18], type=FUNC] -> #abcdef2875
+4: ::max(int, int) [prefix=0[:18], suffix_length=8, deprecated, deleted, type=FUNC] -> #abcdef1234
 """)
-        self.assertEqual(len(serialized), 210)
+        self.assertEqual(len(serialized), 170)
 
 class Serialization(unittest.TestCase):
     def __init__(self, *args, **kwargs):
@@ -348,10 +351,10 @@ math [0]
 vector [1]
 range [2]
 0: Math [type=NAMESPACE] -> namespaceMath.html
-1: Math::Vector [type=CLASS] -> classMath_1_1Vector.html
-2: Math::Range [type=CLASS] -> classMath_1_1Range.html
+1: ::Vector [prefix=0[:0], type=CLASS] -> classMath_1_1Vector.html
+2: ::Range [prefix=0[:0], type=CLASS] -> classMath_1_1Range.html
 """)
-        self.assertEqual(len(serialized), 241)
+        self.assertEqual(len(serialized), 239)
 
 class Search(IntegrationTestCase):
     def __init__(self, *args, **kwargs):
@@ -430,41 +433,41 @@ macro [33]
 |    _function() [34]
 |             _with_params() [35]
 0: DeprecatedNamespace [deprecated, type=NAMESPACE] -> namespaceDeprecatedNamespace.html
-1: DeprecatedNamespace::DeprecatedClass [deprecated, type=STRUCT] -> structDeprecatedNamespace_1_1DeprecatedClass.html
-2: DeprecatedNamespace::DeprecatedStruct [deprecated, type=STRUCT] -> structDeprecatedNamespace_1_1DeprecatedStruct.html
-3: DeprecatedNamespace::DeprecatedUnion [deprecated, type=UNION] -> unionDeprecatedNamespace_1_1DeprecatedUnion.html
+1: ::DeprecatedClass [prefix=0[:0], deprecated, type=STRUCT] -> structDeprecatedNamespace_1_1DeprecatedClass.html
+2: ::DeprecatedStruct [prefix=0[:0], deprecated, type=STRUCT] -> structDeprecatedNamespace_1_1DeprecatedStruct.html
+3: ::DeprecatedUnion [prefix=0[:0], deprecated, type=UNION] -> unionDeprecatedNamespace_1_1DeprecatedUnion.html
 4: A group [type=GROUP] -> group__group.html
 5: Deprecated List [type=PAGE] -> deprecated.html
 6: Namespace [type=NAMESPACE] -> namespaceNamespace.html
-7: Namespace::Class [type=CLASS] -> classNamespace_1_1Class.html
-8: Namespace::Struct [type=STRUCT] -> structNamespace_1_1Struct.html
-9: Namespace::Union [type=UNION] -> unionNamespace_1_1Union.html
+7: ::Class [prefix=6[:0], type=CLASS] -> classNamespace_1_1Class.html
+8: ::Struct [prefix=6[:0], type=STRUCT] -> structNamespace_1_1Struct.html
+9: ::Union [prefix=6[:0], type=UNION] -> unionNamespace_1_1Union.html
 10: A group [type=GROUP] -> group__deprecated-group.html
 11: A page [type=PAGE] -> page.html
-12: A page » Subpage [type=PAGE] -> subpage.html
+12:  » Subpage [prefix=11[:0], type=PAGE] -> subpage.html
 13: Dir [type=DIR] -> dir_da5033def2d0db76e9883b31b76b3d0c.html
-14: Dir/File.h [type=FILE] -> File_8h.html
+14: /File.h [prefix=13[:0], type=FILE] -> File_8h.html
 15: DeprecatedDir [deprecated, type=DIR] -> dir_c6c97faf5a6cbd0f62c27843ce3af4d0.html
-16: DeprecatedDir/DeprecatedFile.h [deprecated, type=FILE] -> DeprecatedFile_8h.html
-17: Namespace::Class::foo() [type=FUNC] -> classNamespace_1_1Class.html#aaeba4096356215868370d6ea476bf5d9
-18: Namespace::Class::foo() const [suffix_length=6, type=FUNC] -> classNamespace_1_1Class.html#ac03c5b93907dda16763eabd26b25500a
-19: Namespace::Class::foo() && [deleted, suffix_length=3, type=FUNC] -> classNamespace_1_1Class.html#ac9e7e80d06281e30cfcc13171d117ade
-20: Namespace::Class::foo(const Enum&, Typedef) [suffix_length=20, type=FUNC] -> classNamespace_1_1Class.html#aba8d57a830d4d79f86d58d92298677fa
-21: DeprecatedNamespace::DeprecatedEnum::Value [type=ENUM_VALUE] -> namespaceDeprecatedNamespace.html#ab1e37ddc1d65765f2a48485df4af7b47a689202409e48743b914713f96d93947c
-22: DeprecatedNamespace::DeprecatedEnum [deprecated, type=ENUM] -> namespaceDeprecatedNamespace.html#ab1e37ddc1d65765f2a48485df4af7b47
-23: DeprecatedNamespace::Enum::DeprecatedValue [deprecated, type=ENUM_VALUE] -> namespaceDeprecatedNamespace.html#ac59010e983270c330b8625b5433961b9a4b5b0e9709902228c33df7e5e377e596
-24: DeprecatedNamespace::Enum [type=ENUM] -> namespaceDeprecatedNamespace.html#ac59010e983270c330b8625b5433961b9
-25: DeprecatedNamespace::DeprecatedTypedef [deprecated, type=TYPEDEF] -> namespaceDeprecatedNamespace.html#af503ad3ff194a4c2512aff16df771164
-26: DeprecatedNamespace::DeprecatedVariable [deprecated, type=VAR] -> namespaceDeprecatedNamespace.html#ae934297fc39624409333eefbfeabf5e5
-27: DeprecatedNamespace::deprecatedFoo(int, bool, double) [deprecated, suffix_length=17, type=FUNC] -> namespaceDeprecatedNamespace.html#a9a1b3fc71d294b548095985acc0d5092
-28: Namespace::Enum::Value [type=ENUM_VALUE] -> namespaceNamespace.html#add172b93283b1ab7612c3ca6cc5dcfeaa689202409e48743b914713f96d93947c
-29: Namespace::Enum [type=ENUM] -> namespaceNamespace.html#add172b93283b1ab7612c3ca6cc5dcfea
-30: Namespace::Typedef [type=TYPEDEF] -> namespaceNamespace.html#abe2a245304bc2234927ef33175646e08
-31: Namespace::Variable [type=VAR] -> namespaceNamespace.html#ad3121960d8665ab045ca1bfa1480a86d
-32: DEPRECATED_MACRO(a, b, c) [deprecated, suffix_length=7, type=DEFINE] -> DeprecatedFile_8h.html#a7f8376730349fef9ff7d103b0245a13e
+16: /DeprecatedFile.h [prefix=15[:0], deprecated, type=FILE] -> DeprecatedFile_8h.html
+17: ::foo() [prefix=7[:28], type=FUNC] -> #aaeba4096356215868370d6ea476bf5d9
+18:  const [prefix=17[:30], suffix_length=6, type=FUNC] -> c03c5b93907dda16763eabd26b25500a
+19:  && [prefix=17[:30], suffix_length=3, deleted, type=FUNC] -> c9e7e80d06281e30cfcc13171d117ade
+20: ::foo(const Enum&, Typedef) [prefix=7[:28], suffix_length=20, type=FUNC] -> #aba8d57a830d4d79f86d58d92298677fa
+21: ::Value [prefix=22[:67], type=ENUM_VALUE] -> a689202409e48743b914713f96d93947c
+22: ::DeprecatedEnum [prefix=0[:33], deprecated, type=ENUM] -> #ab1e37ddc1d65765f2a48485df4af7b47
+23: ::DeprecatedValue [prefix=24[:67], deprecated, type=ENUM_VALUE] -> a4b5b0e9709902228c33df7e5e377e596
+24: ::Enum [prefix=0[:33], type=ENUM] -> #ac59010e983270c330b8625b5433961b9
+25: ::DeprecatedTypedef [prefix=0[:33], deprecated, type=TYPEDEF] -> #af503ad3ff194a4c2512aff16df771164
+26: ::DeprecatedVariable [prefix=0[:33], deprecated, type=VAR] -> #ae934297fc39624409333eefbfeabf5e5
+27: ::deprecatedFoo(int, bool, double) [prefix=0[:33], suffix_length=17, deprecated, type=FUNC] -> #a9a1b3fc71d294b548095985acc0d5092
+28: ::Value [prefix=29[:57], type=ENUM_VALUE] -> a689202409e48743b914713f96d93947c
+29: ::Enum [prefix=6[:23], type=ENUM] -> #add172b93283b1ab7612c3ca6cc5dcfea
+30: ::Typedef [prefix=6[:23], type=TYPEDEF] -> #abe2a245304bc2234927ef33175646e08
+31: ::Variable [prefix=6[:23], type=VAR] -> #ad3121960d8665ab045ca1bfa1480a86d
+32: DEPRECATED_MACRO(a, b, c) [suffix_length=7, deprecated, type=DEFINE] -> DeprecatedFile_8h.html#a7f8376730349fef9ff7d103b0245a13e
 33: MACRO [type=DEFINE] -> File_8h.html#a824c99cb152a3c2e9111a2cb9c34891e
-34: MACRO_FUNCTION() [type=DEFINE] -> File_8h.html#a025158d6007b306645a8eb7c7a9237c1
-35: MACRO_FUNCTION_WITH_PARAMS(params) [suffix_length=6, type=DEFINE] -> File_8h.html#a88602bba5a72becb4f2dc544ce12c420
+34: _FUNCTION() [prefix=33[:14], type=DEFINE] -> 025158d6007b306645a8eb7c7a9237c1
+35: _FUNCTION_WITH_PARAMS(params) [prefix=33[:15], suffix_length=6, type=DEFINE] -> 8602bba5a72becb4f2dc544ce12c420
 """.strip())
 
 if __name__ == '__main__': # pragma: no cover