chiark / gitweb /
doxygen: search result clarity improvements.
authorVladimír Vondruš <mosra@centrum.cz>
Sun, 28 Jan 2018 19:56:27 +0000 (20:56 +0100)
committerVladimír Vondruš <mosra@centrum.cz>
Sat, 3 Feb 2018 09:51:55 +0000 (10:51 +0100)
 * Function/macro params and suffix (such as const&&) are included in
   search results.
 * Directories are displayed with / at the end.
 * Parent pages are included in the name.

In order to properly highlight the typed value, a new "suffix length"
field is introduced in the result map, saying how much text there
is after the matching trie entry.

Also, in order to display the right angle quote for subpages, a bunch of
UTF-8-awareness fixes were done across the code.

doxygen/dox2html5.py
doxygen/search.js
doxygen/test/js-test-data/searchdata.b85
doxygen/test/js-test-data/searchdata.bin
doxygen/test/populate-js-test-data.py
doxygen/test/test-search.js
doxygen/test/test_search.py

index 81e6d2a760c466524938a5ebbded62138a3ecd86..522bcab688d0e941220b13b66a22219c922ae356 100755 (executable)
@@ -38,6 +38,7 @@ import struct
 import subprocess
 import urllib.parse
 import logging
+from enum import Flag
 from types import SimpleNamespace as Empty
 from typing import Tuple, Dict, Any, List
 
@@ -122,18 +123,43 @@ class Trie:
         self.root_offset_struct.pack_into(output, 0, self._serialize(hashtable, output))
         return output
 
+class ResultFlag(Flag):
+    HAS_SUFFIX = 1
+
 class ResultMap:
-    # item 1 flags | item 2 flags |     | item N flags | file |   item 1      |
-    #   + offset   |   + offset   | ... |   + offset   | size | name + url    | ...
-    #    8 + 24b   |    8 + 24b   |     |    8 + 24b   |  32b | (0-delimited) |
+    # item 1 flags | item 2 flags |     | item N flags | file | item 1 |
+    #   + offset   |   + offset   | ... |   + offset   | size |  data  | ...
+    #    8 + 24b   |    8 + 24b   |     |    8 + 24b   |  32b |        |
+    #
+    # basic item (flags & 0x1 == 0):
+    #
+    # name | \0 | URL
+    #      |    |
+    #      | 8b |
+    #
+    # function item (flags & 0x1 == 1):
+    #
+    # suffix | name | \0 | URL
+    # length |      |    |
+    #   8b   |      | 8b |
+    #
     offset_struct = struct.Struct('<I')
     flags_struct = struct.Struct('<B')
+    suffix_length_struct = struct.Struct('<B')
 
     def __init__(self):
         self.entries = []
 
-    def add(self, name, url, flags = 0) -> int:
-        self.entries += [(name, url, flags)]
+    def add(self, name, url, suffix_length=0, flags=ResultFlag(0)) -> int:
+        if suffix_length: flags |= ResultFlag.HAS_SUFFIX
+
+        entry = Empty()
+        entry.name = name
+        entry.url = url
+        entry.flags = flags
+        entry.suffix_length = suffix_length
+
+        self.entries += [entry]
         return len(self.entries) - 1
 
     def serialize(self) -> bytearray:
@@ -142,22 +168,29 @@ class ResultMap:
         # Write the offset array. Starting offset for items is after the offset
         # array and the file size
         offset = (len(self.entries) + 1)*4
-        for name, url, flags in self.entries:
+        for e in self.entries:
             assert offset < 2**24
             output += self.offset_struct.pack(offset)
-            self.flags_struct.pack_into(output, len(output) - 1, flags)
+            self.flags_struct.pack_into(output, len(output) - 1, e.flags.value)
+
+            # Extra field for suffix length
+            if e.flags & ResultFlag.HAS_SUFFIX:
+                offset += len(e.name.encode('utf-8')) + len(e.url.encode('utf-8')) + 2
 
-            # include the 0-delimiter
-            offset += len(name) + len(url) + 1
+            # Just the 0-delimiter
+            else:
+                offset += len(e.name.encode('utf-8')) + len(e.url.encode('utf-8')) + 1
 
         # Write file size
         output += self.offset_struct.pack(offset)
 
         # Write the entries themselves
-        for name, url, _ in self.entries:
-            output += name.encode('utf-8')
+        for e in self.entries:
+            if e.flags & ResultFlag.HAS_SUFFIX:
+                output += self.suffix_length_struct.pack(e.suffix_length)
+            output += e.name.encode('utf-8')
             output += b'\0'
-            output += url.encode('utf-8')
+            output += e.url.encode('utf-8')
 
         assert len(output) == offset
         return output
@@ -1092,13 +1125,21 @@ def parse_enum(state: State, element: ET.Element):
         if value.description:
             enum.has_value_details = True
             if not state.doxyfile['M_SEARCH_DISABLED']:
-                state.search += [(state.current_url + '#' + value.id, state.current_prefix + [enum.name], value.name)]
+                result = Empty()
+                result.url = state.current_url + '#' + value.id
+                result.prefix = state.current_prefix + [enum.name]
+                result.name = value.name
+                state.search += [result]
         enum.values += [value]
 
     enum.has_details = enum.description or enum.has_value_details
     if enum.brief or enum.has_details or enum.has_value_details:
         if not state.doxyfile['M_SEARCH_DISABLED']:
-            state.search += [(state.current_url + '#' + enum.id, state.current_prefix, enum.name)]
+            result = Empty()
+            result.url = state.current_url + '#' + enum.id
+            result.prefix = state.current_prefix
+            result.name = enum.name
+            state.search += [result]
         return enum
     return None
 
@@ -1155,7 +1196,11 @@ def parse_typedef(state: State, element: ET.Element):
 
     typedef.has_details = typedef.description or typedef.has_template_details
     if typedef.brief or typedef.has_details:
-        state.search += [(state.current_url + '#' + typedef.id, state.current_prefix, typedef.name)]
+        result = Empty()
+        result.url = state.current_url + '#' + typedef.id
+        result.prefix = state.current_prefix
+        result.name = typedef.name
+        state.search += [result]
         return typedef
     return None
 
@@ -1250,7 +1295,13 @@ def parse_func(state: State, element: ET.Element):
     func.has_details = func.description or func.has_template_details or func.has_param_details or func.return_value
     if func.brief or func.has_details:
         if not state.doxyfile['M_SEARCH_DISABLED']:
-            state.search += [(state.current_url + '#' + func.id, state.current_prefix, func.name + '()')]
+            result = Empty()
+            result.url = state.current_url + '#' + func.id
+            result.prefix = state.current_prefix
+            result.name = func.name
+            result.params = [param.type for param in func.params]
+            result.suffix = func.suffix
+            state.search += [result]
         return func
     return None
 
@@ -1275,7 +1326,11 @@ def parse_var(state: State, element: ET.Element):
     var.has_details = not not var.description
     if var.brief or var.has_details:
         if not state.doxyfile['M_SEARCH_DISABLED']:
-            state.search += [(state.current_url + '#' + var.id, state.current_prefix, var.name)]
+            result = Empty()
+            result.url = state.current_url + '#' + var.id
+            result.prefix = state.current_prefix
+            result.name = var.name
+            state.search += [result]
         return var
     return None
 
@@ -1308,7 +1363,12 @@ def parse_define(state: State, element: ET.Element):
     define.has_details = define.description or define.return_value
     if define.brief or define.has_details:
         if not state.doxyfile['M_SEARCH_DISABLED']:
-            state.search += [(state.current_url + '#' + define.id, [], define.name + ('' if define.params is None else '()'))]
+            result = Empty()
+            result.url = state.current_url + '#' + define.id
+            result.prefix = []
+            result.name = define.name
+            result.params = None if define.params is None else [param[0] for param in define.params]
+            state.search += [result]
         return define
     return None
 
@@ -1456,25 +1516,36 @@ def _build_search_data(state: State, prefix, id: str, trie: Trie, map: ResultMap
 
     # Add current item name to prefix list
     prefixed_name = prefix + [compound.leaf_name]
+    prefixed_result_name = prefix + [compound.leaf_name]
+    suffix_length = 0
 
     # Calculate fully-qualified name
     if compound.kind in ['namespace', 'struct', 'class', 'union']:
-        joiner = '::'
+        joiner = result_joiner = '::'
     elif compound.kind in ['file', 'dir']:
-        joiner = '/'
-    else:
+        joiner = result_joiner = '/'
+    elif compound.kind in ['page', 'group']:
         joiner = ''
+        result_joiner = ' » '
+    else: assert False # pragma: no cover
+
+    # Show dirs with / at the end
+    if compound.kind == 'dir':
+        prefixed_result_name += ['']
+        suffix_length = 1
 
     # If just a leaf name, add it once
     if not joiner:
+        result_name = result_joiner.join(prefixed_result_name)
+
         # TODO: escape elsewhere so i don't have to unescape here
-        name = html.unescape(compound.leaf_name)
-        trie.insert(name.lower(), map.add(name, compound.url))
+        index = map.add(html.unescape(result_name), compound.url)
+        trie.insert(html.unescape(compound.leaf_name).lower(), index)
 
     # Otherwise add it multiple times with all possible prefixes
     else:
         # TODO: escape elsewhere so i don't have to unescape here
-        index = map.add(html.unescape(joiner.join(prefixed_name)), compound.url)
+        index = map.add(html.unescape(result_joiner.join(prefixed_result_name)), compound.url, suffix_length=suffix_length)
         for i in range(len(prefixed_name)):
             trie.insert(html.unescape(joiner.join(prefixed_name[i:])).lower(), index)
 
@@ -1490,12 +1561,25 @@ def build_search_data(state: State) -> bytearray:
         if compound.parent: continue # start from the root
         _build_search_data(state, [], id, trie, map)
 
-    for url, prefix, name in state.search:
-        # Add current item name to prefix list
-        prefixed_name = prefix + [name]
+    # TODO: examples?
+
+    for result in state.search:
+        name_with_args = result.name
+        name = result.name
+        suffix_length = 0
+        if hasattr(result, 'params') and result.params is not None:
+            name_with_args += '(' + ', '.join(result.params) + ')'
+            name += '()'
+            suffix_length += len(', '.join(result.params))
+        if hasattr(result, 'suffix') and result.suffix:
+            name_with_args += result.suffix
+            # TODO: escape elsewhere so i don't have to unescape here
+            suffix_length += len(html.unescape(result.suffix))
 
         # TODO: escape elsewhere so i don't have to unescape here
-        index = map.add(html.unescape('::'.join(prefixed_name)), url)
+        index = map.add(html.unescape('::'.join(result.prefix + [name_with_args])), result.url, suffix_length=suffix_length)
+
+        prefixed_name = result.prefix + [name]
         for i in range(len(prefixed_name)):
             trie.insert(html.unescape('::'.join(prefixed_name[i:])).lower(), index)
 
index 22e8b5cf7df34a9ae29603c78812eee3628609cf..ab8af9869c383cb123721305df824fb29cd007a3 100644 (file)
@@ -228,8 +228,16 @@ var Search = {
         /* Populate the results with all values associated with this node */
         for(let i = 0; i != valueCount; ++i) {
             let index = this.trie.getUint16(offset + (i + 1)*2, true);
-            //let flags = this.map.getUint8(index*4 + 3); /* not used yet */
+            let flags = this.map.getUint8(index*4 + 3);
             let resultOffset = this.map.getUint32(index*4, true) & 0x00ffffff;
+
+            /* The result has a suffix, extract its length */
+            let resultSuffixLength = 0;
+            if(flags & 0x01) {
+                resultSuffixLength = this.map.getUint8(resultOffset);
+                ++resultOffset;
+            }
+
             let nextResultOffset = this.map.getUint32((index + 1)*4, true) & 0x00ffffff;
 
             let name = '';
@@ -251,7 +259,11 @@ var Search = {
                 url += String.fromCharCode(this.map.getUint8(j));
             }
 
-            results.push({name: name, url: url, suffixLength: suffixLength});
+            /* Properly decode UTF-8 in the name
+               http://ecmanaut.blogspot.com/2006/07/encoding-decoding-utf8-in-javascript.html */
+            results.push({name: decodeURIComponent(escape(name)),
+                          url: url,
+                          suffixLength: suffixLength + resultSuffixLength});
 
             /* 'nuff said. */
             /* TODO: remove once proper barriers are in */
index b453a005b8fe2e6f61f4fc033e596ddeefcbe433..1fd24166796cdc8e173f73ca39ae4697d8c3b829 100644 (file)
@@ -1 +1 @@
-O+!-v7y$qP0s#O30s#R40{{d704W0i2mk;m0{{*H0B!>S6aWBe0s#X60{|cZ04W0iBme*?0{|)j0B!>SFaQ8)0{}Jv0Br*RJOBVX1OWm7LI40d0{}<>0CEEWPyhgL0{~V40CWQYTmS%L0{~(G0A&IJ1pos8ZU6u&0|0UW04M_hcmM!y0|0&i0BHjNga80-0|1Hu06GK#1OSi#06GHzmH+@{0|1@?0A~XLqyPYJ0|2T30AU9J8UO%oXaE3qumAvZ0|2%F06GK#006`Q06GHz$^Zap0|3$h0CWTc0RRI41pos8-T(k80|4d#04M_h>;M361pwFp0Aca~0BHvR_y7QHXaE3qumAvZ8~^|SGXMYpRR910dH?_bssI20%>V!Z`v3p{O<{Cs0B&JzWpi+0V`WWYbZ9PUbZu-+VRUFZI&EogC@BDLVQpn|aA9L*O<{CsE@*UZY$I)HZcSlyXgWGpWn*-2asXp&VRLg$VRUF;F<&uOWn*-2axQ3eZEQ_pbZ9y{R%K&!Z*n?1ZE0>ODF9<^VRLg$VRUF;F<&uOWn*-2axQ3eZEPcLX>LtnbZ9y{QekdqWdLJrVRLg$VRUF;F<&uKVQyz-E@*UZY)xTwXgWGlVQyz-Iy!A>ZYU`LV{Bn_b4_7%XkRg3F;Zb}XJsyEbZu-SZE0=*
\ No newline at end of file
+O+!-vL;(N*Dggih0s#R40{{d704W0i2mk;m0{{*H0B!>S6aWBe0s#X60{|cZ04W0iBme*?0{|)j0B!>SFaQ8)0{}Jv0Br*RJOBVX1OWm7LI40d0{}<>0CEEWPyhgL0{~V40CWQYTmS%L0{~(G0A&IJ1pos8ZU6u&0|0UW04M_hcmM!y0|0&i0BHjNga80-0|1Hu06GK#1OSi#06GHzmH+@{0|1@?0A~XLqyPYJ0|2T30AU9J8UO%oXaE3qumAvZ0|2%F06GK#006`Q06GHz$^Zap0|3$h0CWTc0RRI41pos8-T(k80|4d#04M_h>;M361pwFp0Aca~0BHgN1^@#90s#PJ0{{jA0A~XL3;_UP0{{{M0B{2U7y$rc0{|WY0Cfof_y7QHXaE3qumAvZBmn?(AOHXWHvj+uVgLXDhX4Qpz5oCK;Q#;u76AYNG64VpO<{Cs0B&JzWpi+0V`WWYbZ9PUbZu-1O<{CsIy!A>ZYXJPbSxlgZgeRCZeeX@b8ul}WldppXf9}UZEPcLX>LtnbZ9y{R%K&!Z*l-*Y+-YAO<{CsUol@XR%K&!Z*neZbZu+~O<{CsIyzQmV{~tFIy!A>ZYU`rV{dMAbO2*)VRLg$VRUF;F<&uOWn*-2axQ3eZEPcLX>LtnbZ9y{QekdqWdLJrVRLg$VRUF;F<&uKVQyz-E@*UZYz9qXbZ9y{QekdqWjZ=-X>KSfAY*TCb94Y>Y+-YAO<{CsUol@XQekdqWiDuRZEPcLX>L$qXJsJ5yC73_VsK$+WdL(^VsK$+WiDuRZEOGl
\ No newline at end of file
index 916a900d66fe91ee0b1e653094ec1d2b93223930..e7a0c3e91cb828fd2c1a8d07de752f5f40104bbd 100644 (file)
Binary files a/doxygen/test/js-test-data/searchdata.bin and b/doxygen/test/js-test-data/searchdata.bin differ
index a59f57cb3b688b2577b1f2cef87342f52b8a97e3..310e87442be4a10806789c5936d8217134ce0e5c 100755 (executable)
@@ -47,23 +47,24 @@ trie = Trie()
 map = ResultMap()
 
 trie.insert("math", map.add("Math", "namespaceMath.html"))
-index = map.add("Math::min()", "namespaceMath.html#min")
+index = map.add("Math::min(int, int)", "namespaceMath.html#min", suffix_length=8)
 trie.insert("math::min()", index)
 trie.insert("min()", index)
 index = map.add("Math::Vector", "classMath_1_1Vector.html")
 trie.insert("math::vector", index)
 trie.insert("vector", index)
-index = map.add("Math::Vector::min()", "classMath_1_1Vector.html#min")
+index = map.add("Math::Vector::min() const", "classMath_1_1Vector.html#min", suffix_length=6)
 trie.insert("math::vector::min()", index)
 trie.insert("vector::min()", index)
 trie.insert("min()", index)
 index = map.add("Math::Range", "classMath_1_1Range.html")
 trie.insert("math::range", index)
 trie.insert("range", index)
-index = map.add("Math::Range::min()", "classMath_1_1Range.html#min")
+index = map.add("Math::Range::min() const", "classMath_1_1Range.html#min", suffix_length=6)
 trie.insert("math::range::min()", index)
 trie.insert("range::min()", index)
 trie.insert("min()", index)
+trie.insert("subpage", map.add("Page » Subpage", "subpage.html"))
 
 with open(basedir/'searchdata.bin', 'wb') as f:
     f.write(serialize_search_data(trie, map))
index d9f035385a7cba62a4f12945710cce3b754b497e..c48dd8123b6c103b975736ccbe38ccc47391be0f 100644 (file)
@@ -65,7 +65,7 @@ const { StringDecoder } = require('string_decoder');
 /* Verify that base85-decoded file is equivalent to the binary */
 {
     let binary = fs.readFileSync(path.join(__dirname, "js-test-data/searchdata.bin"));
-    assert.ok(binary.byteLength, 531);
+    assert.equal(binary.byteLength, 630);
     let b85 = fs.readFileSync(path.join(__dirname, "js-test-data/searchdata.b85"), {encoding: 'utf-8'});
     assert.deepEqual(new DataView(binary.buffer.slice(binary.byteOffset, binary.byteOffset + binary.byteLength)), new DataView(Search.base85decode(b85), 0, binary.byteLength));
 }
@@ -100,7 +100,7 @@ const { StringDecoder } = require('string_decoder');
 {
     let buffer = fs.readFileSync(path.join(__dirname, "js-test-data/searchdata.bin"));
     assert.ok(Search.init(buffer.buffer.slice(buffer.byteOffset, buffer.byteOffset + buffer.byteLength)));
-    assert.equal(Search.symbolCount, 6);
+    assert.equal(Search.symbolCount, 7);
     assert.equal(Search.maxResults, 100);
 
     /* Blow up */
@@ -108,43 +108,43 @@ const { StringDecoder } = require('string_decoder');
         { name: 'Math',
           url: 'namespaceMath.html',
           suffixLength: 3 },
-        { name: 'Math::min()',
+        { name: 'Math::min(int, int)',
           url: 'namespaceMath.html#min',
-          suffixLength: 10 },
+          suffixLength: 18 },
         { name: 'Math::Vector',
           url: 'classMath_1_1Vector.html',
           suffixLength: 11 },
-        { name: 'Math::Vector::min()',
+        { name: 'Math::Vector::min() const',
           url: 'classMath_1_1Vector.html#min',
-          suffixLength: 18 },
+          suffixLength: 24 },
         { name: 'Math::Range',
           url: 'classMath_1_1Range.html',
           suffixLength: 10 },
-        { name: 'Math::Range::min()',
+        { name: 'Math::Range::min() const',
           url: 'classMath_1_1Range.html#min',
-          suffixLength: 17 },
-        { name: 'Math::min()',
+          suffixLength: 23 },
+        { name: 'Math::min(int, int)',
           url: 'namespaceMath.html#min',
-          suffixLength: 4 },
-        { name: 'Math::Vector::min()',
+          suffixLength: 12 },
+        { name: 'Math::Vector::min() const',
           url: 'classMath_1_1Vector.html#min',
-          suffixLength: 4 },
-        { name: 'Math::Range::min()',
+          suffixLength: 10 },
+        { name: 'Math::Range::min() const',
           url: 'classMath_1_1Range.html#min',
-          suffixLength: 4 } ];
+          suffixLength: 10 }];
     assert.deepEqual(Search.search('m'), resultsForM);
 
     /* Add more characters */
     assert.deepEqual(Search.search('min'), [
-        { name: 'Math::min()',
+        { name: 'Math::min(int, int)',
           url: 'namespaceMath.html#min',
-          suffixLength: 2 },
-        { name: 'Math::Vector::min()',
+          suffixLength: 10 },
+        { name: 'Math::Vector::min() const',
           url: 'classMath_1_1Vector.html#min',
-          suffixLength: 2 },
-        { name: 'Math::Range::min()',
+          suffixLength: 8 },
+        { name: 'Math::Range::min() const',
           url: 'classMath_1_1Range.html#min',
-          suffixLength: 2 } ]);
+          suffixLength: 8 }]);
 
     /* Go back, get the same thing */
     assert.deepEqual(Search.search('m'), resultsForM);
@@ -154,9 +154,9 @@ const { StringDecoder } = require('string_decoder');
         { name: 'Math::Vector',
           url: 'classMath_1_1Vector.html',
           suffixLength: 3 },
-        { name: 'Math::Vector::min()',
+        { name: 'Math::Vector::min() const',
           url: 'classMath_1_1Vector.html#min',
-          suffixLength: 10 }];
+          suffixLength: 16 }];
     assert.deepEqual(Search.search('vec'), resultsForVec);
 
     /* Uppercase things and spaces */
@@ -164,21 +164,27 @@ const { StringDecoder } = require('string_decoder');
 
     /* Not found */
     assert.deepEqual(Search.search('pizza'), []);
+
+    /* UTF-8 decoding */
+    assert.deepEqual(Search.search('su'), [
+        { name: 'Page » Subpage',
+          url: 'subpage.html',
+          suffixLength: 5 }]);
 }
 
 /* Search, limiting the results to 3 */
 {
     let buffer = fs.readFileSync(path.join(__dirname, "js-test-data/searchdata.bin"));
     assert.ok(Search.init(buffer.buffer.slice(buffer.byteOffset, buffer.byteOffset + buffer.byteLength), 3));
-    assert.equal(Search.symbolCount, 6);
+    assert.equal(Search.symbolCount, 7);
     assert.equal(Search.maxResults, 3);
     assert.deepEqual(Search.search('m'), [
         { name: 'Math',
           url: 'namespaceMath.html',
           suffixLength: 3 },
-        { name: 'Math::min()',
+        { name: 'Math::min(int, int)',
           url: 'namespaceMath.html#min',
-          suffixLength: 10 },
+          suffixLength: 18 },
         { name: 'Math::Vector',
           url: 'classMath_1_1Vector.html',
           suffixLength: 11 }]);
@@ -188,18 +194,18 @@ const { StringDecoder } = require('string_decoder');
 {
     let b85 = fs.readFileSync(path.join(__dirname, "js-test-data/searchdata.b85"), {encoding: 'utf-8'});
     assert.ok(Search.load(b85));
-    assert.equal(Search.symbolCount, 6);
+    assert.equal(Search.symbolCount, 7);
     assert.equal(Search.maxResults, 100);
     assert.deepEqual(Search.search('min'), [
-        { name: 'Math::min()',
+        { name: 'Math::min(int, int)',
           url: 'namespaceMath.html#min',
-          suffixLength: 2 },
-        { name: 'Math::Vector::min()',
+          suffixLength: 10 },
+        { name: 'Math::Vector::min() const',
           url: 'classMath_1_1Vector.html#min',
-          suffixLength: 2 },
-        { name: 'Math::Range::min()',
+          suffixLength: 8 },
+        { name: 'Math::Range::min() const',
           url: 'classMath_1_1Range.html#min',
-          suffixLength: 2 } ]);
+          suffixLength: 8 }]);
 }
 
 /* Not testing Search.download() because the xmlhttprequest npm package is *crap* */
index 87a9a7cce768c74d8830a4ad4e04a8a00d78fd9b..9029b1a933ff80aaa87b0acb3cdaa64df7e89af1 100755 (executable)
@@ -30,7 +30,7 @@ import sys
 import unittest
 from types import SimpleNamespace as Empty
 
-from dox2html5 import Trie, ResultMap, serialize_search_data, search_data_header_struct
+from dox2html5 import Trie, ResultMap, ResultFlag, serialize_search_data, search_data_header_struct
 
 from test import IntegrationTestCase
 
@@ -106,10 +106,14 @@ def pretty_print_map(serialized: bytes):
     out = ''
     for i in range(size):
         if i: out += '\n'
-        flags = ResultMap.flags_struct.unpack_from(serialized, i*4 + 3)[0]
+        flags = ResultFlag(ResultMap.flags_struct.unpack_from(serialized, i*4 + 3)[0])
+        extra = [str(int(flags.value))]
+        if flags & ResultFlag.HAS_SUFFIX:
+            extra += ['suffix_length={}'.format(ResultMap.suffix_length_struct.unpack_from(serialized, offset)[0])]
+            offset += 1
         next_offset = ResultMap.offset_struct.unpack_from(serialized, (i + 1)*4)[0] & 0x00ffffff
         name, _, url = serialized[offset:next_offset].partition(b'\0')
-        out += "{}: {} [{}] -> {}".format(i, name.decode('utf-8'), flags, url.decode('utf-8'))
+        out += "{}: {} [{}] -> {}".format(i, name.decode('utf-8'), ', '.join(extra), url.decode('utf-8'))
         offset = next_offset
     return out
 
@@ -230,32 +234,32 @@ class MapSerialization(unittest.TestCase):
 
     def test_single(self):
         map = ResultMap()
-        self.assertEqual(map.add("Magnum", "namespaceMagnum.html", 11), 0)
+        self.assertEqual(map.add("Magnum", "namespaceMagnum.html", suffix_length=11), 0)
 
         serialized = map.serialize()
         self.compare(serialized, """
-0: Magnum [11] -> namespaceMagnum.html
+0: Magnum [1, suffix_length=11] -> namespaceMagnum.html
 """)
-        self.assertEqual(len(serialized), 35)
+        self.assertEqual(len(serialized), 36)
 
     def test_multiple(self):
         map = ResultMap()
 
         self.assertEqual(map.add("Math", "namespaceMath.html"), 0)
-        self.assertEqual(map.add("Math::Vector", "classMath_1_1Vector.html", 42), 1)
-        self.assertEqual(map.add("Math::Range", "classMath_1_1Range.html", 255), 2)
+        self.assertEqual(map.add("Math::Vector", "classMath_1_1Vector.html"), 1)
+        self.assertEqual(map.add("Math::Range", "classMath_1_1Range.html"), 2)
         self.assertEqual(map.add("Math::min()", "namespaceMath.html#abcdef2875"), 3)
-        self.assertEqual(map.add("Math::max()", "namespaceMath.html#abcdef2875"), 4)
+        self.assertEqual(map.add("Math::max(int, int)", "namespaceMath.html#abcdef2875", suffix_length=8), 4)
 
         serialized = map.serialize()
         self.compare(serialized, """
 0: Math [0] -> namespaceMath.html
-1: Math::Vector [42] -> classMath_1_1Vector.html
-2: Math::Range [255] -> classMath_1_1Range.html
+1: Math::Vector [0] -> classMath_1_1Vector.html
+2: Math::Range [0] -> classMath_1_1Range.html
 3: Math::min() [0] -> namespaceMath.html#abcdef2875
-4: Math::max() [0] -> namespaceMath.html#abcdef2875
+4: Math::max(int, int) [1, suffix_length=8] -> namespaceMath.html#abcdef2875
 """)
-        self.assertEqual(len(serialized), 201)
+        self.assertEqual(len(serialized), 210)
 
 class Serialization(unittest.TestCase):
     def __init__(self, *args, **kwargs):
@@ -272,10 +276,10 @@ class Serialization(unittest.TestCase):
         map = ResultMap()
 
         trie.insert("math", map.add("Math", "namespaceMath.html"))
-        index = map.add("Math::Vector", "classMath_1_1Vector.html", 42)
+        index = map.add("Math::Vector", "classMath_1_1Vector.html")
         trie.insert("math::vector", index)
         trie.insert("vector", index)
-        index = map.add("Math::Range", "classMath_1_1Range.html", 255)
+        index = map.add("Math::Range", "classMath_1_1Range.html")
         trie.insert("math::range", index)
         trie.insert("range", index)
 
@@ -287,8 +291,8 @@ math [0]
 vector [1]
 range [2]
 0: Math [0] -> namespaceMath.html
-1: Math::Vector [42] -> classMath_1_1Vector.html
-2: Math::Range [255] -> classMath_1_1Range.html
+1: Math::Vector [0] -> classMath_1_1Vector.html
+2: Math::Range [0] -> classMath_1_1Range.html
 """)
         self.assertEqual(len(serialized), 241)
 
@@ -329,20 +333,20 @@ macro [14]
 0: Namespace [0] -> namespaceNamespace.html
 1: Namespace::Class [0] -> classNamespace_1_1Class.html
 2: A page [0] -> page.html
-3: Subpage [0] -> subpage.html
-4: Dir [0] -> dir_da5033def2d0db76e9883b31b76b3d0c.html
+3: A page » Subpage [0] -> subpage.html
+4: Dir/ [1, suffix_length=1] -> dir_da5033def2d0db76e9883b31b76b3d0c.html
 5: Dir/File.h [0] -> File_8h.html
 6: Namespace::Class::foo() [0] -> classNamespace_1_1Class.html#aaeba4096356215868370d6ea476bf5d9
-7: Namespace::Class::foo() [0] -> classNamespace_1_1Class.html#ac03c5b93907dda16763eabd26b25500a
-8: Namespace::Class::foo() [0] -> classNamespace_1_1Class.html#ac9e7e80d06281e30cfcc13171d117ade
-9: Namespace::Class::foo() [0] -> classNamespace_1_1Class.html#ac03e8437172963981197eb393e0550d3
+7: Namespace::Class::foo() const [1, suffix_length=6] -> classNamespace_1_1Class.html#ac03c5b93907dda16763eabd26b25500a
+8: Namespace::Class::foo() && [1, suffix_length=3] -> classNamespace_1_1Class.html#ac9e7e80d06281e30cfcc13171d117ade
+9: Namespace::Class::foo(with, arguments) [1, suffix_length=15] -> classNamespace_1_1Class.html#ac03e8437172963981197eb393e0550d3
 10: Namespace::Enum::Value [0] -> namespaceNamespace.html#add172b93283b1ab7612c3ca6cc5dcfeaa689202409e48743b914713f96d93947c
 11: Namespace::Enum [0] -> namespaceNamespace.html#add172b93283b1ab7612c3ca6cc5dcfea
 12: Namespace::Typedef [0] -> namespaceNamespace.html#abe2a245304bc2234927ef33175646e08
 13: Namespace::Variable [0] -> namespaceNamespace.html#ad3121960d8665ab045ca1bfa1480a86d
 14: MACRO [0] -> File_8h.html#a824c99cb152a3c2e9111a2cb9c34891e
 15: MACRO_FUNCTION() [0] -> File_8h.html#a025158d6007b306645a8eb7c7a9237c1
-16: MACRO_FUNCTION_WITH_PARAMS() [0] -> File_8h.html#a88602bba5a72becb4f2dc544ce12c420
+16: MACRO_FUNCTION_WITH_PARAMS(params) [1, suffix_length=6] -> File_8h.html#a88602bba5a72becb4f2dc544ce12c420
 """.strip())
 
 if __name__ == '__main__': # pragma: no cover