/* -*-c-*-
 *
 * Portable bit-level manipulation macros
 *
 * (c) 1998 Straylight/Edgeware
 */

/*----- Licensing notice --------------------------------------------------*
 *
 * This file is part of the mLib utilities library.
 *
 * mLib is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Library General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * mLib is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with mLib; if not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 */
/*----- Header files ------------------------------------------------------*/

#include <limits.h>

#if __STDC_VERSION__ >= 199901
#  include <stdint.h>
#endif

#ifndef MLIB_COMPILER_H
#  include "compiler.h"
#endif
/*----- Decide on some types ----------------------------------------------*/

/* --- Make GNU C shut up --- *
 *
 * @__extension__@ suppresses pedantic warnings (e.g. about @long long@) on
 * sufficiently recent GCC; expand to nothing elsewhere.
 */

#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 91)
#  define MLIB_BITS_EXTENSION __extension__
#else
#  define MLIB_BITS_EXTENSION
#endif
/* --- Decide on a 32-bit type --- *
 *
 * I want a type which is capable of expressing 32-bit numbers.  Because some
 * implementations have 64-bit @long@s (infinitely preferable to the abortion
 * that is @long long@), using @unsigned long@ regardless is wasteful.  So,
 * if @int@ appears to be good enough, then I'll go with that.
 */

#if UINT_MAX >= 0xffffffffu
  typedef unsigned int uint32;
#else
  typedef unsigned long uint32;
#endif
/* --- Decide on a 64-bit type --- *
 *
 * The test is quite subtle.  Think about it.  Note that (at least on my
 * machine), the 32-bit macros are *much* faster than GCC's @long long@
 * support.
 */

/* Some pre-C99 environments spell the @unsigned long long@ limit
 * @ULONG_LONG_MAX@ rather than @ULLONG_MAX@.
 */
#if defined(ULONG_LONG_MAX) && !defined(ULLONG_MAX)
#  define ULLONG_MAX ULONG_LONG_MAX
#endif

#if UINT_MAX >> 31 > 0xffffffff
#  define HAVE_UINT64
   typedef unsigned int uint64;
#elif ULONG_MAX >> 31 > 0xffffffff
#  define HAVE_UINT64
   typedef unsigned long uint64;
#elif defined(ULLONG_MAX)
#  define HAVE_UINT64
   MLIB_BITS_EXTENSION typedef unsigned long long uint64;
#endif

/* --- A 64-bit quantity: native integer, or a pair of 32-bit halves --- */

#ifdef HAVE_UINT64
  typedef struct { uint64 i; } kludge64;
#else
  typedef struct { uint32 hi, lo; } kludge64;
#endif
/* --- Decide on a 24-bit type --- */

#if UINT_MAX >= 0x00ffffffu
  typedef unsigned int uint24;
#else
  typedef unsigned long uint24;
#endif
/* --- Decide on 16-bit and 8-bit types --- *
 *
 * This is more for brevity than anything else.
 */

typedef unsigned short uint16;
typedef unsigned char octet, uint8;

/* --- WARNING! --- *
 *
 * Never lose sight of the fact that the above types may be wider than the
 * names suggest.  Some architectures have 32-bit @short@s for example.
 */
/*----- Macros ------------------------------------------------------------*/

/* --- Useful masks --- */

#define MASK8 0xffu
#define MASK16 0xffffu
#define MASK16_L MASK16
#define MASK16_B MASK16
#define MASK24 0xffffffu
#define MASK24_L MASK24
#define MASK24_B MASK24
#define MASK32 0xffffffffu
#define MASK32_L MASK32
#define MASK32_B MASK32

#ifdef HAVE_UINT64
#  define MASK64 MLIB_BITS_EXTENSION 0xffffffffffffffffu
#  define MASK64_L MASK64
#  define MASK64_B MASK64
#endif
/* --- Type aliases --- *
 *
 * Map a width (and endianness tag) to the corresponding C type; used by the
 * list macros below when generating families of declarations.
 */

#define TY_U8 octet	/* NOTE(review): reconstructed for the 8-bit row */
#define TY_U16 uint16
#define TY_U16_L uint16
#define TY_U16_B uint16
#define TY_U24 uint24
#define TY_U24_L uint24
#define TY_U24_B uint24
#define TY_U32 uint32
#define TY_U32_L uint32
#define TY_U32_B uint32

#ifdef HAVE_UINT64
#  define TY_U64 uint64
#  define TY_U64_L uint64
#  define TY_U64_B uint64
#endif
/* --- List macros --- *
 *
 * @DOUINTCONV@ applies its argument to each (width, suffixed-width, tag)
 * triple; @DOUINTSZ@ applies its argument to each supported width.
 */

#ifdef HAVE_UINT64
#  define DOUINTCONV_64(_) \
     _(64, 64, 64) _(64, 64_L, 64l) _(64, 64_B, 64b)
#  define DOUINTSZ_64(_) _(64)
#else
#  define DOUINTCONV_64(_)
#  define DOUINTSZ_64(_)
#endif

#define DOUINTCONV(_) \
  _(8, 8, 8) \
  _(16, 16, 16) _(16, 16_L, 16l) _(16, 16_B, 16b) \
  _(24, 24, 24) _(24, 24_L, 24l) _(24, 24_B, 24b) \
  _(32, 32, 32) _(32, 32_L, 32l) _(32, 32_B, 32b) \
  DOUINTCONV_64(_)

/* Fixed: previously invoked the nonexistent `_DOUINTSZ_64'. */
#define DOUINTSZ(_) _(8) _(16) _(24) _(32) DOUINTSZ_64(_)
/* --- Type coercions --- *
 *
 * Truncate to the named width and convert to the matching type.  The
 * @U64_@ form assigns one @kludge64@ from another.
 */

#define U8(x) ((octet)((x) & MASK8))
#define U16(x) ((uint16)((x) & MASK16))
#define U24(x) ((uint24)((x) & MASK24))
#define U32(x) ((uint32)((x) & MASK32))

#ifdef HAVE_UINT64
#  define U64(x) ((uint64)(x) & MASK64)
#  define U64_(d, x) ((d).i = U64((x).i))	/* fixed: was `U64(x).i' */
#else
#  define U64_(d, x) ((d).hi = U32((x).hi), (d).lo = U32((x).lo))
#endif
/* --- Safe shifting macros --- *
 *
 * The shift amount is reduced modulo the width, so a full-width shift (which
 * is undefined behaviour in C) cannot happen.  The 64-bit kludge versions
 * special-case a zero shift because @LSL32(x, 32u - 0)@ would itself reduce
 * to a zero shift and give the wrong answer.
 */

#define LSL8(v, s) U8(U8(v) << ((s) & 7u))
#define LSR8(v, s) U8(U8(v) >> ((s) & 7u))
#define LSL16(v, s) U16(U16(v) << ((s) & 15u))
#define LSR16(v, s) U16(U16(v) >> ((s) & 15u))
#define LSL24(v, s) U24(U24(v) << ((s) % 24u))
#define LSR24(v, s) U24(U24(v) >> ((s) % 24u))
#define LSL32(v, s) U32(U32(v) << ((s) & 31u))
#define LSR32(v, s) U32(U32(v) >> ((s) & 31u))

#ifdef HAVE_UINT64
#  define LSL64(v, s) U64(U64(v) << ((s) & 63u))
#  define LSR64(v, s) U64(U64(v) >> ((s) & 63u))
#  define LSL64_(d, v, s) ((d).i = LSL64((v).i, (s)))
#  define LSR64_(d, v, s) ((d).i = LSR64((v).i, (s)))
#else
#  define LSL64_(d, v, s) do { \
     unsigned _s = (s) & 63u; \
     uint32 _l = (v).lo, _h = (v).hi; \
     kludge64 *_d = &(d); \
     if (_s >= 32u) { \
       _d->hi = LSL32(_l, _s - 32u); \
       _d->lo = 0; \
     } else if (!_s) { \
       _d->lo = _l; \
       _d->hi = _h; \
     } else { \
       _d->hi = LSL32(_h, _s) | LSR32(_l, 32u - _s); \
       _d->lo = LSL32(_l, _s); \
     } \
   } while (0)
#  define LSR64_(d, v, s) do { \
     unsigned _s = (s) & 63u; \
     uint32 _l = (v).lo, _h = (v).hi; \
     kludge64 *_d = &(d); \
     if (_s >= 32u) { \
       _d->lo = LSR32(_h, _s - 32u); \
       _d->hi = 0; \
     } else if (!_s) { \
       _d->lo = _l; \
       _d->hi = _h; \
     } else { \
       _d->lo = LSR32(_l, _s) | LSL32(_h, 32u - _s); \
       _d->hi = LSR32(_h, _s); \
     } \
   } while (0)
#endif
/* --- Rotation macros --- *
 *
 * Built from the safe shifts above.  The kludge 64-bit versions handle the
 * 0- and 32-bit cases specially: a rotation by exactly 32 just swaps the
 * halves, and a rotation by 0 must not feed a 32-bit shift count to the
 * 32-bit primitives.
 */

#define ROL8(v, s) (LSL8((v), (s)) | (LSR8((v), 8u - (s))))
#define ROR8(v, s) (LSR8((v), (s)) | (LSL8((v), 8u - (s))))
#define ROL16(v, s) (LSL16((v), (s)) | (LSR16((v), 16u - (s))))
#define ROR16(v, s) (LSR16((v), (s)) | (LSL16((v), 16u - (s))))
#define ROL24(v, s) (LSL24((v), (s)) | (LSR24((v), 24u - (s))))
#define ROR24(v, s) (LSR24((v), (s)) | (LSL24((v), 24u - (s))))
#define ROL32(v, s) (LSL32((v), (s)) | (LSR32((v), 32u - (s))))
#define ROR32(v, s) (LSR32((v), (s)) | (LSL32((v), 32u - (s))))

#ifdef HAVE_UINT64
#  define ROL64(v, s) (LSL64((v), (s)) | (LSR64((v), 64u - (s))))
#  define ROR64(v, s) (LSR64((v), (s)) | (LSL64((v), 64u - (s))))
#  define ROL64_(d, v, s) ((d).i = ROL64((v).i, (s)))
#  define ROR64_(d, v, s) ((d).i = ROR64((v).i, (s)))
#else
#  define ROL64_(d, v, s) do { \
     unsigned _s = (s) & 63u; \
     uint32 _l = (v).lo, _h = (v).hi; \
     kludge64 *_d = &(d); \
     if (_s > 32u) { \
       _d->hi = LSL32(_l, _s - 32u) | LSR32(_h, 64u - _s); \
       _d->lo = LSL32(_h, _s - 32u) | LSR32(_l, 64u - _s); \
     } else if (!_s) { \
       _d->lo = _l; \
       _d->hi = _h; \
     } else if (_s == 32) { \
       _d->lo = _h; \
       _d->hi = _l; \
     } else { \
       _d->hi = LSL32(_h, _s) | LSR32(_l, 32u - _s); \
       _d->lo = LSL32(_l, _s) | LSR32(_h, 32u - _s); \
     } \
   } while (0)
#  define ROR64_(d, v, s) do { \
     unsigned _s = (s) & 63u; \
     uint32 _l = (v).lo, _h = (v).hi; \
     kludge64 *_d = &(d); \
     if (_s > 32u) { \
       _d->hi = LSR32(_l, _s - 32u) | LSL32(_h, 64u - _s); \
       _d->lo = LSR32(_h, _s - 32u) | LSL32(_l, 64u - _s); \
     } else if (!_s) { \
       _d->lo = _l; \
       _d->hi = _h; \
     } else if (_s == 32) { \
       _d->lo = _h; \
       _d->hi = _l; \
     } else { \
       _d->hi = LSR32(_h, _s) | LSL32(_l, 32u - _s); \
       _d->lo = LSR32(_l, _s) | LSL32(_h, 32u - _s); \
     } \
   } while (0)
#endif
321 /* --- Endianness swapping --- */
323 #if GCC_VERSION_P(4, 8) || CLANG_VERSION_P(3, 2)
324 # define ENDSWAP16(x) ((uint16)__builtin_bswap16(x))
326 #if GCC_VERSION_P(4, 3) || CLANG_VERSION_P(3, 2)
327 # define ENDSWAP32(x) ((uint32)__builtin_bswap32(x))
329 #if (GCC_VERSION_P(4, 3) || CLANG_VERSION_P(3, 2)) && defined(HAVE_UINT64)
330 # define ENDSWAP64(x) ((uint64)__builtin_bswap64(x))
334 # define ENDSWAP8(x) U8(x)
337 # define ENDSWAP16(x) \
338 ((((uint16)(x) >> 8)&0xff) | \
339 (((uint16)(x)&0xff) << 8))
342 # define ENDSWAP24(x) \
343 ((((uint24)(x) >> 16)&0xff) | \
344 ((uint24)(x)&0xff00) | \
345 ((uint24)((x)&0xff) << 16))
348 # define ENDSWAP32(x) \
349 (ENDSWAP16(((uint32)(x) >> 16)&0xffff) | \
350 ((uint32)ENDSWAP16((x)&0xffff) << 16))
352 #if defined(HAVE_UINT64) && !defined(ENDSWAP64)
353 # define ENDSWAP64(x) \
354 (ENDSWAP32(((uint64)(x) >> 32)&0xffffffff) | \
355 ((uint64)ENDSWAP32((x)&0xffffffff) << 32))
358 # define ENDSWAP64_(z, x) \
359 ((z).i = ENDSWAP64((x).i))
361 # define ENDSWAP64_(z, x) \
362 ((z).lo = ENDSWAP32((x).hi), \
363 (z).hi = ENDSWAP32((x).lo))
/* --- Deduce the host byte order, where the compiler tells us --- *
 *
 * @MLIB_BYTE_ORDER@ is left undefined if the compiler doesn't provide the
 * @__BYTE_ORDER__@ machinery.
 */

#define MLIB_LITTLE_ENDIAN 1234
#define MLIB_BIG_ENDIAN 4321
#if defined(__ORDER_LITTLE_ENDIAN__) && \
    __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#  define MLIB_BYTE_ORDER MLIB_LITTLE_ENDIAN
#elif defined(__ORDER_BIG_ENDIAN__) && \
    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#  define MLIB_BYTE_ORDER MLIB_BIG_ENDIAN
#endif
/* --- Host-order conversions --- *
 *
 * HTOL/LTOH and HTOB/BTOH convert between host order and little-/big-endian
 * representations; they are only defined when the host byte order is known.
 */

#if MLIB_BYTE_ORDER == MLIB_LITTLE_ENDIAN
#  define HTOL16(x) (x)
#  define LTOH16(x) (x)
#  define HTOB16(x) ENDSWAP16(x)
#  define BTOH16(x) ENDSWAP16(x)
#  define HTOL24(x) (x)
#  define LTOH24(x) (x)
#  define HTOB24(x) ENDSWAP24(x)
#  define BTOH24(x) ENDSWAP24(x)
#  define HTOL32(x) (x)
#  define LTOH32(x) (x)
#  define HTOB32(x) ENDSWAP32(x)
#  define BTOH32(x) ENDSWAP32(x)
#  ifdef HAVE_UINT64
#    define HTOL64(x) (x)
#    define LTOH64(x) (x)
#    define HTOB64(x) ENDSWAP64(x)
#    define BTOH64(x) ENDSWAP64(x)
#  endif
#  define HTOL64_(z, x) ASSIGN64(z, x)
#  define LTOH64_(z, x) ASSIGN64(z, x)
#  define HTOB64_(z, x) ENDSWAP64_(z, x)
#  define BTOH64_(z, x) ENDSWAP64_(z, x)
#elif MLIB_BYTE_ORDER == MLIB_BIG_ENDIAN
#  define HTOL16(x) ENDSWAP16(x)
#  define LTOH16(x) ENDSWAP16(x)
#  define HTOB16(x) (x)
#  define BTOH16(x) (x)
#  define HTOL24(x) ENDSWAP24(x)
#  define LTOH24(x) ENDSWAP24(x)
#  define HTOB24(x) (x)
#  define BTOH24(x) (x)
#  define HTOL32(x) ENDSWAP32(x)
#  define LTOH32(x) ENDSWAP32(x)
#  define HTOB32(x) (x)
#  define BTOH32(x) (x)
#  ifdef HAVE_UINT64
#    define HTOL64(x) ENDSWAP64(x)
#    define LTOH64(x) ENDSWAP64(x)
#    define HTOB64(x) (x)
#    define BTOH64(x) (x)
#  endif
#  define HTOL64_(z, x) ENDSWAP64_(z, x)
#  define LTOH64_(z, x) ENDSWAP64_(z, x)
#  define HTOB64_(z, x) ASSIGN64(z, x)
#  define BTOH64_(z, x) ASSIGN64(z, x)
#endif
424 /* --- Unaligned access (GCC-specific) --- */
426 #if (GCC_VERSION_P(3, 3) || CLANG_VERSION_P(3, 0)) && CHAR_BIT == 8
427 # define MLIB_MISALIGNED __attribute__((aligned(1), may_alias))
428 # if __SIZEOF_SHORT__ == 2
429 typedef MLIB_MISALIGNED unsigned short misaligned_uint16;
430 # define RAW16(p) (*(misaligned_uint16 *)(p))
432 # if __SIZEOF_INT__ == 4
433 typedef MLIB_MISALIGNED unsigned int misaligned_uint32;
434 # define RAW32(p) (*(misaligned_uint32 *)(p))
435 # elif __SIZEOF_LONG__ == 4
436 typedef MLIB_MISALIGNED unsigned long misaligned_uint32;
437 # define RAW32(p) (*(misaligned_uint32 *)(p))
439 # if __SIZEOF_LONG__ == 8
440 typedef MLIB_MISALIGNED unsigned long misaligned_uint64;
441 # define RAW64(p) (*(misaligned_uint64 *)(p))
442 # elif __SIZEOF_LONG_LONG__ == 8
443 typedef MLIB_MISALIGNED unsigned long long misaligned_uint64;
444 # define RAW64(p) (*(misaligned_uint64 *)(p))
/* --- Storage and retrieval --- *
 *
 * Use the fast unaligned-access path where both the raw accessor and the
 * matching byte-order conversion exist; the byte-by-byte fallbacks follow.
 */

#if defined(RAW16) && defined(LTOH16)
#  define LOAD16_L(p) LTOH16(RAW16(p))
#endif
#if defined(RAW16) && defined(HTOL16)
#  define STORE16_L(p, x) (RAW16(p) = HTOL16(x))
#endif
#if defined(RAW16) && defined(BTOH16)
#  define LOAD16_B(p) BTOH16(RAW16(p))
#endif
#if defined(RAW16) && defined(HTOB16)
#  define STORE16_B(p, x) (RAW16(p) = HTOB16(x))
#endif

#if defined(RAW32) && defined(LTOH32)
#  define LOAD32_L(p) LTOH32(RAW32(p))
#endif
#if defined(RAW32) && defined(HTOL32)
#  define STORE32_L(p, x) (RAW32(p) = HTOL32(x))
#endif
#if defined(RAW32) && defined(BTOH32)
#  define LOAD32_B(p) BTOH32(RAW32(p))
#endif
#if defined(RAW32) && defined(HTOB32)
#  define STORE32_B(p, x) (RAW32(p) = HTOB32(x))
#endif

#if defined(RAW64) && defined(LTOH64)
#  define LOAD64_L(p) LTOH64(RAW64(p))
#endif
#if defined(RAW64) && defined(HTOL64)
#  define STORE64_L(p, x) (RAW64(p) = HTOL64(x))
#endif
#if defined(RAW64) && defined(BTOH64)
#  define LOAD64_B(p) BTOH64(RAW64(p))
#endif
#if defined(RAW64) && defined(HTOB64)
#  define STORE64_B(p, x) (RAW64(p) = HTOB64(x))
#endif

/* --- Byte access --- */

#define GETBYTE(p, o) (((octet *)(p))[o] & MASK8)
#define PUTBYTE(p, o, v) (((octet *)(p))[o] = U8((v)))

#define LOAD8(p) (GETBYTE((p), 0))
#define STORE8(p, v) (PUTBYTE((p), 0, (v)))
/* --- 16-bit load/store, byte by byte --- *
 *
 * Only defined if the fast unaligned path above didn't provide them.
 * Unsuffixed LOAD16/STORE16 use big-endian (network) order.
 */

#ifndef LOAD16_B
#  define LOAD16_B(p) \
     (((uint16)GETBYTE((p), 0) << 8) | \
      ((uint16)GETBYTE((p), 1) << 0))
#endif
#ifndef LOAD16_L
#  define LOAD16_L(p) \
     (((uint16)GETBYTE((p), 0) << 0) | \
      ((uint16)GETBYTE((p), 1) << 8))
#endif
#define LOAD16(p) LOAD16_B((p))

#ifndef STORE16_B
#  define STORE16_B(p, v) \
     (PUTBYTE((p), 0, (uint16)(v) >> 8), \
      PUTBYTE((p), 1, (uint16)(v) >> 0))
#endif
#ifndef STORE16_L
#  define STORE16_L(p, v) \
     (PUTBYTE((p), 0, (uint16)(v) >> 0), \
      PUTBYTE((p), 1, (uint16)(v) >> 8))
#endif
#define STORE16(p, v) STORE16_B((p), (v))
/* --- 24-bit load/store, built on the byte and 16-bit primitives --- */

#ifndef LOAD24_B
#  define LOAD24_B(p) \
     (((uint24)GETBYTE((p), 0) << 16) | \
      ((uint24)LOAD16_B((octet *)(p) + 1) << 0))
#endif
#ifndef LOAD24_L
#  define LOAD24_L(p) \
     (((uint24)LOAD16_L((octet *)(p) + 0) << 0) | \
      ((uint24)GETBYTE((p), 2) << 16))
#endif
#define LOAD24(p) LOAD24_B((p))

#ifndef STORE24_B
#  define STORE24_B(p, v) \
     (PUTBYTE((p), 0, (uint24)(v) >> 16), \
      STORE16_B((octet *)(p) + 1, (uint24)(v) >> 0))
#endif
#ifndef STORE24_L
#  define STORE24_L(p, v) \
     (STORE16_L((octet *)(p) + 0, (uint24)(v) >> 0), \
      PUTBYTE((p), 2, (uint24)(v) >> 16))
#endif
#define STORE24(p, v) STORE24_B((p), (v))
/* --- 32-bit load/store, built on the 16-bit primitives --- */

#ifndef LOAD32_B
#  define LOAD32_B(p) \
     (((uint32)LOAD16_B((octet *)(p) + 0) << 16) | \
      ((uint32)LOAD16_B((octet *)(p) + 2) << 0))
#endif
#ifndef LOAD32_L
#  define LOAD32_L(p) \
     (((uint32)LOAD16_L((octet *)(p) + 0) << 0) | \
      ((uint32)LOAD16_L((octet *)(p) + 2) << 16))
#endif
#define LOAD32(p) LOAD32_B((p))

#ifndef STORE32_B
#  define STORE32_B(p, v) \
     (STORE16_B((octet *)(p) + 0, (uint32)(v) >> 16), \
      STORE16_B((octet *)(p) + 2, (uint32)(v) >> 0))
#endif
#ifndef STORE32_L
#  define STORE32_L(p, v) \
     (STORE16_L((octet *)(p) + 0, (uint32)(v) >> 0), \
      STORE16_L((octet *)(p) + 2, (uint32)(v) >> 16))
#endif
#define STORE32(p, v) STORE32_B((p), (v))
/* --- 64-bit load/store --- *
 *
 * With a native 64-bit type, build on the 32-bit primitives; the trailing-
 * underscore forms take a @kludge64@.  Without one, only the @kludge64@
 * forms exist, loading/storing the two halves separately.
 */

#ifdef HAVE_UINT64
#  ifndef LOAD64_B
#    define LOAD64_B(p) \
       (((uint64)LOAD32_B((octet *)(p) + 0) << 32) | \
        ((uint64)LOAD32_B((octet *)(p) + 4) << 0))
#  endif
#  ifndef LOAD64_L
#    define LOAD64_L(p) \
       (((uint64)LOAD32_L((octet *)(p) + 0) << 0) | \
        ((uint64)LOAD32_L((octet *)(p) + 4) << 32))
#  endif
#  define LOAD64(p) LOAD64_B((p))
#  define LOAD64_B_(d, p) ((d).i = LOAD64_B((p)))
#  define LOAD64_L_(d, p) ((d).i = LOAD64_L((p)))
#  define LOAD64_(d, p) LOAD64_B_((d), (p))

#  ifndef STORE64_B
#    define STORE64_B(p, v) \
       (STORE32_B((octet *)(p) + 0, (uint64)(v) >> 32), \
        STORE32_B((octet *)(p) + 4, (uint64)(v) >> 0))
#  endif
#  ifndef STORE64_L
#    define STORE64_L(p, v) \
       (STORE32_L((octet *)(p) + 0, (uint64)(v) >> 0), \
        STORE32_L((octet *)(p) + 4, (uint64)(v) >> 32))
#  endif
#  define STORE64(p, v) STORE64_B((p), (v))
#  define STORE64_B_(p, v) STORE64_B((p), (v).i)
#  define STORE64_L_(p, v) STORE64_L((p), (v).i)
#  define STORE64_(p, v) STORE64_B_((p), (v))
#else
#  define LOAD64_B_(d, p) \
     ((d).hi = LOAD32_B((octet *)(p) + 0), \
      (d).lo = LOAD32_B((octet *)(p) + 4))
#  define LOAD64_L_(d, p) \
     ((d).lo = LOAD32_L((octet *)(p) + 0), \
      (d).hi = LOAD32_L((octet *)(p) + 4))
#  define LOAD64_(d, p) LOAD64_B_((d), (p))
#  define STORE64_B_(p, v) \
     (STORE32_B((octet *)(p) + 0, (v).hi), \
      STORE32_B((octet *)(p) + 4, (v).lo))
#  define STORE64_L_(p, v) \
     (STORE32_L((octet *)(p) + 0, (v).lo), \
      STORE32_L((octet *)(p) + 4, (v).hi))
#  define STORE64_(p, v) STORE64_B_((p), (v))
#endif
/* --- Other operations on 64-bit integers --- *
 *
 * @SET64@ builds a @kludge64@ from two 32-bit halves; @ASSIGN64@ sets one
 * from an ordinary integer; @GET64@ converts back.  Note that when @uint32@
 * is exactly 32 bits wide, @~MASK32@ is zero, so the high-half extraction
 * in the kludge @ASSIGN64@/@GET64@ only matters for wider types.
 */

#ifdef HAVE_UINT64
#  define SET64(d, h, l) ((d).i = (U64((h)) << 32) | U64((l)))
#  define ASSIGN64(d, x) ((d).i = U64((x)))
#  define HI64(x) U32((x).i >> 32)
#  define LO64(x) U32((x).i)
#  define GET64(t, x) ((t)(x).i)
#else
#  define SET64(d, h, l) ((d).hi = U32(h), (d).lo = U32(l))
#  define ASSIGN64(d, x) \
     ((d).hi = (((x) & ~MASK32) >> 16) >> 16, (d).lo = U32(x))	/* fixed: `x' now parenthesized */
#  define HI64(x) U32((x).hi)
#  define LO64(x) U32((x).lo)
#  define GET64(t, x) (((((t)HI64(x) << 16) << 16) & ~MASK32) | (t)LO64(x))
#endif
/* --- 64-bit arithmetic and logic, native or in 32-bit halves --- *
 *
 * The kludge ADD64/SUB64 compute the low half first and derive the carry or
 * borrow by comparing against the original low word.
 */

#ifdef HAVE_UINT64
#  define AND64(d, x, y) ((d).i = (x).i & (y).i)
#  define OR64(d, x, y) ((d).i = (x).i | (y).i)
#  define XOR64(d, x, y) ((d).i = (x).i ^ (y).i)
#  define CPL64(d, x) ((d).i = ~(x).i)
#  define ADD64(d, x, y) ((d).i = (x).i + (y).i)
#  define SUB64(d, x, y) ((d).i = (x).i - (y).i)
#  define CMP64(x, op, y) ((x).i op (y).i)
#  define ZERO64(x) ((x).i == 0)	/* fixed: was `(x) == 0' on a struct */
#else
#  define AND64(d, x, y) ((d).lo = (x).lo & (y).lo, (d).hi = (x).hi & (y).hi)
#  define OR64(d, x, y) ((d).lo = (x).lo | (y).lo, (d).hi = (x).hi | (y).hi)
#  define XOR64(d, x, y) ((d).lo = (x).lo ^ (y).lo, (d).hi = (x).hi ^ (y).hi)
#  define CPL64(d, x) ((d).lo = ~(x).lo, (d).hi = ~(x).hi)
#  define ADD64(d, x, y) do { \
     uint32 _x = U32((x).lo + (y).lo); \
     (d).hi = (x).hi + (y).hi + (_x < (x).lo); \
     (d).lo = _x; \
   } while (0)
#  define SUB64(d, x, y) do { \
     uint32 _x = U32((x).lo - (y).lo); \
     (d).hi = (x).hi - (y).hi - (_x > (x).lo); \
     (d).lo = _x; \
   } while (0)
#  define CMP64(x, op, y) \
     ((x).hi == (y).hi ? (x).lo op (y).lo : (x).hi op (y).hi)
#  define ZERO64(x) ((x).lo == 0 && (x).hi == 0)
#endif
/* --- Storing integers in tables --- *
 *
 * @X64(hi, lo)@ expands to a @kludge64@ initializer, pasting the two 8-digit
 * hex halves into one literal when a native 64-bit type is available.
 */

#ifdef HAVE_UINT64
#  define X64(x, y) { 0x##x##y }
#else
#  define X64(x, y) { 0x##x, 0x##y }
#endif

/*----- That's all, folks -------------------------------------------------*/