#include <limits.h>
#include <stddef.h>
-#if __STDC_VERSION__ >= 199900l
+#if __STDC_VERSION__ >= 199901L
# include <stdint.h>
#endif
/* --- List macros --- */
#ifdef HAVE_UINT64
-# define DOUINTCONV(_) \
- _(8, 8, 8) \
- _(16, 16, 16) _(16, 16_L, 16l) _(16, 16_B, 16b) \
- _(24, 24, 24) _(24, 24_L, 24l) _(24, 24_B, 24b) \
- _(32, 32, 32) _(32, 32_L, 32l) _(32, 32_B, 32b) \
+# define DOUINTCONV_64(_) \
_(64, 64, 64) _(64, 64_L, 64l) _(64, 64_B, 64b)
-# define DOUINTSZ(_) _(8) _(16) _(24) _(32) _(64)
+# define DOUINTSZ_64(_) _(64)
#else
-# define DOUINTCONV(_) \
+# define DOUINTCONV_64(_)
+# define DOUINTSZ_64(_)
+#endif
+
+#define DOUINTCONV(_) \
_(8, 8, 8) \
_(16, 16, 16) _(16, 16_L, 16l) _(16, 16_B, 16b) \
_(24, 24, 24) _(24, 24_L, 24l) _(24, 24_B, 24b) \
- _(32, 32, 32) _(32, 32_L, 32l) _(32, 32_B, 32b)
-# define DOUINTSZ(_) _(8) _(16) _(24) _(32)
-#endif
+ _(32, 32, 32) _(32, 32_L, 32l) _(32, 32_B, 32b) \
+ DOUINTCONV_64(_)
+#define DOUINTSZ(_) _(8) _(16) _(24) _(32) DOUINTSZ_64(_)
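+
+/* Example (editorial sketch, not from the original header): each list entry
+ * expands `_(n, W, w)', giving the bit width, the uppercase name suffix and
+ * the lowercase name suffix, so one macro argument stamps out a whole
+ * family of definitions.  Here `dump...' is a hypothetical function family:
+ *
+ *   #define DECL(n, W, w) extern void dump##w(const octet *);
+ *   DOUINTCONV(DECL)
+ *   #undef DECL
+ *
+ * declares dump8, dump16, dump16l, dump16b, dump24, and so on (the 64-bit
+ * names only when HAVE_UINT64 is set).
+ */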
/* --- Type coercions --- */
-#define U8(x) ((octet)((x) & MASK8))
-#define U16(x) ((uint16)((x) & MASK16))
-#define U24(x) ((uint24)((x) & MASK24))
-#define U32(x) ((uint32)((x) & MASK32))
+#define U8(x) ((octet)((x)&MASK8))
+#define U16(x) ((uint16)((x)&MASK16))
+#define U24(x) ((uint24)((x)&MASK24))
+#define U32(x) ((uint32)((x)&MASK32))
#ifdef HAVE_UINT64
-# define U64(x) ((uint64)(x) & MASK64)
+# define U64(x) ((uint64)(x)&MASK64)
# define U64_(d, x) ((d).i = U64((x).i))
#else
# define U64_(d, x) ((d).hi = U32((x).hi), (d).lo = U32((x).lo))
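+
+/* Without HAVE_UINT64, a 64-bit quantity travels as a `kludge64' structure
+ * holding 32-bit `hi' and `lo' halves (see the shift macros below); the
+ * trailing-underscore variants such as U64_ operate on that representation
+ * so that callers work in either configuration.
+ */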
/* --- Safe shifting macros --- */
-#define LSL8(v, s) U8(U8(v) << ((s) & 7u))
-#define LSR8(v, s) U8(U8(v) >> ((s) & 7u))
-#define LSL16(v, s) U16(U16(v) << ((s) & 15u))
-#define LSR16(v, s) U16(U16(v) >> ((s) & 15u))
-#define LSL24(v, s) U24(U24(v) << ((s) % 24u))
-#define LSR24(v, s) U24(U24(v) >> ((s) % 24u))
-#define LSL32(v, s) U32(U32(v) << ((s) & 31u))
-#define LSR32(v, s) U32(U32(v) >> ((s) & 31u))
+#define LSL8(v, s) U8(U8(v) << ((s)&7u))
+#define LSR8(v, s) U8(U8(v) >> ((s)&7u))
+#define LSL16(v, s) U16(U16(v) << ((s)&15u))
+#define LSR16(v, s) U16(U16(v) >> ((s)&15u))
+#define LSL24(v, s) U24(U24(v) << ((s)%24u))
+#define LSR24(v, s) U24(U24(v) >> ((s)%24u))
+#define LSL32(v, s) U32(U32(v) << ((s)&31u))
+#define LSR32(v, s) U32(U32(v) >> ((s)&31u))
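+
+/* Shifting by at least the operand's width is undefined behaviour in C, so
+ * these macros reduce the shift count first: LSL32(x, 32) behaves as
+ * LSL32(x, 0) rather than invoking the undefined case.  The 24-bit forms
+ * use `% 24u' because 24 is not a power of two, so a simple mask will not
+ * do.
+ */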
#ifdef HAVE_UINT64
-# define LSL64(v, s) U64(U64(v) << ((s) & 63u))
-# define LSR64(v, s) U64(U64(v) >> ((s) & 63u))
+# define LSL64(v, s) U64(U64(v) << ((s)&63u))
+# define LSR64(v, s) U64(U64(v) >> ((s)&63u))
# define LSL64_(d, v, s) ((d).i = LSL64((v).i, (s)))
# define LSR64_(d, v, s) ((d).i = LSR64((v).i, (s)))
#else
# define LSL64_(d, v, s) do { \
- unsigned _s = (s) & 63u; \
+ unsigned _s = (s)&63u; \
uint32 _l = (v).lo, _h = (v).hi; \
kludge64 *_d = &(d); \
if (_s >= 32) { \
} \
} while (0)
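+
+/* (The `_s >= 32' branch exists because each half is only 32 bits wide: a
+ * shift of 32 or more must move the low word into the high word, or the
+ * other way round for LSR64_, and then shift by the remainder, since
+ * shifting a uint32 by 32 or more would itself be undefined.)
+ */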
# define LSR64_(d, v, s) do { \
- unsigned _s = (s) & 63u; \
+ unsigned _s = (s)&63u; \
uint32 _l = (v).lo, _h = (v).hi; \
kludge64 *_d = &(d); \
if (_s >= 32) { \
# define ROR64_(d, v, s) ((d).i = ROR64((v).i, (s)))
#else
# define ROL64_(d, v, s) do { \
- unsigned _s = (s) & 63u; \
+ unsigned _s = (s)&63u; \
uint32 _l = (v).lo, _h = (v).hi; \
kludge64 *_d = &(d); \
if (_s > 32) { \
} \
} while (0)
# define ROR64_(d, v, s) do { \
- unsigned _s = (s) & 63u; \
+ unsigned _s = (s)&63u; \
uint32 _l = (v).lo, _h = (v).hi; \
kludge64 *_d = &(d); \
if (_s > 32) { \
/* --- Endianness swapping --- */
-#if GCC_VERSION_P(4, 8)
+#if GCC_VERSION_P(4, 8) || CLANG_VERSION_P(3, 2)
# define ENDSWAP16(x) ((uint16)__builtin_bswap16(x))
#endif
-#if GCC_VERSION_P(4, 3)
+#if GCC_VERSION_P(4, 3) || CLANG_VERSION_P(3, 2)
# define ENDSWAP32(x) ((uint32)__builtin_bswap32(x))
#endif
-#if GCC_VERSION_P(4, 3) && defined(HAVE_UINT64)
+#if (GCC_VERSION_P(4, 3) || CLANG_VERSION_P(3, 2)) && defined(HAVE_UINT64)
# define ENDSWAP64(x) ((uint64)__builtin_bswap64(x))
#endif
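+
+/* For example, ENDSWAP32(0x12345678) is 0x78563412.  Portable fallbacks
+ * for compilers lacking these builtins are presumably defined elsewhere in
+ * the header; only the builtin fast paths appear in this hunk.
+ */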
# define LTOH64(x) ENDSWAP64(x)
# define HTOB64(x) (x)
# define BTOH64(x) (x)
-# define HTOL64_(z, x) ENDSWAP64_(z, x)
-# define LTOH64_(z, x) ENDSWAP64_(z, x)
-# define HTOB64_(z, x) ((z).i = (x).i)
-# define BTOH64_(z, x) ((z).i = (x).i)
# endif
# define HTOL64_(z, x) ENDSWAP64_(z, x)
# define LTOH64_(z, x) ENDSWAP64_(z, x)
/* --- Unaligned access (GCC-specific) --- */
-#if GCC_VERSION_P(3, 3) && CHAR_BIT == 8
+#if (GCC_VERSION_P(3, 3) || CLANG_VERSION_P(3, 0)) && CHAR_BIT == 8
# define MLIB_MISALIGNED __attribute__((aligned(1), may_alias))
# if __SIZEOF_SHORT__ == 2
typedef MLIB_MISALIGNED unsigned short misaligned_uint16;
# define STORE64_B(p, x) (RAW64(p) = HTOB64(x))
#endif
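+
+/* The `aligned(1), may_alias' attributes license loads and stores through
+ * a cast pointer without breaking the compiler's alignment or
+ * strict-aliasing assumptions.  A sketch of the intended use, assuming a
+ * RAW16 accessor in the style of the RAW64 used by STORE64_B above:
+ *
+ *   #define RAW16(p) (*(misaligned_uint16 *)(p))
+ *
+ * so RAW16(buf + 1) reads a 16-bit value from an odd address and compiles
+ * to whatever unaligned access the target supports.
+ */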
-#define GETBYTE(p, o) (((octet *)(p))[o] & MASK8)
+#define GETBYTE(p, o) (((octet *)(p))[o]&MASK8)
#define PUTBYTE(p, o, v) (((octet *)(p))[o] = U8((v)))
#define LOAD8(p) (GETBYTE((p), 0))
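+
+/* Wider portable loads compose from GETBYTE; a sketch of a big-endian
+ * 16-bit load in the same style (hypothetical here, since the real
+ * definitions fall outside this hunk):
+ *
+ *   #define LOAD16_B(p) \
+ *     (U16(((uint16)GETBYTE((p), 0) << 8) | GETBYTE((p), 1)))
+ */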
/* --- Other operations on 64-bit integers --- */
#ifdef HAVE_UINT64
-# define SET64(d, h, l) ((d).i = (U64((h)) << 32) | U64((l)))
+# define SET64(d, h, l) ((d).i = ((uint64)(U32(h)) << 32) | U32(l))
# define ASSIGN64(d, x) ((d).i = U64((x)))
# define HI64(x) U32((x).i >> 32)
# define LO64(x) U32((x).i)
# define GET64(t, x) ((t)(x).i)
+# define SETBYTE64(z, x, j) ((z).i |= (uint64)U8(x) << 8*(j))
#else
# define SET64(d, h, l) ((d).hi = U32(h), (d).lo = U32(l))
# define ASSIGN64(d, x) \
- ((d).hi = ((x & ~MASK32) >> 16) >> 16, (d).lo = U32(x))
+ ((d).hi = (((x)&~MASK32) >> 16) >> 16, (d).lo = U32(x))
# define HI64(x) U32((x).hi)
# define LO64(x) U32((x).lo)
-# define GET64(t, x) (((((t)HI64(x) << 16) << 16) & ~MASK32) | (t)LO64(x))
+# define GET64(t, x) (((((t)HI64(x) << 16) << 16)&~MASK32) | (t)LO64(x))
+# define SETBYTE64(z, x, j) \
+ ((j) < 4 ? ((z).lo |= (uint32)U8(x) << 8*(j)) \
+ : ((z).hi |= (uint32)U8(x) << 8*((j) - 4)))
#endif
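+
+/* Illustrative use of SETBYTE64, assembling a 64-bit value from a byte
+ * buffer least-significant byte first.  The destination must be zeroed
+ * beforehand, because SETBYTE64 ors each byte into place:
+ *
+ *   kludge64 k; unsigned j;
+ *   SET64(k, 0, 0);
+ *   for (j = 0; j < 8; j++) SETBYTE64(k, p[j], j);
+ */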
#ifdef HAVE_UINT64
-# define AND64(d, x, y) ((d).i = (x).i & (y).i)
+# define AND64(d, x, y) ((d).i = (x).i&(y).i)
# define OR64(d, x, y) ((d).i = (x).i | (y).i)
# define XOR64(d, x, y) ((d).i = (x).i ^ (y).i)
# define CPL64(d, x) ((d).i = ~(x).i)
# define ADD64(d, x, y) ((d).i = (x).i + (y).i)
# define SUB64(d, x, y) ((d).i = (x).i - (y).i)
# define CMP64(x, op, y) ((x).i op (y).i)
-# define ZERO64(x) ((x) == 0)
+# define ZERO64(x) ((x).i == 0)
#else
-# define AND64(d, x, y) ((d).lo = (x).lo & (y).lo, (d).hi = (x).hi & (y).hi)
+# define AND64(d, x, y) ((d).lo = (x).lo&(y).lo, (d).hi = (x).hi&(y).hi)
# define OR64(d, x, y) ((d).lo = (x).lo | (y).lo, (d).hi = (x).hi | (y).hi)
# define XOR64(d, x, y) ((d).lo = (x).lo ^ (y).lo, (d).hi = (x).hi ^ (y).hi)
# define CPL64(d, x) ((d).lo = ~(x).lo, (d).hi = ~(x).hi)