/* --- Type coercions --- */
-#define U8(x) ((octet)((x) & MASK8))
-#define U16(x) ((uint16)((x) & MASK16))
-#define U24(x) ((uint24)((x) & MASK24))
-#define U32(x) ((uint32)((x) & MASK32))
+#define U8(x) ((octet)((x)&MASK8))
+#define U16(x) ((uint16)((x)&MASK16))
+#define U24(x) ((uint24)((x)&MASK24))
+#define U32(x) ((uint32)((x)&MASK32))
#ifdef HAVE_UINT64
-# define U64(x) ((uint64)(x) & MASK64)
+# define U64(x) ((uint64)(x)&MASK64)
# define U64_(d, x) ((d).i = U64((x).i))
#else
# define U64_(d, x) ((d).hi = U32((x).hi), (d).lo = U32((x).lo))
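
(Aside, not part of the patch: the coercion macros truncate a value to exactly the named width, whatever the width of the underlying C type happens to be. A minimal sketch, assuming the patched header is included under the hypothetical name "bits.h":)

/* --- Example: coercion macros (illustration only) --- */
#include <assert.h>
#include "bits.h"              /* hypothetical include name for the patched header */

static void coercion_demo(void)
{
  unsigned long x = 0x12345678ul;
  assert(U8(x)  == 0x78);      /* only the low 8 bits survive */
  assert(U16(x) == 0x5678);    /* low 16 bits */
  assert(U24(x) == 0x345678);  /* low 24 bits */
  assert(U16(x + 0x10000ul) == 0x5678);  /* higher bits are simply discarded */
}
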
/* --- Safe shifting macros --- */
-#define LSL8(v, s) U8(U8(v) << ((s) & 7u))
-#define LSR8(v, s) U8(U8(v) >> ((s) & 7u))
-#define LSL16(v, s) U16(U16(v) << ((s) & 15u))
-#define LSR16(v, s) U16(U16(v) >> ((s) & 15u))
-#define LSL24(v, s) U24(U24(v) << ((s) % 24u))
-#define LSR24(v, s) U24(U24(v) >> ((s) % 24u))
-#define LSL32(v, s) U32(U32(v) << ((s) & 31u))
-#define LSR32(v, s) U32(U32(v) >> ((s) & 31u))
+#define LSL8(v, s) U8(U8(v) << ((s)&7u))
+#define LSR8(v, s) U8(U8(v) >> ((s)&7u))
+#define LSL16(v, s) U16(U16(v) << ((s)&15u))
+#define LSR16(v, s) U16(U16(v) >> ((s)&15u))
+#define LSL24(v, s) U24(U24(v) << ((s)%24u))
+#define LSR24(v, s) U24(U24(v) >> ((s)%24u))
+#define LSL32(v, s) U32(U32(v) << ((s)&31u))
+#define LSR32(v, s) U32(U32(v) >> ((s)&31u))
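
(Aside, not part of the patch: the shift macros reduce the shift count before applying it, so the underlying C shift is never by the full width or more, which would be undefined behaviour; 24 is not a power of two, hence the % 24u rather than a mask. A small hedged example, again assuming a hypothetical "bits.h" include:)

/* --- Example: safe shifting (illustration only) --- */
#include <assert.h>
#include "bits.h"

static void shift_demo(void)
{
  uint32 x = 0x80000001u;
  assert(LSL32(x, 1) == 0x00000002u);    /* top bit falls off the 32-bit width */
  assert(LSR32(x, 1) == 0x40000000u);
  assert(LSL32(x, 33) == LSL32(x, 1));   /* count reduced mod 32: no undefined shift */
  assert(LSL24(0x810203u, 25) == LSL24(0x810203u, 1));  /* 24-bit count reduced mod 24 */
}
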
#ifdef HAVE_UINT64
-# define LSL64(v, s) U64(U64(v) << ((s) & 63u))
-# define LSR64(v, s) U64(U64(v) >> ((s) & 63u))
+# define LSL64(v, s) U64(U64(v) << ((s)&63u))
+# define LSR64(v, s) U64(U64(v) >> ((s)&63u))
# define LSL64_(d, v, s) ((d).i = LSL64((v).i, (s)))
# define LSR64_(d, v, s) ((d).i = LSR64((v).i, (s)))
#else
# define LSL64_(d, v, s) do { \
- unsigned _s = (s) & 63u; \
+ unsigned _s = (s)&63u; \
uint32 _l = (v).lo, _h = (v).hi; \
kludge64 *_d = &(d); \
if (_s >= 32) { \
} \
} while (0)
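
(Aside, not part of the patch: the hunks here show only the top of the two-word shift macros; the bodies are elided. For orientation, this is the usual technique for shifting a 64-bit quantity held as two 32-bit halves, written as a standalone sketch of my own rather than the macro's elided body:)

/* --- Example: two-word left shift (illustration only) --- */
struct u64parts { unsigned long hi, lo; };  /* each half holds 32 significant bits */

static struct u64parts shl64(struct u64parts v, unsigned s)
{
  struct u64parts d;
  s &= 63u;
  if (s >= 32) {                /* everything lands in the high half */
    d.hi = (v.lo << (s - 32)) & 0xfffffffful;
    d.lo = 0;
  } else if (s == 0) {          /* avoid the undefined 32-bit shift below */
    d = v;
  } else {
    d.hi = ((v.hi << s) | (v.lo >> (32 - s))) & 0xfffffffful;
    d.lo = (v.lo << s) & 0xfffffffful;
  }
  return d;
}
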
# define LSR64_(d, v, s) do { \
- unsigned _s = (s) & 63u; \
+ unsigned _s = (s)&63u; \
uint32 _l = (v).lo, _h = (v).hi; \
kludge64 *_d = &(d); \
if (_s >= 32) { \
# define ROR64_(d, v, s) ((d).i = ROR64((v).i, (s)))
#else
# define ROL64_(d, v, s) do { \
- unsigned _s = (s) & 63u; \
+ unsigned _s = (s)&63u; \
uint32 _l = (v).lo, _h = (v).hi; \
kludge64 *_d = &(d); \
if (_s > 32) { \
} \
} while (0)
# define ROR64_(d, v, s) do { \
- unsigned _s = (s) & 63u; \
+ unsigned _s = (s)&63u; \
uint32 _l = (v).lo, _h = (v).hi; \
kludge64 *_d = &(d); \
if (_s > 32) { \
# define LTOH64(x) ENDSWAP64(x)
# define HTOB64(x) (x)
# define BTOH64(x) (x)
-# define HTOL64_(z, x) ENDSWAP64_(z, x)
-# define LTOH64_(z, x) ENDSWAP64_(z, x)
-# define HTOB64_(z, x) ((z).i = (x).i)
-# define BTOH64_(z, x) ((z).i = (x).i)
# endif
# define HTOL64_(z, x) ENDSWAP64_(z, x)
# define LTOH64_(z, x) ENDSWAP64_(z, x)
# define STORE64_B(p, x) (RAW64(p) = HTOB64(x))
#endif
-#define GETBYTE(p, o) (((octet *)(p))[o] & MASK8)
+#define GETBYTE(p, o) (((octet *)(p))[o]&MASK8)
#define PUTBYTE(p, o, v) (((octet *)(p))[o] = U8((v)))
#define LOAD8(p) (GETBYTE((p), 0))
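
(Aside, not part of the patch: GETBYTE and PUTBYTE are the building blocks for loading and storing multi-byte values in a fixed byte order. A hedged sketch; load16_be and store16_be are my own names, not necessarily the library's macros:)

/* --- Example: explicit big-endian 16-bit access (illustration only) --- */
#include "bits.h"              /* hypothetical include name */

static uint16 load16_be(const void *p)
  { return U16((GETBYTE(p, 0) << 8) | GETBYTE(p, 1)); }

static void store16_be(void *p, uint16 x)
  { PUTBYTE(p, 0, x >> 8); PUTBYTE(p, 1, x); }
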
/* --- Other operations on 64-bit integers --- */
#ifdef HAVE_UINT64
-# define SET64(d, h, l) ((d).i = (U64((h)) << 32) | U64((l)))
+# define SET64(d, h, l) ((d).i = ((uint64)(U32(h)) << 32) | U32(l))
# define ASSIGN64(d, x) ((d).i = U64((x)))
# define HI64(x) U32((x).i >> 32)
# define LO64(x) U32((x).i)
# define GET64(t, x) ((t)(x).i)
+# define SETBYTE64(z, x, j) ((z).i |= (uint64)U8(x) << 8*(j))
#else
# define SET64(d, h, l) ((d).hi = U32(h), (d).lo = U32(l))
# define ASSIGN64(d, x) \
- ((d).hi = ((x & ~MASK32) >> 16) >> 16, (d).lo = U32(x))
+ ((d).hi = (((x)&~MASK32) >> 16) >> 16, (d).lo = U32(x))
# define HI64(x) U32((x).hi)
# define LO64(x) U32((x).lo)
-# define GET64(t, x) (((((t)HI64(x) << 16) << 16) & ~MASK32) | (t)LO64(x))
+# define GET64(t, x) (((((t)HI64(x) << 16) << 16)&~MASK32) | (t)LO64(x))
+# define SETBYTE64(z, x, j) \
+ ((j) < 4 ? ((z).lo |= (uint32)U8(x) << 8*(j)) \
+          : ((z).hi |= (uint32)U8(x) << 8*((j) - 4)))
#endif
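
(Aside, not part of the patch: the new SETBYTE64 ORs byte x into byte position j of z, so z must be cleared first, e.g. with SET64(z, 0, 0). A hedged sketch of assembling a little-endian 64-bit value through the macro interface, so it works with either kludge64 representation; load64_le is my own name:)

/* --- Example: building a 64-bit value byte by byte (illustration only) --- */
#include "bits.h"              /* hypothetical include name */

static kludge64 load64_le(const octet *p)
{
  kludge64 x;
  int i;
  SET64(x, 0, 0);              /* SETBYTE64 accumulates with |=, so start from zero */
  for (i = 0; i < 8; i++)
    SETBYTE64(x, p[i], i);     /* byte i contributes bits 8*i .. 8*i + 7 */
  return x;
}
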
#ifdef HAVE_UINT64
-# define AND64(d, x, y) ((d).i = (x).i & (y).i)
+# define AND64(d, x, y) ((d).i = (x).i&(y).i)
# define OR64(d, x, y) ((d).i = (x).i | (y).i)
# define XOR64(d, x, y) ((d).i = (x).i ^ (y).i)
# define CPL64(d, x) ((d).i = ~(x).i)
# define ADD64(d, x, y) ((d).i = (x).i + (y).i)
# define SUB64(d, x, y) ((d).i = (x).i - (y).i)
# define CMP64(x, op, y) ((x).i op (y).i)
-# define ZERO64(x) ((x) == 0)
+# define ZERO64(x) ((x).i == 0)
#else
-# define AND64(d, x, y) ((d).lo = (x).lo & (y).lo, (d).hi = (x).hi & (y).hi)
+# define AND64(d, x, y) ((d).lo = (x).lo&(y).lo, (d).hi = (x).hi&(y).hi)
# define OR64(d, x, y) ((d).lo = (x).lo | (y).lo, (d).hi = (x).hi | (y).hi)
# define XOR64(d, x, y) ((d).lo = (x).lo ^ (y).lo, (d).hi = (x).hi ^ (y).hi)
# define CPL64(d, x) ((d).lo = ~(x).lo, (d).hi = ~(x).hi)
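
(Aside, not part of the patch: code written against these macros is indifferent to whether kludge64 wraps a native 64-bit integer or a pair of 32-bit halves. A hedged usage sketch, assuming the hypothetical "bits.h" include:)

/* --- Example: portable 64-bit operations (illustration only) --- */
#include <stdio.h>
#include "bits.h"

int main(void)
{
  kludge64 a, b, t;

  SET64(a, 0xdeadbeefu, 0x01234567u);   /* a = 0xdeadbeef01234567 */
  SET64(b, 0x00000000u, 0xffffffffu);   /* b = 0x00000000ffffffff */
  AND64(t, a, b);                       /* t = a & b */
  printf("%08lx %08lx\n",
         (unsigned long)HI64(t), (unsigned long)LO64(t));  /* 00000000 01234567 */
  XOR64(t, t, t);                       /* clear t via the macro interface */
  return 0;
}
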