6b033bc4 |
1 | /* -*-c-*- |
2 | * |
a6f4a484 |
3 | * $Id: bits.h,v 1.6 2000/07/16 12:28:28 mdw Exp $ |
6b033bc4 |
4 | * |
5 | * Portable bit-level manipulation macros |
6 | * |
7 | * (c) 1998 Straylight/Edgeware |
8 | */ |
9 | |
10 | /*----- Licensing notice --------------------------------------------------* |
11 | * |
12 | * This file is part of the mLib utilities library. |
13 | * |
14 | * mLib is free software; you can redistribute it and/or modify |
15 | * it under the terms of the GNU Library General Public License as |
16 | * published by the Free Software Foundation; either version 2 of the |
17 | * License, or (at your option) any later version. |
18 | * |
19 | * mLib is distributed in the hope that it will be useful, |
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
22 | * GNU Library General Public License for more details. |
23 | * |
24 | * You should have received a copy of the GNU Library General Public |
25 | * License along with mLib; if not, write to the Free |
26 | * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, |
27 | * MA 02111-1307, USA. |
28 | */ |
29 | |
30 | /*----- Revision history --------------------------------------------------* |
31 | * |
32 | * $Log: bits.h,v $ |
a6f4a484 |
33 | * Revision 1.6 2000/07/16 12:28:28 mdw |
34 | * Add 64-bit support, with faked arithmetic on 32-bit hosts. |
35 | * |
6a0129ea |
36 | * Revision 1.5 2000/06/17 10:36:06 mdw |
37 | * Support for 24-bit types. |
38 | * |
c6e0eaf0 |
39 | * Revision 1.4 1999/12/10 23:42:04 mdw |
40 | * Change header file guard names. |
41 | * |
0d4e268a |
42 | * Revision 1.3 1999/06/20 23:31:52 mdw |
43 | * More portability enhancements. |
44 | * |
bc9f60b6 |
45 | * Revision 1.2 1999/06/17 00:12:46 mdw |
46 | * Improve portability for shift and rotate macros. |
47 | * |
6b033bc4 |
48 | * Revision 1.1 1999/06/01 09:46:19 mdw |
49 | * New addition: bit manipulation macros. |
50 | * |
51 | */ |
52 | |
c6e0eaf0 |
53 | #ifndef MLIB_BITS_H |
54 | #define MLIB_BITS_H |
6b033bc4 |
55 | |
56 | #ifdef __cplusplus |
57 | extern "C" { |
58 | #endif |
59 | |
60 | /*----- Header files ------------------------------------------------------*/ |
61 | |
62 | #include <limits.h> |
63 | #include <stddef.h> |
a6f4a484 |
64 | #if __STDC_VERSION__ >= 199900l |
65 | # include <stdint.h> |
66 | #endif |
6b033bc4 |
67 | |
68 | /*----- Decide on some types ----------------------------------------------*/ |
69 | |
/* --- Decide on a 32-bit type --- *
 *
 * I want a type which is capable of expressing 32-bit numbers.  Because some
 * implementations have 64-bit @long@s (infinitely preferable to the abortion
 * that is @long long@), using @unsigned long@ regardless is wasteful.  So,
 * if @int@ appears to be good enough, then I'll go with that.
 */

#if UINT_MAX >= 0xffffffffu
  typedef unsigned int uint32;          /* @int@ has at least 32 bits */
#else
  typedef unsigned long uint32;         /* @long@ always has at least 32 bits */
#endif
83 | |
a6f4a484 |
/* --- Decide on a 64-bit type --- *
 *
 * The test is quite subtle.  Think about it.  Note that (at least on my
 * machine), the 32-bit macros are *much* faster than GCC's @long long@
 * support.
 */

/* Some pre-standard environments spell the @unsigned long long@ limit
 * @ULONG_LONG_MAX@ rather than C99's @ULLONG_MAX@; paper over that here.
 */
#if defined(ULONG_LONG_MAX) && !defined(ULLONG_MAX)
#  define ULLONG_MAX ULONG_LONG_MAX
#endif

/* A type whose maximum is @2^w - 1@ satisfies @MAX >> 31 > 0xffffffff@
 * exactly when @w >= 64@.  Shifting first keeps every value within the
 * 32-bit range that even old preprocessors must support -- a literal like
 * @0xffffffffffffffff@ could not be evaluated there.
 */
#if UINT_MAX >> 31 > 0xffffffff
#  define HAVE_UINT64
   typedef unsigned int uint64;
#elif ULONG_MAX >> 31 > 0xffffffff
#  define HAVE_UINT64
   typedef unsigned long uint64;
#elif defined(ULLONG_MAX)
#  define HAVE_UINT64
   typedef unsigned long long uint64;
#endif

/* Define @DEBUG64@ to force the two-word fallback below, so the faked
 * 64-bit arithmetic can be exercised on hosts with a real 64-bit type.
 */
#ifdef DEBUG64
#  undef HAVE_UINT64
#endif

/* @kludge64@ is the portable 64-bit carrier: a genuine 64-bit integer when
 * one exists, or a pair of 32-bit halves otherwise.  Operate on it only
 * through the trailing-underscore macros below, which work either way.
 */
#ifdef HAVE_UINT64
  typedef struct { uint64 i; } kludge64;
#else
  typedef struct { uint32 hi, lo; } kludge64;
#endif
115 | |
6a0129ea |
/* --- Decide on a 24-bit type --- */

/* @int@ is only guaranteed 16 bits, so the test is genuinely needed;
 * @long@ always suffices as the fallback.
 */
#if UINT_MAX >= 0x00ffffffu
  typedef unsigned int uint24;
#else
  typedef unsigned long uint24;
#endif
123 | |
6b033bc4 |
/* --- Decide on 16-bit and 8-bit types --- *
 *
 * This is more for brevity than anything else.
 */

typedef unsigned short uint16;          /* at least 16 bits by the standard */
typedef unsigned char octet;            /* smallest addressable unit */

/* --- WARNING! --- *
 *
 * Never lose sight of the fact that the above types may be wider than the
 * names suggest.  Some architectures have 32-bit @short@s for example.
 */
137 | |
138 | /*----- Macros ------------------------------------------------------------*/ |
139 | |
/* --- Useful masks --- *
 *
 * Exactly n low bits set; used everywhere to trim values down to their
 * nominal width, since the types above may be wider (see warning).
 */

#define MASK8 0xffu
#define MASK16 0xffffu
#define MASK24 0xffffffu
#define MASK32 0xffffffffu

/* NOTE(review): the @ull@ suffix needs @long long@ support; if
 * @HAVE_UINT64@ ever comes from a 64-bit @unsigned long@ on a compiler
 * without @long long@, this literal may be rejected -- confirm on such
 * targets.
 */
#ifdef HAVE_UINT64
#  define MASK64 0xffffffffffffffffull
#endif
150 | |
6b033bc4 |
/* --- Type coercions --- *
 *
 * Truncate a value to exactly n bits and coerce it to the corresponding
 * type.  The explicit mask matters because the type may be wider than its
 * nominal size (see warning above).
 */

#define U8(x) ((octet)((x) & MASK8))
#define U16(x) ((uint16)((x) & MASK16))
#define U24(x) ((uint24)((x) & MASK24))
#define U32(x) ((uint32)((x) & MASK32))
157 | |
a6f4a484 |
/* --- 64-bit coercions --- *
 *
 * @U64@ truncates a native scalar to 64 bits.  @U64_(d, x)@ assigns the
 * @kludge64@ @x@ to the @kludge64@ @d@, trimming both halves, and works
 * whether or not a native 64-bit type exists.
 */
#ifdef HAVE_UINT64
#  define U64(x) ((uint64)(x) & MASK64)
   /* Fix: @x@ is a @kludge64@, so take its @.i@ member *before* passing it
    * to @U64@ -- the old @U64(x).i@ expanded to member access on the
    * non-struct expression @((uint64)(x) & MASK64)@, which cannot compile.
    */
#  define U64_(d, x) ((d).i = U64((x).i))
#else
#  define U64_(d, x) ((d).hi = U32((x).hi), (d).lo = U32((x).lo))
#endif
164 | |
6b033bc4 |
/* --- Safe shifting macros --- *
 *
 * Shifting by an amount greater than or equal to the width of the promoted
 * operand is undefined behaviour in C, so the count is reduced modulo the
 * nominal width first: a cheap bitwise AND for the power-of-two widths,
 * and a genuine remainder for 24.  The operand is also trimmed before and
 * after shifting in case the underlying type is wider than advertised.
 */

#define LSL8(v, s) U8(U8(v) << ((s) & 7u))
#define LSR8(v, s) U8(U8(v) >> ((s) & 7u))
#define LSL16(v, s) U16(U16(v) << ((s) & 15u))
#define LSR16(v, s) U16(U16(v) >> ((s) & 15u))
#define LSL24(v, s) U24(U24(v) << ((s) % 24u))
#define LSR24(v, s) U24(U24(v) >> ((s) % 24u))
#define LSL32(v, s) U32(U32(v) << ((s) & 31u))
#define LSR32(v, s) U32(U32(v) >> ((s) & 31u))
6b033bc4 |
175 | |
a6f4a484 |
#ifdef HAVE_UINT64
#  define LSL64(v, s) U64(U64(v) << ((s) & 63u))
#  define LSR64(v, s) U64(U64(v) >> ((s) & 63u))
#  define LSL64_(d, v, s) ((d).i = LSL64((v).i, (s)))
#  define LSR64_(d, v, s) ((d).i = LSR64((v).i, (s)))
#else
/* Two-word 64-bit shifts.  For counts of 32 or more, the surviving word
 * shifts straight across and the other half becomes zero.  A zero count is
 * special-cased because the general branch computes @32u - _s@, and a
 * 32-bit shift count would be reduced back to 0 by @LSL32@/@LSR32@ rather
 * than producing 0.  The halves of @v@ are read into locals first, so @d@
 * may safely alias @v@.
 */
#  define LSL64_(d, v, s) do {                                          \
     unsigned _s = (s) & 63u;                                           \
     uint32 _l = (v).lo, _h = (v).hi;                                   \
     kludge64 *_d = &(d);                                               \
     if (_s >= 32) {                                                    \
       _d->hi = LSL32(_l, _s - 32u);                                    \
       _d->lo = 0;                                                      \
     } else if (!_s) {                                                  \
       _d->lo = _l;                                                     \
       _d->hi = _h;                                                     \
     } else {                                                           \
       _d->hi = LSL32(_h, _s) | LSR32(_l, 32u - _s);                    \
       _d->lo = LSL32(_l, _s);                                          \
     }                                                                  \
   } while (0)
#  define LSR64_(d, v, s) do {                                          \
     unsigned _s = (s) & 63u;                                           \
     uint32 _l = (v).lo, _h = (v).hi;                                   \
     kludge64 *_d = &(d);                                               \
     if (_s >= 32) {                                                    \
       _d->lo = LSR32(_h, _s - 32u);                                    \
       _d->hi = 0;                                                      \
     } else if (!_s) {                                                  \
       _d->lo = _l;                                                     \
       _d->hi = _h;                                                     \
     } else {                                                           \
       _d->lo = LSR32(_l, _s) | LSL32(_h, 32u - _s);                    \
       _d->hi = LSR32(_h, _s);                                          \
     }                                                                  \
   } while (0)
#endif
213 | |
6b033bc4 |
/* --- Rotation macros --- *
 *
 * Built from the safe shifts.  Because those reduce their counts modulo
 * the width, a rotation by 0 (or by the full width) makes both operands
 * equal to @v@, so the OR still yields the right answer.  Note that @s@ is
 * evaluated twice: don't pass expressions with side effects.
 */

#define ROL8(v, s) (LSL8((v), (s)) | (LSR8((v), 8u - (s))))
#define ROR8(v, s) (LSR8((v), (s)) | (LSL8((v), 8u - (s))))
#define ROL16(v, s) (LSL16((v), (s)) | (LSR16((v), 16u - (s))))
#define ROR16(v, s) (LSR16((v), (s)) | (LSL16((v), 16u - (s))))
#define ROL24(v, s) (LSL24((v), (s)) | (LSR24((v), 24u - (s))))
#define ROR24(v, s) (LSR24((v), (s)) | (LSL24((v), 24u - (s))))
#define ROL32(v, s) (LSL32((v), (s)) | (LSR32((v), 32u - (s))))
#define ROR32(v, s) (LSR32((v), (s)) | (LSL32((v), 32u - (s))))
6b033bc4 |
224 | |
a6f4a484 |
#ifdef HAVE_UINT64
#  define ROL64(v, s) (LSL64((v), (s)) | (LSR64((v), 64u - (s))))
#  define ROR64(v, s) (LSR64((v), (s)) | (LSL64((v), 64u - (s))))
#  define ROL64_(d, v, s) ((d).i = ROL64((v).i, (s)))
#  define ROR64_(d, v, s) ((d).i = ROR64((v).i, (s)))
#else
/* Two-word 64-bit rotations.  Counts of 0 and 32 need their own branches:
 * in the general formulae, a count difference of 32 would be reduced to 0
 * by @LSL32@/@LSR32@, leaking the unshifted word into the result.  (The
 * old code folded @_s == 32@ into the @_s >= 32@ branch, where
 * @LSR32(_h, 64u - _s)@ became @LSR32(_h, 0)@ and the result was
 * @_l | _h@ instead of a clean half-swap.)  The halves of @v@ are read
 * into locals first, so @d@ may alias @v@.
 */
#  define ROL64_(d, v, s) do {                                          \
     unsigned _s = (s) & 63u;                                           \
     uint32 _l = (v).lo, _h = (v).hi;                                   \
     kludge64 *_d = &(d);                                               \
     if (_s > 32) {                                                     \
       _d->hi = LSL32(_l, _s - 32u) | LSR32(_h, 64u - _s);              \
       _d->lo = LSL32(_h, _s - 32u) | LSR32(_l, 64u - _s);              \
     } else if (_s == 32) {                                             \
       _d->lo = _h;                                                     \
       _d->hi = _l;                                                     \
     } else if (!_s) {                                                  \
       _d->lo = _l;                                                     \
       _d->hi = _h;                                                     \
     } else {                                                           \
       _d->hi = LSL32(_h, _s) | LSR32(_l, 32u - _s);                    \
       _d->lo = LSL32(_l, _s) | LSR32(_h, 32u - _s);                    \
     }                                                                  \
   } while (0)
#  define ROR64_(d, v, s) do {                                          \
     unsigned _s = (s) & 63u;                                           \
     uint32 _l = (v).lo, _h = (v).hi;                                   \
     kludge64 *_d = &(d);                                               \
     if (_s > 32) {                                                     \
       _d->hi = LSR32(_l, _s - 32u) | LSL32(_h, 64u - _s);              \
       _d->lo = LSR32(_h, _s - 32u) | LSL32(_l, 64u - _s);              \
     } else if (_s == 32) {                                             \
       _d->lo = _h;                                                     \
       _d->hi = _l;                                                     \
     } else if (!_s) {                                                  \
       _d->lo = _l;                                                     \
       _d->hi = _h;                                                     \
     } else {                                                           \
       _d->hi = LSR32(_h, _s) | LSL32(_l, 32u - _s);                    \
       _d->lo = LSR32(_l, _s) | LSL32(_h, 32u - _s);                    \
     }                                                                  \
   } while (0)
#endif
262 | |
6b033bc4 |
/* --- Storage and retrieval --- */

/* Fetch or store byte @o@ of the buffer at @p@.  @p@ may point to any
 * object type; the mask (and @U8@) tolerate @octet@ being wider than
 * 8 bits.
 */
#define GETBYTE(p, o) (((octet *)(p))[o] & MASK8)
#define PUTBYTE(p, o, v) (((octet *)(p))[o] = U8((v)))

#define LOAD8(p) (GETBYTE((p), 0))
#define STORE8(p, v) (PUTBYTE((p), 0, (v)))
270 | |
0d4e268a |
/* Load and store 16-bit quantities, big- (@_B@) or little- (@_L@) endian;
 * the unsuffixed forms use big-endian (network) order.  Assembling the
 * value byte by byte keeps these independent of host endianness and free
 * of alignment constraints.
 */
#define LOAD16_B(p) \
  (((uint16)GETBYTE((p), 0) << 8) | \
   ((uint16)GETBYTE((p), 1) << 0))
#define LOAD16_L(p) \
  (((uint16)GETBYTE((p), 0) << 0) | \
   ((uint16)GETBYTE((p), 1) << 8))
#define LOAD16(p) LOAD16_B((p))

#define STORE16_B(p, v) \
  (PUTBYTE((p), 0, (uint16)(v) >> 8), \
   PUTBYTE((p), 1, (uint16)(v) >> 0))
#define STORE16_L(p, v) \
  (PUTBYTE((p), 0, (uint16)(v) >> 0), \
   PUTBYTE((p), 1, (uint16)(v) >> 8))
#define STORE16(p, v) STORE16_B((p), (v))
286 | |
6a0129ea |
/* Load and store 24-bit quantities, big- (@_B@) or little- (@_L@) endian;
 * the unsuffixed forms use big-endian order.
 */
#define LOAD24_B(p) \
  (((uint24)GETBYTE((p), 0) << 16) | \
   ((uint24)GETBYTE((p), 1) << 8) | \
   ((uint24)GETBYTE((p), 2) << 0))
#define LOAD24_L(p) \
  (((uint24)GETBYTE((p), 0) << 0) | \
   ((uint24)GETBYTE((p), 1) << 8) | \
   ((uint24)GETBYTE((p), 2) << 16))
#define LOAD24(p) LOAD24_B((p))

#define STORE24_B(p, v) \
  (PUTBYTE((p), 0, (uint24)(v) >> 16), \
   PUTBYTE((p), 1, (uint24)(v) >> 8), \
   PUTBYTE((p), 2, (uint24)(v) >> 0))
#define STORE24_L(p, v) \
  (PUTBYTE((p), 0, (uint24)(v) >> 0), \
   PUTBYTE((p), 1, (uint24)(v) >> 8), \
   PUTBYTE((p), 2, (uint24)(v) >> 16))
#define STORE24(p, v) STORE24_B((p), (v))
306 | |
6b033bc4 |
/* Load and store 32-bit quantities, big- (@_B@) or little- (@_L@) endian;
 * the unsuffixed forms use big-endian (network) order.
 */
#define LOAD32_B(p) \
  (((uint32)GETBYTE((p), 0) << 24) | \
   ((uint32)GETBYTE((p), 1) << 16) | \
   ((uint32)GETBYTE((p), 2) << 8) | \
   ((uint32)GETBYTE((p), 3) << 0))
#define LOAD32_L(p) \
  (((uint32)GETBYTE((p), 0) << 0) | \
   ((uint32)GETBYTE((p), 1) << 8) | \
   ((uint32)GETBYTE((p), 2) << 16) | \
   ((uint32)GETBYTE((p), 3) << 24))
#define LOAD32(p) LOAD32_B((p))

#define STORE32_B(p, v) \
  (PUTBYTE((p), 0, (uint32)(v) >> 24), \
   PUTBYTE((p), 1, (uint32)(v) >> 16), \
   PUTBYTE((p), 2, (uint32)(v) >> 8), \
   PUTBYTE((p), 3, (uint32)(v) >> 0))
#define STORE32_L(p, v) \
  (PUTBYTE((p), 0, (uint32)(v) >> 0), \
   PUTBYTE((p), 1, (uint32)(v) >> 8), \
   PUTBYTE((p), 2, (uint32)(v) >> 16), \
   PUTBYTE((p), 3, (uint32)(v) >> 24))
#define STORE32(p, v) STORE32_B((p), (v))
330 | |
a6f4a484 |
#ifdef HAVE_UINT64

/* Load and store 64-bit quantities.  The plain forms take/return a native
 * @uint64@; the trailing-underscore forms traffic in @kludge64@ and exist
 * in both configurations.
 */
# define LOAD64_B(p) \
    (((uint64)GETBYTE((p), 0) << 56) | \
     ((uint64)GETBYTE((p), 1) << 48) | \
     ((uint64)GETBYTE((p), 2) << 40) | \
     ((uint64)GETBYTE((p), 3) << 32) | \
     ((uint64)GETBYTE((p), 4) << 24) | \
     ((uint64)GETBYTE((p), 5) << 16) | \
     ((uint64)GETBYTE((p), 6) << 8) | \
     ((uint64)GETBYTE((p), 7) << 0))
# define LOAD64_L(p) \
    (((uint64)GETBYTE((p), 0) << 0) | \
     ((uint64)GETBYTE((p), 1) << 8) | \
     ((uint64)GETBYTE((p), 2) << 16) | \
     ((uint64)GETBYTE((p), 3) << 24) | \
     ((uint64)GETBYTE((p), 4) << 32) | \
     ((uint64)GETBYTE((p), 5) << 40) | \
     ((uint64)GETBYTE((p), 6) << 48) | \
     ((uint64)GETBYTE((p), 7) << 56))
# define LOAD64(p) LOAD64_B((p))
# define LOAD64_B_(d, p) ((d).i = LOAD64_B((p)))
# define LOAD64_L_(d, p) ((d).i = LOAD64_L((p)))
# define LOAD64_(d, p) LOAD64_B_((d), (p))

# define STORE64_B(p, v) \
    (PUTBYTE((p), 0, (uint64)(v) >> 56), \
     PUTBYTE((p), 1, (uint64)(v) >> 48), \
     PUTBYTE((p), 2, (uint64)(v) >> 40), \
     PUTBYTE((p), 3, (uint64)(v) >> 32), \
     PUTBYTE((p), 4, (uint64)(v) >> 24), \
     PUTBYTE((p), 5, (uint64)(v) >> 16), \
     PUTBYTE((p), 6, (uint64)(v) >> 8), \
     PUTBYTE((p), 7, (uint64)(v) >> 0))
# define STORE64_L(p, v) \
    (PUTBYTE((p), 0, (uint64)(v) >> 0), \
     PUTBYTE((p), 1, (uint64)(v) >> 8), \
     PUTBYTE((p), 2, (uint64)(v) >> 16), \
     PUTBYTE((p), 3, (uint64)(v) >> 24), \
     PUTBYTE((p), 4, (uint64)(v) >> 32), \
     PUTBYTE((p), 5, (uint64)(v) >> 40), \
     PUTBYTE((p), 6, (uint64)(v) >> 48), \
     PUTBYTE((p), 7, (uint64)(v) >> 56))
# define STORE64(p, v) STORE64_B((p), (v))
# define STORE64_B_(p, v) STORE64_B((p), (v).i)
# define STORE64_L_(p, v) STORE64_L((p), (v).i)
# define STORE64_(p, v) STORE64_B_((p), (v))

#else

/* Fallback: move each 32-bit half separately.  The cast to @octet *@ makes
 * the @+ 4@ offset count in bytes regardless of @p@'s pointed-to type.
 * In big-endian order the high half comes first; in little-endian order
 * the low half does.
 */
# define LOAD64_B_(d, p) \
    ((d).hi = LOAD32_B((octet *)(p) + 0), \
     (d).lo = LOAD32_B((octet *)(p) + 4))
# define LOAD64_L_(d, p) \
    ((d).lo = LOAD32_L((octet *)(p) + 0), \
     (d).hi = LOAD32_L((octet *)(p) + 4))
# define LOAD64_(d, p) LOAD64_B_((d), (p))

# define STORE64_B_(p, v) \
    (STORE32_B((octet *)(p) + 0, (v).hi), \
     STORE32_B((octet *)(p) + 4, (v).lo))
# define STORE64_L_(p, v) \
    (STORE32_L((octet *)(p) + 0, (v).lo), \
     STORE32_L((octet *)(p) + 4, (v).hi))
# define STORE64_(p, v) STORE64_B_((p), (v))

#endif
398 | |
/* --- Other operations on 64-bit integers --- *
 *
 * @SET64@ builds a @kludge64@ from two 32-bit halves; @ASSIGN64@ builds
 * one from a single (possibly wider than 32-bit) scalar; @HI64@/@LO64@
 * extract the halves again.
 */

#ifdef HAVE_UINT64
#  define SET64(d, h, l) ((d).i = (U64((h)) << 32) | U64((l)))
#  define ASSIGN64(d, x) ((d).i = U64((x)))
#  define HI64(x) U32((x).i >> 32)
#  define LO64(x) U32((x).i)
#else
#  define SET64(d, h, l) ((d).hi = U32(h), (d).lo = U32(l))
   /* Fix: parenthesize @x@ before the @&@ -- the old @(x & ~MASK32)@ bound
    * wrongly for arguments like @a + b@ (@&@ binds tighter than @+@,
    * @^@, @|@), silently corrupting the high half.  The double
    * @>> 16 >> 16@ avoids undefined behaviour when the operand is only
    * 32 bits wide (a single 32-bit shift would be UB); @~MASK32@ is zero
    * in that case, so the high half correctly comes out as zero.
    */
#  define ASSIGN64(d, x) \
	((d).hi = ((((x) & ~MASK32) >> 16) >> 16), (d).lo = U32(x))
#  define HI64(x) U32((x).hi)
#  define LO64(x) U32((x).lo)
#endif
413 | |
/* Bitwise and arithmetic operations on @kludge64@.  All results are
 * written to @d@, which may alias an operand; @CMP64@ applies a relational
 * operator, comparing high halves first.
 */
#ifdef HAVE_UINT64
#  define AND64(d, x, y) ((d).i = (x).i & (y).i)
#  define OR64(d, x, y) ((d).i = (x).i | (y).i)
#  define XOR64(d, x, y) ((d).i = (x).i ^ (y).i)
#  define CPL64(d, x) ((d).i = ~(x).i)
#  define ADD64(d, x, y) ((d).i = (x).i + (y).i)
#  define SUB64(d, x, y) ((d).i = (x).i - (y).i)
#  define CMP64(x, op, y) ((x).i op (y).i)
   /* Fix: @x@ is a @kludge64@ structure; comparing it directly against 0
    * cannot compile.  Test the wrapped integer member, matching the
    * two-word version below.
    */
#  define ZERO64(x) ((x).i == 0)
#else
#  define AND64(d, x, y) ((d).lo = (x).lo & (y).lo, (d).hi = (x).hi & (y).hi)
#  define OR64(d, x, y) ((d).lo = (x).lo | (y).lo, (d).hi = (x).hi | (y).hi)
#  define XOR64(d, x, y) ((d).lo = (x).lo ^ (y).lo, (d).hi = (x).hi ^ (y).hi)
#  define CPL64(d, x) ((d).lo = ~(x).lo, (d).hi = ~(x).hi)
   /* Carry out of the low half is detected by unsigned wraparound:
    * the 32-bit sum is less than an addend exactly when it wrapped.
    */
#  define ADD64(d, x, y) do { \
     uint32 _x = U32((x).lo + (y).lo); \
     (d).hi = (x).hi + (y).hi + (_x < (x).lo); \
     (d).lo = _x; \
   } while (0)
   /* Likewise, a borrow occurred exactly when the 32-bit difference is
    * greater than the minuend's low half.
    */
#  define SUB64(d, x, y) do { \
     uint32 _x = U32((x).lo - (y).lo); \
     (d).hi = (x).hi - (y).hi - (_x > (x).lo); \
     (d).lo = _x; \
   } while (0)
#  define CMP64(x, op, y) \
     ((x).hi == (y).hi ? (x).lo op (y).lo : (x).hi op (y).hi)
#  define ZERO64(x) ((x).lo == 0 && (x).hi == 0)
#endif
442 | |
6b033bc4 |
443 | /*----- That's all, folks -------------------------------------------------*/ |
444 | |
445 | #ifdef __cplusplus |
446 | } |
447 | #endif |
448 | |
449 | #endif |