5 * (c) 2023 Straylight/Edgeware
8 /*----- Licensing notice --------------------------------------------------*
10 * This file is part of the mLib utilities library.
12 * mLib is free software: you can redistribute it and/or modify it under
13 * the terms of the GNU Library General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or (at
15 * your option) any later version.
17 * mLib is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
20 * License for more details.
22 * You should have received a copy of the GNU Library General Public
23 * License along with mLib. If not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
28 /*----- Header files ------------------------------------------------------*/
45 /*----- Data structures ---------------------------------------------------*/
/* NOTE(review): fragment -- the opening `struct timer {' and the split
 * between `struct timer' and `struct timer_ops' are not visible in this
 * chunk; the first three members belong to the timer object, the two
 * function pointers to its operations table.  Confirm against the full
 * file.
 */
struct bench_timer _t;			/* base object; must come first */
const struct timer_ops *clkops, *cyops; /* time and cycle measurements */
union { int fd; } u_cy;			/* state for cycle measurement */
void (*now)(struct bench_time *t_out, struct timer *t); /* read current */
void (*teardown)(struct timer *t);	/* release held resources */
58 /*----- Preliminaries -----------------------------------------------------*/
60 #define NS_PER_S 1000000000
64 * Arguments: @const char *fmt@ = format control string
65  *		@...@ = format arguments
69 * Use: Maybe report a debugging message to standard error.
/* Maybe report a debugging message to standard error.  Output is produced
 * only when the `MLIB_BENCH_DEBUG' environment variable is set and doesn't
 * begin with `n' or `0'.
 *
 * NOTE(review): the `va_list' declaration, `va_start'/`va_end' calls, and
 * the function's closing lines are not visible in this chunk.
 */
static PRINTF_LIKE(1, 2) void debug(const char *fmt, ...)
  p = getenv("MLIB_BENCH_DEBUG");
  if (p && *p != 'n' && *p != '0') {
    fputs("mLib BENCH: ", stderr);
    vfprintf(stderr, fmt, ap);
87 /* --- @timer_diff@ --- *
89  * Arguments:	@struct bench_timing *delta_out@ = where to put the result
90 * @const struct bench_time *t0, *t1@ = two times captured by a
91 * timer's @now@ function
95 * Use: Calculates the difference between two captured times. The
96 * flags are set according to whether the differences are
97 * meaningful; @delta_out->n@ is left unset.
100 static void timer_diff(struct bench_timing *delta_out,
101 const struct bench_time *t0,
102 const struct bench_time *t1)
104 unsigned f = t0->f&t1->f;
108 # define FLOATK64(k) ((double)(k).i)
110 # define FLOATK64(k) ((double)(k).lo + 4275123318.0*(double)(k).hi)
116 SUB64(k, t1->s, t0->s);
117 delta_out->t = FLOATK64(k) - 1 +
118 (t1->ns + NS_PER_S - t0->ns)/(double)NS_PER_S;
124 SUB64(k, t1->cy, t0->cy);
125 delta_out->cy = FLOATK64(k);
133 /*----- The null clock ----------------------------------------------------*/
135 /* This is a cycle counter which does nothing, in case we don't have any
139 static void null_now(struct bench_time *t_out, struct timer *t) { ; }
140 static void null_teardown(struct timer *t) { ; }
141 static const struct timer_ops null_ops = { null_now, null_teardown };
143 static int null_cyinit(struct timer *t)
144 { t->cyops = &null_ops; return (0); }
146 #define NULL_CYENT { "null", null_cyinit },
148 /*----- Linux performance counters ----------------------------------------*/
150 /* This is a cycle counter which uses the Linux performance event system,
151 * which is probably the best choice if it's available.
154 #if defined(HAVE_LINUX_PERF_EVENT_H) && defined(HAVE_UINT64)
156 #include <sys/types.h>
159 #include <linux/perf_event.h>
160 #include <asm/unistd.h>
/* Read the perf-event cycle counter into @t_out->cy@ and set @BTF_CYOK@;
 * on a short or failed read, report a debug message and leave the flag
 * clear.
 *
 * NOTE(review): the declaration of @n@ and the early return after the
 * failed-read branch are not visible in this chunk.
 */
static void perfevent_now(struct bench_time *t_out, struct timer *t)
  n = read(t->u_cy.fd, &t_out->cy.i, sizeof(t_out->cy.i));
  if (n != sizeof(t_out->cy.i)) {
    debug("failed to read perf-event counter: %s", strerror(errno));
  t_out->f |= BTF_CYOK;
174 static void perfevent_teardown(struct timer *t)
175 { close(t->u_cy.fd); }
177 static const struct timer_ops perfevent_ops =
178 { perfevent_now, perfevent_teardown };
180 static int perfevent_init(struct timer *t)
182 struct perf_event_attr attr = { 0 };
183 struct bench_time tm;
185 attr.type = PERF_TYPE_HARDWARE;
186 attr.size = sizeof(attr);
187 attr.config = PERF_COUNT_HW_CPU_CYCLES;
189 attr.exclude_kernel = 1;
192 t->u_cy.fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
193 if (t->u_cy.fd < 0) {
194 debug("couldn't open perf evvent: %s", strerror(errno));
198 tm.f = 0; perfevent_now(&tm, t);
199 if (!(tm.f&BTF_CYOK)) { close(t->u_cy.fd); return (-1); }
201 t->cyops = &perfevent_ops; return (0);
203 # define PERFEVENT_CYENT { "linux-perf-event", perfevent_init },
205 # define PERFEVENT_CYENT
208 /*----- Intel time-stamp counter ------------------------------------------*/
210 /* This is a cycle counter based on the Intel `rdtsc' instruction. It's not
211 * really suitable for performance measurement because it gets confused by
212 * CPU frequency adjustments.
215 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
217 #define EFLAGS_ID (1u << 21)
218 #define CPUID_1D_TSC (1u << 4)
/* NOTE(review): only a fragment of @set_flags@ is visible here -- the asm
 * body that saves, masks, and restores the processor flags register is
 * mostly elided.  Presumably it returns the resulting flags so callers can
 * probe togglable bits; confirm against the full file.
 */
static uint32 set_flags(unsigned long m, unsigned long x)
	 "mov %0, " TMP "\n\t"

/* The four result registers of a `cpuid' leaf. */
struct cpuid { uint32 a, b, c, d; };

/* Invoke `cpuid' with leaf @a@ (and subleaf @c@), storing the result
 * registers in @*info_out@.
 *
 * NOTE(review): the `cpuid' instruction itself and some surrounding asm
 * lines are not visible in this chunk.
 */
static void cpuid(struct cpuid *info_out, uint32 a, uint32 c)
  __asm__ ("movl %1, %%eax\n\t"
	   "movl %%eax, 0(%0)\n\t"
	   "movl %%ebx, 4(%0)\n\t"
	   "movl %%ecx, 8(%0)\n\t"
	   "movl %%edx, 12(%0)\n\t"
	   : "r"(info_out), "g"(a), "g"(c)
	   : "eax", "ebx", "ecx", "edx", "cc");
/* Read the processor time-stamp counter with `rdtsc' into @t_out->cy@ and
 * set @BTF_CYOK@; always succeeds once initialization has verified the
 * instruction exists.
 *
 * NOTE(review): the declarations of @hi@ and @lo@ are not visible in this
 * chunk.
 */
static void x86rdtsc_now(struct bench_time *t_out, struct timer *t)
  __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
  SET64(t_out->cy, hi, lo); t_out->f |= BTF_CYOK;

static const struct timer_ops x86rdtsc_ops =
  { x86rdtsc_now, null_teardown };
274 static int x86rdtsc_init(struct timer *t)
278 if ((set_flags(~EFLAGS_ID, 0)&EFLAGS_ID) ||
279 !(set_flags(~EFLAGS_ID, EFLAGS_ID)&EFLAGS_ID))
280 { debug("no `cpuid' instruction"); return (-1); }
282 if (info.a < 1) { debug("no `cpuid' leaf 1"); return (-1); }
284 if (!(info.d&CPUID_1D_TSC))
285 { debug("no `rdtsc' instrunction"); return (-1); }
286 t->cyops = &x86rdtsc_ops; return (0);
289 # define X86RDTSC_CYENT { "x86-rdtsc", x86rdtsc_init },
291 # define X86RDTWC_CYENT
294 /*----- POSIX `clock_gettime' ---------------------------------------------*/
296 /* This is a real-time clock based on the POSIX time interface, with up to
297 * nanosecond precision.
300 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_THREAD_CPUTIME_ID)
/* Read `CLOCK_THREAD_CPUTIME_ID' into @t_out@ (64-bit seconds plus
 * nanoseconds) and set @BTF_TIMEOK@; on failure, report a debug message
 * and leave the flag clear.
 *
 * NOTE(review): the declaration of @now@ (presumably a `struct timespec')
 * is not visible in this chunk.
 */
static void gettime_now(struct bench_time *t_out, struct timer *t)
  if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &now))
    { debug("error reading POSIX clock: %s", strerror(errno)); return; }
  ASSIGN64(t_out->s, now.tv_sec); t_out->ns = now.tv_nsec;
  t_out->f |= BTF_TIMEOK;
312 static const struct timer_ops gettime_ops = { gettime_now, null_teardown };
314 static int gettime_init(struct timer *t)
316 struct bench_time tm;
318 tm.f = 0; gettime_now(&tm, t); if (!tm.f&BTF_TIMEOK) return (-1);
319 t->clkops = &gettime_ops; return (0);
322 # define GETTIME_CLKENT { "posix-clock_gettime", gettime_init },
324 # define GETTIME_CLKENT
327 /*----- Standard C `clock' ------------------------------------------------*/
329 /* This is a real-time clock based on the C `clock' function which is
330 * guaranteed to be available, though it's not likely to be very good.
/* Read the standard C `clock' and convert it to seconds and nanoseconds
 * in @t_out@, setting @BTF_TIMEOK@ on success.  The nanoseconds
 * conversion picks whichever strategy avoids overflow: exact integer
 * scaling when @NS_PER_S@ is a multiple of @CLOCKS_PER_SEC@, integer
 * multiply-then-divide when that can't overflow, and floating point
 * otherwise.
 *
 * NOTE(review): the declarations of @now@ and @x@, the early return in
 * the error branch, and the `else' before the floating-point fallback are
 * not visible in this chunk.
 */
static void clock_now(struct bench_time *t_out, struct timer *t)
  unsigned long s; uint32 ns;

  if (now == (clock_t)-1) {
    debug("error reading standard clock: %s", strerror(errno));
  /* Split the raw reading into whole seconds @s@ and the remainder @x@. */
  x = now/CLOCKS_PER_SEC;
  if (x > ULONG_MAX) { debug("standard clock out of range"); return; }
  s = x; x = now - CLOCKS_PER_SEC*s;
  if (!(NS_PER_S%CLOCKS_PER_SEC))
    ns = x*(NS_PER_S/CLOCKS_PER_SEC);
  else if (NS_PER_S <= ULONG_MAX/CLOCKS_PER_SEC)
    ns = (x*NS_PER_S)/CLOCKS_PER_SEC;
    ns = x*((NS_PER_S + 0.0)/CLOCKS_PER_SEC);
  ASSIGN64(t_out->s, s); t_out->ns = ns; t_out->f |= BTF_TIMEOK;
356 static const struct timer_ops clock_ops = { clock_now, null_teardown };
358 static int clock_init(struct timer *t)
360 struct bench_time tm;
362 tm.f = 0; clock_now(&tm, t); if (!tm.f&BTF_TIMEOK) return (-1);
363 t->clkops = &clock_ops; return (0);
366 #define CLOCK_CLKENT { "clock", clock_init },
368 /*----- Timing setup ------------------------------------------------------*/
/* Tables of timing sources, listed in preference order and terminated by
 * an all-null entry.
 *
 * NOTE(review): the `const char *name;' member that @find_timer_n@ and
 * @try_timer@ read is not visible in this chunk -- confirm against the
 * full file.
 */
static const struct timerent {
  int (*init)(struct timer */*t*/);
clktab[] = { GETTIME_CLKENT CLOCK_CLKENT { 0, 0 } },
cytab[] = { PERFEVENT_CYENT X86RDTSC_CYENT NULL_CYENT { 0, 0 } };
378 /* --- @find_timer@ --- *
380 * Arguments: @const char *name@ = timer name
381 * @size_t sz@ = length of name
382 * @const struct timerent *timers@ = table to search
383 * @const char *what@ = adjective describing table
385 * Returns: The table entry matching the given name, or null if there
/* Linear search of @timers@ for an entry whose name matches the
 * (not-necessarily-terminated) @sz@-byte string at @name@; on failure,
 * report a debug message (using the adjective @what@) and return null.
 *
 * NOTE(review): the final parameter (presumably `const char *what'), the
 * successful-return statement, and the loop increment are not visible in
 * this chunk.
 */
static const struct timerent *find_timer_n(const char *name, size_t sz,
					   const struct timerent *timers,
  while (timers->name) {
    if (strlen(timers->name) == sz && MEMCMP(name, ==, timers->name, sz))
  debug("%s timer `%.*s' not found", what, (int)sz, name); return (0);
401 /* --- @try_timer@ --- *
403 * Arguments: @struct timer *t@ = timer structure
404 * @const struct timerent *timer@ = timer table entry
405 * @const char *what@ = adjective describing table
407 * Returns: Zero on success, @-1@ if timer failed.
409 * Use: Tries to initialize the timer @t@, reporting a debug message
413 static int try_timer(struct timer *t,
414 const struct timerent *timer, const char *what)
416 if (timer->init(t)) return (-1);
417 debug("selected %s timer `%s'", what, timer->name); return (0);
420 /* --- @select_timer@ --- *
422 * Arguments: @struct timer *t@ = timer structure
423 * @const struct timerent *timer@ = timer table
424 * @const char *varname@ = environment variable to consult
425 * @const char *what@ = adjective describing table
427 * Returns: Zero on success, @-1@ if timer failed.
429 * Use: Select a timer from the table. If the environment variable
430 * is set, then parse a comma-separated list of timer names and
431 * use the first one listed that seems to work; otherwise, try
432 * the timers in the table in order.
static int select_timer(struct timer *t, const struct timerent *timers,
			const char *varname, const char *what)
  const char *p; size_t n;
  const struct timerent *timer;

  /* NOTE(review): this chunk omits the @getenv(varname)@ lookup, the loop
   * constructs, and the branch between the two strategies below.
   */

  /* Unconfigured case: try each table entry in turn. */
  if (!try_timer(t, timers++, what)) return (0);

  /* Configured case: look up each comma-separated name from the
   * environment variable and try the matching entry.
   */
  timer = find_timer_n(p, n, timers, what);
  if (timer && !try_timer(t, timer, what)) return (0);

  debug("no suitable %s timer found", what); return (-1);
457 /* Bench timer operations. */
/* Capture the current time into @t_out@: ask the clock source, then the
 * cycle source; each ORs its validity flag into @t_out->f@.
 *
 * NOTE(review): braces and any clearing of @t_out->f@ before the reads
 * are not visible in this chunk.
 */
static void timer_now(struct bench_timer *tm, struct bench_time *t_out)
  struct timer *t = (struct timer *)tm;

  t->clkops->now(t_out, t);
  t->cyops->now(t_out, t);

/* Release the timer's resources.  The null checks let this run safely on
 * a partially-initialized timer from @bench_createtimer@'s failure path.
 *
 * NOTE(review): the final release of @t@ itself is not visible in this
 * chunk.
 */
static void timer_destroy(struct bench_timer *tm)
  struct timer *t = (struct timer *)tm;

  if (t->clkops) t->clkops->teardown(t);
  if (t->cyops) t->cyops->teardown(t);

static const struct bench_timerops timer_ops = { timer_now, timer_destroy };
477 /* --- @bench_createtimer@ --- *
481 * Returns: A freshly constructed standard timer object.
483 * Use: Allocate a timer. Dispose of it by calling
484 * @tm->ops->destroy(tm)@ when you're done.
struct bench_timer *bench_createtimer(void)
  struct bench_timer *ret = 0;

  /* Allocate the timer and select a clock source and a cycle source; on
   * any failure, tear down the partial timer and return the still-null
   * @ret@.  Setting @t = 0@ on success disarms the cleanup below.
   *
   * NOTE(review): the declaration of @t@, the `end' label, and the final
   * `return (ret);' are not visible in this chunk.
   */
  t = xmalloc(sizeof(*t)); t->cyops = 0; t->clkops = 0;
  if (select_timer(t, clktab, "MLIB_BENCH_CLKTIMER", "clock")) goto end;
  if (select_timer(t, cytab, "MLIB_BENCH_CYCLETIMER", "cycle")) goto end;
  t->_t.ops = &timer_ops; ret = &t->_t; t = 0;
  if (t) timer_destroy(&t->_t);
501 /*----- Benchmarking ------------------------------------------------------*/
503 /* --- @bench_init@ --- *
505 * Arguments: @struct bench_state *b@ = bench state to initialize
506 * @struct bench_timer *tm@ = timer to attach
510 * Use: Initialize the benchmark state. It still needs to be
511 * calibrated (use @bench_calibrate@) before it can be used, but
512 * this will be done automatically by @bench_measure@ if it's
513 * not done by hand earlier. The timer is now owned by the
514 * benchmark state and will be destroyed by @bench_destroy@.
517 void bench_init(struct bench_state *b, struct bench_timer *tm)
518 { b->tm = tm; b->target_s = 1.0; b->f = 0; }
520 /* --- @bench_destroy@ --- *
522 * Arguments: @struct bench_state *b@ = bench state
526 * Use: Destroy the benchmark state, releasing the resources that it
530 void bench_destroy(struct bench_state *b)
531 { b->tm->ops->destroy(b->tm); }
533 /* --- @do_nothing@ --- *
535 * Arguments: @unsigned long n@ = iteration count
536 * @void *ctx@ = context pointer (ignored)
540 * Use: Does nothing at all for @n@ iterations. Used to calibrate
541 * the benchmarking state.
544 static void do_nothing(unsigned long n, void *ctx)
545 { while (n--) RELAX; }
547 /* --- @bench_calibrate@ --- *
549 * Arguments: @struct bench_state *b@ = bench state
551 * Returns: Zero on success, @-1@ if calibration failed.
553 * Use: Calibrate the benchmark state, so that it can be used to
554 * measure performance reasonably accurately.
557 int bench_calibrate(struct bench_state *b)
559 struct linreg lr_clk = LINREG_INIT, lr_cy = LINREG_INIT;
562 struct bench_timer *tm = b->tm;
563 struct bench_time t0, t1;
564 struct bench_timing delta;
565 bench_fn *fn = LAUNDER(&do_nothing);
566 unsigned f = BTF_ANY;
569 /* The model here is that a timing loop has a fixed overhead as we enter
570 * and leave (e.g., to do with the indirect branch into the code), and
571 * per-iteration overheads as we check the counter and loop back. We aim
572 * to split these apart using linear regression.
575 /* If we've already calibrated then there's nothing to do. */
576 if (b->f&BTF_ANY) return (0);
578 /* Exercise the inner loop a few times to educate the branch predictor. */
579 for (i = 0; i < 10; i++)
580 { tm->ops->now(tm, &t0); fn(50, 0); tm->ops->now(tm, &t1); }
582 /* Now we measure idle loops until they take sufficiently long -- or we run
585 debug("calibrating...");
589 /* Measure @n@ iterations of the idle loop. */
590 tm->ops->now(tm, &t0); fn(n, 0); tm->ops->now(tm, &t1);
591 timer_diff(&delta, &t0, &t1); f &= delta.f;
592 if (!(f&BTF_TIMEOK)) { rc = -1; goto end; }
594 /* Register the timings with the regression machinery. */
595 linreg_update(&lr_clk, n, delta.t);
597 debug(" n = %10lu; t = %12g s", n, delta.t);
599 linreg_update(&lr_cy, n, delta.cy);
600 debug(" n = %10lu; t = %12g s, cy = %10.0f", n, delta.t, delta.cy);
603 /* If we're done then stop. */
604 if (delta.t >= b->target_s/20.0) break;
605 if (n >= ULONG_MAX - n/3) break;
607 /* Update the counter and continue. */
611 /* Now run the linear regression to extract the constant and per-iteration
614 linreg_fit(&lr_clk, &b->clk.m, &b->clk.c, 0);
615 debug("clock overhead = (%g n + %g) s", b->clk.m, b->clk.c);
617 linreg_fit(&lr_clk, &b->clk.m, &b->clk.c, 0);
618 debug("cycle overhead = (%g n + %g) cy", b->cy.m, b->cy.c);
627 /* --- @bench_measure@ --- *
629 * Arguments: @struct bench_timing *t_out@ = where to leave the timing
630 * @struct bench_state *b@ = benchmark state
631 * @double base@ = number of internal units per call
632 * @bench_fn *fn@, @void *ctx@ = benchmark function to run
634 * Returns: Zero on success, @-1@ if timing failed.
636 * Use: Measure a function. The function @fn@ is called adaptively
637 * with an iteration count @n@ set so as to run for
638 * approximately @b->target_s@ seconds.
640 * The result is left in @*t_out@, with @t_out->n@ counting the
641 * final product of the iteration count and @base@ (which might,
642 * e.g., reflect the number of inner iterations the function
643 * performs, or the number of bytes it processes per iteration).
int bench_measure(struct bench_timing *t_out, struct bench_state *b,
		  double base, bench_fn *fn, void *ctx)
  struct bench_timer *tm = b->tm;
  struct bench_time t0, t1;

  /* Make sure the state is calibrated. */
  if (bench_calibrate(b)) return (-1);

  /* Main adaptive measurement loop: time @n@ iterations, and scale @n@
   * up (overshooting the target by a factor of 1.44) until a run takes at
   * least 72% of the target time.
   *
   * NOTE(review): the loop construct enclosing the next six lines, and
   * the declaration of @n@, are not visible in this chunk.
   */
  debug("measuring..."); n = 1;
    tm->ops->now(tm, &t0); fn(n, ctx); tm->ops->now(tm, &t1);
    timer_diff(t_out, &t0, &t1);
    if (!(t_out->f&BTF_TIMEOK)) return (-1);
    if (!(t_out->f&BTF_CYOK)) debug(" n = %10lu; t = %12g", n, t_out->t);
    else debug(" n = %10lu; t = %12g, cy = %10.0f", n, t_out->t, t_out->cy);
    if (t_out->t >= 0.72*b->target_s) break;
    n *= 1.44*b->target_s/t_out->t;

  /* Adjust according to the calibration. */
  t_out->t -= n*b->clk.m + b->clk.c;
  if (t_out->f&BTF_CYOK) t_out->cy -= n*b->cy.m + b->cy.c;

  /* Report the results, if debugging. */
  if (!(t_out->f&BTF_CYOK)) debug(" adjusted t' = %12g", t_out->t);
  else debug(" adjusted t = %12g, cy = %10.0f", t_out->t, t_out->cy);
  if (!(t_out->f&BTF_CYOK))
    debug(" %g s per op; %g ops/s", t_out->t/n, n/t_out->t);
    debug(" %g s (%g cy) per op; %g ops/s",
	  t_out->t/n, t_out->cy/n, n/t_out->t);

  /* @t_out->n@ reports iterations scaled by the caller's @base@. */
  t_out->n = n*base; return (0);
685 /*----- That's all, folks -------------------------------------------------*/