modernize (benchmark-suite lib)
* benchmark-suite/benchmark-suite/lib.scm: Rewrite to be more modern,
using parameters, records, and higher precision timers.  Since this
file was never installed, this is an acceptable interface change.
(run-benchmark): Run the thunk once before going into the benchmark.
Adapt to new `report' interface.
(report): Change to expect only one argument, a <benchmark-result>
object.
(print-result): Adapt.  The result is in the same format as before.
(print-user-result): Adapt.  The result is different from before, but
as this is just printed on stdout and not logged, there should be no
problem.
(calibrate-benchmark-framework): Pull initialization into a function.
parent b064d56514
commit 7e822b32d2
1 changed file with 196 additions and 232 deletions
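The interface change described in the commit message boils down to how a reporter receives its data: reporters used to get the raw measurements as five positional arguments, and now receive one <benchmark-result> record read through accessors.  The following sketch is illustrative only (the reporter names and bodies are invented, not part of the commit):

;; Old-style reporter: five positional arguments (illustrative body).
(define (old-style-reporter name iterations before after gc-time)
  (display (list name iterations))
  (newline))

;; New-style reporter: a single <benchmark-result>, read via accessors.
(define (new-style-reporter result)
  (format #t "~A: ~A iterations, ~A seconds of run time~%"
          (benchmark-result:name result)
          (benchmark-result:iterations result)
          (/ (benchmark-result:run-time result)
             1.0 internal-time-units-per-second)))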
@@ -1,5 +1,5 @@
;;;; benchmark-suite/lib.scm --- generic support for benchmarking
;;;; Copyright (C) 2002, 2006, 2011 Free Software Foundation, Inc.
;;;; Copyright (C) 2002, 2006, 2011, 2012 Free Software Foundation, Inc.
;;;;
;;;; This program is free software; you can redistribute it and/or
;;;; modify it under the terms of the GNU Lesser General Public
@@ -17,31 +17,33 @@
;;;; Street, Fifth Floor, Boston, MA 02110-1301 USA

(define-module (benchmark-suite lib)
:export (
#:use-module (srfi srfi-9)
#:export (;; Controlling the execution.
iteration-factor
scale-iterations

;; Controlling the execution.
iteration-factor
scale-iterations
;; Running benchmarks.
run-benchmark
benchmark

;; Running benchmarks.
run-benchmark
benchmark
;; Naming groups of benchmarks in a regular fashion.
with-benchmark-prefix with-benchmark-prefix*
current-benchmark-prefix format-benchmark-name

;; Naming groups of benchmarks in a regular fashion.
with-benchmark-prefix with-benchmark-prefix* current-benchmark-prefix
format-benchmark-name
;; <benchmark-result> accessors
benchmark-result:name
benchmark-result:iterations
benchmark-result:real-time
benchmark-result:run-time
benchmark-result:gc-time
benchmark-result:core-time

;; Computing timing results
benchmark-time-base
benchmark-total-time benchmark-user-time benchmark-system-time
benchmark-frame-time benchmark-core-time
benchmark-user-time\interpreter benchmark-core-time\interpreter

;; Reporting results in various ways.
register-reporter unregister-reporter reporter-registered?
make-log-reporter
full-reporter
user-reporter))
;; Reporting results in various ways.
report current-reporter
register-reporter unregister-reporter reporter-registered?
make-log-reporter
full-reporter
user-reporter))


;;;; If you're using Emacs's Scheme mode:
@@ -214,81 +216,71 @@

;;;; TIME CALCULATION
;;;;
;;;; The library uses the guile functions (times) and (gc-run-time) to
;;;; determine the execution time for a single benchmark.  Based on these
;;;; functions, the values of BEFORE, AFTER and GC-TIME are computed, which
;;;; are then passed to the reporter functions.  All three values BEFORE,
;;;; AFTER and GC-TIME include the time needed to execute the benchmark code
;;;; itself, but also the surrounding code that implements the loop to run the
;;;; benchmark code for the given number of times.  This is undesirable, since
;;;; one would prefer to only get the timing data for the benchmarking code.
;;;; The library uses the guile functions `get-internal-run-time',
;;;; `get-internal-real-time', and `gc-run-time' to determine the
;;;; execution time for a single benchmark.  Based on these functions,
;;;; Guile makes a <benchmark-result>, a record containing the elapsed
;;;; run time, real time, gc time, and possibly other metrics.  These
;;;; times include the time needed to execute the benchmark code
;;;; itself, but also the surrounding code that implements the loop to
;;;; run the benchmark code for the given number of times.  This is
;;;; undesirable, since one would prefer to only get the timing data for
;;;; the benchmarking code.
;;;;
;;;; To cope with this, the benchmarking framework uses a trick: During
;;;; initialization of the library, the time for executing an empty benchmark
;;;; is measured and stored.  This is an estimate for the time needed by the
;;;; benchmarking framework itself.  For later benchmarks, this time can then
;;;; be subtracted from the measured execution times.
;;;; initialization of the library, the time for executing an empty
;;;; benchmark is measured and stored.  This is an estimate for the time
;;;; needed by the benchmarking framework itself.  For later benchmarks,
;;;; this time can then be subtracted from the measured execution times.
;;;; Note that for very short benchmarks, this may result in a negative
;;;; number.
;;;;
;;;; In order to simplify the time calculation for users who want to write
;;;; their own reporters, the benchmarking framework provides the following
;;;; definitions:
;;;; The benchmarking framework provides the following accessors for
;;;; <benchmark-result> values.  Note that all time values are in
;;;; internal time units; divide by internal-time-units-per-second to
;;;; get seconds.
;;;;
;;;; benchmark-time-base : This variable holds the number of time units that
;;;; make up a second.  By dividing the results of each of the functions
;;;; below by this value, you get the corresponding time in seconds.  For
;;;; example (/ (benchmark-total-time before after) benchmark-time-base)
;;;; will give you the total time in seconds.
;;;; benchmark-total-time : this function takes two arguments BEFORE and AFTER
;;;; and computes the total time between the two timestamps.  The result
;;;; of this function is what the time command of the unix command line
;;;; would report as real time.
;;;; benchmark-user-time : this function takes two arguments BEFORE and AFTER
;;;; and computes the time spent in the benchmarking process between the
;;;; two timestamps.  That means, the time consumed by other processes
;;;; running on the same machine is not part of the resulting time,
;;;; neither is time spent within the operating system.  The result of
;;;; this function is what the time command of the unix command line would
;;;; report as user time.
;;;; benchmark-system-time : similar to benchmark-user-time, but here the time
;;;; spent within the operating system is given.  The result of this
;;;; function is what the time command of the unix command line would
;;;; report as system time.
;;;; benchmark-frame-time : this function takes the argument ITERATIONS.  It
;;;; reports the part of the user time that is consumed by the
;;;; benchmarking framework itself to run some benchmark for the given
;;;; number of iterations.  You can think of this as the time that would
;;;; still be consumed, even if the benchmarking code itself was empty.
;;;; This value does not include any time for garbage collection, even if
;;;; it is the benchmarking framework which is responsible for causing a
;;;; garbage collection.
;;;; benchmark-core-time : this function takes three arguments ITERATIONS,
;;;; BEFORE and AFTER.  It reports the part of the user time that is
;;;; actually spent within the benchmarking code.  That is, the time
;;;; needed for the benchmarking framework is subtracted from the user
;;;; time.  This value, however, includes all garbage collection times,
;;;; even if some part of the gc-time had actually to be attributed to the
;;;; benchmarking framework.
;;;; benchmark-user-time\interpreter : this function takes three arguments
;;;; BEFORE, AFTER and GC-TIME.  It reports the part of the user time that
;;;; is spent in the interpreter (and not in garbage collection).
;;;; benchmark-core-time\interpreter : this function takes four arguments
;;;; ITERATIONS, BEFORE, AFTER, and GC-TIME.  It reports the part of the
;;;; benchmark-core-time that is spent in the interpreter (and not in
;;;; garbage collection).  This value is most probably the one you are
;;;; interested in, except if you are doing some garbage collection
;;;; checks.
;;;;
;;;; There is no function to calculate the garbage-collection time, since the
;;;; garbage collection time is already passed as an argument GC-TIME to the
;;;; reporter functions.
;;;; benchmark-result:name : Return the name of the benchmark.
;;;;
;;;; benchmark-result:iterations : Return the number of iterations that
;;;; this benchmark ran for.
;;;;
;;;; benchmark-result:real-time : Return the clock time elapsed while
;;;; this benchmark executed.
;;;;
;;;; benchmark-result:run-time : Return the CPU time elapsed while this
;;;; benchmark executed, both in user and kernel space.
;;;;
;;;; benchmark-result:gc-time : Return the approximate amount of time
;;;; spent in garbage collection while this benchmark executed, both
;;;; in user and kernel space.
;;;;
;;;; benchmark-result:core-time : Like benchmark-result:run-time, but
;;;; also estimates the time spent by the framework for the number
;;;; of iterations, and subtracts off that time from the result.
;;;;

;;;; This module is used when benchmarking different Guiles, and so it
;;;; should run on all the Guiles of interest.  Currently this set
;;;; includes Guile 1.8, so be careful with introducing features that
;;;; only Guile 2.0 supports.


;;;; MISCELLANEOUS
;;;;

(define-record-type <benchmark-result>
(make-benchmark-result name iterations real-time run-time gc-time)
benchmark-result?
(name benchmark-result:name)
(iterations benchmark-result:iterations)
(real-time benchmark-result:real-time)
(run-time benchmark-result:run-time)
(gc-time benchmark-result:gc-time))

;;; Perform a division and convert the result to inexact.
(define (i/ a b)
(exact->inexact (/ a b)))
(define (->seconds time)
(/ time 1.0 internal-time-units-per-second))
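To make the new record interface concrete, here is a small sketch (not part of the commit; the name, iteration count, and all numbers are invented) showing how a <benchmark-result> would be built and read back, with raw internal time units converted to seconds via ->seconds:

;; Hypothetical result: 1000 iterations, 0.6 s real time, 0.5 s run
;; time and 0.01 s of GC time, all expressed in internal time units.
(define example-result
  (make-benchmark-result '("example" "loop") 1000
                         (* 6/10 internal-time-units-per-second)
                         (* 1/2 internal-time-units-per-second)
                         (* 1/100 internal-time-units-per-second)))

(benchmark-result:iterations example-result)            ; => 1000
(->seconds (benchmark-result:run-time example-result))  ; => 0.5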

;;; Scale the number of iterations according to the given scaling factor.
(define iteration-factor 1)
@@ -296,36 +288,49 @@
(let* ((i (inexact->exact (round (* iterations iteration-factor)))))
(if (< i 1) 1 i)))

;;; Parameters.
(cond-expand
(srfi-39 #t)
(else (use-modules (srfi srfi-39))))

;;;; CORE FUNCTIONS
;;;;

;;; The central routine for executing benchmarks.
;;; The idea is taken from Greg, the GNUstep regression test environment.
(define run-benchmark #f)
(let ((benchmark-running #f))
(define (local-run-benchmark name iterations thunk)
(if benchmark-running
(error "Nested calls to run-benchmark are not permitted.")
(let ((benchmark-name (full-name name))
(iterations (scale-iterations iterations)))
(set! benchmark-running #t)
(let ((before #f) (after #f) (gc-time #f))
(gc)
(set! gc-time (gc-run-time))
(set! before (times))
(do ((i 0 (+ i 1)))
((= i iterations))
(thunk))
(set! after (times))
(set! gc-time (- (gc-run-time) gc-time))
(report benchmark-name iterations before after gc-time))
(set! benchmark-running #f))))
(set! run-benchmark local-run-benchmark))
(define benchmark-running? (make-parameter #f))
(define (run-benchmark name iterations thunk)
(if (benchmark-running?)
(error "Nested calls to run-benchmark are not permitted."))
(if (not (and (integer? iterations) (exact? iterations)))
(error "Expected exact integral number of iterations"))
(parameterize ((benchmark-running? #t))
;; Warm up the benchmark first.  This will resolve any toplevel-ref
;; forms.
(thunk)
(gc)
(let* ((before-gc-time (gc-run-time))
(before-real-time (get-internal-real-time))
(before-run-time (get-internal-run-time)))
(do ((i iterations (1- i)))
((zero? i))
(thunk))
(let ((after-run-time (get-internal-run-time))
(after-real-time (get-internal-real-time))
(after-gc-time (gc-run-time)))
(report (make-benchmark-result (full-name name) iterations
(- after-real-time before-real-time)
(- after-run-time before-run-time)
(- after-gc-time before-gc-time)))))))

;;; A short form for benchmarks.
(defmacro benchmark (name iterations body . rest)
`(run-benchmark ,name ,iterations (lambda () ,body ,@rest)))
(cond-expand
(guile-2
(define-syntax-rule (benchmark name iterations body body* ...)
(run-benchmark name iterations (lambda () body body* ...))))
(else
(defmacro benchmark (name iterations body . rest)
`(run-benchmark ,name ,iterations (lambda () ,body ,@rest)))))
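As a usage sketch (illustrative only; the prefix, benchmark names, and bodies are invented and not part of the commit), a benchmark file built on this module would wrap its body expressions with the `benchmark' short form, which run-benchmark then executes the requested number of times after the warm-up call:

;; Hypothetical benchmark group using the short form.
(with-benchmark-prefix "illustration"
  (benchmark "map over a small list" 10000
    (map 1+ '(1 2 3 4 5))))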


;;;; BENCHMARK NAMES
@@ -333,31 +338,21 @@

;;;; Turn a benchmark name into a nice human-readable string.
(define (format-benchmark-name name)
(call-with-output-string
(lambda (port)
(let loop ((name name)
(separator ""))
(if (pair? name)
(begin
(display separator port)
(display (car name) port)
(loop (cdr name) ": ")))))))
(string-join name ": "))

;;;; For a given benchmark-name, deliver the full name including all prefixes.
(define (full-name name)
(append (current-benchmark-prefix) (list name)))

;;; A fluid containing the current benchmark prefix, as a list.
(define prefix-fluid (make-fluid '()))
(define (current-benchmark-prefix)
(fluid-ref prefix-fluid))
;;; A parameter containing the current benchmark prefix, as a list.
(define current-benchmark-prefix
(make-parameter '()))

;;; Postpend PREFIX to the current name prefix while evaluating THUNK.
;;; The name prefix is only changed within the dynamic scope of the
;;; call to with-benchmark-prefix*.  Return the value returned by THUNK.
(define (with-benchmark-prefix* prefix thunk)
(with-fluids ((prefix-fluid
(append (fluid-ref prefix-fluid) (list prefix))))
(parameterize ((current-benchmark-prefix (full-name prefix)))
(thunk)))

;;; (with-benchmark-prefix PREFIX BODY ...)
@@ -365,77 +360,58 @@
;;; The name prefix is only changed within the dynamic scope of the
;;; with-benchmark-prefix expression.  Return the value returned by the last
;;; BODY expression.
(defmacro with-benchmark-prefix (prefix . body)
`(with-benchmark-prefix* ,prefix (lambda () ,@body)))
(cond-expand
(guile-2
(define-syntax-rule (with-benchmark-prefix prefix body body* ...)
(with-benchmark-prefix* prefix (lambda () body body* ...))))
(else
(defmacro with-benchmark-prefix (prefix . body)
`(with-benchmark-prefix* ,prefix (lambda () ,@body)))))


;;;; TIME CALCULATION
;;;; Benchmark results
;;;;

(define benchmark-time-base
internal-time-units-per-second)

(define time-base ;; short-cut, not exported
benchmark-time-base)

(define frame-time/iteration
(define *calibration-result*
"<will be set during initialization>")

(define (benchmark-total-time before after)
(- (tms:clock after) (tms:clock before)))
(define (benchmark-overhead iterations accessor)
(* (/ iterations 1.0 (benchmark-result:iterations *calibration-result*))
(accessor *calibration-result*)))

(define (benchmark-user-time before after)
(- (tms:utime after) (tms:utime before)))

(define (benchmark-system-time before after)
(- (tms:stime after) (tms:stime before)))

(define (benchmark-frame-time iterations)
(* iterations frame-time/iteration))

(define (benchmark-core-time iterations before after)
(- (benchmark-user-time before after) (benchmark-frame-time iterations)))

(define (benchmark-user-time\interpreter before after gc-time)
(- (benchmark-user-time before after) gc-time))

(define (benchmark-core-time\interpreter iterations before after gc-time)
(- (benchmark-core-time iterations before after) gc-time))
(define (benchmark-result:core-time result)
(- (benchmark-result:run-time result)
(benchmark-overhead (benchmark-result:iterations result)
benchmark-result:run-time)))
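To illustrate the overhead subtraction performed by benchmark-result:core-time, here is a worked numerical sketch (all figures invented, not taken from the commit):

;; Suppose the calibration run did 10,000,000 iterations of an empty
;; benchmark in 2,000,000 internal time units, and a later benchmark
;; did 1,000,000 iterations in 900,000 units of run time.  Then the
;; framework overhead charged to that benchmark is
;;   (* (/ 1000000 1.0 10000000) 2000000)   ; => 200000.0 units
;; and its core time is
;;   (- 900000 200000.0)                    ; => 700000.0 units
;; i.e. the measured run time minus its share of the calibration time.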


;;;; REPORTERS
;;;;

;;; The global list of reporters.
(define reporters '())
;;; The global set of reporters.
(define report-hook (make-hook 1))

;;; The default reporter, to be used only if no others exist.
(define default-reporter #f)
(define (default-reporter result)
(if (hook-empty? report-hook)
(user-reporter result)
(run-hook report-hook result)))

(define current-reporter
(make-parameter default-reporter))

;;; Add the procedure REPORTER to the current set of reporter functions.
;;; Signal an error if that reporter procedure object is already registered.
(define (register-reporter reporter)
(if (memq reporter reporters)
(error "register-reporter: reporter already registered: " reporter))
(set! reporters (cons reporter reporters)))
(add-hook! report-hook reporter))

;;; Remove the procedure REPORTER from the current set of reporter
;;; functions.  Signal an error if REPORTER is not currently registered.
(define (unregister-reporter reporter)
(if (memq reporter reporters)
(set! reporters (delq! reporter reporters))
(error "unregister-reporter: reporter not registered: " reporter)))
(remove-hook! report-hook reporter))

;;; Return true iff REPORTER is in the current set of reporter functions.
(define (reporter-registered? reporter)
(if (memq reporter reporters) #t #f))
(if (memq reporter (hook->list report-hook)) #t #f))

;;; Send RESULT to all currently registered reporter functions.
(define (report . args)
(if (pair? reporters)
(for-each (lambda (reporter) (apply reporter args))
reporters)
(apply default-reporter args)))
(define (report result)
((current-reporter) result))
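A small usage sketch of the reporter machinery under the new interface (the reporter itself and the benchmark name are invented for illustration):

;; Hypothetical reporter that prints iterations per second of run time.
(define (ips-reporter result)
  (let ((seconds (/ (benchmark-result:run-time result)
                    1.0 internal-time-units-per-second)))
    (format #t "~A: ~A iterations/s~%"
            (format-benchmark-name (benchmark-result:name result))
            (if (zero? seconds)
                'n/a
                (/ (benchmark-result:iterations result) seconds)))))

(register-reporter ips-reporter)     ; run on every reported result
;; ... run some benchmarks ...
(unregister-reporter ips-reporter)

;; Alternatively, install it for a dynamic extent only:
(parameterize ((current-reporter ips-reporter))
  (run-benchmark "illustrative empty benchmark" 1000 (lambda () #t)))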


;;;; Some useful standard reporters:
@@ -444,26 +420,22 @@
;;;; User reporters write some interesting results to the standard output.

;;; Display a single benchmark result to the given port
(define (print-result port name iterations before after gc-time)
(let* ((name (format-benchmark-name name))
(total-time (benchmark-total-time before after))
(user-time (benchmark-user-time before after))
(system-time (benchmark-system-time before after))
(frame-time (benchmark-frame-time iterations))
(benchmark-time (benchmark-core-time iterations before after))
(user-time\interpreter
(benchmark-user-time\interpreter before after gc-time))
(benchmark-core-time\interpreter
(benchmark-core-time\interpreter iterations before after gc-time)))
(define (print-result port result)
(let ((name (format-benchmark-name (benchmark-result:name result)))
(iterations (benchmark-result:iterations result))
(real-time (benchmark-result:real-time result))
(run-time (benchmark-result:run-time result))
(gc-time (benchmark-result:gc-time result))
(core-time (benchmark-result:core-time result)))
(write (list name iterations
'total (i/ total-time time-base)
'user (i/ user-time time-base)
'system (i/ system-time time-base)
'frame (i/ frame-time time-base)
'benchmark (i/ benchmark-time time-base)
'user/interp (i/ user-time\interpreter time-base)
'bench/interp (i/ benchmark-core-time\interpreter time-base)
'gc (i/ gc-time time-base))
'total (->seconds real-time)
'user (->seconds run-time)
'system 0
'frame (->seconds (- run-time core-time))
'benchmark (->seconds core-time)
'user/interp (->seconds (- run-time gc-time))
'bench/interp (->seconds (- core-time gc-time))
'gc (->seconds gc-time))
port)
(newline port)))
@@ -472,58 +444,50 @@
(define (make-log-reporter file)
(let ((port (if (output-port? file) file
(open-output-file file))))
(lambda args
(apply print-result port args)
(lambda (result)
(print-result port result)
(force-output port))))
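For example (an illustrative call, not from the commit; the log file name is made up), a log reporter would be installed with:

;; Illustrative: also log full results to a file.
(register-reporter (make-log-reporter "benchmarks.log"))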

;;; A reporter that reports all results to the user.
(define (full-reporter . args)
(apply print-result (current-output-port) args))
(define (full-reporter result)
(print-result (current-output-port) result))

;;; Display interesting results of a single benchmark to the given port
(define (print-user-result port name iterations before after gc-time)
(let* ((name (format-benchmark-name name))
(user-time (benchmark-user-time before after))
(benchmark-time (benchmark-core-time iterations before after))
(benchmark-core-time\interpreter
(benchmark-core-time\interpreter iterations before after gc-time)))
(write (list name iterations
'user (i/ user-time time-base)
'benchmark (i/ benchmark-time time-base)
'bench/interp (i/ benchmark-core-time\interpreter time-base)
'gc (i/ gc-time time-base))
(define (print-user-result port result)
(let ((name (format-benchmark-name (benchmark-result:name result)))
(iterations (benchmark-result:iterations result))
(real-time (benchmark-result:real-time result))
(run-time (benchmark-result:run-time result))
(gc-time (benchmark-result:gc-time result))
(core-time (benchmark-result:core-time result)))
(write (list name iterations
'real (->seconds real-time)
'real/iteration (->seconds (/ real-time iterations))
'run/iteration (->seconds (/ run-time iterations))
'core/iteration (->seconds (/ core-time iterations))
'gc (->seconds gc-time))
port)
(newline port)))

;;; A reporter that reports interesting results to the user.
(define (user-reporter . args)
(apply print-user-result (current-output-port) args))
(define (user-reporter result)
(print-user-result (current-output-port) result))


;;;; Initialize the benchmarking system:
;;;;

;;; First, display version information
(display ";; running guile version " (current-output-port))
(display (version) (current-output-port))
(newline (current-output-port))
(define (calibrate-benchmark-framework)
(display ";; running guile version ")
(display (version))
(newline)
(display ";; calibrating the benchmarking framework...")
(newline)
(parameterize ((current-reporter
(lambda (result)
(set! *calibration-result* result)
(display ";; calibration: ")
(print-user-result (current-output-port) result))))
(benchmark "empty initialization benchmark" 10000000 #t)))

;;; Second, make sure the benchmarking routines are compiled.
(define (null-reporter . args) #t)
(set! default-reporter null-reporter)
(benchmark "empty initialization benchmark" 2 #t)

;;; Third, initialize the system constants
(display ";; calibrating the benchmarking framework..." (current-output-port))
(newline (current-output-port))
(define (initialization-reporter name iterations before after gc-time)
(let* ((frame-time (- (tms:utime after) (tms:utime before) gc-time 3)))
(set! frame-time/iteration (/ frame-time iterations))
(display ";; framework time per iteration: " (current-output-port))
(display (i/ frame-time/iteration time-base) (current-output-port))
(newline (current-output-port))))
(set! default-reporter initialization-reporter)
(benchmark "empty initialization benchmark" 524288 #t)

;;; Finally, set the default reporter
(set! default-reporter user-reporter)
(calibrate-benchmark-framework)
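As a closing illustration (a hypothetical benchmark file, not part of this commit; the file name, prefix, names, bodies, and iteration counts are invented), a client of the rewritten library might look like this, with calibration having run automatically when the module was loaded:

;; bench-example.scm -- hypothetical user of (benchmark-suite lib).
(use-modules (benchmark-suite lib))

(with-benchmark-prefix "example"
  ;; Body expressions are wrapped in a thunk and run `iterations' times.
  (benchmark "string append" 100000
    (string-append "foo" "bar"))
  (benchmark "vector allocation" 10000
    (make-vector 1024 0)))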