From 6d315cbf9e574b5f964bc09a402070778aa1804e Mon Sep 17 00:00:00 2001 From: David van Moolenbroek Date: Mon, 20 Jul 2015 11:02:57 +0000 Subject: [PATCH] benchmarks: remove unixbench This code appears to be GPL-licensed and thus does not belong in the MINIX3 source tree. Change-Id: I9e88c3ffd8eae8697b629899dba9728863a4413a --- distrib/sets/lists/minix/mi | 36 - etc/mtree/NetBSD.dist.base | 6 - minix/Makefile | 1 - minix/benchmarks/Makefile | 5 - minix/benchmarks/Makefile.inc | 1 - minix/benchmarks/run | 51 - minix/benchmarks/unixbench-5.1.2/Makefile | 8 - minix/benchmarks/unixbench-5.1.2/README | 406 - minix/benchmarks/unixbench-5.1.2/Run | 1870 --- minix/benchmarks/unixbench-5.1.2/USAGE | 394 - .../benchmarks/unixbench-5.1.2/WRITING_TESTS | 133 - .../benchmarks/unixbench-5.1.2/pgms/Makefile | 6 - .../unixbench-5.1.2/pgms/Makefile.inc | 5 - .../unixbench-5.1.2/pgms/arithoh/Makefile | 7 - .../unixbench-5.1.2/pgms/context1/Makefile | 5 - .../unixbench-5.1.2/pgms/dhry2/Makefile | 7 - .../unixbench-5.1.2/pgms/dhry2reg/Makefile | 7 - .../unixbench-5.1.2/pgms/double/Makefile | 7 - .../unixbench-5.1.2/pgms/execl/Makefile | 5 - .../unixbench-5.1.2/pgms/float/Makefile | 7 - .../unixbench-5.1.2/pgms/fstime/Makefile | 5 - minix/benchmarks/unixbench-5.1.2/pgms/gfx-x11 | 476 - .../unixbench-5.1.2/pgms/hanoi/Makefile | 5 - .../unixbench-5.1.2/pgms/index.base/Makefile | 3 - .../pgms/index.base/index.base | 46 - .../unixbench-5.1.2/pgms/int/Makefile | 7 - .../unixbench-5.1.2/pgms/long/Makefile | 7 - .../unixbench-5.1.2/pgms/looper/Makefile | 5 - .../unixbench-5.1.2/pgms/multi.sh/Makefile | 3 - .../unixbench-5.1.2/pgms/multi.sh/multi.sh | 23 - .../unixbench-5.1.2/pgms/pipe/Makefile | 5 - .../unixbench-5.1.2/pgms/poll/Makefile | 7 - .../unixbench-5.1.2/pgms/poll2/Makefile | 7 - .../unixbench-5.1.2/pgms/register/Makefile | 7 - .../unixbench-5.1.2/pgms/select/Makefile | 7 - .../unixbench-5.1.2/pgms/short/Makefile | 7 - .../unixbench-5.1.2/pgms/spawn/Makefile | 5 - .../unixbench-5.1.2/pgms/syscall/Makefile | 5 - .../unixbench-5.1.2/pgms/tst.sh/Makefile | 3 - .../unixbench-5.1.2/pgms/tst.sh/tst.sh | 19 - .../unixbench-5.1.2/pgms/ubgears/Makefile | 5 - .../pgms/unixbench.logo/Makefile | 3 - .../pgms/unixbench.logo/unixbench.logo | 14 - .../pgms/whetstone-double/Makefile | 8 - .../unixbench-5.1.2/run.sh/Makefile | 5 - .../benchmarks/unixbench-5.1.2/run.sh/run.sh | 4 - minix/benchmarks/unixbench-5.1.2/src/arith.c | 108 - minix/benchmarks/unixbench-5.1.2/src/big.c | 595 - .../benchmarks/unixbench-5.1.2/src/context1.c | 125 - minix/benchmarks/unixbench-5.1.2/src/dhry.h | 435 - minix/benchmarks/unixbench-5.1.2/src/dhry_1.c | 427 - minix/benchmarks/unixbench-5.1.2/src/dhry_2.c | 209 - minix/benchmarks/unixbench-5.1.2/src/dummy.c | 319 - minix/benchmarks/unixbench-5.1.2/src/execl.c | 91 - minix/benchmarks/unixbench-5.1.2/src/fstime.c | 469 - minix/benchmarks/unixbench-5.1.2/src/hanoi.c | 75 - minix/benchmarks/unixbench-5.1.2/src/looper.c | 103 - minix/benchmarks/unixbench-5.1.2/src/pipe.c | 66 - minix/benchmarks/unixbench-5.1.2/src/spawn.c | 78 - .../benchmarks/unixbench-5.1.2/src/syscall.c | 107 - .../unixbench-5.1.2/src/time-polling.c | 578 - minix/benchmarks/unixbench-5.1.2/src/timeit.c | 39 - .../benchmarks/unixbench-5.1.2/src/ubgears.c | 650 - minix/benchmarks/unixbench-5.1.2/src/whets.c | 1313 -- .../unixbench-5.1.2/testdir/Makefile | 2 - .../unixbench-5.1.2/testdir/Makefile.inc | 1 - .../unixbench-5.1.2/testdir/cctest.c/Makefile | 3 - .../unixbench-5.1.2/testdir/cctest.c/cctest.c | 156 - 
.../unixbench-5.1.2/testdir/dc.dat/Makefile | 3 - .../unixbench-5.1.2/testdir/dc.dat/dc.dat | 8 - .../testdir/large.txt/Makefile | 3 - .../testdir/large.txt/large.txt | 10000 ---------------- .../unixbench-5.1.2/testdir/sort.src/Makefile | 3 - .../unixbench-5.1.2/testdir/sort.src/sort.src | 362 - minix/man/man7/hier.7 | 2 - 75 files changed, 19988 deletions(-) delete mode 100644 minix/benchmarks/Makefile delete mode 100644 minix/benchmarks/Makefile.inc delete mode 100644 minix/benchmarks/run delete mode 100644 minix/benchmarks/unixbench-5.1.2/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/README delete mode 100755 minix/benchmarks/unixbench-5.1.2/Run delete mode 100644 minix/benchmarks/unixbench-5.1.2/USAGE delete mode 100644 minix/benchmarks/unixbench-5.1.2/WRITING_TESTS delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/Makefile.inc delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/arithoh/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/context1/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/dhry2/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/dhry2reg/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/double/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/execl/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/float/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/fstime/Makefile delete mode 100755 minix/benchmarks/unixbench-5.1.2/pgms/gfx-x11 delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/hanoi/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/index.base/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/index.base/index.base delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/int/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/long/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/looper/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/multi.sh/Makefile delete mode 100755 minix/benchmarks/unixbench-5.1.2/pgms/multi.sh/multi.sh delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/pipe/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/poll/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/poll2/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/register/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/select/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/short/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/spawn/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/syscall/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/tst.sh/Makefile delete mode 100755 minix/benchmarks/unixbench-5.1.2/pgms/tst.sh/tst.sh delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/ubgears/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/unixbench.logo/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/unixbench.logo/unixbench.logo delete mode 100644 minix/benchmarks/unixbench-5.1.2/pgms/whetstone-double/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/run.sh/Makefile delete mode 100755 minix/benchmarks/unixbench-5.1.2/run.sh/run.sh delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/arith.c delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/big.c delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/context1.c 
delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/dhry.h delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/dhry_1.c delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/dhry_2.c delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/dummy.c delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/execl.c delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/fstime.c delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/hanoi.c delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/looper.c delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/pipe.c delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/spawn.c delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/syscall.c delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/time-polling.c delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/timeit.c delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/ubgears.c delete mode 100644 minix/benchmarks/unixbench-5.1.2/src/whets.c delete mode 100644 minix/benchmarks/unixbench-5.1.2/testdir/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/testdir/Makefile.inc delete mode 100644 minix/benchmarks/unixbench-5.1.2/testdir/cctest.c/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/testdir/cctest.c/cctest.c delete mode 100644 minix/benchmarks/unixbench-5.1.2/testdir/dc.dat/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/testdir/dc.dat/dc.dat delete mode 100644 minix/benchmarks/unixbench-5.1.2/testdir/large.txt/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/testdir/large.txt/large.txt delete mode 100644 minix/benchmarks/unixbench-5.1.2/testdir/sort.src/Makefile delete mode 100644 minix/benchmarks/unixbench-5.1.2/testdir/sort.src/sort.src diff --git a/distrib/sets/lists/minix/mi b/distrib/sets/lists/minix/mi index 23f4b5dea..a0fc79585 100644 --- a/distrib/sets/lists/minix/mi +++ b/distrib/sets/lists/minix/mi @@ -221,42 +221,6 @@ ./usr/ast minix-sys obsolete ./usr/ast/.exrc minix-sys obsolete ./usr/ast/.profile minix-sys obsolete -./usr/benchmarks minix-sys -./usr/benchmarks/unixbench minix-sys -./usr/benchmarks/unixbench/pgms minix-sys -./usr/benchmarks/unixbench/pgms/arithoh minix-sys -./usr/benchmarks/unixbench/pgms/context1 minix-sys -./usr/benchmarks/unixbench/pgms/dhry2 minix-sys -./usr/benchmarks/unixbench/pgms/dhry2reg minix-sys -./usr/benchmarks/unixbench/pgms/double minix-sys -./usr/benchmarks/unixbench/pgms/execl minix-sys -./usr/benchmarks/unixbench/pgms/float minix-sys -./usr/benchmarks/unixbench/pgms/fstime minix-sys -./usr/benchmarks/unixbench/pgms/hanoi minix-sys -./usr/benchmarks/unixbench/pgms/index.base minix-sys -./usr/benchmarks/unixbench/pgms/int minix-sys -./usr/benchmarks/unixbench/pgms/long minix-sys -./usr/benchmarks/unixbench/pgms/looper minix-sys -./usr/benchmarks/unixbench/pgms/multi.sh minix-sys -./usr/benchmarks/unixbench/pgms/pipe minix-sys -./usr/benchmarks/unixbench/pgms/poll minix-sys -./usr/benchmarks/unixbench/pgms/register minix-sys -./usr/benchmarks/unixbench/pgms/select minix-sys -./usr/benchmarks/unixbench/pgms/short minix-sys -./usr/benchmarks/unixbench/pgms/spawn minix-sys -./usr/benchmarks/unixbench/pgms/syscall minix-sys -./usr/benchmarks/unixbench/pgms/tst.sh minix-sys -./usr/benchmarks/unixbench/pgms/unixbench.logo minix-sys -./usr/benchmarks/unixbench/pgms/whetstone-double minix-sys -./usr/benchmarks/unixbench/results minix-sys -./usr/benchmarks/unixbench/Run minix-sys -./usr/benchmarks/unixbench/run.sh minix-sys -./usr/benchmarks/unixbench/testdir minix-sys 
-./usr/benchmarks/unixbench/testdir/cctest.c minix-sys -./usr/benchmarks/unixbench/testdir/dc.dat minix-sys -./usr/benchmarks/unixbench/testdir/large.txt minix-sys -./usr/benchmarks/unixbench/testdir/sort.src minix-sys -./usr/benchmarks/unixbench/tmp minix-sys ./usr/bin minix-sys ./usr/bin/addr2line minix-sys binutils ./usr/bin/add_route minix-sys diff --git a/etc/mtree/NetBSD.dist.base b/etc/mtree/NetBSD.dist.base index 3e2e59511..b0a8f4c65 100644 --- a/etc/mtree/NetBSD.dist.base +++ b/etc/mtree/NetBSD.dist.base @@ -54,12 +54,6 @@ ./usr/adm/old ./usr/bin ./usr/etc -./usr/benchmarks -./usr/benchmarks/unixbench -./usr/benchmarks/unixbench/pgms -./usr/benchmarks/unixbench/tmp -./usr/benchmarks/unixbench/testdir -./usr/benchmarks/unixbench/results ./usr/games ./usr/games/hide gname=games mode=0750 ./usr/include diff --git a/minix/Makefile b/minix/Makefile index 8705154ac..adca356d4 100644 --- a/minix/Makefile +++ b/minix/Makefile @@ -1,6 +1,5 @@ SUBDIR+= include .WAIT -SUBDIR+= benchmarks SUBDIR+= bin SUBDIR+= commands SUBDIR+= fs diff --git a/minix/benchmarks/Makefile b/minix/benchmarks/Makefile deleted file mode 100644 index f0578eda7..000000000 --- a/minix/benchmarks/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# Makefile for the benchmarks. - -SUBDIR=unixbench-5.1.2 - -.include diff --git a/minix/benchmarks/Makefile.inc b/minix/benchmarks/Makefile.inc deleted file mode 100644 index 01b5f2341..000000000 --- a/minix/benchmarks/Makefile.inc +++ /dev/null @@ -1 +0,0 @@ -.include "../Makefile.inc" diff --git a/minix/benchmarks/run b/minix/benchmarks/run deleted file mode 100644 index 54458c95b..000000000 --- a/minix/benchmarks/run +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/sh - -set -e - -make - -BENCHDIR=/usr/local/benchmarks - -basebenchmarks=`echo *bench*` - -if [ -d $BENCHDIR ] -then packagebenchmarks=`(cd $BENCHDIR && echo *bench*)` -fi - -runbench() { - bench=$1 - out="Results/$bench.`date +%Y%m%d.%H%M%S`" - if [ -d $bench ] - then dir=$bench - fi - if [ -d $BENCHDIR/$bench ] - then dir=$BENCHDIR/$bench - fi - clear - echo "Running $dir." - echo "Saving output to $out." - echo "" - ( cd $dir && sh run.sh 2>&1 ) | tee $out -} - -clear -n=1 -for b in $basebenchmarks $packagebenchmarks -do echo "$n. $b" - eval "n$n=$b" - n=`expr $n + 1` -done -echo -echo -n "Run which benchmark or 'all'? " -read bench -eval var=\$n$bench -if [ "$bench" = all ] -then for b in $basebenchmarks $packagebenchmarks - do runbench $b - done -else if [ -d "$var" -o -d "$BENCHDIR/$var" ] - then runbench $var - else echo "Unknown benchmark $var." - exit 1 - fi -fi diff --git a/minix/benchmarks/unixbench-5.1.2/Makefile b/minix/benchmarks/unixbench-5.1.2/Makefile deleted file mode 100644 index 16f604460..000000000 --- a/minix/benchmarks/unixbench-5.1.2/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -SCRIPTS=Run - -SUBDIR=pgms testdir run.sh - -BINDIR=/usr/benchmarks/unixbench - -.include -.include diff --git a/minix/benchmarks/unixbench-5.1.2/README b/minix/benchmarks/unixbench-5.1.2/README deleted file mode 100644 index 9a4f0c8ab..000000000 --- a/minix/benchmarks/unixbench-5.1.2/README +++ /dev/null @@ -1,406 +0,0 @@ -Version 5.1.2 -- 2007-12-26 - -================================================================ -To use Unixbench: - -1. UnixBench from version 5.1 on has both system and graphics tests. - If you want to use the graphic tests, edit the Makefile and make sure - that the line "GRAPHIC_TESTS = defined" is not commented out; then check - that the "GL_LIBS" definition is OK for your system. 
Also make sure - that the "x11perf" command is on your search path. - - If you don't want the graphics tests, then comment out the - "GRAPHIC_TESTS = defined" line. Note: comment it out, don't - set it to anything. - -2. Do "make". - -3. Do "Run" to run the system test; "Run graphics" to run the graphics - tests; "Run gindex" to run both. - -You will need perl, as Run is written in perl. - -For more information on using the tests, read "USAGE". - -For information on adding tests into the benchmark, see "WRITING_TESTS". - - -===================== RELEASE NOTES ===================================== - -======================== Dec 07 ========================== - -v5.1.2 - -One big fix: if unixbench is installed in a directory whose pathname contains -a space, it should now run (previously it failed). - -To avoid possible clashes, the environment variables unixbench uses are now -prefixed with "UB_". These are all optional, and for most people will be -completely unnecessary, but if you want you can set these: - - UB_BINDIR Directory where the test programs live. - UB_TMPDIR Temp directory, for temp files. - UB_RESULTDIR Directory to put results in. - UB_TESTDIR Directory where the tests are executed. - -And a couple of tiny fixes: -* In pgms/tst.sh, changed "sort -n +1" to "sort -n -k 1" -* In Makefile, made it clearer that GRAPHIC_TESTS should be commented - out (not set to 0) to disable graphics -Thanks to nordi for pointing these out. - - -Ian Smith, December 26, 2007 -johantheghost at yahoo period com - - -======================== Oct 07 ========================== - -v5.1.1 - -It turns out that the setting of LANG is crucial to the results. This -explains why people in different regions were seeing odd results, and also -why runlevel 1 produced odd results -- runlevel 1 doesn't set LANG, and -hence reverts to ASCII, whereas most people use a UTF-8 encoding, which is -much slower in some tests (eg. shell tests). - -So now we manually set LANG to "en_US.utf8", which is configured with the -variable "$language". Don't change this if you want to share your results. -We also report the language settings in use. - -See "The Language Setting" in USAGE for more info. Thanks to nordi for -pointing out the LANG issue. - -I also added the "grep" and "sysexec" tests. These are non-index tests, -and "grep" uses the system's grep, so it's not much use for comparing -different systems. But some folks on the OpenSuSE list have been finding -these useful. They aren't in any of the main test groups; do "Run grep -sysexec" to run them. - -Index Changes -------------- - -The setting of LANG will affect consistency with systems where this is -not the default value. However, it should produce more consistent results -in future. - - -Ian Smith, October 15, 2007 -johantheghost at yahoo period com - - -======================== Oct 07 ========================== - -v5.1 - -The major new feature in this version is the addition of graphical -benchmarks. Since these may not compile on all systems, you can enable/ -disable them with the GRAPHIC_TESTS variable in the Makefile. - -As before, each test is run for 3 or 10 iterations. However, we now discard -the worst 1/3 of the scores before averaging the remainder. The logic is -that a glitch in the system (background process waking up, for example) may -make one or two runs go slow, so let's discard those. Hopefully this will -produce more consistent and repeatable results. Check the log file -for a test run to see the discarded scores. 
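
In outline, the new averaging amounts to the following (a minimal Perl
sketch under made-up names, not the actual code -- the real implementation
is the combinePassResults() function in Run, which also normalizes each
count by its measured time and timebase):

    use strict;

    # Sort the per-pass scores ascending, drop the worst third, and take
    # the geometric mean (average of logs) of what remains.
    sub combineScores {
        my @scores = sort { $a <=> $b } @_;
        my $ndump = int(scalar(@scores) / 3);
        splice(@scores, 0, $ndump);
        my $logsum = 0;
        $logsum += log($_) foreach @scores;
        return exp($logsum / scalar(@scores));
    }

    # Ten passes, two of which glitched low; the glitches get discarded.
    printf "%.1f\n", combineScores(98.2, 101.5, 55.0, 99.8, 100.3,
                                   97.9, 62.1, 102.0, 99.1, 100.9);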
- -Made the tests compile and run on x86-64/Linux (fixed an execl bug passing -int instead of pointer). - -Also fixed some general bugs. - -Thanks to Stefan Esser for help and testing / bug reporting. - -Index Changes -------------- - -The tests are now divided into categories, and each category generates -its own index. This keeps the graphics test results separate from -the system tests. - -The "graphics" test and corresponding index are new. - -The "discard the worst scores" strategy should produce slightly higher -test scores, but at least they should (hopefully!) be more consistent. -The scores should not be higher than the best scores you would have got -with 5.0, so this should not be a huge consistency issue. - -Ian Smith, October 11, 2007 -johantheghost at yahoo period com - - -======================== Sep 07 ========================== - -v5.0 - -All the work I've done on this release is Linux-based, because that's -the only Unix I have access to. I've tried to make it more OS-agnostic -if anything; for example, it no longer has to figure out the format reported -by /usr/bin/time. However, it's possible that portability has been damaged. -If anyone wants to fix this, please feel free to mail me patches. - -In particular, the analysis of the system's CPUs is done via /proc/cpuinfo. -For systems which don't have this, please make appropriate changes in -getCpuInfo() and getSystemInfo(). - -The big change has been to make the tests multi-CPU aware. See the -"Multiple CPUs" section in "USAGE" for details. Other changes: - -* Completely rewrote Run in Perl; drastically simplified the way data is - processed. The confusing system of interlocking shell and awk scripts is - now just one script. Various intermediate files used to store and process - results are now replaced by Perl data structures internal to the script. - -* Removed from index runs the file system read and write tests, which were - ignored for the index and wasted about 10 minutes per run (see fstime.c). - The read and write tests can now be selected individually. Made fstime.c - take parameters, so we no longer need to build 3 versions of it. - -* Made the output file names unique; they are built from - hostname-date-sequence. - -* Worked on result reporting, error handling, and logging. See TESTS. - We now generate both text and HTML reports. - -* Removed some obsolete files. - -Index Changes -------------- - -The index is still based on David Niemi's SPARCstation 20-61 (rated at 10.0), -and the intention in the changes I've made has been to keep the tests -unchanged, in order to maintain consistency with old result sets. - -However, the following changes have been made to the index: - -* The Pipe-based Context Switching test (context1) was being dropped - from the index report in v4.1.0 due to a bug; I've put it back in. - -* I've added shell1 to the index, to get a measure of how the shell tests - scale with multiple CPUs (shell8 already exercises all the CPUs, even - in single-copy mode). I made up the baseline score for this by - extrapolation. - -Both of these tests can be dropped, if you wish, by editing the "TEST -SPECIFICATIONS" section of Run. - -Ian Smith, September 20, 2007 -johantheghost at yahoo period com - -======================== Aug 97 ========================== - -v4.1.0 - -Double precision Whetstone put in place instead of the old "double" benchmark. - -Removal of some obsolete files. - -"system" suite adds shell8. - -perlbench and poll added as "exhibition" (non-index) benchmarks.
- -Incorporates several suggestions by Andre Derrick Balsa - -Code cleanups to reduce compiler warnings by David C Niemi -and Andy Kahn; Digital Unix options by Andy Kahn. - -======================== Jun 97 ========================== - -v4.0.1 - -Minor change to fstime.c to fix overflow problems on fast machines. Counting -is now done in units of 256 (smallest BUFSIZE) and unsigned longs are used, -giving another 23 dB or so of headroom ;^) Results should be virtually -identical aside from very small rounding errors. - -======================== Dec 95 ========================== - -v4.0 - -Byte no longer seems to have anything to do with this benchmark, and I was -unable to reach any of the original authors; so I have taken it upon myself -to clean it up. - -This is version 4. Major assumptions made in these benchmarks have changed -since they were written, but they are nonetheless popular (particularly for -measuring hardware for Linux). Some changes made: - -- The biggest change is to put a lot more operating system-oriented - tests into the index. I experimented for a while with a decibel-like - logarithmic scale, but finally settled on using a geometric mean for - the final index (the individual scores are normalized, and their - logs are averaged; the resulting value is exponentiated). - - "George", a certain SPARCstation 20-61 with 128 MB RAM, a SPARC Storage - Array, and Solaris 2.3 is my new baseline; it is rated at 10.0 in each - of the index scores for a final score of 10.0. - - Overall I find the geometric averaging is a big improvement for - avoiding the skew that was once possible (e.g. a Pentium-75 which got - 40 on the buggy version of fstime, such that fstime accounted for over - half of its total score and hence wildly skewed its average). - - I also expect that the new numbers look different enough from the old - ones that no one is too likely to casually mistake them for each other. - - I am finding new SPARCs running Solaris 2.4 getting about 15-20, and - my 486 DX2-66 Compaq running Linux 1.3.45 got a 9.1. It got - understandably poor scores on CPU and FPU benchmarks (a horrible - 1.8 on "double" and 1.3 on "fsdisk"); but made up for it by averaging - over 20 on the OS-oriented benchmarks. The Pentium-75 running - Linux gets about 20 (and it *still* runs Windows 3.1 slowly. Oh well). - -- It is difficult to get a modern compiler to even consider making - dhry2 without registers, short of turning off *all* optimizations. - This is also not a terribly meaningful test, even if it were possible, - as no one compiles without registers nowadays. Replaced this benchmark - with dhry2reg in the index, and dropped it out of usage in general as - it is so hard to make a legitimate one. - -- fstime: this had some bugs when compiled on modern systems which return - the number of bytes read/written for read(2)/write(2) calls. The code - assumed that a negative return code was given for EOF, but most modern - systems return 0 (certainly on SunOS 4, Solaris2, and Linux, which is - what counts for me). The old code yielded wildly inflated read scores, - would eat up tens of MB of disk space on fast systems, and yielded - copy scores roughly 50% lower than it should have. - - Also, it counted partial blocks *fully*; made it count the proportional - part of the block which was actually finished. - - Made bigger and smaller variants of fstime which are designed to beat - up the disk I/O and the buffer cache, respectively.
Adjusted the - sleeps so that they are short for short benchmarks. - -- Instead of 1,2,4, and 8-shell benchmarks, went to 1, 8, and 16 to - give a broader range of information (and to run 1 fewer test). - The only real problem with this is that not many iterations get - done with 16 at a time on slow systems, so there are some significant - rounding errors; 8 is therefore still used for the benchmark. There is - also the problem that the last (uncompleted) loop is counted as a full - loop, so it is impossible to score below 1.0 lpm (which gave my laptop - a break). Probably redesigning Shell to do each loop a bit more - quickly (but with less intensity) would be a good idea. - - This benchmark appears to be very heavily influenced - by the speed - of the loader, by which shell is being used as /bin/sh, and by how - well-compiled some of the common shell utilities like grep, sed, and - sort are. With a consistent tool set it is also a good indicator of - the bandwidth between main memory and the CPU (e.g. Pentia score about - twice as high as 486es due to their 64-bit bus). Small, sometimes - broken shells like "ash-linux" do particularly well here, while big, - robust shells like bash do not. - -- "dc" is a somewhat iffy benchmark, because there are two versions of - it floating around, one being small, very fast, and buggy, and one - being more correct but slow. It was never in the index anyway. - -- Execl is a somewhat troubling benchmark in that it yields much higher - scores if compiled statically. I frown on this practice because it - distorts the scores away from reflecting how programs are really used - (i.e. dynamically linked). - -- Arithoh is really more an indicator of the compiler quality than of - the computer itself. For example, GCC 2.7.x with -O2 and a few extra - options optimizes much of it away, resulting in about a 1200% boost - to the score. Clearly not a good one for the index. - -I am still a bit unhappy with the variance in some of the benchmarks, most -notably the fstime suite; and with how long it takes to run. But I think -it gets significantly more reliable results than the older version in less -time. - -If anyone has ideas on how to make these benchmarks faster, lower-variance, -or more meaningful; or has nice, new, portable benchmarks to add, don't -hesitate to e-mail me. - -David C Niemi 7 Dec 1995 - -======================== May 91 ========================== -This is version 3. This set of programs should be able to determine if -your system is BSD or SysV. (It uses the output format of time (1) -to see.) If you have any problems, contact me (by email, -preferably): ben@bytepb.byte.com - ---- - -The document doc/bench.doc describes the basic flow of the -benchmark system. The document doc/bench3.doc describes the major -changes in design of this version. As a user of the benchmarks, -you should understand some of the methods that have been -implemented to generate loop counts: - -Tests that are compiled C code: - The function wake_me(second, func) is included (from the file -timeit.c). This function uses signal and alarm to set a countdown -for the time requested by the benchmark administration script -(Run). As soon as the clock is started, the test is run with a -counter keeping track of the number of loops that the test makes. -When alarm sends its signal, the loop counter value is sent to stderr -and the program terminates.
Since the time resolution, signal -trapping and other factors don't ensure that the test runs for the -precise time that was requested, the test program is also run -from the time (1) command. The real time value returned from time -(1) is what is used in calculating the number of loops per second -(or minute, depending on the test). As is obvious, there is some -overhead time that is not taken into account; therefore the -number of loops per second is not absolute. The overhead of the -test starting and stopping and the signal and alarm calls is -common to the overhead of real applications. If a program loads -quickly, the number of loops per second increases; a phenomenon -that favors systems that can load programs quickly. (Setting the -sticky bit of the test programs is not considered fair play.) - -Tests that use existing UNIX programs or shell scripts: - The concept is the same as that of compiled tests, except the -alarm and signal are contained in a separate compiled program, -looper (source is looper.c). Looper uses an execvp to invoke the -test with its arguments. Here, the overhead includes the -invocation and execution of looper. (A minimal sketch of this -alarm-based pattern, in Perl, appears at the end of these notes.) - --- - -The index numbers are generated from a baseline file that is in -pgms/index.base. You can put whatever tests you wish in this file. -All you need to do is take the results/log file from your -baseline machine, edit out the comment and blank lines, and sort -the result (vi/ex command: 1,$!sort). The sort is necessary -because the process of generating the index report uses join (1). -You can regenerate the reports by running "make report." - --- - -========================= Jan 90 ============================= -Tom Yager has joined the effort here at BYTE; he is responsible -for many refinements in the UNIX benchmarks. - -The memory access tests have been deleted from the benchmarks. -The file access tests have been reversed so that the test is run -for a fixed time. The amount of data transferred (written, read, -and copied) is the variable. !WARNING! This test can eat up a -large hunk of disk space. - -The initial line of all shell scripts has been changed from the -SCO and XENIX form (:) to the more standard form "#! /bin/sh". -But different systems handle shell switching differently. Check -the documentation on your system and find out how you are -supposed to do it. Or, simpler yet, just run the benchmarks from -the Bourne shell. (You may need to set SHELL=/bin/sh as well.) - -The options to Run have not been checked in a while. They may no -longer function. Next time, I'll get back on them. There needs to -be another option added (next time) that halts testing between -each test. !WARNING! Some systems have caches that are not getting flushed -before the next test or iteration is run. This can cause -erroneous values. - -========================= Sept 89 ============================= -The database (db) programs now have a tuneable message queue space. -The default set in the Run script is 1024 bytes. -Other major changes are in the format of the times. We now show -Arithmetic and Geometric mean and standard deviation for User -Time, System Time, and Real Time. Generally, in reporting, we -plan on using the Real Time values with the benchmarks run with one -active user (the bench user). Comments and arguments are requested.
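
A minimal Perl sketch of the compiled-test timing pattern described above
(a hypothetical illustration only -- the real wake_me() is C code in
timeit.c, and real tests do actual benchmark work inside the loop):

    use strict;

    my $duration = shift || 10;    # seconds to run, as requested by Run
    my $iterations = 0;
    my $done = 0;

    # The "wake me" part: ask for SIGALRM after $duration seconds.
    $SIG{ALRM} = sub { $done = 1; };
    alarm($duration);

    # The counting loop: one unit of benchmark work per pass.
    until ($done) {
        ++$iterations;
    }

    # Report the count on stderr in the COUNT|count|timebase|label form
    # that Run parses; the harness divides by the elapsed time from time (1).
    printf STDERR "COUNT|%d|1|loops\n", $iterations;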
- -contact: BIX bensmith or rick_g diff --git a/minix/benchmarks/unixbench-5.1.2/Run b/minix/benchmarks/unixbench-5.1.2/Run deleted file mode 100755 index 799f81665..000000000 --- a/minix/benchmarks/unixbench-5.1.2/Run +++ /dev/null @@ -1,1870 +0,0 @@ -#!/usr/pkg/bin/perl -w - -use strict; - -use POSIX qw(strftime); -use Time::HiRes; -use IO::Handle; - - -############################################################################ -# UnixBench - Release 5.1.1, based on: -# The BYTE UNIX Benchmarks - Release 3 -# Module: Run SID: 3.11 5/15/91 19:30:14 -# Original Byte benchmarks written by: -# Ben Smith, Tom Yager at BYTE Magazine -# ben@bytepb.byte.com tyager@bytepb.byte.com -# BIX: bensmith tyager -# -####################################################################### -# General Purpose Benchmark -# based on the work by Ken McDonell, Computer Science, Monash University -# -# You will need ... -# perl Time::HiRes IO::Handle -# cat cc chmod comm cp date dc df echo -# kill ls make mkdir rm sed test time touch tty umask who -############################################################################### -# Modification Log: -# $Header: run,v 5.2 88/01/12 06:23:43 kenj Exp $ -# Ken McDonell, Computer Science, Monash University -# August 1, 1983 -# 3/89 - Ben Smith - BYTE: globalized many variables, modernized syntax -# 5/89 - commented and modernized. Removed workload items till they -# have been modernized. Added database server test. -# 11/14/89 - Made modifications to reflect new version of fstime -# and elimination of mem tests. -# 10/22/90 - Many tests have been flipped so that they run for -# a specified length of time and loops are counted. -# 4/3/91 - Cleaned up and debugged several test parameters - Ben -# 4/9/91 - Added structure for creating index and determining flavor of UNIX -# 4/26/91 - Made changes and corrections suggested by Tin Le of Sony -# 5/15/91 - Removed db from distribution -# 4/4/92 Jon Tombs fixed for GNU time to look like -# BSD (don't know the format of sysV!) -# 12/95 - Massive changes for portability, speed, and more meaningful index -# DCN David C Niemi -# 1997.06.20 DCN Fixed overflow condition in fstime.c on fast machines -# 1997.08.24 DCN Modified "system", replaced double with -# whetstone-double in "index" -# 1997.09.10 DCN Added perlbench as an Exhibition benchmark -# 1997.09.23 DCN Added rgooch's select as an Exhibition benchmark -# 1999.07.28 DCN "select" not compiled or run by default, because it -# does not compile on many platforms. PerlBench also -# not run by default. -# 2007.09.26 IS Huge rewrite -- see release notes in README. -# 2007.10.12 IS Added graphics tests, categories feature. -# 2007.10.14 IS Set and report LANG. Added "grep" and "sysexec". -# 2007.12.22 IS Tiny fixes; see README. - - -############################################################################ -# CONFIGURATION -############################################################################ - -# Version number of the script. -my $version = "5.1.2"; - -# The setting of LANG makes a huge difference to some of the scores, -# particularly depending on whether UTF-8 is used. So we always set -# it to the same value, which is configured here. -# -# If you want your results to be meaningful when compared to other people's -# results, you should not change this. Change it if you want to measure the -# effect of different languages. -my $language = "en_US.utf8"; - -# The number of iterations per test.
-my $longIterCount = 10; -my $shortIterCount = 3; - -# C compiler to use in compilation tests. -my $cCompiler = $ENV{CC}; - -# Establish full paths to directories. These need to be full pathnames -# (or do they, any more?). They can be set in env. -# variables whose names are the first parameter to getDir() below. -my $BASEDIR = `pwd`; -chomp($BASEDIR); - -# Directory where the test programs live. -my $BINDIR = getDir('UB_BINDIR', $BASEDIR . "/pgms"); - -# Temp directory, for temp files. -my $TMPDIR = getDir('UB_TMPDIR', $BASEDIR . "/tmp"); - -# Directory to put results in. -my $RESULTDIR = getDir('UB_RESULTDIR', $BASEDIR . "/results"); - -# Directory where the tests are executed. -my $TESTDIR = getDir('UB_TESTDIR', $BASEDIR . "/testdir"); - - -############################################################################ -# TEST SPECIFICATIONS -############################################################################ - -# Configure the categories to which tests can belong. -my $testCats = { - 'system' => { 'name' => "System Benchmarks", 'maxCopies' => 16 }, - '2d' => { 'name' => "2D Graphics Benchmarks", 'maxCopies' => 1 }, - '3d' => { 'name' => "3D Graphics Benchmarks", 'maxCopies' => 1 }, - 'misc' => { 'name' => "Non-Index Benchmarks", 'maxCopies' => 16 }, -}; - - -my $arithmetic = [ - "arithoh", "short", "int", "long", "float", "double", "whetstone-double" -]; - -my $fs = [ - "fstime-w", "fstime-r", "fstime", - "fsbuffer-w", "fsbuffer-r", "fsbuffer", - "fsdisk-w", "fsdisk-r", "fsdisk" -]; - -my $oldsystem = [ - "execl", "fstime", "fsbuffer", "fsdisk", "pipe", "context1", "spawn", - "syscall" -]; - -my $system = [ - @$oldsystem, "shell1", "shell8" # , "shell16" -]; - -my $index = [ - "dhry2reg", "whetstone-double", @$oldsystem, "shell1", "shell8" -]; - -my $graphics = [ - "2d-rects", "2d-ellipse", "2d-aashapes", "2d-text", "2d-blit", - "2d-window", "ubgears" -]; - - -# List of all supported test names. -my $testList = { - # Individual tests. - "dhry2reg" => undef, - "whetstone-double" => undef, - "syscall" => undef, - "pipe" => undef, - "context1" => undef, - "spawn" => undef, - "execl" => undef, - "fstime-w" => undef, - "fstime-r" => undef, - "fstime" => undef, - "fsbuffer-w" => undef, - "fsbuffer-r" => undef, - "fsbuffer" => undef, - "fsdisk-w" => undef, - "fsdisk-r" => undef, - "fsdisk" => undef, - "shell1" => undef, - "shell8" => undef, - "shell16" => undef, - "short" => undef, - "int" => undef, - "long" => undef, - "float" => undef, - "double" => undef, - "arithoh" => undef, - "C" => undef, - "dc" => undef, - "hanoi" => undef, - "grep" => undef, - "sysexec" => undef, - - "2d-rects" => undef, - "2d-lines" => undef, - "2d-circle" => undef, - "2d-ellipse" => undef, - "2d-shapes" => undef, - "2d-aashapes" => undef, - "2d-polys" => undef, - "2d-text" => undef, - "2d-blit" => undef, - "2d-window" => undef, - - "ubgears" => undef, - - # Named combos and shorthands. - "arithmetic" => $arithmetic, - "dhry" => [ "dhry2reg" ], - "dhrystone" => [ "dhry2reg" ], - "whets" => [ "whetstone-double" ], - "whetstone" => [ "whetstone-double" ], - "load" => [ "shell" ], - "misc" => [ "C", "dc", "hanoi" ], - "speed" => [ @$arithmetic, @$system ], - "oldsystem" => $oldsystem, - "system" => $system, - "fs" => $fs, - "shell" => [ "shell1", "shell8" ], - "graphics" => $graphics, - - # The tests which constitute the official index. - "index" => $index, - - # The tests which constitute the official index plus the graphics - # index. 
- "gindex" => [ @$index, @$graphics ], -}; - - -# Default parameters for benchmarks. Note that if "prog" is used, -# it must contain just the program name, as it will be quoted (this -# is necessary if BINDIR contains spaces). Put any options in "options". -my $baseParams = { - "prog" => undef, - "options" => "", - "repeat" => 'short', - "stdout" => 1, # Non-0 to keep stdout. - "stdin" => "", - "logmsg" => "", -}; - - -# Individual parameters for all benchmarks. -my $testParams = { - - ########################## - ## System Benchmarks ## - ########################## - - "dhry2reg" => { - "logmsg" => "Dhrystone 2 using register variables", - "cat" => 'system', - "options" => "10", - "repeat" => 'long', - }, - "whetstone-double" => { - "logmsg" => "Double-Precision Whetstone", - "cat" => 'system', - "repeat" => 'long', - }, - "syscall" => { - "logmsg" => "System Call Overhead", - "cat" => 'system', - "repeat" => 'long', - "options" => "10", - }, - "context1" => { - "logmsg" => "Pipe-based Context Switching", - "cat" => 'system', - "repeat" => 'long', - "options" => "10", - }, - "pipe" => { - "logmsg" => "Pipe Throughput", - "cat" => 'system', - "repeat" => 'long', - "options" => "10", - }, - "spawn" => { - "logmsg" => "Process Creation", - "cat" => 'system', - "options" => "30", - }, - "execl" => { - "logmsg" => "Execl Throughput", - "cat" => 'system', - "options" => "30", - }, - "fstime-w" => { - "logmsg" => "File Write 1024 bufsize 2000 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-w -t 30 -d \"${TMPDIR}\" -b 1024 -m 2000", - }, - "fstime-r" => { - "logmsg" => "File Read 1024 bufsize 2000 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-r -t 30 -d \"${TMPDIR}\" -b 1024 -m 2000", - }, - "fstime" => { - "logmsg" => "File Copy 1024 bufsize 2000 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-c -t 30 -d \"${TMPDIR}\" -b 1024 -m 2000", - }, - "fsbuffer-w" => { - "logmsg" => "File Write 256 bufsize 500 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-w -t 30 -d \"${TMPDIR}\" -b 256 -m 500", - }, - "fsbuffer-r" => { - "logmsg" => "File Read 256 bufsize 500 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-r -t 30 -d \"${TMPDIR}\" -b 256 -m 500", - }, - "fsbuffer" => { - "logmsg" => "File Copy 256 bufsize 500 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-c -t 30 -d \"${TMPDIR}\" -b 256 -m 500", - }, - "fsdisk-w" => { - "logmsg" => "File Write 4096 bufsize 8000 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-w -t 30 -d \"${TMPDIR}\" -b 4096 -m 8000", - }, - "fsdisk-r" => { - "logmsg" => "File Read 4096 bufsize 8000 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-r -t 30 -d \"${TMPDIR}\" -b 4096 -m 8000", - }, - "fsdisk" => { - "logmsg" => "File Copy 4096 bufsize 8000 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-c -t 30 -d \"${TMPDIR}\" -b 4096 -m 8000", - }, - "shell1" => { - "logmsg" => "Shell Scripts (1 concurrent)", - "cat" => 'system', - "prog" => "${BINDIR}/looper", - "options" => "60 \"${BINDIR}/multi.sh\" 1", - }, - "shell8" => { - "logmsg" => "Shell Scripts (8 concurrent)", - "cat" => 'system', - "prog" => "${BINDIR}/looper", - "options" => "60 \"${BINDIR}/multi.sh\" 8", - }, - "shell16" => { - "logmsg" => "Shell Scripts (16 concurrent)", - "cat" => 'system', - "prog" => 
"${BINDIR}/looper", - "options" => "60 \"${BINDIR}/multi.sh\" 16", - }, - - ########################## - ## Graphics Benchmarks ## - ########################## - - "2d-rects" => { - "logmsg" => "2D graphics: rectangles", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "rects 3 2", - }, - - "2d-lines" => { - "logmsg" => "2D graphics: lines", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "lines 3 2", - }, - - "2d-circle" => { - "logmsg" => "2D graphics: circles", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "circle 3 2", - }, - - "2d-ellipse" => { - "logmsg" => "2D graphics: ellipses", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "ellipse 3 2", - }, - - "2d-shapes" => { - "logmsg" => "2D graphics: polygons", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "shapes 3 2", - }, - - "2d-aashapes" => { - "logmsg" => "2D graphics: aa polygons", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "aashapes 3 2", - }, - - "2d-polys" => { - "logmsg" => "2D graphics: complex polygons", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "polys 3 2", - }, - - "2d-text" => { - "logmsg" => "2D graphics: text", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "text 3 2", - }, - - "2d-blit" => { - "logmsg" => "2D graphics: images and blits", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "blit 3 2", - }, - - "2d-window" => { - "logmsg" => "2D graphics: windows", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "window 3 2", - }, - - "ubgears" => { - "logmsg" => "3D graphics: gears", - "cat" => '3d', - "options" => "-time 20 -v", - }, - - - ########################## - ## Non-Index Benchmarks ## - ########################## - - "C" => { - "logmsg" => "C Compiler Throughput ($cCompiler)", - "cat" => 'misc', - "prog" => "${BINDIR}/looper", - "options" => "60 $cCompiler cctest.c", - }, - "arithoh" => { - "logmsg" => "Arithoh", - "cat" => 'misc', - "options" => "10", - }, - "short" => { - "logmsg" => "Arithmetic Test (short)", - "cat" => 'misc', - "options" => "10", - }, - "int" => { - "logmsg" => "Arithmetic Test (int)", - "cat" => 'misc', - "options" => "10", - }, - "long" => { - "logmsg" => "Arithmetic Test (long)", - "cat" => 'misc', - "options" => "10", - }, - "float" => { - "logmsg" => "Arithmetic Test (float)", - "cat" => 'misc', - "options" => "10", - }, - "double" => { - "logmsg" => "Arithmetic Test (double)", - "cat" => 'misc', - "options" => "10", - }, - "dc" => { - "logmsg" => "Dc: sqrt(2) to 99 decimal places", - "cat" => 'misc', - "prog" => "${BINDIR}/looper", - "options" => "30 dc", - "stdin" => "dc.dat", - }, - "hanoi" => { - "logmsg" => "Recursion Test -- Tower of Hanoi", - "cat" => 'misc', - "options" => "20", - }, - "grep" => { - "logmsg" => "Grep a large file (system's grep)", - "cat" => 'misc', - "prog" => "${BINDIR}/looper", - "options" => "30 grep -c gimp large.txt", - }, - "sysexec" => { - "logmsg" => "Exec System Call Overhead", - "cat" => 'misc', - "repeat" => 'long', - "prog" => "${BINDIR}/syscall", - "options" => "10 exec", - }, -}; - - -# CPU flags of interest. 
-my $x86CpuFlags = { - 'pae' => "Physical Address Ext", - 'sep' => "SYSENTER/SYSEXIT", - 'syscall' => "SYSCALL/SYSRET", - 'mmx' => "MMX", - 'mmxext' => "AMD MMX", - 'cxmmx' => "Cyrix MMX", - 'xmm' => "Streaming SIMD", - 'xmm2' => "Streaming SIMD-2", - 'xmm3' => "Streaming SIMD-3", - 'ht' => "Hyper-Threading", - 'ia64' => "IA-64 processor", - 'lm' => "x86-64", - 'vmx' => "Intel virtualization", - 'svm' => "AMD virtualization", -}; - - -############################################################################ -# UTILITIES -############################################################################ - -# Exec the given command, and catch its standard output. -# We return an array containing the PID and the filehandle on the -# process' standard output. It's up to the caller to wait for the command -# to terminate. -sub command { - my ( $cmd ) = @_; - - my $pid = open(my $childFd, "-|"); - if (!defined($pid)) { - die("Run: fork() failed (undef)\n"); - } elsif ($pid == 0) { - exec($cmd); - die("Run: exec() failed (returned)\n"); - } - - return ( $pid, $childFd ); -} - - -# Get data from running a system command. Used for things like getting -# the host OS from `uname -o` etc. -# -# Ignores initial blank lines from the command and returns the first -# non-blank line, with white space trimmed off. Returns a blank string -# if there is no output; undef if the command fails. -sub getCmdOutput { - my ( $cmd ) = @_; - - my ( $pid, $fd ) = command($cmd . " 2>/dev/null"); - my $result = ""; - while (<$fd>) { - chomp; - next if /^[ \t]*$/; - - $result = $_; - $result =~ s/^[ \t]+//; - $result =~ s/[ \t]+$//; - last; - } - - # Close the command and wait for it to die. - waitpid($pid, 0); - my $status = $?; - - return $status == 0 ? $result : undef; -} - - -# Get a directory pathname from an environment variable, or the given -# default. Canonicalise and return the value. -sub getDir { - my ( $var, $def ) = @_; - - my $val = $ENV{$var} || $def; - - # Canonicalise the value. - my $wd; - chomp($wd = `pwd`); - chdir($val); - chomp($val = `pwd`); - chdir($wd); - $ENV{$var} = $val; - - $val; -} - - -# Get the name of the file we're going to log to. The name uses the hostname -# and date, plus a sequence number to make it unique. -sub logFile { - my ( $sysInfo ) = @_; - - my $count = 1; - - # Use the date in the base file name. - my $ymd = strftime "%Y-%m-%d", localtime; - - while (1) { - my $log = sprintf "%s/%s-%s-%02d", - ${RESULTDIR}, $sysInfo->{'name'}, $ymd, $count; - return $log if (! -e $log); - ++$count; - } -} - - -# Print a message to the named log file. We use this method rather than -# keeping the FD open because we use shell redirection to send command -# output to the same file. -sub printLog { - my ( $logFile, @args ) = @_; - - open(my $fd, ">>", $logFile) || abortRun("can't append to $logFile"); - printf $fd @args; - close($fd); -} - - -# Display a number of something, auto-selecting the plural form -# if appropriate. We are given the number, the singular, and the -# plural; if the plural is omitted, it defaults to singular + "s". -sub number { - my ( $n, $what, $plural ) = @_; - - $plural = $what . "s" if !defined($plural); - - if (!defined($n)) { - return sprintf "unknown %s", $plural; - } else { - return sprintf "%d %s", $n, $n == 1 ? $what : $plural; - } -} - - -# Merge two sets of test parameters -- defaults and actual parameters. -# Return the merged parameter hash. 
-sub mergeParams { - my ( $def, $vals ) = @_; - - my $params = { }; - foreach my $k (keys(%$def)) { - $params->{$k} = $def->{$k}; - } - foreach my $k (keys(%$vals)) { - $params->{$k} = $vals->{$k}; - } - - $params; -} - - -############################################################################ -# SYSTEM ANALYSIS -############################################################################ - -# Extract interesting flags from the given processor flags string and -# convert them to descriptive names. -sub processCpuFlags { - my ( $flagStr ) = @_; - - my @names; - foreach my $f (sort split(/\s+/, $flagStr)) { - my $name = $x86CpuFlags->{$f}; - push(@names, $name) if $name; - } - - join(", ", @names); -} - - -# Get information on the CPUs in the system. Returns a reference to an -# array of N entries, one per CPU, where each entry is a hash containing -# the fields 'model' (model name), 'bogo' (BogoMIPS) and 'flags' -# (descriptive flag names), describing the model etc. Returns undef if -# the information can't be got. -sub getCpuInfo { - open(my $fd, "<", "/proc/cpuinfo") || return undef; - - my $cpus = [ ]; - my $cpu = 0; - while (<$fd>) { - chomp; - my ( $field, $val ) = split(/[ \t]*:[ \t]*/); - next if (!$field || !$val); - if ($field eq "processor") { - $cpu = $val; - } elsif ($field eq "model name") { - my $model = $val; - $model =~ s/ +/ /g; - $cpus->[$cpu]{'model'} = $model; - } elsif ($field eq "bogomips") { - $cpus->[$cpu]{'bogo'} = $val; - } elsif ($field eq "flags") { - $cpus->[$cpu]{'flags'} = processCpuFlags($val); - } - } - - close($fd); - - $cpus; -} - - -# Get information on the host system. Returns a reference to a hash -# with the following fields: -# name Host name -# os Host OS name -# osRel Host OS release -# osVer Host OS version -# mach Host machine name (eg. "SparcStation 20", but on -# PC/Linux usually "i686" etc.) -# platform Hardware platform; on Linux, the base CPU type? -# system System name (eg. hostname and Linux distro, like -# "hostname: openSUSE 10.2 (i586)"). -# cpus Value returned by getCpuInfo(), undef if not avail. -# numCpus Number of CPUs if known, else undef. -# load System load message as per "uptime". -# numUsers Number of users and/or open shell sessions. -sub getSystemInfo { - my $info = { }; - - # Get host system data. - if ($ENV{MINIX}) { - $info->{'name'} = getCmdOutput("uname -a"); - } else { - $info->{'name'} = getCmdOutput("hostname"); - } - $info->{'os'} = getCmdOutput("uname -o") || getCmdOutput("uname -s"); - $info->{'osRel'} = getCmdOutput("uname -r"); - $info->{'osVer'} = getCmdOutput("uname -v"); - $info->{'mach'} = getCmdOutput("uname -m"); - if (!$ENV{MINIX}) { - $info->{'platform'} = getCmdOutput("uname -i"); - } - - # Get the system name (SUSE, Red Hat, etc.) if possible. - $info->{'system'} = $info->{'os'}; - if ( -r "/etc/SuSE-release" ) { - $info->{'system'} = getCmdOutput("cat /etc/SuSE-release"); - } elsif ( -r "/etc/release" ) { - $info->{'system'} = getCmdOutput("cat /etc/release"); - } - - # Get the language info. - if (!$ENV{MINIX}) { - my $lang = getCmdOutput("printenv LANG"); - my $map = getCmdOutput("locale -k LC_CTYPE | grep charmap"); - $map =~ s/.*=//; - my $coll = getCmdOutput("locale -k LC_COLLATE | grep collate-codeset"); - $coll =~ s/.*=//; - $info->{'language'} = sprintf "%s (charmap=%s, collate=%s)", - $lang, $map, $coll; - } - - # Get details on the CPUs, if possible. - my $cpus = getCpuInfo(); - if (defined($cpus)) { - $info->{'cpus'} = $cpus; - $info->{'numCpus'} = scalar(@$cpus); - } - - # Get graphics hardware info.
- # if (!$ENV{MINIX}) { - # $info->{'graphics'} = getCmdOutput("3dinfo | cut -f1 -d\'(\'"); - # } - - # Get system run state, load and usage info. - if (!$ENV{MINIX}) { - $info->{'runlevel'} = getCmdOutput("runlevel | cut -f2 -d\" \""); - } - $info->{'load'} = getCmdOutput("uptime"); - $info->{'numUsers'} = getCmdOutput("who | wc -l"); - - $info; -} - - -############################################################################ -# ERROR HANDLING -############################################################################ - -# Abort the benchmarking run with an error message. -sub abortRun { - my ( $err ) = @_; - - printf STDERR "\n**********************************************\n"; - printf STDERR "Run: %s; aborting\n", $err; - exit(1); -} - - -############################################################################ -# TEST SETUP -############################################################################ - -# Do checks that everything's ready for testing. -sub preChecks { - # Set the language. - $ENV{'LANG'} = $language; - - # Check that the required files are in the proper places. - if (!$ENV{MINIX}) { - system("make check"); - if ($? != 0) { - system("make all"); - if ($? != 0) { - abortRun("\"make all\" failed"); - } - } - } - - # Create a script to kill this run. - system("echo \"kill -9 $$\" > \"${TMPDIR}/kill_run\""); - chmod(0755, $TMPDIR . "/kill_run"); -} - - -# Parse the command arguments. -sub parseArgs { - my @words = @_; - - # The accumulator for the bench units to be run. - my $tests = [ ]; - my $params = { 'tests' => $tests }; - - # Generate the requested list of bench programs. - my $opt; - my $word; - while ($word = shift(@words)) { - if ($word !~ m/^-/) { # A test name. - if ($word eq "all") { - foreach my $t (keys(%$testList)) { - push(@$tests, $t) if (!defined($testList->{$t})); - } - } elsif (exists($testList->{$word})) { - my $val = $testList->{$word} || [ $word ]; - push(@$tests, @$val); - } else { - die("Run: unknown test \"$word\"\n"); - } - } elsif ($word eq "-q") { - $params->{'verbose'} = 0; - } elsif ($word eq "-v") { - $params->{'verbose'} = 2; - } elsif ($word eq "-i") { - $params->{'iterations'} = shift(@words); - } elsif ($word eq "-c") { - if (!defined($params->{'copies'})) { - $params->{'copies'} = [ ]; - } - push(@{$params->{'copies'}}, shift(@words)); - } else { - die("Run: unknown option $word\n"); - } - } - - $params; -} - - -############################################################################ -# RESULTS INPUT / OUTPUT -############################################################################ - -# Read a set of benchmarking results from the given file. -# Returns results in the form returned by runTests(), but without the -# individual pass results. -sub readResultsFromFile { - my ( $file ) = @_; - - # Attempt to get the baseline data file; if we can't, just return undef. - open(my $fd, "<", $file) || return undef; - - my $results = { }; - while (<$fd>) { - chomp; - - # Dump comments, ignore blank lines. 
- s/#.*//; - next if /^\s*$/; - - my ( $name, $time, $slab, $sum, $score, $iters ) = split(/\|/); - my $bresult = { }; - $bresult->{'score'} = $score; - $bresult->{'scorelabel'} = $slab; - $bresult->{'time'} = $time; - $bresult->{'iterations'} = $iters; - - $results->{$name} = $bresult; - } - - close($fd); - - $results; -} - - -############################################################################ -# RESULTS PROCESSING -############################################################################ - -# Process a set of results from a single test by averaging the individual -# pass results into a single final value. -# First, though, dump the worst 1/3 of the scores. The logic is that a -# glitch in the system (background process waking up, for example) may -# make one or two runs go slow, so let's discard those. -# -# $bresult is a hashed array representing the results of a single test; -# $bresult->{'passes'} is an array of the output from the individual -# passes. -sub combinePassResults { - my ( $bench, $tdata, $bresult, $logFile ) = @_; - - $bresult->{'cat'} = $tdata->{'cat'}; - - # Computed results. - my $iterations = 0; - my $totalTime = 0; - my $sum = 0; - my $product = 0; - my $label; - - my $pres = $bresult->{'passes'}; - - # We're going to throw away the worst 1/3 of the pass results. - # Figure out how many to keep. - my $npasses = scalar(@$pres); - my $ndump = int($npasses / 3); - - foreach my $presult (sort { $a->{'COUNT0'} <=> $b->{'COUNT0'} } @$pres) { - my $count = $presult->{'COUNT0'}; - my $timebase = $presult->{'COUNT1'}; - $label = $presult->{'COUNT2'}; - my $time = $presult->{'TIME'} || $presult->{'elapsed'}; - - # Skip this result if it's one of the worst ones. - if ($ndump > 0) { - printLog($logFile, "*Dump score: %12.1f\n", $count); - --$ndump; - next; - } - - # Count this result. - ++$iterations; - printLog($logFile, "Count score: %12.1f\n", $count); - - # If $timebase is 0 the figure is a rate; else compute - # counts per $timebase. $time is always seconds. - if ($timebase > 0) { - $sum += $count / ($time / $timebase); - $product += log($count) - log($time / $timebase); - } else { - $sum += $count; - $product += log($count); - } - $totalTime += $time; - } - - # Save the results for the benchmark. - if ($iterations > 0) { - $bresult->{'score'} = exp($product / $iterations); - $bresult->{'scorelabel'} = $label; - $bresult->{'time'} = $totalTime / $iterations; - $bresult->{'iterations'} = $iterations; - } else { - $bresult->{'error'} = "No measured results"; - } -} - - -# Index the given full benchmark results against the baseline results. -# $results is a hashed array of test names to test results. -# -# Adds the following fields to each benchmark result: -# iscore The baseline score for this test -# index The index of this test against the baseline -# Adds the following fields to $results: -# indexed The number of tests for which index values were -# generated -# fullindex Non-0 if all the index tests were indexed -# index The computed overall index for the run -# Note that the index values are computed as -# result / baseline * 10 -# so an index of 523 indicates that a test ran 52.3 times faster than -# the baseline. -sub indexResults { - my ( $results ) = @_; - - # Read in the baseline result data. If we can't get it, just return - # without making indexed results. - my $index = readResultsFromFile($BINDIR . "/index.base"); - if (!defined($index)) { - return; - } - - # Count the number of results we have (indexed or not) in - # each category.
-    my $numCat = { };
-    foreach my $bench (@{$results->{'list'}}) {
-        my $bresult = $results->{$bench};
-        ++$numCat->{$bresult->{'cat'}};
-    }
-    $results->{'numCat'} = $numCat;
-
-    my $numIndex = { };
-    my $indexed = { };
-    my $sum = { };
-    foreach my $bench (sort(keys(%$index))) {
-        # Get the test data for this benchmark.
-        my $tdata = $testParams->{$bench};
-        if (!defined($tdata)) {
-            abortRun("unknown benchmark \"$bench\" in $BINDIR/index.base");
-        }
-
-        # Get the test category.  Count the total tests in this cat.
-        my $cat = $tdata->{'cat'};
-        ++$numIndex->{$cat};
-
-        # If we don't have a result for this test, skip.
-        next if (!defined($results->{$bench}));
-
-        # Get the index and actual results.  Calculate the score.
-        my $iresult = $index->{$bench};
-        my $bresult = $results->{$bench};
-        my $ratio = $bresult->{'score'} / $iresult->{'score'};
-
-        # Save the indexed score.
-        $bresult->{'iscore'} = $iresult->{'score'};
-        $bresult->{'index'} = $ratio * 10;
-
-        # Sum the scores, and count this test for this category.
-        $sum->{$cat} += log($ratio);
-        ++$indexed->{$cat};
-    }
-
-    # Calculate the index scores per category.
-    $results->{'indexed'} = $indexed;
-    $results->{'numIndex'} = $numIndex;
-    foreach my $c (keys(%$indexed)) {
-        if ($indexed->{$c} > 0) {
-            $results->{'index'}{$c} = exp($sum->{$c} / $indexed->{$c}) * 10;
-        }
-    }
-}
-
-
-############################################################################
-# TEST EXECUTION
-############################################################################
-
-# Exec the given command in a sub-process.
-#
-# In the child process, we run the command and store its standard output.
-# We also time its execution, and catch its exit status.  We then write
-# the command's output, plus lines containing the execution time and status,
-# to a pipe.
-#
-# In the parent process, we immediately return an array containing the
-# child PID and the filehandle to the pipe.  This allows the caller to
-# kick off multiple commands in parallel, then gather their output.
-sub commandBuffered {
-    my ( $cmd ) = @_;
-
-    # Create a pipe for parent-child communication.
-    my $childReader;
-    my $parentWriter;
-    pipe($childReader, $parentWriter) || abortRun("pipe() failed");
-    $parentWriter->autoflush(1);
-
-    # Fork off the child process.
-    my $pid = fork();
-    if (!defined($pid)) {
-        abortRun("fork() failed (undef)");
-    } elsif ($pid == 0) {
-        # Close the other end of the pipe.
-        close $childReader;
-
-        # Start the clock and spawn the command.
-        my $benchStart = Time::HiRes::time();
-        my ( $cmdPid, $cmdFd ) = command($cmd);
-
-        # Read and buffer all the command's output.
-        my $output = [ ];
-        while (<$cmdFd>) {
-            push(@$output, $_);
-        }
-
-        # Stop the clock and save the time.
-        my $elTime = Time::HiRes::time() - $benchStart;
-        push(@$output, sprintf "elapsed|%f\n", $elTime);
-
-        # Wait for the child to die so we can get its status.
-        # close($cmdFd);  Doesn't work???
-        waitpid($cmdPid, 0);
-        my $status = $?;
-        push(@$output, sprintf "status|%d\n", $status);
-
-        # Now that we've got the time, play back all the output to the pipe.
-        # The parent can read this at its leisure.
-        foreach my $line (@$output) {
-            print $parentWriter $line;
-        }
-
-        # Terminate this child.
-        close $parentWriter;
-        exit(0);
-    }
-
-    # Close the other end of the pipe.
-    close $parentWriter;
-
-    return ( $pid, $childReader );
-}
-
-
-# Read the results of a benchmark execution from a child process, given
-# its process ID and its filehandle.
-# Create a results hash structure
-# containing the fields returned by the child, plus:
-#    pid     The child's process ID
-#    status  The child's exit status
-#    ERROR   Any stderr output from the child that isn't result data
-# Note that any result fields with multiple values are split; so eg.
-#    COUNT|x|y|z
-# becomes
-#    COUNT0 = x
-#    COUNT1 = y
-#    COUNT2 = z
-sub readResults {
-    my ( $pid, $fd ) = @_;
-
-    my $presult = { 'pid' => $pid };
-
-    # Read all the result lines from the child.
-    while (<$fd>) {
-        chomp;
-
-        my ( $field, @params ) = split(/\|/);
-        if (scalar(@params) == 0) {            # Error message.
-            $presult->{'ERROR'} .= "\n" if ($presult->{'ERROR'});
-            $presult->{'ERROR'} .= $field;
-        } elsif (scalar(@params) == 1) {       # Simple data.
-            $presult->{$field} = $params[0];
-        } else {                               # Compound data.
-            # Store the values in separate fields, named "FIELD$i".
-            for (my $x = 0; $x < scalar(@params); ++$x) {
-                $presult->{$field . $x} = $params[$x];
-            }
-        }
-    }
-
-    # If the command had an error, make an appropriate message if we
-    # don't have one.
-    if ($presult->{'status'} != 0 && !defined($presult->{'ERROR'})) {
-        $presult->{'ERROR'} = "command returned status " . $presult->{'status'};
-    }
-
-    # Wait for the child to die.
-    close($fd);
-    waitpid($pid, 0);
-
-    $presult;
-}
-
-
-# Execute a benchmark command.  We set off a given number of copies in
-# parallel to exercise multiple CPUs.
-#
-# We return an array of results hashes, one per copy; each one is as
-# returned by readResults().
-sub executeBenchmark {
-    my ( $command, $copies ) = @_;
-
-    # Array of contexts for all the copies we're running.
-    my $ctxt = [ ];
-
-    # Kick off all the commands at once.
-    for (my $i = 0; $i < $copies; ++$i) {
-        my ( $cmdPid, $cmdFd ) = commandBuffered($command);
-        $ctxt->[$i] = {
-            'pid' => $cmdPid,
-            'fd' => $cmdFd,
-        };
-    }
-
-    # Now, we can simply read back the command results in order.  Because
-    # the child processes read and buffer the results and time the commands,
-    # there's no need to use select() to read the results as they appear.
-    my $pres = [ ];
-    for (my $i = 0; $i < $copies; ++$i) {
-        my $presult = readResults($ctxt->[$i]{'pid'}, $ctxt->[$i]{'fd'});
-        push(@$pres, $presult);
-    }
-
-    $pres;
-}
-
-
-# Run one iteration of a benchmark, as specified by the given
-# benchmark parameters.  We run multiple parallel copies as
-# specified by $copies.
-sub runOnePass {
-    my ( $params, $verbose, $logFile, $copies ) = @_;
-
-    # Get the command to run.
-    my $command = $params->{'command'};
-    if ($verbose > 1) {
-        printf "\n";
-        printf "COMMAND: \"%s\"\n", $command;
-        printf "COPIES: \"%d\"\n", $copies;
-    }
-
-    # Remember where we are, and move to the test directory.
-    my $pwd = `pwd`;
-    chomp($pwd);        # strip the newline from the backticks
-    chdir($TESTDIR);
-
-    # Execute N copies of the benchmark in parallel.
-    my $copyResults = executeBenchmark($command, $copies);
-    printLog($logFile, "\n");
-
-    # Move back home.
-    chdir($pwd);
-
-    # Sum up the scores of the copies.
-    my $count = 0;
-    my $time = 0;
-    my $elap = 0;
-    foreach my $res (@$copyResults) {
-        # Log the result data for each copy.
-        foreach my $k (sort(keys(%$res))) {
-            printLog($logFile, "# %s: %s\n", $k, $res->{$k});
-        }
-        printLog($logFile, "\n");
-
-        # If it failed, bomb out.
-        if (defined($res->{'ERROR'})) {
-            my $name = $params->{'logmsg'};
-            abortRun("\"$name\": " . $res->{'ERROR'});
-        }
-
-        # Count up the score.
-        $count += $res->{'COUNT0'};
-        $time += $res->{'TIME'} || $res->{'elapsed'};
-        $elap += $res->{'elapsed'};
-    }
-
-    # Make up a combined result.
- my $passResult = $copyResults->[0]; - $passResult->{'COUNT0'} = $count; - $passResult->{'TIME'} = $time / $copies; - $passResult->{'elapsed'} = $elap / $copies; - - $passResult; -} - - -sub runBenchmark { - my ( $bench, $tparams, $verbose, $logFile, $copies ) = @_; - - # Make up the actual benchmark parameters. - my $params = mergeParams($baseParams, $tparams); - - # Make up the command string based on the parameters. - my $prog = $params->{'prog'} || $BINDIR . "/" . $bench; - my $command = sprintf "\"%s\" %s", $prog, $params->{'options'}; - $command .= " < \"" . $params->{'stdin'} . "\"" if ($params->{'stdin'}); - $command .= " 2>&1"; - $command .= $params->{'stdout'} ? (" >> \"" . $logFile . "\"") : " > /dev/null"; - $params->{'command'} = $command; - - # Set up the benchmark results structure. - my $bresult = { 'name' => $bench, 'msg' => $params->{'logmsg'} }; - - if ($verbose > 0) { - printf "\n%d x %s ", $copies, $params->{'logmsg'}; - } - - printLog($logFile, - "\n########################################################\n"); - printLog($logFile, "%s -- %s\n", - $params->{'logmsg'}, number($copies, "copy", "copies")); - printLog($logFile, "==> %s\n\n", $command); - - # Run the test iterations, as given by the "repeat" parameter. - my $repeats = $shortIterCount; - $repeats = $longIterCount if $params->{'repeat'} eq 'long'; - $repeats = 1 if $params->{'repeat'} eq 'single'; - my $pres = [ ]; - for (my $i = 1; $i <= $repeats; ++$i) { - printLog($logFile, "#### Pass %d\n\n", $i); - - # make an attempt to flush buffers - system("sync; sleep 1; sync; sleep 2"); - # display heartbeat - if ($verbose > 0) { - printf " %d", $i; - } - - # Execute one pass of the benchmark. - my $presult = runOnePass($params, $verbose, $logFile, $copies); - push(@$pres, $presult); - } - $bresult->{'passes'} = $pres; - - # Calculate the averaged results for this benchmark. - combinePassResults($bench, $tparams, $bresult, $logFile); - - # Log the results. - if ($copies == 1) { - printLog($logFile, "\n>>>> Results of 1 copy\n"); - } else { - printLog($logFile, "\n>>>> Sum of %d copies\n", $copies); - } - foreach my $k ( 'score', 'time', 'iterations' ) { - printLog($logFile, ">>>> %s: %s\n", $k, $bresult->{$k}); - } - printLog($logFile, "\n"); - - # Some specific cleanup routines. - if ($bench eq "C") { - unlink(${TESTDIR} . "/cctest.o"); - unlink(${TESTDIR} . "/a.out"); - } - - if ($verbose > 0) { - printf "\n"; - } - - $bresult; -} - - -# Run the named benchmarks. -sub runTests { - my ( $tests, $verbose, $logFile, $copies ) = @_; - - # Run all the requested tests and gather the results. - my $results = { 'start' => time(), 'copies' => $copies }; - foreach my $bench (@$tests) { - # Get the parameters for this benchmark. - my $params = $testParams->{$bench}; - if (!defined($params)) { - abortRun("unknown benchmark \"$bench\""); - } - - # If the benchmark doesn't want to run with this many copies, skip it. - my $cat = $params->{'cat'}; - my $maxCopies = $testCats->{$cat}{'maxCopies'}; - next if ($copies > $maxCopies); - - # Run the benchmark. - my $bresult = runBenchmark($bench, $params, $verbose, $logFile, $copies); - $results->{$bench} = $bresult; - } - $results->{'end'} = time(); - - # Generate a sorted list of benchmarks for which we have results. 
-    my @benches = grep {
-        ref($results->{$_}) eq "HASH" && defined($results->{$_}{'msg'})
-    } keys(%$results);
-    @benches = sort {
-        $results->{$a}{'msg'} cmp $results->{$b}{'msg'}
-    } @benches;
-    $results->{'list'} = \@benches;
-
-    # Generate index scores for the results relative to the baseline data.
-    indexResults($results);
-
-    $results;
-}
-
-
-############################################################################
-# TEXT REPORTS
-############################################################################
-
-# Display a banner indicating the configuration of the system under test
-# to the given file desc.
-sub displaySystem {
-    my ( $info, $fd ) = @_;
-
-    # Display basic system info.
-    printf $fd "   System: %s: %s\n", $info->{'name'}, $info->{'system'};
-    printf $fd "   OS: %s -- %s -- %s\n",
-        $info->{'os'}, $info->{'osRel'}, $info->{'osVer'};
-    if (!$ENV{MINIX}) {
-        printf $fd "   Machine: %s (%s)\n", $info->{'mach'}, $info->{'platform'};
-    } else {
-        printf $fd "   Machine: %s\n", $info->{'mach'};
-    }
-    if (!$ENV{MINIX}) {
-        printf $fd "   Language: %s\n", $info->{'language'};
-    }
-
-    # Get and display details on the CPUs, if possible.
-    my $cpus = $info->{'cpus'};
-    if (!defined($cpus)) {
-        printf $fd "   CPU: no details available\n";
-    } else {
-        for (my $i = 0; $i <= $#$cpus; ++$i) {
-            printf $fd "   CPU %d: %s (%.1f bogomips)\n",
-                $i, $cpus->[$i]{'model'}, $cpus->[$i]{'bogo'};
-            printf $fd "          %s\n", $cpus->[$i]{'flags'};
-        }
-    }
-
-    # if (!$ENV{MINIX}) {
-    #     if ($info->{'graphics'}) {
-    #         printf $fd "   Graphics: %s\n", $info->{'graphics'};
-    #     }
-    # }
-
-    # Display system load and usage info.
-    if (!$ENV{MINIX}) {
-        printf $fd "   %s; runlevel %s\n\n", $info->{'load'}, $info->{'runlevel'};
-    }
-}
-
-
-# Display the test scores from the given set of test results.
-sub logResults {
-    my ( $results, $outFd ) = @_;
-
-    # Display the individual test scores.
-    foreach my $bench (@{$results->{'list'}}) {
-        my $bresult = $results->{$bench};
-
-        printf $outFd "%-40s %12.1f %-5s (%.1f s, %d samples)\n",
-            $bresult->{'msg'},
-            $bresult->{'score'},
-            $bresult->{'scorelabel'},
-            $bresult->{'time'},
-            $bresult->{'iterations'};
-    }
-
-    printf $outFd "\n";
-}
-
-
-# Display index scores, if any, for the given run results.
-sub logIndexCat {
-    my ( $results, $cat, $outFd ) = @_;
-
-    my $total = $results->{'numIndex'}{$cat};
-    my $indexed = $results->{'indexed'}{$cat};
-    my $iscore = $results->{'index'}{$cat};
-    my $full = $total == $indexed;
-
-    # If there are no indexed scores, just say so.
-    if (!defined($indexed) || $indexed == 0) {
-        printf $outFd "No index results available for %s\n\n",
-            $testCats->{$cat}{'name'};
-        return;
-    }
-
-    # Display the header, depending on whether we have a full set of index
-    # scores, or a partial set.
-    my $head = $testCats->{$cat}{'name'} .
-        ($full ? " Index Values" : " Partial Index");
-    printf $outFd "%-40s %12s %12s %8s\n",
-        $head, "BASELINE", "RESULT", "INDEX";
-
-    # Display the individual test scores.
-    foreach my $bench (@{$results->{'list'}}) {
-        my $bresult = $results->{$bench};
-        next if $bresult->{'cat'} ne $cat;
-
-        if (defined($bresult->{'iscore'}) && defined($bresult->{'index'})) {
-            printf $outFd "%-40s %12.1f %12.1f %8.1f\n",
-                $bresult->{'msg'}, $bresult->{'iscore'},
-                $bresult->{'score'}, $bresult->{'index'};
-        } else {
-            printf $outFd "%-40s %12s %12.1f %8s\n",
-                $bresult->{'msg'}, "---",
-                $bresult->{'score'}, "---";
-        }
-    }
-
-    # Display the overall score.
-    my $title = $testCats->{$cat}{'name'} .
" Index Score"; - if (!$full) { - $title .= " (Partial Only)"; - } - printf $outFd "%-40s %12s %12s %8s\n", "", "", "", "========"; - printf $outFd "%-66s %8.1f\n", $title, $iscore; - - printf $outFd "\n"; -} - - -# Display index scores, if any, for the given run results. -sub logIndex { - my ( $results, $outFd ) = @_; - - my $count = $results->{'indexed'}; - foreach my $cat (keys(%$count)) { - logIndexCat($results, $cat, $outFd); - } -} - - -# Dump the given run results into the given report file. -sub summarizeRun { - my ( $systemInfo, $results, $verbose, $reportFd ) = @_; - - # Display information about this test run. - printf $reportFd "------------------------------------------------------------------------\n"; - printf $reportFd "Benchmark Run: %s %s - %s\n", - strftime("%a %b %d %Y", localtime($results->{'start'})), - strftime("%H:%M:%S", localtime($results->{'start'})), - strftime("%H:%M:%S", localtime($results->{'end'})); - printf $reportFd "%s in system; running %s of tests\n", - number($systemInfo->{'numCpus'}, "CPU"), - number($results->{'copies'}, "parallel copy", "parallel copies"); - printf $reportFd "\n"; - - # Display the run scores. - logResults($results, $reportFd); - - # Display the indexed scores, if any. - logIndex($results, $reportFd); -} - - -############################################################################ -# HTML REPORTS -############################################################################ - -# Dump the given run results into the given report file. -sub runHeaderHtml { - my ( $systemInfo, $reportFd ) = @_; - - # Display information about this test run. - my $title = sprintf "Benchmark of %s / %s on %s", - $systemInfo->{'name'}, $systemInfo->{'system'}, - strftime("%a %b %d %Y", localtime()); - - print $reportFd < - - - - - $title - - - -EOF - - # Display information about this test run. - printf $reportFd "

%s

\n", $title; - printf $reportFd "

BYTE UNIX Benchmarks (Version %s)

\n\n", - $version; -} - - -# Display a banner indicating the configuration of the system under test -# to the given file desc. -sub displaySystemHtml { - my ( $info, $fd ) = @_; - - printf $fd "

Test System Information

\n"; - printf $fd "

\n"; - - # Display basic system info. - printf $fd "\n"; - printf $fd " \n"; - printf $fd " \n", - $info->{'name'}, $info->{'system'}; - printf $fd "\n"; - printf $fd " \n"; - printf $fd " \n", - $info->{'os'}, $info->{'osRel'}, $info->{'osVer'}; - printf $fd "\n"; - printf $fd " \n"; - if (!$ENV{MINIX}) { - printf $fd " \n", - $info->{'mach'}, $info->{'platform'}; - } - printf $fd " \n", - $info->{'mach'}; - if (!$ENV{MINIX}) { - printf $fd "\n"; - printf $fd " \n"; - printf $fd " \n", $info->{'language'}; - } - printf $fd "\n"; - - # Get and display details on the CPUs, if possible. - my $cpus = $info->{'cpus'}; - if (!defined($cpus)) { - printf $fd "\n"; - printf $fd " \n"; - printf $fd " \n"; - printf $fd "\n"; - } else { - for (my $i = 0; $i <= $#$cpus; ++$i) { - printf $fd "\n"; - if ($i == 0) { - printf $fd " \n", $#$cpus + 1; - } - printf $fd " \n", $i; - printf $fd " \n", $cpus->[$i]{'flags'}; - printf $fd "\n"; - } - } - - # Display graphics hardware info. - # if (!$ENV{MINIX}) { - # if ($info->{'graphics'}) { - # printf $fd "\n"; - # printf $fd " \n"; - # printf $fd " \n", $info->{'graphics'}; - # printf $fd "\n"; - # } - # } - - # Display system runlevel, load and usage info. - printf $fd "\n"; - printf $fd " \n"; - if (!$ENV{MINIX}) { - printf $fd " \n", - $info->{'load'}, $info->{'runlevel'}; - } - printf $fd " \n"; - - printf $fd "
System:%s: %s
OS:%s -- %s -- %s
Machine:%s: %s%s
Language:%s
CPUs:no details available
CPUs:%d:%s (%.1f bogomips)
\n", - $cpus->[$i]{'model'}, $cpus->[$i]{'bogo'}; - printf $fd " %s
Graphics:%s
Uptime:%s; runlevel %s%s\n", - $info->{'load'}; - printf $fd "

\n\n"; -} - - -# Display the test scores from the given set of test results -# for a given category of tests. -sub logCatResultsHtml { - my ( $results, $cat, $fd ) = @_; - - my $numIndex = $results->{'numIndex'}{$cat}; - my $indexed = $results->{'indexed'}{$cat}; - my $iscore = $results->{'index'}{$cat}; - my $full = defined($indexed) && $indexed == $numIndex; - - # If there are no results in this category, just ignore it. - if (!defined($results->{'numCat'}{$cat}) || - $results->{'numCat'}{$cat} == 0) { - return; - } - - # Say the category. If there are no indexed scores, just say so. - my $warn = ""; - if (!defined($indexed) || $indexed == 0) { - $warn = " — no index results available"; - } elsif (!$full) { - $warn = " — not all index tests were run;" . - " only a partial index score is available"; - } - printf $fd "

%s%s

\n", $testCats->{$cat}{'name'}, $warn; - - printf $fd "

\n"; - - printf $fd "\n"; - printf $fd " \n"; - printf $fd " \n"; - printf $fd " \n"; - printf $fd " \n"; - printf $fd " \n"; - printf $fd " \n"; - printf $fd " \n"; - printf $fd "\n"; - - # Display the individual test scores. - foreach my $bench (@{$results->{'list'}}) { - my $bresult = $results->{$bench}; - next if $bresult->{'cat'} ne $cat; - - printf $fd "\n"; - printf $fd " \n", $bresult->{'msg'}; - printf $fd " \n", - $bresult->{'score'}; - printf $fd " \n", - $bresult->{'scorelabel'}; - printf $fd " \n", - $bresult->{'time'}; - printf $fd " \n", - $bresult->{'iterations'}; - - if (defined($bresult->{'index'})) { - printf $fd " \n", - $bresult->{'iscore'}; - printf $fd " \n", - $bresult->{'index'}; - } - printf $fd "\n"; - } - - # Display the overall score. - if (defined($indexed) && $indexed > 0) { - my $title = $testCats->{$cat}{'name'} . " Index Score"; - if (!$full) { - $title .= " (Partial Only)"; - } - printf $fd "\n"; - printf $fd " \n", $title; - printf $fd " \n", $iscore; - printf $fd "\n"; - } - - printf $fd "
TestScoreUnitTimeIters.BaselineIndex
%s%.1f%s%.1f s%d%.1f%.1f
%s:%.1f

\n\n"; -} - - -# Display index scores, if any, for the given run results. -sub logResultsHtml { - my ( $results, $fd ) = @_; - - foreach my $cat (keys(%$testCats)) { - logCatResultsHtml($results, $cat, $fd); - } -} - - -# Dump the given run results into the given report file. -sub summarizeRunHtml { - my ( $systemInfo, $results, $verbose, $reportFd ) = @_; - - # Display information about this test run. - my $time = $results->{'end'} - $results->{'start'}; - printf $reportFd "


\n"; - printf $reportFd "

Benchmark Run: %s; %s

\n", - number($systemInfo->{'numCpus'}, "CPU"), - number($results->{'copies'}, "parallel process", "parallel processes"); - printf $reportFd "

Time: %s - %s; %dm %02ds

\n", - strftime("%H:%M:%S", localtime($results->{'start'})), - strftime("%H:%M:%S", localtime($results->{'end'})), - int($time / 60), $time % 60; - printf $reportFd "\n"; - - # Display the run scores. - logResultsHtml($results, $reportFd); -} - - -sub runFooterHtml { - my ( $reportFd ) = @_; - - print $reportFd <

-
No Warranties: This information is provided free of charge and "as -is" without any warranty, condition, or representation of any kind, -either express or implied, including but not limited to, any warranty -respecting non-infringement, and the implied warranties of conditions -of merchantability and fitness for a particular purpose. All logos or -trademarks on this site are the property of their respective owner. In -no event shall the author be liable for any -direct, indirect, special, incidental, consequential or other damages -howsoever caused whether arising in contract, tort, or otherwise, -arising out of or in connection with the use or performance of the -information contained on this web site.
- - -EOF -} - - -############################################################################ -# MAIN -############################################################################ - -sub main { - my @args = @_; - - my $params = parseArgs(@args); - my $verbose = $params->{'verbose'} || 1; - if ($params->{'iterations'}) { - $longIterCount = $params->{'iterations'}; - $shortIterCount = int(($params->{'iterations'} + 1) / 3); - $shortIterCount = 1 if ($shortIterCount < 1); - } - - # If no benchmark units have be specified, do "index". - my $tests = $params->{'tests'}; - if ($#$tests < 0) { - $tests = $index; - } - - preChecks(); - my $systemInfo = getSystemInfo(); - - # If the number of copies to run was not set, set it to 1 - # and the number of CPUs in the system (if > 1). - my $copies = $params->{'copies'}; - if (!$copies || scalar(@$copies) == 0) { - push(@$copies, 1); - if (defined($systemInfo->{'numCpus'}) && $systemInfo->{'numCpus'} > 1) { - push(@$copies, $systemInfo->{'numCpus'}); - } - } - - # Display the program banner. - system("cat \"${BINDIR}/unixbench.logo\""); - - if ($verbose > 1) { - printf "\n", join(", ", @$tests); - printf "Tests to run: %s\n", join(", ", @$tests); - } - - # Generate unique file names for the report and log file. - my $reportFile = logFile($systemInfo); - my $reportHtml = $reportFile . ".html"; - my $logFile = $reportFile . ".log"; - - # Open the log file for writing. - open(my $reportFd, ">", $reportFile) || - die("Run: can't write to $reportFile\n"); - open(my $reportFd2, ">", $reportHtml) || - die("Run: can't write to $reportHtml\n"); - printf $reportFd " BYTE UNIX Benchmarks (Version %s)\n\n", $version; - runHeaderHtml($systemInfo, $reportFd2); - - # Dump information about the system under test. - displaySystem($systemInfo, $reportFd); - displaySystemHtml($systemInfo, $reportFd2); - - # Run the tests! Do a test run once for each desired number of copies; - # for example, on a 2-CPU system, we may do a single-processing run - # followed by a dual-processing run. - foreach my $c (@$copies) { - if ($verbose > 1) { - printf "Run with %s\n", number($c, "copy", "copies"); - } - my $results = runTests($tests, $verbose, $logFile, $c); - - summarizeRun($systemInfo, $results, $verbose, $reportFd); - summarizeRunHtml($systemInfo, $results, $verbose, $reportFd2); - } - - runFooterHtml($reportFd2); - - # Finish the report. - close($reportFd); - close($reportFd2); - - # Display the report, if not in quiet mode. - if ($verbose > 0) { - printf "\n"; - printf "========================================================================\n"; - system("cat \"$reportFile\""); - } - - 0; -} - - -exit(main(@ARGV)); - diff --git a/minix/benchmarks/unixbench-5.1.2/USAGE b/minix/benchmarks/unixbench-5.1.2/USAGE deleted file mode 100644 index 903a83f7f..000000000 --- a/minix/benchmarks/unixbench-5.1.2/USAGE +++ /dev/null @@ -1,394 +0,0 @@ -Running the Tests -================= - -All the tests are executed using the "Run" script in the top-level directory. - -The simplest way to generate results is with the commmand: - ./Run - -This will run a standard "index" test (see "The BYTE Index" below), and -save the report in the "results" directory, with a filename like - hostname-2007-09-23-01 -An HTML version is also saved. 
-
-If you want to generate both the basic system index and the graphics index,
-then do:
-    ./Run gindex
-
-If your system has more than one CPU, the tests will be run twice -- once
-with a single copy of each test running at once, and once with N copies,
-where N is the number of CPUs.  Some categories of tests, however (currently
-the graphics tests) will only run with a single copy.
-
-Since the tests are based on constant time (variable work), a "system"
-run usually takes about 29 minutes; the "graphics" part about 18 minutes.
-A "gindex" run on a dual-core machine will do 2 "system" passes (single-
-and dual-processing) and one "graphics" run, for a total of around one and
-a quarter hours.
-
-============================================================================
-
-Detailed Usage
-==============
-
-The Run script takes a number of options which you can use to customise a
-test, and you can specify the names of the tests to run.  The full usage
-is:
-
-    Run [ -q | -v ] [-i <n>] [-c <n> [-c <n> ...]] [test ...]
-
-The option flags are:
-
-  -q      Run in quiet mode.
-  -v      Run in verbose mode.
-  -i <n>  Run <n> iterations for each test -- slower tests
-          use <n> / 3, but at least 1.  Defaults to 10 (3 for
-          slow tests).
-  -c <n>  Run <n> copies of each test in parallel.
-
-The -c option can be given multiple times; for example:
-
-    ./Run -c 1 -c 4
-
-will run a single-streamed pass, then a 4-streamed pass.  Note that some
-tests (currently the graphics tests) will only run in a single-streamed pass.
-
-The remaining non-flag arguments are taken to be the names of tests to run.
-The default is to run "index".  See "Tests" below.
-
-When running the tests, I do *not* recommend switching to single-user mode
-("init 1").  This seems to change the results in ways I don't understand,
-and it's not realistic (unless your system will actually be running in this
-mode, of course).  However, if using a windowing system, you may want to
-switch to a minimal window setup (for example, log in to a "twm" session),
-so that randomly-churning background processes don't randomise the results
-too much.  This is particularly true for the graphics tests.
-
-
-============================================================================
-
-Tests
-=====
-
-The available tests are organised into categories; when generating index
-scores (see "The BYTE Index" below) the results for each category are
-produced separately.
-The categories are:
-
-    system          The original Unix system tests (not all are actually
-                    in the index)
-    2d              2D graphics tests (not all are actually in the index)
-    3d              3D graphics tests
-    misc            Various non-indexed tests
-
-The following individual tests are available:
-
-  system:
-    dhry2reg         Dhrystone 2 using register variables
-    whetstone-double Double-Precision Whetstone
-    syscall          System Call Overhead
-    pipe             Pipe Throughput
-    context1         Pipe-based Context Switching
-    spawn            Process Creation
-    execl            Execl Throughput
-    fstime-w         File Write 1024 bufsize 2000 maxblocks
-    fstime-r         File Read 1024 bufsize 2000 maxblocks
-    fstime           File Copy 1024 bufsize 2000 maxblocks
-    fsbuffer-w       File Write 256 bufsize 500 maxblocks
-    fsbuffer-r       File Read 256 bufsize 500 maxblocks
-    fsbuffer         File Copy 256 bufsize 500 maxblocks
-    fsdisk-w         File Write 4096 bufsize 8000 maxblocks
-    fsdisk-r         File Read 4096 bufsize 8000 maxblocks
-    fsdisk           File Copy 4096 bufsize 8000 maxblocks
-    shell1           Shell Scripts (1 concurrent) (runs "looper 60 multi.sh 1")
-    shell8           Shell Scripts (8 concurrent) (runs "looper 60 multi.sh 8")
-    shell16          Shell Scripts (16 concurrent) (runs "looper 60 multi.sh 16")
-
-  2d:
-    2d-rects         2D graphics: rectangles
-    2d-lines         2D graphics: lines
-    2d-circle        2D graphics: circles
-    2d-ellipse       2D graphics: ellipses
-    2d-shapes        2D graphics: polygons
-    2d-aashapes      2D graphics: aa polygons
-    2d-polys         2D graphics: complex polygons
-    2d-text          2D graphics: text
-    2d-blit          2D graphics: images and blits
-    2d-window        2D graphics: windows
-
-  3d:
-    ubgears          3D graphics: gears
-
-  misc:
-    C                C Compiler Throughput ("looper 60 $cCompiler cctest.c")
-    arithoh          Arithoh (huh?)
-    short            Arithmetic Test (short) (this is arith.c configured for
-                     "short" variables; ditto for the ones below)
-    int              Arithmetic Test (int)
-    long             Arithmetic Test (long)
-    float            Arithmetic Test (float)
-    double           Arithmetic Test (double)
-    dc               Dc: sqrt(2) to 99 decimal places (runs
-                     "looper 30 dc < dc.dat", using your system's copy of "dc")
-    hanoi            Recursion Test -- Tower of Hanoi
-    grep             Grep for a string in a large file, using your system's
-                     copy of "grep"
-    sysexec          Exercise fork() and exec().
-
-The following pseudo-test names are aliases for combinations of other
-tests:
-
-    arithmetic       Runs arithoh, short, int, long, float, double,
-                     and whetstone-double
-    dhry             Alias for dhry2reg
-    dhrystone        Alias for dhry2reg
-    whets            Alias for whetstone-double
-    whetstone        Alias for whetstone-double
-    load             Runs shell1, shell8, and shell16
-    misc             Runs C, dc, and hanoi
-    speed            Runs the arithmetic and system groups
-    oldsystem        Runs execl, fstime, fsbuffer, fsdisk, pipe, context1,
-                     spawn, and syscall
-    system           Runs oldsystem plus shell1, shell8, and shell16
-    fs               Runs fstime-w, fstime-r, fstime, fsbuffer-w,
-                     fsbuffer-r, fsbuffer, fsdisk-w, fsdisk-r, and fsdisk
-    shell            Runs shell1, shell8, and shell16
-
-    index            Runs the tests which constitute the official index:
-                     the oldsystem group, plus dhry2reg, whetstone-double,
-                     shell1, and shell8
-                     See "The BYTE Index" below for more information.
-    graphics         Runs the tests which constitute the graphics index:
-                     2d-rects, 2d-ellipse, 2d-aashapes, 2d-text, 2d-blit,
-                     2d-window, and ubgears
-    gindex           Runs the index and graphics groups, to generate both
-                     sets of index results
-
-    all              Runs all tests
-
-
-============================================================================
-
-The BYTE Index
-==============
-
-The purpose of this test is to provide a basic indicator of the performance
-of a Unix-like system; hence, multiple tests are used to test various
-aspects of the system's performance.  These test results are then compared
-to the scores from a baseline system to produce an index value, which is
-generally easier to handle than the raw scores.  The entire set of index
-values is then combined to make an overall index for the system.
-
-Since 1995, the baseline system has been "George", a SPARCstation 20-61
-with 128 MB RAM, a SPARC Storage Array, and Solaris 2.3, whose ratings
-were set at 10.0.  (So a system which scores 520 is 52 times faster than
-this machine.)  Since the numbers are really only useful in a relative
-sense, there's no particular reason to update the base system, so for the
-sake of consistency it's probably best to leave it alone.  George's scores
-are in the file "pgms/index.base"; this file is used to calculate the
-index scores for any particular run.  (A small worked sketch of this
-calculation appears below, after the Graphics Tests section.)
-
-Over the years, various changes have been made to the set of tests in the
-index.  Although there is a desire for a consistent baseline, various tests
-have been determined to be misleading, and have been removed; and a few
-alternatives have been added.  These changes are detailed in the README,
-and should be borne in mind when looking at old scores.
-
-A number of tests are included in the benchmark suite which are not part of
-the index, for various reasons; these tests can of course be run manually.
-See "Tests" above.
-
-
-============================================================================
-
-Graphics Tests
-==============
-
-As of version 5.1, UnixBench now contains some graphics benchmarks.  These
-are intended to give a rough idea of the general graphics performance of
-a system.
-
-The graphics tests are in categories "2d" and "3d", so the index scores
-for these tests are separate from the basic system index.  This seems
-like a sensible division, since the graphics performance of a system
-depends largely on the graphics adaptor.
-
-The tests currently consist of some 2D "x11perf" tests and "ubgears".
-
-* The 2D tests are a selection of the x11perf tests, using the host
-  system's x11perf command (which must be installed and in the search
-  path).  Only a few of the x11perf tests are used, in the interests
-  of completing a test run in a reasonable time; if you want to do
-  detailed diagnosis of an X server or graphics chip, then use x11perf
-  directly.
-
-* The 3D test is "ubgears", a modified version of the familiar "glxgears".
-  This version runs for 5 seconds to "warm up", then performs a timed
-  run and displays the average frames-per-second.
-
-On multi-CPU systems, the graphics tests will only run in single-processing
-mode.  This is because the meaning of running two copies of a test at once
-is dubious; and the test windows tend to overlay each other, meaning that
-the window behind isn't actually doing any work.
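-
-Returning to the index calculation described in "The BYTE Index" above,
-here is a small C sketch that mirrors the arithmetic the Run script
-performs: each test's index is its result divided by the baseline score,
-times 10, and the overall index is the geometric mean of those ratios,
-times 10.  The test names, scores, and baseline values below are invented
-purely for illustration; the real baselines live in "pgms/index.base".
-
-    #include <math.h>
-    #include <stdio.h>
-
-    struct result { const char *name; double score, baseline; };
-
-    int main(void)
-    {
-        /* Made-up example figures -- not George's real numbers. */
-        struct result r[] = {
-            { "Dhrystone 2",     6000000.0, 120000.0 },
-            { "Pipe Throughput",  180000.0,  12500.0 },
-        };
-        int n = sizeof(r) / sizeof(r[0]);
-        double logsum = 0.0;
-
-        for (int i = 0; i < n; i++) {
-            double ratio = r[i].score / r[i].baseline;
-            printf("%-20s %8.1f\n", r[i].name, ratio * 10.0);
-            logsum += log(ratio);     /* accumulate for the geometric mean */
-        }
-        printf("%-20s %8.1f\n", "Index Score", exp(logsum / n) * 10.0);
-        return 0;
-    }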
- - -============================================================================ - -Multiple CPUs -============= - -If your system has multiple CPUs, the default behaviour is to run the selected -tests twice -- once with one copy of each test program running at a time, -and once with N copies, where N is the number of CPUs. (You can override -this with the "-c" option; see "Detailed Usage" above.) This is designed to -allow you to assess: - - - the performance of your system when running a single task - - the performance of your system when running multiple tasks - - the gain from your system's implementation of parallel processing - -The results, however, need to be handled with care. Here are the results -of two runs on a dual-processor system, one in single-processing mode, one -dual-processing: - - Test Single Dual Gain - -------------------- ------ ------ ---- - Dhrystone 2 562.5 1110.3 97% - Double Whetstone 320.0 640.4 100% - Execl Throughput 450.4 880.3 95% - File Copy 1024 759.4 595.9 -22% - File Copy 256 535.8 438.8 -18% - File Copy 4096 1261.8 1043.4 -17% - Pipe Throughput 481.0 979.3 104% - Pipe-based Switching 326.8 1229.0 276% - Process Creation 917.2 1714.1 87% - Shell Scripts (1) 1064.9 1566.3 47% - Shell Scripts (8) 1567.7 1709.9 9% - System Call Overhead 944.2 1445.5 53% - -------------------- ------ ------ ---- - Index Score: 678.2 1026.2 51% - -As expected, the heavily CPU-dependent tasks -- dhrystone, whetstone, -execl, pipe throughput, process creation -- show close to 100% gain when -running 2 copies in parallel. - -The Pipe-based Context Switching test measures context switching overhead -by sending messages back and forth between 2 processes. I don't know why -it shows such a huge gain with 2 copies (ie. 4 processes total) running, -but it seems to be consistent on my system. I think this may be an issue -with the SMP implementation. - -The System Call Overhead shows a lesser gain, presumably because it uses a -lot of CPU time in single-threaded kernel code. The shell scripts test with -8 concurrent processes shows no gain -- because the test itself runs 8 -scripts in parallel, it's already using both CPUs, even when the benchmark -is run in single-stream mode. The same test with one process per copy -shows a real gain. - -The filesystem throughput tests show a loss, instead of a gain, when -multi-processing. That there's no gain is to be expected, since the tests -are presumably constrained by the throughput of the I/O subsystem and the -disk drive itself; the drop in performance is presumably down to the -increased contention for resources, and perhaps greater disk head movement. - -So what tests should you use, how many copies should you run, and how should -you interpret the results? Well, that's up to you, since it depends on -what it is you're trying to measure. - -Implementation --------------- - -The multi-processing mode is implemented at the level of test iterations. -During each iteration of a test, N slave processes are started using fork(). -Each of these slaves executes the test program using fork() and exec(), -reads and stores the entire output, times the run, and prints all the -results to a pipe. The Run script reads the pipes for each of the slaves -in turn to get the results and times. The scores are added, and the times -averaged. - -The result is that each test program has N copies running at once. They -should all finish at around the same time, since they run for constant time. 
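-
-For illustration, here is a minimal C sketch of that slave-process pattern:
-kick off all the copies at once, and in each slave run the command, buffer
-its output, time it, and play everything back through a pipe for the parent
-to read at its leisure.  The "./testprog" command is a hypothetical
-stand-in for a real test binary, and this is only an outline of the
-technique -- the actual implementation is the commandBuffered() and
-executeBenchmark() routines in the Run script.
-
-    #include <stdio.h>
-    #include <stdlib.h>
-    #include <sys/time.h>
-    #include <sys/wait.h>
-    #include <unistd.h>
-
-    #define COPIES 2
-
-    int main(void)
-    {
-        int fds[COPIES];
-        pid_t pids[COPIES];
-
-        /* Kick off all the copies at once. */
-        for (int i = 0; i < COPIES; i++) {
-            int pf[2];
-            if (pipe(pf) < 0)
-                exit(1);
-            if ((pids[i] = fork()) == 0) {
-                /* Slave: time the command and buffer all its output. */
-                close(pf[0]);
-                struct timeval t0, t1;
-                gettimeofday(&t0, NULL);
-                FILE *cmd = popen("./testprog 2>&1", "r");
-                char buf[8192];
-                size_t len = cmd ? fread(buf, 1, sizeof(buf), cmd) : 0;
-                int status = cmd ? pclose(cmd) : -1;
-                gettimeofday(&t1, NULL);
-                double el = (t1.tv_sec - t0.tv_sec)
-                    + (t1.tv_usec - t0.tv_usec) / 1e6;
-                /* Play the output back, plus the time and exit status. */
-                FILE *out = fdopen(pf[1], "w");
-                fwrite(buf, 1, len, out);
-                fprintf(out, "elapsed|%f\nstatus|%d\n", el, status);
-                fclose(out);
-                _exit(0);
-            }
-            close(pf[1]);
-            fds[i] = pf[0];
-        }
-
-        /* Parent: read back each slave's buffered results in turn. */
-        for (int i = 0; i < COPIES; i++) {
-            FILE *in = fdopen(fds[i], "r");
-            char line[256];
-            while (fgets(line, sizeof(line), in))
-                printf("copy %d: %s", i, line);
-            fclose(in);
-            waitpid(pids[i], NULL, 0);
-        }
-        return 0;
-    }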
-
-If a test program itself starts off K multiple processes (as with the shell8
-test), then the effect will be that there are N * K processes running at
-once.  This is probably not very useful for testing multi-CPU performance.
-
-
-============================================================================
-
-The Language Setting
-====================
-
-The $LANG environment variable determines how programs and library
-routines interpret text.  This can have a big impact on the test results.
-
-If $LANG is set to POSIX, or is left unset, text is treated as ASCII; if
-it is set to en_US.UTF-8, for example, then text is treated as being
-encoded in UTF-8, which is more complex and therefore slower.  Setting
-it to other languages can have varying results.
-
-To ensure consistency between test runs, the Run script now (as of version
-5.1.1) sets $LANG to "en_US.utf8".
-
-This setting is configured with the variable "$language".  You
-should not change this if you want to share your results to allow
-comparisons between systems; however, you may want to change it to see
-how different language settings affect performance.
-
-Each test report now includes the language settings in use.  The reported
-language is what is set in $LANG, and is not necessarily supported by the
-system; but we also report the character mapping and collation order which
-are actually in use (as reported by "locale").
-
-
-============================================================================
-
-Interpreting the Results
-========================
-
-Interpreting the results of these tests is tricky, and totally depends on
-what you're trying to measure.
-
-For example, are you trying to measure how fast your CPU is?  Or how good
-your compiler is?  Because these tests are all recompiled using your host
-system's compiler, the performance of the compiler will inevitably impact
-the performance of the tests.  Is this a problem?  If you're choosing a
-system, you probably care about its overall speed, which may well depend
-on how good its compiler is; so including that in the test results may be
-the right answer.  But you may want to ensure that the right compiler is
-used to build the tests.
-
-On the other hand, with the vast majority of Unix systems being x86 / PC
-compatibles, running Linux and the GNU C compiler, the results will tend
-to be more dependent on the hardware; but the versions of the compiler and
-OS can make a big difference.  (I measured a 50% gain between SUSE 10.1
-and OpenSUSE 10.2 on the same machine.)  So you may want to make sure that
-all your test systems are running the same version of the OS; or at least
-publish the OS and compiler versions with your results.  Then again, it may
-be compiler performance that you're interested in.
-
-The C test is very dubious -- it tests the speed of compilation.  If you're
-running the exact same compiler on each system, OK; but otherwise, the
-results should probably be discarded.  A slower compilation doesn't say
-anything about the speed of your system, since the compiler may simply be
-spending more time to super-optimise the code, which would actually make it
-faster.
-
-This will be particularly true on architectures like IA-64 (Itanium etc.)
-where the compiler spends huge amounts of effort scheduling instructions
-to run in parallel, with a resultant significant gain in execution speed.
-
-Some tests are even more dubious in terms of host-dependency -- for example,
-the "dc" test uses the host's version of dc (a calculator program).
-The version of this which is available can make a huge difference to the
-score, which is why it's not in the index group.  Read through the release
-notes for more on these kinds of issues.
-
-Another age-old issue is that of the benchmarks being too trivial to be
-meaningful.  With compilers getting ever smarter, and performing more
-wide-ranging flow path analyses, the danger of parts of the benchmarks
-simply being optimised out of existence is always present.
-
-All in all, the "index" and "gindex" tests (see above) are designed to
-give a reasonable measure of overall system performance; but the results
-of any test run should always be used with care.
-
diff --git a/minix/benchmarks/unixbench-5.1.2/WRITING_TESTS b/minix/benchmarks/unixbench-5.1.2/WRITING_TESTS
deleted file mode 100644
index 28cd968dd..000000000
--- a/minix/benchmarks/unixbench-5.1.2/WRITING_TESTS
+++ /dev/null
@@ -1,133 +0,0 @@
-Writing a Test
-==============
-
-Writing a test program is pretty easy.  Basically, a test is configured via
-a monster array in the Run script, which specifies (among other things) the
-program to execute and the parameters to pass it.
-
-The test itself is simply a program which is given the optional parameters
-on the command line, and produces logging data on stdout and its results on
-stderr.
-
-
-============================================================================
-
-Test Configuration
-==================
-
-In Run, all tests are named in the "$testList" array.  This names the
-individual tests, and also sets up aliases for groups of tests, eg. "index".
-
-The test specifications are in the "$testParams" array.  This contains the
-details of each individual test as a hash.  The fields in the hash are:
-
-  * "logmsg": the full name to display for this test.
-  * "cat": the category this test belongs to; must be configured
-    in $testCats.
-  * "prog": the name of the program to execute; defaults to the name of
-    the benchmark.
-  * "repeat": number of passes to run; either 'short' (the default),
-    'long', or 'single'.  For 'short' and 'long', the actual numbers of
-    passes are given by $shortIterCount and $longIterCount, which are
-    configured at the top of the script or by the "-i" flag.  'single'
-    means just run one pass; this should be used for tests which do their
-    own multi-pass handling internally.
-  * "stdout": non-0 to add the test's stdout to the log file; defaults to 1.
-    Set to 0 for tests that are too wordy.
-  * "stdin": name of a file to send to the program's stdin; default null.
-  * "options": options to be put on the program's command line; default null.
-
-
-============================================================================
-
-Output Format
-=============
-
-The results on stderr take the form of a line header and fields, separated
-by "|" characters.  A result line can be one of:
-
-    COUNT|score|timebase|label
-    TIME|seconds
-    ERROR|message
-
-Any other text on stderr is treated as if it were:
-
-    ERROR|text
-
-Any output to stdout is placed in a log file, and can be used for debugging.
-
-COUNT
------
-
-The COUNT line is the line used to report a test score.
-
-  * "score" is the result, typically the number of loops performed during
-    the run
-  * "timebase" is the time base used for the final report to the user.  A
-    value of 1 reports the score as is; a value of 60, for example, divides
-    the time taken by 60 to get loops per minute.  A timebase of zero
-    indicates that the score is already a rate, ie. a count of things per
-    second.
- * "label" is the label to use for the score; like "lps" (loops per - second), etc. - -TIME ----- - -The TIME line is optionally used to report the time taken. The Run script -normally measures this, but if your test has signifant overhead outside the -actual test loop, you should use TIME to report the time taken for the actual -test. The argument is the time in seconds in floating-point. - -ERROR ------ - -The argument is an error message; this will abort the benchmarking run and -display the message. - -Any output to stderr which is not a formatted line will be treated as an -error message, so use of ERROR is optional. - - -============================================================================ - -Test Examples -============= - -Iteration Count ---------------- - -The simplest thing is to count the number of loops executed in a given time; -see eg. arith.c. The utlilty functions in timeit.c can be used to implement -the fixed time interval, which is generally passed in on the command line. - -The result is reported simply as the number of iterations completed: - - fprintf(stderr,"COUNT|%lu|1|lps\n", iterations); - -The bnenchmark framework will measure the time taken itself. If the test -code has significant overhead (eg. a "pump-priming" pass), then you should -explicitly report the time taken for the test by adding a line like this: - - fprintf(stderr, "TIME|%.1f\n", seconds); - -If you want results reported as loops per minute, then set timebase to 60: - - fprintf(stderr,"COUNT|%lu|60|lpm\n", iterations); - -Note that this only affects the final report; all times passed to or -from the test are still in seconds. - -Rate ----- - -The other technique is to calculate the rate (things per second) in the test, -and report that directly. To do this, just set timebase to 0: - - fprintf(stderr, "COUNT|%ld|0|KBps\n", kbytes_per_sec); - -Again, you can use TIME to explicitly report the time taken: - - fprintf(stderr, "TIME|%.1f\n", end - start); - -but this isn't so important since you've already calculated the rate. 
-
diff --git a/minix/benchmarks/unixbench-5.1.2/pgms/Makefile b/minix/benchmarks/unixbench-5.1.2/pgms/Makefile
deleted file mode 100644
index ba0a87424..000000000
--- a/minix/benchmarks/unixbench-5.1.2/pgms/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-
-SUBDIR=arithoh register short int long float double whetstone-double hanoi \
-	poll select fstime syscall context1 pipe spawn execl dhry2 \
-	dhry2reg looper multi.sh tst.sh unixbench.logo index.base # ubgears poll2
-
-.include <bsd.subdir.mk>
diff --git a/minix/benchmarks/unixbench-5.1.2/pgms/Makefile.inc b/minix/benchmarks/unixbench-5.1.2/pgms/Makefile.inc
deleted file mode 100644
index 52130c89e..000000000
--- a/minix/benchmarks/unixbench-5.1.2/pgms/Makefile.inc
+++ /dev/null
@@ -1,5 +0,0 @@
-CPPFLAGS+=-DTIME -DMINIX=1 -I../../src/
-HZ=60
-BINDIR=/usr/benchmarks/unixbench/pgms
-
-.PATH: ${.CURDIR}/../../src
diff --git a/minix/benchmarks/unixbench-5.1.2/pgms/arithoh/Makefile b/minix/benchmarks/unixbench-5.1.2/pgms/arithoh/Makefile
deleted file mode 100644
index b22e4eb4c..000000000
--- a/minix/benchmarks/unixbench-5.1.2/pgms/arithoh/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-
-PROG=arithoh
-SRCS=arith.c
-CPPFLAGS+=-Darithoh
-MAN=
-
-.include <bsd.prog.mk>
diff --git a/minix/benchmarks/unixbench-5.1.2/pgms/context1/Makefile b/minix/benchmarks/unixbench-5.1.2/pgms/context1/Makefile
deleted file mode 100644
index ab0ba819e..000000000
--- a/minix/benchmarks/unixbench-5.1.2/pgms/context1/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-
-PROG=context1
-MAN=
-
-.include <bsd.prog.mk>
diff --git a/minix/benchmarks/unixbench-5.1.2/pgms/dhry2/Makefile b/minix/benchmarks/unixbench-5.1.2/pgms/dhry2/Makefile
deleted file mode 100644
index b4fda6ad3..000000000
--- a/minix/benchmarks/unixbench-5.1.2/pgms/dhry2/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-
-PROG=dhry2
-SRCS=dhry_1.c dhry_2.c
-CPPFLAGS+=-DHZ=${HZ}
-MAN=
-
-.include <bsd.prog.mk>
diff --git a/minix/benchmarks/unixbench-5.1.2/pgms/dhry2reg/Makefile b/minix/benchmarks/unixbench-5.1.2/pgms/dhry2reg/Makefile
deleted file mode 100644
index 818fecfc2..000000000
--- a/minix/benchmarks/unixbench-5.1.2/pgms/dhry2reg/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-
-PROG=dhry2reg
-SRCS=dhry_1.c dhry_2.c
-CPPFLAGS+=-DHZ=${HZ} -DREG=register
-MAN=
-
-.include <bsd.prog.mk>
diff --git a/minix/benchmarks/unixbench-5.1.2/pgms/double/Makefile b/minix/benchmarks/unixbench-5.1.2/pgms/double/Makefile
deleted file mode 100644
index a2b1ce7fd..000000000
--- a/minix/benchmarks/unixbench-5.1.2/pgms/double/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-
-PROG=double
-SRCS=arith.c
-CPPFLAGS=-Ddatum='double'
-MAN=
-
-.include <bsd.prog.mk>
diff --git a/minix/benchmarks/unixbench-5.1.2/pgms/execl/Makefile b/minix/benchmarks/unixbench-5.1.2/pgms/execl/Makefile
deleted file mode 100644
index fc3f7e0d3..000000000
--- a/minix/benchmarks/unixbench-5.1.2/pgms/execl/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-
-PROG=execl
-MAN=
-
-.include <bsd.prog.mk>
diff --git a/minix/benchmarks/unixbench-5.1.2/pgms/float/Makefile b/minix/benchmarks/unixbench-5.1.2/pgms/float/Makefile
deleted file mode 100644
index 1194e4cec..000000000
--- a/minix/benchmarks/unixbench-5.1.2/pgms/float/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-
-PROG=float
-SRCS=arith.c
-CPPFLAGS=-Ddatum='float'
-MAN=
-
-.include <bsd.prog.mk>
diff --git a/minix/benchmarks/unixbench-5.1.2/pgms/fstime/Makefile b/minix/benchmarks/unixbench-5.1.2/pgms/fstime/Makefile
deleted file mode 100644
index 9500a6038..000000000
--- a/minix/benchmarks/unixbench-5.1.2/pgms/fstime/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-
-PROG=fstime
-MAN=
-
-.include <bsd.prog.mk>
diff --git a/minix/benchmarks/unixbench-5.1.2/pgms/gfx-x11
b/minix/benchmarks/unixbench-5.1.2/pgms/gfx-x11 deleted file mode 100755 index ef8a69122..000000000 --- a/minix/benchmarks/unixbench-5.1.2/pgms/gfx-x11 +++ /dev/null @@ -1,476 +0,0 @@ -#!/usr/bin/perl -w - -use strict; - - -############################################################################ -# gfx-x11: a front-end for x11perf. Runs a selected x11perf test, and -# produces output in the format needed by UnixBench. -############################################################################ -# Modification Log: -# 2007.09.26 Ian Smith Created -############################################################################ - -# This program runs sets of x11perf tests, indexes the results against -# a common base reference system (see $testData below), and reports the -# final score. -# -# Usage: -# gfx-x11