Update ref stats: ll/sc, cpu_id, new kernel (?)

--HG--
extra : convert_revision : 060cb7319c4474429917a6347a9a47f390208ec8
Steve Reinhardt 2006-10-08 17:07:23 -04:00
parent d3fba5aa30
commit 911381321b
66 changed files with 864 additions and 781 deletions
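The updated reference configs below record an explicit cpu_id for every CPU (0 for the single-CPU tests, 0 and 1 for the dual-CPU full-system run) where the old references showed the default of -1. As a rough illustration only -- this snippet is not part of the changeset and assumes the ordinary M5 Python config environment -- the new numbering corresponds to constructing CPUs with explicit ids in a config script:

    # Hypothetical sketch, not taken from this changeset: number the CPUs
    # explicitly so per-CPU stats (and, per the commit message, LL/SC tracking)
    # use 0, 1, ... rather than the old default cpu_id of -1.
    # Assumes M5's Python config API (m5.objects).
    from m5.objects import AtomicSimpleCPU

    cpus = [AtomicSimpleCPU(cpu_id=i) for i in range(2)]  # cpu0 -> 0, cpu1 -> 1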


@@ -8,10 +8,10 @@ global.BPredUnit.condIncorrect 420 # Nu
 global.BPredUnit.condPredicted 1302 # Number of conditional branches predicted
 global.BPredUnit.lookups 2254 # Number of BP lookups
 global.BPredUnit.usedRAS 291 # Number of times the RAS was used to get a target.
-host_inst_rate 47059 # Simulator instruction rate (inst/s)
+host_inst_rate 1748 # Simulator instruction rate (inst/s)
-host_mem_usage 160380 # Number of bytes of host memory used
+host_mem_usage 160364 # Number of bytes of host memory used
-host_seconds 0.12 # Real time elapsed on the host
+host_seconds 3.22 # Real time elapsed on the host
-host_tick_rate 57322 # Simulator tick rate (ticks/s)
+host_tick_rate 2135 # Simulator tick rate (ticks/s)
 memdepunit.memDep.conflictingLoads 12 # Number of conflicting loads.
 memdepunit.memDep.conflictingStores 259 # Number of conflicting stores.
 memdepunit.memDep.insertedLoads 2049 # Number of loads inserted to the mem dependence unit.


@@ -6,8 +6,8 @@ The Regents of The University of Michigan
 All Rights Reserved
-M5 compiled Oct 7 2006 12:38:12
+M5 compiled Oct 8 2006 14:00:39
-M5 started Sat Oct 7 12:38:34 2006
+M5 started Sun Oct 8 14:00:45 2006
 M5 executing on zizzer.eecs.umich.edu
 command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/linux/o3-timing tests/run.py quick/00.hello/alpha/linux/o3-timing
 Exiting @ tick 6868 because target called exit()


@@ -56,6 +56,7 @@ physmem=system.physmem
 type=AtomicSimpleCPU
 children=workload
 clock=1
+cpu_id=0
 defer_registration=false
 function_trace=false
 function_trace_start=0


@@ -44,6 +44,7 @@ max_loads_all_threads=0
 progress_interval=0
 mem=system.physmem
 system=system
+cpu_id=0
 workload=system.cpu.workload
 clock=1
 defer_registration=false


@@ -1,9 +1,9 @@
 ---------- Begin Simulation Statistics ----------
-host_inst_rate 74000 # Simulator instruction rate (inst/s)
+host_inst_rate 172802 # Simulator instruction rate (inst/s)
-host_mem_usage 148088 # Number of bytes of host memory used
+host_mem_usage 148116 # Number of bytes of host memory used
-host_seconds 0.08 # Real time elapsed on the host
+host_seconds 0.03 # Real time elapsed on the host
-host_tick_rate 73591 # Simulator tick rate (ticks/s)
+host_tick_rate 170614 # Simulator tick rate (ticks/s)
 sim_freq 1000000000000 # Frequency of simulated ticks
 sim_insts 5642 # Number of instructions simulated
 sim_seconds 0.000000 # Number of seconds simulated


@@ -6,8 +6,8 @@ The Regents of The University of Michigan
 All Rights Reserved
-M5 compiled Oct 7 2006 11:12:49
+M5 compiled Oct 8 2006 14:00:39
-M5 started Sat Oct 7 11:13:02 2006
+M5 started Sun Oct 8 14:00:50 2006
 M5 executing on zizzer.eecs.umich.edu
 command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/linux/simple-atomic tests/run.py quick/00.hello/alpha/linux/simple-atomic
 Exiting @ tick 5641 because target called exit()


@@ -56,6 +56,7 @@ physmem=system.physmem
 type=TimingSimpleCPU
 children=dcache icache l2cache toL2Bus workload
 clock=1
+cpu_id=0
 defer_registration=false
 function_trace=false
 function_trace_start=0


@@ -83,6 +83,7 @@ max_loads_all_threads=0
 progress_interval=0
 mem=system.cpu.dcache
 system=system
+cpu_id=0
 workload=system.cpu.workload
 clock=1
 defer_registration=false


@@ -1,9 +1,9 @@
 ---------- Begin Simulation Statistics ----------
-host_inst_rate 286207 # Simulator instruction rate (inst/s)
+host_inst_rate 98835 # Simulator instruction rate (inst/s)
-host_mem_usage 159648 # Number of bytes of host memory used
+host_mem_usage 159632 # Number of bytes of host memory used
-host_seconds 0.02 # Real time elapsed on the host
+host_seconds 0.06 # Real time elapsed on the host
-host_tick_rate 413300 # Simulator tick rate (ticks/s)
+host_tick_rate 144603 # Simulator tick rate (ticks/s)
 sim_freq 1000000000000 # Frequency of simulated ticks
 sim_insts 5642 # Number of instructions simulated
 sim_seconds 0.000000 # Number of seconds simulated


@@ -6,8 +6,8 @@ The Regents of The University of Michigan
 All Rights Reserved
-M5 compiled Oct 7 2006 12:38:12
+M5 compiled Oct 8 2006 14:00:39
-M5 started Sat Oct 7 12:38:38 2006
+M5 started Sun Oct 8 14:00:50 2006
 M5 executing on zizzer.eecs.umich.edu
 command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/linux/simple-timing tests/run.py quick/00.hello/alpha/linux/simple-timing
 Exiting @ tick 8316 because target called exit()


@@ -8,10 +8,10 @@ global.BPredUnit.condIncorrect 222 # Nu
 global.BPredUnit.condPredicted 441 # Number of conditional branches predicted
 global.BPredUnit.lookups 888 # Number of BP lookups
 global.BPredUnit.usedRAS 160 # Number of times the RAS was used to get a target.
-host_inst_rate 45832 # Simulator instruction rate (inst/s)
+host_inst_rate 26386 # Simulator instruction rate (inst/s)
-host_mem_usage 159900 # Number of bytes of host memory used
+host_mem_usage 159884 # Number of bytes of host memory used
-host_seconds 0.05 # Real time elapsed on the host
+host_seconds 0.09 # Real time elapsed on the host
-host_tick_rate 55090 # Simulator tick rate (ticks/s)
+host_tick_rate 31792 # Simulator tick rate (ticks/s)
 memdepunit.memDep.conflictingLoads 9 # Number of conflicting loads.
 memdepunit.memDep.conflictingStores 7 # Number of conflicting stores.
 memdepunit.memDep.insertedLoads 675 # Number of loads inserted to the mem dependence unit.


@@ -6,8 +6,8 @@ The Regents of The University of Michigan
 All Rights Reserved
-M5 compiled Oct 7 2006 12:38:12
+M5 compiled Oct 8 2006 14:00:39
-M5 started Sat Oct 7 12:38:40 2006
+M5 started Sun Oct 8 14:00:52 2006
 M5 executing on zizzer.eecs.umich.edu
 command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/tru64/o3-timing tests/run.py quick/00.hello/alpha/tru64/o3-timing
 Exiting @ tick 2886 because target called exit()


@@ -56,6 +56,7 @@ physmem=system.physmem
 type=AtomicSimpleCPU
 children=workload
 clock=1
+cpu_id=0
 defer_registration=false
 function_trace=false
 function_trace_start=0


@@ -44,6 +44,7 @@ max_loads_all_threads=0
 progress_interval=0
 mem=system.physmem
 system=system
+cpu_id=0
 workload=system.cpu.workload
 clock=1
 defer_registration=false


@@ -1,9 +1,9 @@
 ---------- Begin Simulation Statistics ----------
-host_inst_rate 548861 # Simulator instruction rate (inst/s)
+host_inst_rate 60702 # Simulator instruction rate (inst/s)
-host_mem_usage 147820 # Number of bytes of host memory used
+host_mem_usage 147692 # Number of bytes of host memory used
-host_seconds 0.01 # Real time elapsed on the host
+host_seconds 0.04 # Real time elapsed on the host
-host_tick_rate 504404 # Simulator tick rate (ticks/s)
+host_tick_rate 60102 # Simulator tick rate (ticks/s)
 sim_freq 1000000000000 # Frequency of simulated ticks
 sim_insts 2578 # Number of instructions simulated
 sim_seconds 0.000000 # Number of seconds simulated


@@ -6,8 +6,8 @@ The Regents of The University of Michigan
 All Rights Reserved
-M5 compiled Oct 7 2006 11:12:49
+M5 compiled Oct 8 2006 14:00:39
-M5 started Sat Oct 7 11:13:09 2006
+M5 started Sun Oct 8 14:00:54 2006
 M5 executing on zizzer.eecs.umich.edu
 command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/tru64/simple-atomic tests/run.py quick/00.hello/alpha/tru64/simple-atomic
 Exiting @ tick 2577 because target called exit()


@@ -56,6 +56,7 @@ physmem=system.physmem
 type=TimingSimpleCPU
 children=dcache icache l2cache toL2Bus workload
 clock=1
+cpu_id=0
 defer_registration=false
 function_trace=false
 function_trace_start=0


@@ -83,6 +83,7 @@ max_loads_all_threads=0
 progress_interval=0
 mem=system.cpu.dcache
 system=system
+cpu_id=0
 workload=system.cpu.workload
 clock=1
 defer_registration=false


@@ -1,9 +1,9 @@
 ---------- Begin Simulation Statistics ----------
-host_inst_rate 196989 # Simulator instruction rate (inst/s)
+host_inst_rate 69262 # Simulator instruction rate (inst/s)
-host_mem_usage 159172 # Number of bytes of host memory used
+host_mem_usage 159156 # Number of bytes of host memory used
-host_seconds 0.01 # Real time elapsed on the host
+host_seconds 0.04 # Real time elapsed on the host
-host_tick_rate 279840 # Simulator tick rate (ticks/s)
+host_tick_rate 100319 # Simulator tick rate (ticks/s)
 sim_freq 1000000000000 # Frequency of simulated ticks
 sim_insts 2578 # Number of instructions simulated
 sim_seconds 0.000000 # Number of seconds simulated


@@ -6,8 +6,8 @@ The Regents of The University of Michigan
 All Rights Reserved
-M5 compiled Oct 7 2006 12:38:12
+M5 compiled Oct 8 2006 14:00:39
-M5 started Sat Oct 7 12:38:45 2006
+M5 started Sun Oct 8 14:00:54 2006
 M5 executing on zizzer.eecs.umich.edu
 command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/tru64/simple-timing tests/run.py quick/00.hello/alpha/tru64/simple-timing
 Exiting @ tick 3777 because target called exit()


@@ -56,6 +56,7 @@ physmem=system.physmem
 type=AtomicSimpleCPU
 children=workload
 clock=1
+cpu_id=0
 defer_registration=false
 function_trace=false
 function_trace_start=0
@@ -64,6 +65,7 @@ max_insts_any_thread=0
 max_loads_all_threads=0
 max_loads_any_thread=0
 mem=system.physmem
+progress_interval=0
 simulate_stalls=false
 system=system
 width=1
@@ -74,11 +76,17 @@ icache_port=system.membus.port[1]
 [system.cpu.workload]
 type=LiveProcess
 cmd=hello
+egid=100
 env=
+euid=100
 executable=tests/test-progs/hello/bin/mips/linux/hello
+gid=100
 input=cin
 output=cout
+pid=100
+ppid=99
 system=system
+uid=100
 [system.membus]
 type=Bus
@@ -94,6 +102,7 @@ port=system.membus.port[0]
 [trace]
 bufsize=0
+cycle=0
 dump_on_exit=false
 file=cout
 flags=


@@ -28,6 +28,12 @@ input=cin
 output=cout
 env=
 system=system
+uid=100
+euid=100
+gid=100
+egid=100
+pid=100
+ppid=99
 [system.cpu]
 type=AtomicSimpleCPU
@@ -35,8 +41,10 @@ max_insts_any_thread=0
 max_insts_all_threads=0
 max_loads_any_thread=0
 max_loads_all_threads=0
+progress_interval=0
 mem=system.physmem
 system=system
+cpu_id=0
 workload=system.cpu.workload
 clock=1
 defer_registration=false
@@ -48,6 +56,7 @@ simulate_stalls=false
 [trace]
 flags=
 start=0
+cycle=0
 bufsize=0
 file=cout
 dump_on_exit=false
@@ -91,3 +100,6 @@ trace_system=client
 [debug]
 break_cycles=
+[statsreset]
+reset_cycle=0


@@ -1,9 +1,9 @@
 ---------- Begin Simulation Statistics ----------
-host_inst_rate 90956 # Simulator instruction rate (inst/s)
+host_inst_rate 2733 # Simulator instruction rate (inst/s)
-host_mem_usage 147380 # Number of bytes of host memory used
+host_mem_usage 147536 # Number of bytes of host memory used
-host_seconds 0.06 # Real time elapsed on the host
+host_seconds 2.07 # Real time elapsed on the host
-host_tick_rate 90353 # Simulator tick rate (ticks/s)
+host_tick_rate 2732 # Simulator tick rate (ticks/s)
 sim_freq 1000000000000 # Frequency of simulated ticks
 sim_insts 5657 # Number of instructions simulated
 sim_seconds 0.000000 # Number of seconds simulated


@@ -6,8 +6,8 @@ The Regents of The University of Michigan
 All Rights Reserved
-M5 compiled Sep 5 2006 15:37:09
+M5 compiled Oct 8 2006 14:15:37
-M5 started Tue Sep 5 15:46:32 2006
+M5 started Sun Oct 8 14:15:41 2006
 M5 executing on zizzer.eecs.umich.edu
 command line: build/MIPS_SE/m5.opt -d build/MIPS_SE/tests/opt/quick/00.hello/mips/linux/simple-atomic tests/run.py quick/00.hello/mips/linux/simple-atomic
 Exiting @ tick 5656 because target called exit()


@@ -56,6 +56,7 @@ physmem=system.physmem
 type=TimingSimpleCPU
 children=dcache icache l2cache toL2Bus workload
 clock=1
+cpu_id=0
 defer_registration=false
 function_trace=false
 function_trace_start=0
@@ -64,6 +65,7 @@ max_insts_any_thread=0
 max_loads_all_threads=0
 max_loads_any_thread=0
 mem=system.cpu.dcache
+progress_interval=0
 system=system
 workload=system.cpu.workload
 dcache_port=system.cpu.dcache.cpu_side
@@ -197,11 +199,17 @@ port=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.l2cache.cp
 [system.cpu.workload]
 type=LiveProcess
 cmd=hello
+egid=100
 env=
+euid=100
 executable=tests/test-progs/hello/bin/mips/linux/hello
+gid=100
 input=cin
 output=cout
+pid=100
+ppid=99
 system=system
+uid=100
 [system.membus]
 type=Bus
@@ -217,6 +225,7 @@ port=system.membus.port[0]
 [trace]
 bufsize=0
+cycle=0
 dump_on_exit=false
 file=cout
 flags=


@@ -67,6 +67,12 @@ input=cin
 output=cout
 env=
 system=system
+uid=100
+euid=100
+gid=100
+egid=100
+pid=100
+ppid=99
 [system.cpu]
 type=TimingSimpleCPU
@@ -74,8 +80,10 @@ max_insts_any_thread=0
 max_insts_all_threads=0
 max_loads_any_thread=0
 max_loads_all_threads=0
+progress_interval=0
 mem=system.cpu.dcache
 system=system
+cpu_id=0
 workload=system.cpu.workload
 clock=1
 defer_registration=false
@@ -169,6 +177,7 @@ hit_latency=1
 [trace]
 flags=
 start=0
+cycle=0
 bufsize=0
 file=cout
 dump_on_exit=false
@@ -212,3 +221,6 @@ trace_system=client
 [debug]
 break_cycles=
+[statsreset]
+reset_cycle=0


@@ -1,9 +1,9 @@
 ---------- Begin Simulation Statistics ----------
-host_inst_rate 273933 # Simulator instruction rate (inst/s)
+host_inst_rate 116093 # Simulator instruction rate (inst/s)
-host_mem_usage 159012 # Number of bytes of host memory used
+host_mem_usage 158992 # Number of bytes of host memory used
-host_seconds 0.02 # Real time elapsed on the host
+host_seconds 0.05 # Real time elapsed on the host
-host_tick_rate 403699 # Simulator tick rate (ticks/s)
+host_tick_rate 174583 # Simulator tick rate (ticks/s)
 sim_freq 1000000000000 # Frequency of simulated ticks
 sim_insts 5657 # Number of instructions simulated
 sim_seconds 0.000000 # Number of seconds simulated


@@ -6,8 +6,8 @@ The Regents of The University of Michigan
 All Rights Reserved
-M5 compiled Oct 7 2006 12:52:26
+M5 compiled Oct 8 2006 14:15:37
-M5 started Sat Oct 7 12:52:42 2006
+M5 started Sun Oct 8 14:15:43 2006
 M5 executing on zizzer.eecs.umich.edu
 command line: build/MIPS_SE/m5.opt -d build/MIPS_SE/tests/opt/quick/00.hello/mips/linux/simple-timing tests/run.py quick/00.hello/mips/linux/simple-timing
 Exiting @ tick 8579 because target called exit()


@@ -56,6 +56,7 @@ physmem=system.physmem
 type=AtomicSimpleCPU
 children=workload
 clock=1
+cpu_id=0
 defer_registration=false
 function_trace=false
 function_trace_start=0
@@ -64,6 +65,7 @@ max_insts_any_thread=0
 max_loads_all_threads=0
 max_loads_any_thread=0
 mem=system.physmem
+progress_interval=0
 simulate_stalls=false
 system=system
 width=1
@@ -74,11 +76,17 @@ icache_port=system.membus.port[1]
 [system.cpu.workload]
 type=LiveProcess
 cmd=hello
+egid=100
 env=
+euid=100
 executable=tests/test-progs/hello/bin/sparc/linux/hello
+gid=100
 input=cin
 output=cout
+pid=100
+ppid=99
 system=system
+uid=100
 [system.membus]
 type=Bus
@@ -94,6 +102,7 @@ port=system.membus.port[0]
 [trace]
 bufsize=0
+cycle=0
 dump_on_exit=false
 file=cout
 flags=


@@ -28,6 +28,12 @@ input=cin
 output=cout
 env=
 system=system
+uid=100
+euid=100
+gid=100
+egid=100
+pid=100
+ppid=99
 [system.cpu]
 type=AtomicSimpleCPU
@@ -35,8 +41,10 @@ max_insts_any_thread=0
 max_insts_all_threads=0
 max_loads_any_thread=0
 max_loads_all_threads=0
+progress_interval=0
 mem=system.physmem
 system=system
+cpu_id=0
 workload=system.cpu.workload
 clock=1
 defer_registration=false
@@ -48,6 +56,7 @@ simulate_stalls=false
 [trace]
 flags=
 start=0
+cycle=0
 bufsize=0
 file=cout
 dump_on_exit=false
@@ -91,3 +100,6 @@ trace_system=client
 [debug]
 break_cycles=
+[statsreset]
+reset_cycle=0


@@ -1,9 +1,9 @@
 ---------- Begin Simulation Statistics ----------
-host_inst_rate 61348 # Simulator instruction rate (inst/s)
+host_inst_rate 2175 # Simulator instruction rate (inst/s)
-host_mem_usage 147288 # Number of bytes of host memory used
+host_mem_usage 147292 # Number of bytes of host memory used
-host_seconds 0.07 # Real time elapsed on the host
+host_seconds 2.06 # Real time elapsed on the host
-host_tick_rate 60991 # Simulator tick rate (ticks/s)
+host_tick_rate 2174 # Simulator tick rate (ticks/s)
 sim_freq 1000000000000 # Frequency of simulated ticks
 sim_insts 4483 # Number of instructions simulated
 sim_seconds 0.000000 # Number of seconds simulated


@@ -5,8 +5,8 @@ The Regents of The University of Michigan
 All Rights Reserved
-M5 compiled Sep 5 2006 15:39:50
+M5 compiled Oct 8 2006 14:19:59
-M5 started Tue Sep 5 15:49:24 2006
+M5 started Sun Oct 8 14:20:03 2006
 M5 executing on zizzer.eecs.umich.edu
 command line: build/SPARC_SE/m5.opt -d build/SPARC_SE/tests/opt/quick/00.hello/sparc/linux/simple-atomic tests/run.py quick/00.hello/sparc/linux/simple-atomic
 Exiting @ tick 4482 because target called exit()


@@ -8,10 +8,10 @@ global.BPredUnit.condIncorrect 1081 # Nu
 global.BPredUnit.condPredicted 2449 # Number of conditional branches predicted
 global.BPredUnit.lookups 4173 # Number of BP lookups
 global.BPredUnit.usedRAS 551 # Number of times the RAS was used to get a target.
-host_inst_rate 50082 # Simulator instruction rate (inst/s)
+host_inst_rate 40630 # Simulator instruction rate (inst/s)
-host_mem_usage 161260 # Number of bytes of host memory used
+host_mem_usage 161244 # Number of bytes of host memory used
-host_seconds 0.22 # Real time elapsed on the host
+host_seconds 0.28 # Real time elapsed on the host
-host_tick_rate 37535 # Simulator tick rate (ticks/s)
+host_tick_rate 30458 # Simulator tick rate (ticks/s)
 memdepunit.memDep.conflictingLoads 41 # Number of conflicting loads.
 memdepunit.memDep.conflictingLoads 39 # Number of conflicting loads.
 memdepunit.memDep.conflictingStores 194 # Number of conflicting stores.
@@ -115,7 +115,7 @@ system.cpu.dcache.WriteReq_mshr_miss_rate 0.088670 # m
 system.cpu.dcache.WriteReq_mshr_miss_rate_0 0.088670 # mshr miss rate for WriteReq accesses
 system.cpu.dcache.WriteReq_mshr_misses 144 # number of WriteReq MSHR misses
 system.cpu.dcache.WriteReq_mshr_misses_0 144 # number of WriteReq MSHR misses
-system.cpu.dcache.avg_blocked_cycles_no_mshrs no value # average number of cycles each access was blocked
+system.cpu.dcache.avg_blocked_cycles_no_mshrs <err: div-0> # average number of cycles each access was blocked
 system.cpu.dcache.avg_blocked_cycles_no_targets 1 # average number of cycles each access was blocked
 system.cpu.dcache.avg_refs 11.670554 # Average number of references to valid blocks.
 system.cpu.dcache.blocked_no_mshrs 0 # number of cycles access was blocked
@@ -610,7 +610,7 @@ system.cpu.l2cache.demand_avg_miss_latency_0 2.059561
 system.cpu.l2cache.demand_avg_miss_latency_1 <err: div-0> # average overall miss latency
 system.cpu.l2cache.demand_avg_mshr_miss_latency 1 # average overall mshr miss latency
 system.cpu.l2cache.demand_avg_mshr_miss_latency_0 1 # average overall mshr miss latency
-system.cpu.l2cache.demand_avg_mshr_miss_latency_1 <err: div-0> # average overall mshr miss latency
+system.cpu.l2cache.demand_avg_mshr_miss_latency_1 no value # average overall mshr miss latency
 system.cpu.l2cache.demand_hits 9 # number of demand (read+write) hits
 system.cpu.l2cache.demand_hits_0 9 # number of demand (read+write) hits
 system.cpu.l2cache.demand_hits_1 0 # number of demand (read+write) hits
@@ -619,7 +619,7 @@ system.cpu.l2cache.demand_miss_latency_0 1971 # nu
 system.cpu.l2cache.demand_miss_latency_1 0 # number of demand (read+write) miss cycles
 system.cpu.l2cache.demand_miss_rate 0.990683 # miss rate for demand accesses
 system.cpu.l2cache.demand_miss_rate_0 0.990683 # miss rate for demand accesses
-system.cpu.l2cache.demand_miss_rate_1 no value # miss rate for demand accesses
+system.cpu.l2cache.demand_miss_rate_1 <err: div-0> # miss rate for demand accesses
 system.cpu.l2cache.demand_misses 957 # number of demand (read+write) misses
 system.cpu.l2cache.demand_misses_0 957 # number of demand (read+write) misses
 system.cpu.l2cache.demand_misses_1 0 # number of demand (read+write) misses
@@ -631,7 +631,7 @@ system.cpu.l2cache.demand_mshr_miss_latency_0 957
 system.cpu.l2cache.demand_mshr_miss_latency_1 0 # number of demand (read+write) MSHR miss cycles
 system.cpu.l2cache.demand_mshr_miss_rate 0.990683 # mshr miss rate for demand accesses
 system.cpu.l2cache.demand_mshr_miss_rate_0 0.990683 # mshr miss rate for demand accesses
-system.cpu.l2cache.demand_mshr_miss_rate_1 no value # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_miss_rate_1 <err: div-0> # mshr miss rate for demand accesses
 system.cpu.l2cache.demand_mshr_misses 957 # number of demand (read+write) MSHR misses
 system.cpu.l2cache.demand_mshr_misses_0 957 # number of demand (read+write) MSHR misses
 system.cpu.l2cache.demand_mshr_misses_1 0 # number of demand (read+write) MSHR misses


@@ -7,8 +7,8 @@ The Regents of The University of Michigan
 All Rights Reserved
-M5 compiled Oct 7 2006 12:38:12
+M5 compiled Oct 8 2006 14:00:39
-M5 started Sat Oct 7 12:38:47 2006
+M5 started Sun Oct 8 14:00:56 2006
 M5 executing on zizzer.eecs.umich.edu
 command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/01.hello-2T-smt/alpha/linux/o3-timing tests/run.py quick/01.hello-2T-smt/alpha/linux/o3-timing
 Exiting @ tick 8441 because target called exit()


@@ -75,7 +75,7 @@ side_b=system.membus.port[0]
 type=AtomicSimpleCPU
 children=dtb itb
 clock=1
-cpu_id=-1
+cpu_id=0
 defer_registration=false
 dtb=system.cpu0.dtb
 function_trace=false
@@ -106,7 +106,7 @@ size=48
 type=AtomicSimpleCPU
 children=dtb itb
 clock=1
-cpu_id=-1
+cpu_id=1
 defer_registration=false
 dtb=system.cpu1.dtb
 function_trace=false


@@ -90,9 +90,9 @@ max_loads_all_threads=0
 progress_interval=0
 mem=system.physmem
 system=system
+cpu_id=0
 itb=system.cpu0.itb
 dtb=system.cpu0.dtb
-cpu_id=-1
 profile=0
 clock=1
 defer_registration=false
@@ -118,9 +118,9 @@ max_loads_all_threads=0
 progress_interval=0
 mem=system.physmem
 system=system
+cpu_id=1
 itb=system.cpu1.itb
 dtb=system.cpu1.dtb
-cpu_id=-1
 profile=0
 clock=1
 defer_registration=false


@@ -3,7 +3,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
 memsize 8000000 pages 4000
 First free page after ROM 0xFFFFFC0000018000
 HWRPB 0xFFFFFC0000018000 l1pt 0xFFFFFC0000040000 l2pt 0xFFFFFC0000042000 l3pt_rpb 0xFFFFFC0000044000 l3pt_kernel 0xFFFFFC0000048000 l2reserv 0xFFFFFC0000046000
-kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC00008064E8, kentry = 0xFFFFFC0000310000, numCPUs = 0x2
+kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC0000855718, kentry = 0xFFFFFC0000310000, numCPUs = 0x2
 CPU Clock at 2000 MHz IntrClockFrequency=1024
 Booting with 2 processor(s)
 KSP: 0x20043FE8 PTBR 0x20
@@ -16,29 +16,27 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
 Bootstraping CPU 1 with sp=0xFFFFFC0000076000
 unix_boot_mem ends at FFFFFC0000078000
 k_argc = 0
-jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1028)
+jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1067)
-CallbackFixup 0 18000, t7=FFFFFC0000700000
+CallbackFixup 0 18000, t7=FFFFFC000070C000
 Entering slaveloop for cpu 1 my_rpb=FFFFFC0000018400
-Linux version 2.6.8.1 (binkertn@ziff.eecs.umich.edu) (gcc version 3.4.3) #36 SMP Mon May 2 19:50:53 EDT 2005
+Linux version 2.6.13 (hsul@zed.eecs.umich.edu) (gcc version 3.4.3) #7 SMP Tue Aug 15 14:40:31 EDT 2006
 Booting GENERIC on Tsunami variation DP264 using machine vector DP264 from SRM
 Major Options: SMP LEGACY_START VERBOSE_MCHECK
 Command line: root=/dev/hda1 console=ttyS0
 memcluster 0, usage 1, start 0, end 392
 memcluster 1, usage 0, start 392, end 16384
-freeing pages 1030:16384
+freeing pages 1069:16384
-reserving pages 1030:1031
+reserving pages 1069:1070
 SMP: 2 CPUs probed -- cpu_present_mask = 3
 Built 1 zonelists
 Kernel command line: root=/dev/hda1 console=ttyS0
-PID hash table entries: 1024 (order 10: 16384 bytes)
+PID hash table entries: 1024 (order: 10, 32768 bytes)
 Using epoch = 1900
 Console: colour dummy device 80x25
 Dentry cache hash table entries: 32768 (order: 5, 262144 bytes)
 Inode-cache hash table entries: 16384 (order: 4, 131072 bytes)
-Memory: 119072k/131072k available (3058k kernel code, 8680k reserved, 695k data, 480k init)
+Memory: 118784k/131072k available (3316k kernel code, 8952k reserved, 983k data, 224k init)
-Mount-cache hash table entries: 512 (order: 0, 8192 bytes)
+Mount-cache hash table entries: 512
-per-CPU timeslice cutoff: 374.49 usecs.
-task migration cache decay timeout: 0 msecs.
 SMP starting up secondaries.
 Slave CPU 1 console command START
 SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb FFFFFC0000018400 my_rpb_phys 18400
@@ -53,16 +51,23 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb
 Initializing Cryptographic API
 rtc: Standard PC (1900) epoch (1900) detected
 Real Time Clock Driver v1.12
-Serial: 8250/16550 driver $Revision: 1.90 $ 5 ports, IRQ sharing disabled
+Serial: 8250/16550 driver $Revision: 1.90 $ 1 ports, IRQ sharing disabled
 ttyS0 at I/O 0x3f8 (irq = 4) is a 8250
+io scheduler noop registered
+io scheduler anticipatory registered
+io scheduler deadline registered
+io scheduler cfq registered
 loop: loaded (max 8 devices)
-Using anticipatory io scheduler
 nbd: registered device at major 43
 sinic.c: M5 Simple Integrated NIC driver
 ns83820.c: National Semiconductor DP83820 10/100/1000 driver.
+ns83820: irq bound to CPU 1
 eth0: ns83820.c: 0x22c: 00000000, subsystem: 0000:0000
 eth0: enabling optical transceiver
-eth0: ns83820 v0.20: DP83820 v1.3: 00:90:00:00:00:01 io=0x09000000 irq=30 f=sg
+eth0: using 64 bit addressing.
+eth0: ns83820 v0.22: DP83820 v1.3: 00:90:00:00:00:01 io=0x09000000 irq=30 f=h,sg
+tun: Universal TUN/TAP device driver, 1.6
+tun: (C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>
 Uniform Multi-Platform E-IDE driver Revision: 7.00alpha2
 ide: Assuming 33MHz system bus speed for PIO modes; override with idebus=xx
 PIIX4: IDE controller at PCI slot 0000:00:00.0
@@ -75,24 +80,23 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb
 ide0 at 0x8410-0x8417,0x8422 on irq 31
 hda: max request size: 128KiB
 hda: 511056 sectors (261 MB), CHS=507/16/63, UDMA(33)
+hda: cache flushes not supported
 hda: hda1
 hdb: max request size: 128KiB
 hdb: 4177920 sectors (2139 MB), CHS=4144/16/63, UDMA(33)
+hdb: cache flushes not supported
 hdb: unknown partition table
-scsi0 : scsi_m5, version 1.73 [20040518], dev_size_mb=8, opts=0x0
-Vendor: Linux Model: scsi_m5 Li Rev: 0004
-Type: Direct-Access ANSI SCSI revision: 03
-SCSI device sda: 16384 512-byte hdwr sectors (8 MB)
-SCSI device sda: drive cache: write back
-sda: unknown partition table
-Attached scsi disk sda at scsi0, channel 0, id 0, lun 0
 mice: PS/2 mouse device common for all mice
 NET: Registered protocol family 2
-IP: routing cache hash table of 1024 buckets, 16Kbytes
+IP route cache hash table entries: 4096 (order: 2, 32768 bytes)
-TCP: Hash tables configured (established 8192 bind 8192)
+TCP established hash table entries: 16384 (order: 5, 262144 bytes)
-ip_conntrack version 2.1 (512 buckets, 4096 max) - 440 bytes per conntrack
+TCP bind hash table entries: 16384 (order: 5, 262144 bytes)
+TCP: Hash tables configured (established 16384 bind 16384)
+TCP reno registered
+ip_conntrack version 2.1 (512 buckets, 4096 max) - 296 bytes per conntrack
 ip_tables: (C) 2000-2002 Netfilter core team
 arp_tables: (C) 2002 David S. Miller
+TCP bic registered
 Initializing IPsec netlink socket
 NET: Registered protocol family 1
 NET: Registered protocol family 17
@@ -101,7 +105,7 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb
 802.1Q VLAN Support v1.8 Ben Greear <greearb@candelatech.com>
 All bugs added by David S. Miller <davem@redhat.com>
 VFS: Mounted root (ext2 filesystem) readonly.
-Freeing unused kernel memory: 480k freed
+Freeing unused kernel memory: 224k freed
 init started: BusyBox v1.1.0 (2006.08.17-02:54+0000) multi-call binary
 mounting filesystems...
 loading script...


@@ -1,232 +1,225 @@
 ---------- Begin Simulation Statistics ----------
-host_inst_rate 1361363 # Simulator instruction rate (inst/s)
+host_inst_rate 1292093 # Simulator instruction rate (inst/s)
-host_mem_usage 194440 # Number of bytes of host memory used
+host_mem_usage 197872 # Number of bytes of host memory used
-host_seconds 45.04 # Real time elapsed on the host
+host_seconds 51.53 # Real time elapsed on the host
-host_tick_rate 78691874 # Simulator tick rate (ticks/s)
+host_tick_rate 72118724 # Simulator tick rate (ticks/s)
 sim_freq 2000000000 # Frequency of simulated ticks
-sim_insts 61314617 # Number of instructions simulated
+sim_insts 66579941 # Number of instructions simulated
-sim_seconds 1.772124 # Number of seconds simulated
+sim_seconds 1.858108 # Number of seconds simulated
-sim_ticks 3544247159 # Number of ticks simulated
+sim_ticks 3716216351 # Number of ticks simulated
-system.cpu0.dtb.accesses 1850344 # DTB accesses
+system.cpu0.dtb.accesses 604194 # DTB accesses
-system.cpu0.dtb.acv 301 # DTB access violations
+system.cpu0.dtb.acv 337 # DTB access violations
-system.cpu0.dtb.hits 12691711 # DTB hits
+system.cpu0.dtb.hits 12597930 # DTB hits
-system.cpu0.dtb.misses 8349 # DTB misses
+system.cpu0.dtb.misses 7857 # DTB misses
-system.cpu0.dtb.read_accesses 509385 # DTB read accesses
+system.cpu0.dtb.read_accesses 426113 # DTB read accesses
-system.cpu0.dtb.read_acv 184 # DTB read access violations
+system.cpu0.dtb.read_acv 210 # DTB read access violations
-system.cpu0.dtb.read_hits 7018751 # DTB read hits
+system.cpu0.dtb.read_hits 7793080 # DTB read hits
-system.cpu0.dtb.read_misses 6579 # DTB read misses
+system.cpu0.dtb.read_misses 7107 # DTB read misses
-system.cpu0.dtb.write_accesses 1340959 # DTB write accesses
+system.cpu0.dtb.write_accesses 178081 # DTB write accesses
-system.cpu0.dtb.write_acv 117 # DTB write access violations
+system.cpu0.dtb.write_acv 127 # DTB write access violations
-system.cpu0.dtb.write_hits 5672960 # DTB write hits
+system.cpu0.dtb.write_hits 4804850 # DTB write hits
-system.cpu0.dtb.write_misses 1770 # DTB write misses
+system.cpu0.dtb.write_misses 750 # DTB write misses
-system.cpu0.idle_fraction 0.984893 # Percentage of idle cycles
+system.cpu0.idle_fraction 0.986701 # Percentage of idle cycles
-system.cpu0.itb.accesses 1981604 # ITB accesses
+system.cpu0.itb.accesses 1567177 # ITB accesses
-system.cpu0.itb.acv 161 # ITB acv
+system.cpu0.itb.acv 184 # ITB acv
-system.cpu0.itb.hits 1978255 # ITB hits
+system.cpu0.itb.hits 1563535 # ITB hits
-system.cpu0.itb.misses 3349 # ITB misses
+system.cpu0.itb.misses 3642 # ITB misses
-system.cpu0.kern.callpal 176688 # number of callpals executed
+system.cpu0.kern.callpal 140535 # number of callpals executed
 system.cpu0.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
-system.cpu0.kern.callpal_wripir 97 0.05% 0.06% # number of callpals executed
+system.cpu0.kern.callpal_wripir 567 0.40% 0.40% # number of callpals executed
-system.cpu0.kern.callpal_wrmces 1 0.00% 0.06% # number of callpals executed
+system.cpu0.kern.callpal_wrmces 1 0.00% 0.40% # number of callpals executed
-system.cpu0.kern.callpal_wrfen 1 0.00% 0.06% # number of callpals executed
+system.cpu0.kern.callpal_wrfen 1 0.00% 0.41% # number of callpals executed
-system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.06% # number of callpals executed
+system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.41% # number of callpals executed
-system.cpu0.kern.callpal_swpctx 1117 0.63% 0.69% # number of callpals executed
+system.cpu0.kern.callpal_swpctx 2926 2.08% 2.49% # number of callpals executed
-system.cpu0.kern.callpal_tbi 44 0.02% 0.71% # number of callpals executed
+system.cpu0.kern.callpal_tbi 49 0.03% 2.52% # number of callpals executed
-system.cpu0.kern.callpal_wrent 7 0.00% 0.72% # number of callpals executed
+system.cpu0.kern.callpal_wrent 7 0.00% 2.53% # number of callpals executed
-system.cpu0.kern.callpal_swpipl 166811 94.41% 95.13% # number of callpals executed
+system.cpu0.kern.callpal_swpipl 126411 89.95% 92.48% # number of callpals executed
-system.cpu0.kern.callpal_rdps 4911 2.78% 97.91% # number of callpals executed
+system.cpu0.kern.callpal_rdps 5784 4.12% 96.59% # number of callpals executed
-system.cpu0.kern.callpal_wrkgp 1 0.00% 97.91% # number of callpals executed
+system.cpu0.kern.callpal_wrkgp 1 0.00% 96.59% # number of callpals executed
-system.cpu0.kern.callpal_wrusp 3 0.00% 97.91% # number of callpals executed
+system.cpu0.kern.callpal_wrusp 2 0.00% 96.60% # number of callpals executed
-system.cpu0.kern.callpal_rdusp 9 0.01% 97.91% # number of callpals executed
+system.cpu0.kern.callpal_rdusp 9 0.01% 96.60% # number of callpals executed
-system.cpu0.kern.callpal_whami 2 0.00% 97.92% # number of callpals executed
+system.cpu0.kern.callpal_whami 2 0.00% 96.60% # number of callpals executed
-system.cpu0.kern.callpal_rti 3236 1.83% 99.75% # number of callpals executed
+system.cpu0.kern.callpal_rti 4273 3.04% 99.64% # number of callpals executed
-system.cpu0.kern.callpal_callsys 325 0.18% 99.93% # number of callpals executed
+system.cpu0.kern.callpal_callsys 366 0.26% 99.90% # number of callpals executed
-system.cpu0.kern.callpal_imb 121 0.07% 100.00% # number of callpals executed
+system.cpu0.kern.callpal_imb 134 0.10% 100.00% # number of callpals executed
 system.cpu0.kern.inst.arm 0 # number of arm instructions executed
-system.cpu0.kern.inst.hwrei 190918 # number of hwrei instructions executed
+system.cpu0.kern.inst.hwrei 155157 # number of hwrei instructions executed
 system.cpu0.kern.inst.ivlb 0 # number of ivlb instructions executed
 system.cpu0.kern.inst.ivle 0 # number of ivle instructions executed
-system.cpu0.kern.inst.quiesce 1922 # number of quiesce instructions executed
+system.cpu0.kern.inst.quiesce 6712 # number of quiesce instructions executed
-system.cpu0.kern.ipl_count 172116 # number of times we switched to this ipl
+system.cpu0.kern.ipl_count 133285 # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_0 72060 41.87% 41.87% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_0 53228 39.94% 39.94% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_21 251 0.15% 42.01% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_21 245 0.18% 40.12% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_22 5518 3.21% 45.22% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_22 1895 1.42% 41.54% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_30 7 0.00% 45.22% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_30 460 0.35% 41.89% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_31 94280 54.78% 100.00% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_31 77457 58.11% 100.00% # number of times we switched to this ipl
-system.cpu0.kern.ipl_good 153515 # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good 107676 # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_0 72019 46.91% 46.91% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_0 52768 49.01% 49.01% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_21 251 0.16% 47.08% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_21 245 0.23% 49.23% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_22 5518 3.59% 50.67% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_22 1895 1.76% 50.99% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_30 7 0.00% 50.68% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_30 460 0.43% 51.42% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_31 75720 49.32% 100.00% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_31 52308 48.58% 100.00% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_ticks 3543835079 # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks 3716215936 # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_0 3521923327 99.38% 99.38% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_0 3683825506 99.13% 99.13% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_21 39982 0.00% 99.38% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_21 40474 0.00% 99.13% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_22 1005040 0.03% 99.41% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_22 162970 0.00% 99.13% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_30 1756 0.00% 99.41% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_30 103364 0.00% 99.14% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_31 20864974 0.59% 100.00% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_31 32083622 0.86% 100.00% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_used 0.891928 # fraction of swpipl calls that actually changed the ipl
+system.cpu0.kern.ipl_used 0.807863 # fraction of swpipl calls that actually changed the ipl
-system.cpu0.kern.ipl_used_0 0.999431 # fraction of swpipl calls that actually changed the ipl
+system.cpu0.kern.ipl_used_0 0.991358 # fraction of swpipl calls that actually changed the ipl
 system.cpu0.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl
 system.cpu0.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
 system.cpu0.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl
-system.cpu0.kern.ipl_used_31 0.803140 # fraction of swpipl calls that actually changed the ipl
+system.cpu0.kern.ipl_used_31 0.675317 # fraction of swpipl calls that actually changed the ipl
-system.cpu0.kern.mode_good_kernel 1277
+system.cpu0.kern.mode_good_kernel 1221
-system.cpu0.kern.mode_good_user 1129
+system.cpu0.kern.mode_good_user 1222
-system.cpu0.kern.mode_good_idle 148
+system.cpu0.kern.mode_good_idle 0
-system.cpu0.kern.mode_switch_kernel 2253 # number of protection mode switches
+system.cpu0.kern.mode_switch_kernel 6758 # number of protection mode switches
-system.cpu0.kern.mode_switch_user 1129 # number of protection mode switches
+system.cpu0.kern.mode_switch_user 1222 # number of protection mode switches
-system.cpu0.kern.mode_switch_idle 2074 # number of protection mode switches
+system.cpu0.kern.mode_switch_idle 0 # number of protection mode switches
-system.cpu0.kern.mode_switch_good 0.468109 # fraction of useful protection mode switches
+system.cpu0.kern.mode_switch_good 0.306140 # fraction of useful protection mode switches
-system.cpu0.kern.mode_switch_good_kernel 0.566800 # fraction of useful protection mode switches
+system.cpu0.kern.mode_switch_good_kernel 0.180675 # fraction of useful protection mode switches
 system.cpu0.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
-system.cpu0.kern.mode_switch_good_idle 0.071360 # fraction of useful protection mode switches
+system.cpu0.kern.mode_switch_good_idle <err: div-0> # fraction of useful protection mode switches
-system.cpu0.kern.mode_ticks_kernel 28710240 0.81% 0.81% # number of ticks spent at the given mode
+system.cpu0.kern.mode_ticks_kernel 3714429703 99.95% 99.95% # number of ticks spent at the given mode
-system.cpu0.kern.mode_ticks_user 2184201 0.06% 0.87% # number of ticks spent at the given mode
+system.cpu0.kern.mode_ticks_user 1786231 0.05% 100.00% # number of ticks spent at the given mode
-system.cpu0.kern.mode_ticks_idle 3512891779 99.13% 100.00% # number of ticks spent at the given mode
+system.cpu0.kern.mode_ticks_idle 0 0.00% 100.00% # number of ticks spent at the given mode
-system.cpu0.kern.swap_context 1118 # number of times the context was actually changed
+system.cpu0.kern.swap_context 2927 # number of times the context was actually changed
-system.cpu0.kern.syscall 192 # number of syscalls executed
+system.cpu0.kern.syscall 217 # number of syscalls executed
-system.cpu0.kern.syscall_fork 7 3.65% 3.65% # number of syscalls executed
+system.cpu0.kern.syscall_fork 8 3.69% 3.69% # number of syscalls executed
-system.cpu0.kern.syscall_read 13 6.77% 10.42% # number of syscalls executed
+system.cpu0.kern.syscall_read 19 8.76% 12.44% # number of syscalls executed
-system.cpu0.kern.syscall_write 4 2.08% 12.50% # number of syscalls executed
+system.cpu0.kern.syscall_write 3 1.38% 13.82% # number of syscalls executed
-system.cpu0.kern.syscall_close 28 14.58% 27.08% # number of syscalls executed
+system.cpu0.kern.syscall_close 31 14.29% 28.11% # number of syscalls executed
-system.cpu0.kern.syscall_chdir 1 0.52% 27.60% # number of syscalls executed
+system.cpu0.kern.syscall_chdir 1 0.46% 28.57% # number of syscalls executed
-system.cpu0.kern.syscall_obreak 7 3.65% 31.25% # number of syscalls executed
+system.cpu0.kern.syscall_obreak 6 2.76% 31.34% # number of syscalls executed
-system.cpu0.kern.syscall_lseek 6 3.12% 34.37% # number of syscalls executed
+system.cpu0.kern.syscall_lseek 10 4.61% 35.94% # number of syscalls executed
-system.cpu0.kern.syscall_getpid 4 2.08% 36.46% # number of syscalls executed
+system.cpu0.kern.syscall_getpid 6 2.76% 38.71% # number of syscalls executed
-system.cpu0.kern.syscall_setuid 1 0.52% 36.98% # number of syscalls executed
+system.cpu0.kern.syscall_setuid 2 0.92% 39.63% # number of syscalls executed
-system.cpu0.kern.syscall_getuid 3 1.56% 38.54% # number of syscalls executed
+system.cpu0.kern.syscall_getuid 4 1.84% 41.47% # number of syscalls executed
-system.cpu0.kern.syscall_access 7 3.65% 42.19% # number of syscalls executed
+system.cpu0.kern.syscall_access 6 2.76% 44.24% # number of syscalls executed
-system.cpu0.kern.syscall_dup 2 1.04% 43.23% # number of syscalls executed
+system.cpu0.kern.syscall_dup 2 0.92% 45.16% # number of syscalls executed
-system.cpu0.kern.syscall_open 34 17.71% 60.94% # number of syscalls executed
+system.cpu0.kern.syscall_open 33 15.21% 60.37% # number of syscalls executed
-system.cpu0.kern.syscall_getgid 3 1.56% 62.50% # number of syscalls executed
+system.cpu0.kern.syscall_getgid 4 1.84% 62.21% # number of syscalls executed
-system.cpu0.kern.syscall_sigprocmask 8 4.17% 66.67% # number of syscalls executed
+system.cpu0.kern.syscall_sigprocmask 10 4.61% 66.82% # number of syscalls executed
-system.cpu0.kern.syscall_ioctl 9 4.69% 71.35% # number of syscalls executed
+system.cpu0.kern.syscall_ioctl 9 4.15% 70.97% # number of syscalls executed
-system.cpu0.kern.syscall_readlink 1 0.52% 71.87% # number of syscalls executed
-system.cpu0.kern.syscall_execve 5 2.60% 74.48% # number of syscalls executed
+system.cpu0.kern.syscall_execve 6 2.76% 73.73% # number of syscalls executed
-system.cpu0.kern.syscall_mmap 22 11.46% 85.94% # number of syscalls executed
+system.cpu0.kern.syscall_mmap 25 11.52% 85.25% # number of syscalls executed
-system.cpu0.kern.syscall_munmap 2 1.04% 86.98% # number of syscalls executed
+system.cpu0.kern.syscall_munmap 3 1.38% 86.64% # number of syscalls executed
-system.cpu0.kern.syscall_mprotect 6 3.12% 90.10% # number of syscalls executed
+system.cpu0.kern.syscall_mprotect 7 3.23% 89.86% # number of syscalls executed
-system.cpu0.kern.syscall_gethostname 1 0.52% 90.62% # number of syscalls executed
+system.cpu0.kern.syscall_gethostname 1 0.46% 90.32% # number of syscalls executed
-system.cpu0.kern.syscall_dup2 2 1.04% 91.67% # number of syscalls executed
+system.cpu0.kern.syscall_dup2 3 1.38% 91.71% # number of syscalls executed
-system.cpu0.kern.syscall_fcntl 8 4.17% 95.83% # number of syscalls executed
+system.cpu0.kern.syscall_fcntl 8 3.69% 95.39% # number of syscalls executed
-system.cpu0.kern.syscall_socket 2 1.04% 96.87% # number of syscalls executed
+system.cpu0.kern.syscall_socket 2 0.92% 96.31% # number of syscalls executed
-system.cpu0.kern.syscall_connect 2 1.04% 97.92% # number of syscalls executed
+system.cpu0.kern.syscall_connect 2 0.92% 97.24% # number of syscalls executed
-system.cpu0.kern.syscall_setgid 1 0.52% 98.44% # number of syscalls executed
+system.cpu0.kern.syscall_setgid 2 0.92% 98.16% # number of syscalls executed
-system.cpu0.kern.syscall_getrlimit 1 0.52% 98.96% # number of syscalls executed
+system.cpu0.kern.syscall_getrlimit 2 0.92% 99.08% # number of syscalls executed
-system.cpu0.kern.syscall_setsid 2 1.04% 100.00% # number of syscalls executed
+system.cpu0.kern.syscall_setsid 2 0.92% 100.00% # number of syscalls executed
-system.cpu0.not_idle_fraction 0.015107 # Percentage of non-idle cycles
+system.cpu0.not_idle_fraction 0.013299 # Percentage of non-idle cycles
-system.cpu0.numCycles 53543489 # number of cpu cycles simulated
+system.cpu0.numCycles 49421041 # number of cpu cycles simulated
-system.cpu0.num_insts 53539979 # Number of instructions executed
+system.cpu0.num_insts 49417215 # Number of instructions executed
-system.cpu0.num_refs 12727196 # Number of memory references
+system.cpu0.num_refs 12829669 # Number of memory references
-system.cpu1.dtb.accesses 460215 # DTB accesses
+system.cpu1.dtb.accesses 701326 # DTB accesses
-system.cpu1.dtb.acv 72 # DTB access violations
+system.cpu1.dtb.acv 30 # DTB access violations
-system.cpu1.dtb.hits 2012555 # DTB hits
+system.cpu1.dtb.hits 5286923 # DTB hits
-system.cpu1.dtb.misses 4236 # DTB misses
+system.cpu1.dtb.misses 3658 # DTB misses
-system.cpu1.dtb.read_accesses 319867 # DTB read accesses
+system.cpu1.dtb.read_accesses 474933 # DTB read accesses
-system.cpu1.dtb.read_acv 26 # DTB read access violations
+system.cpu1.dtb.read_acv 0 # DTB read access violations
-system.cpu1.dtb.read_hits 1276251 # DTB read hits
+system.cpu1.dtb.read_hits 3100008 # DTB read hits
-system.cpu1.dtb.read_misses 3800 # DTB read misses
+system.cpu1.dtb.read_misses 3260 # DTB read misses
-system.cpu1.dtb.write_accesses 140348 # DTB write accesses
+system.cpu1.dtb.write_accesses 226393 # DTB write accesses
-system.cpu1.dtb.write_acv 46 # DTB write access violations
+system.cpu1.dtb.write_acv 30 # DTB write access violations
-system.cpu1.dtb.write_hits 736304 # DTB write hits
+system.cpu1.dtb.write_hits 2186915 # DTB write hits
+system.cpu1.dtb.write_misses 398 # DTB write misses
system.cpu1.dtb.write_misses 436 # DTB write misses system.cpu1.idle_fraction 0.995381 # Percentage of idle cycles
system.cpu1.idle_fraction 0.997806 # Percentage of idle cycles system.cpu1.itb.accesses 1714255 # ITB accesses
system.cpu1.itb.accesses 1302484 # ITB accesses system.cpu1.itb.acv 0 # ITB acv
system.cpu1.itb.acv 23 # ITB acv system.cpu1.itb.hits 1712856 # ITB hits
system.cpu1.itb.hits 1300768 # ITB hits system.cpu1.itb.misses 1399 # ITB misses
system.cpu1.itb.misses 1716 # ITB misses system.cpu1.kern.callpal 81795 # number of callpals executed
system.cpu1.kern.callpal 27118 # number of callpals executed
system.cpu1.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed system.cpu1.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
system.cpu1.kern.callpal_wripir 7 0.03% 0.03% # number of callpals executed system.cpu1.kern.callpal_wripir 460 0.56% 0.56% # number of callpals executed
system.cpu1.kern.callpal_wrmces 1 0.00% 0.03% # number of callpals executed system.cpu1.kern.callpal_wrmces 1 0.00% 0.56% # number of callpals executed
system.cpu1.kern.callpal_wrfen 1 0.00% 0.04% # number of callpals executed system.cpu1.kern.callpal_wrfen 1 0.00% 0.57% # number of callpals executed
system.cpu1.kern.callpal_swpctx 515 1.90% 1.94% # number of callpals executed system.cpu1.kern.callpal_swpctx 2245 2.74% 3.31% # number of callpals executed
system.cpu1.kern.callpal_tbi 10 0.04% 1.97% # number of callpals executed system.cpu1.kern.callpal_tbi 4 0.00% 3.32% # number of callpals executed
system.cpu1.kern.callpal_wrent 7 0.03% 2.00% # number of callpals executed system.cpu1.kern.callpal_wrent 7 0.01% 3.32% # number of callpals executed
system.cpu1.kern.callpal_swpipl 23496 86.64% 88.64% # number of callpals executed system.cpu1.kern.callpal_swpipl 71908 87.91% 91.24% # number of callpals executed
system.cpu1.kern.callpal_rdps 251 0.93% 89.57% # number of callpals executed system.cpu1.kern.callpal_rdps 3034 3.71% 94.95% # number of callpals executed
system.cpu1.kern.callpal_wrkgp 1 0.00% 89.57% # number of callpals executed system.cpu1.kern.callpal_wrkgp 1 0.00% 94.95% # number of callpals executed
system.cpu1.kern.callpal_wrusp 4 0.01% 89.59% # number of callpals executed system.cpu1.kern.callpal_wrusp 5 0.01% 94.95% # number of callpals executed
system.cpu1.kern.callpal_rdusp 1 0.00% 89.59% # number of callpals executed system.cpu1.kern.callpal_whami 3 0.00% 94.96% # number of callpals executed
system.cpu1.kern.callpal_whami 3 0.01% 89.60% # number of callpals executed system.cpu1.kern.callpal_rti 3913 4.78% 99.74% # number of callpals executed
system.cpu1.kern.callpal_rti 2552 9.41% 99.01% # number of callpals executed system.cpu1.kern.callpal_callsys 165 0.20% 99.94% # number of callpals executed
system.cpu1.kern.callpal_callsys 208 0.77% 99.78% # number of callpals executed system.cpu1.kern.callpal_imb 46 0.06% 100.00% # number of callpals executed
system.cpu1.kern.callpal_imb 59 0.22% 100.00% # number of callpals executed
system.cpu1.kern.callpal_rdunique 1 0.00% 100.00% # number of callpals executed system.cpu1.kern.callpal_rdunique 1 0.00% 100.00% # number of callpals executed
system.cpu1.kern.inst.arm 0 # number of arm instructions executed system.cpu1.kern.inst.arm 0 # number of arm instructions executed
system.cpu1.kern.inst.hwrei 35069 # number of hwrei instructions executed system.cpu1.kern.inst.hwrei 89345 # number of hwrei instructions executed
system.cpu1.kern.inst.ivlb 0 # number of ivlb instructions executed system.cpu1.kern.inst.ivlb 0 # number of ivlb instructions executed
system.cpu1.kern.inst.ivle 0 # number of ivle instructions executed system.cpu1.kern.inst.ivle 0 # number of ivle instructions executed
system.cpu1.kern.inst.quiesce 1947 # number of quiesce instructions executed system.cpu1.kern.inst.quiesce 2592 # number of quiesce instructions executed
system.cpu1.kern.ipl_count 27951 # number of times we switched to this ipl system.cpu1.kern.ipl_count 78283 # number of times we switched to this ipl
system.cpu1.kern.ipl_count_0 10084 36.08% 36.08% # number of times we switched to this ipl system.cpu1.kern.ipl_count_0 30809 39.36% 39.36% # number of times we switched to this ipl
system.cpu1.kern.ipl_count_22 5485 19.62% 55.70% # number of times we switched to this ipl system.cpu1.kern.ipl_count_22 1894 2.42% 41.78% # number of times we switched to this ipl
system.cpu1.kern.ipl_count_30 97 0.35% 56.05% # number of times we switched to this ipl system.cpu1.kern.ipl_count_30 567 0.72% 42.50% # number of times we switched to this ipl
system.cpu1.kern.ipl_count_31 12285 43.95% 100.00% # number of times we switched to this ipl system.cpu1.kern.ipl_count_31 45013 57.50% 100.00% # number of times we switched to this ipl
system.cpu1.kern.ipl_good 27484 # number of times we switched to this ipl from a different ipl system.cpu1.kern.ipl_good 61674 # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_good_0 10061 36.61% 36.61% # number of times we switched to this ipl from a different ipl system.cpu1.kern.ipl_good_0 29890 48.46% 48.46% # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_good_22 5485 19.96% 56.56% # number of times we switched to this ipl from a different ipl system.cpu1.kern.ipl_good_22 1894 3.07% 51.54% # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_good_30 97 0.35% 56.92% # number of times we switched to this ipl from a different ipl system.cpu1.kern.ipl_good_30 567 0.92% 52.45% # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_good_31 11841 43.08% 100.00% # number of times we switched to this ipl from a different ipl system.cpu1.kern.ipl_good_31 29323 47.55% 100.00% # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_ticks 3544246744 # number of cycles we spent at this ipl system.cpu1.kern.ipl_ticks 3715795413 # number of cycles we spent at this ipl
system.cpu1.kern.ipl_ticks_0 3521927913 99.37% 99.37% # number of cycles we spent at this ipl system.cpu1.kern.ipl_ticks_0 3690163762 99.31% 99.31% # number of cycles we spent at this ipl
system.cpu1.kern.ipl_ticks_22 1037048 0.03% 99.40% # number of cycles we spent at this ipl system.cpu1.kern.ipl_ticks_22 162884 0.00% 99.31% # number of cycles we spent at this ipl
system.cpu1.kern.ipl_ticks_30 25211 0.00% 99.40% # number of cycles we spent at this ipl system.cpu1.kern.ipl_ticks_30 130370 0.00% 99.32% # number of cycles we spent at this ipl
system.cpu1.kern.ipl_ticks_31 21256572 0.60% 100.00% # number of cycles we spent at this ipl system.cpu1.kern.ipl_ticks_31 25338397 0.68% 100.00% # number of cycles we spent at this ipl
system.cpu1.kern.ipl_used 0.983292 # fraction of swpipl calls that actually changed the ipl system.cpu1.kern.ipl_used 0.787834 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_0 0.997719 # fraction of swpipl calls that actually changed the ipl system.cpu1.kern.ipl_used_0 0.970171 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl system.cpu1.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl system.cpu1.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_31 0.963858 # fraction of swpipl calls that actually changed the ipl system.cpu1.kern.ipl_used_31 0.651434 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.mode_good_kernel 636 system.cpu1.kern.mode_good_kernel 1028
system.cpu1.kern.mode_good_user 637 system.cpu1.kern.mode_good_user 535
system.cpu1.kern.mode_good_idle 0 system.cpu1.kern.mode_good_idle 493
system.cpu1.kern.mode_switch_kernel 3063 # number of protection mode switches system.cpu1.kern.mode_switch_kernel 2307 # number of protection mode switches
system.cpu1.kern.mode_switch_user 637 # number of protection mode switches system.cpu1.kern.mode_switch_user 535 # number of protection mode switches
system.cpu1.kern.mode_switch_idle 0 # number of protection mode switches system.cpu1.kern.mode_switch_idle 2948 # number of protection mode switches
system.cpu1.kern.mode_switch_good 0.344054 # fraction of useful protection mode switches system.cpu1.kern.mode_switch_good 0.355095 # fraction of useful protection mode switches
system.cpu1.kern.mode_switch_good_kernel 0.207640 # fraction of useful protection mode switches system.cpu1.kern.mode_switch_good_kernel 0.445600 # fraction of useful protection mode switches
system.cpu1.kern.mode_switch_good_user 1 # fraction of useful protection mode switches system.cpu1.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
system.cpu1.kern.mode_switch_good_idle <err: div-0> # fraction of useful protection mode switches system.cpu1.kern.mode_switch_good_idle 0.167232 # fraction of useful protection mode switches
system.cpu1.kern.mode_ticks_kernel 3542834137 99.96% 99.96% # number of ticks spent at the given mode system.cpu1.kern.mode_ticks_kernel 12634755 0.34% 0.34% # number of ticks spent at the given mode
system.cpu1.kern.mode_ticks_user 1412605 0.04% 100.00% # number of ticks spent at the given mode system.cpu1.kern.mode_ticks_user 1807179 0.05% 0.39% # number of ticks spent at the given mode
system.cpu1.kern.mode_ticks_idle 0 0.00% 100.00% # number of ticks spent at the given mode system.cpu1.kern.mode_ticks_idle 3700889452 99.61% 100.00% # number of ticks spent at the given mode
system.cpu1.kern.swap_context 516 # number of times the context was actually changed system.cpu1.kern.swap_context 2246 # number of times the context was actually changed
system.cpu1.kern.syscall 137 # number of syscalls executed system.cpu1.kern.syscall 112 # number of syscalls executed
system.cpu1.kern.syscall_fork 1 0.73% 0.73% # number of syscalls executed system.cpu1.kern.syscall_read 11 9.82% 9.82% # number of syscalls executed
system.cpu1.kern.syscall_read 17 12.41% 13.14% # number of syscalls executed system.cpu1.kern.syscall_write 1 0.89% 10.71% # number of syscalls executed
system.cpu1.kern.syscall_close 15 10.95% 24.09% # number of syscalls executed system.cpu1.kern.syscall_close 12 10.71% 21.43% # number of syscalls executed
system.cpu1.kern.syscall_chmod 1 0.73% 24.82% # number of syscalls executed system.cpu1.kern.syscall_chmod 1 0.89% 22.32% # number of syscalls executed
system.cpu1.kern.syscall_obreak 8 5.84% 30.66% # number of syscalls executed system.cpu1.kern.syscall_obreak 9 8.04% 30.36% # number of syscalls executed
system.cpu1.kern.syscall_lseek 4 2.92% 33.58% # number of syscalls executed system.cpu1.kern.syscall_setuid 2 1.79% 32.14% # number of syscalls executed
system.cpu1.kern.syscall_getpid 2 1.46% 35.04% # number of syscalls executed system.cpu1.kern.syscall_getuid 2 1.79% 33.93% # number of syscalls executed
system.cpu1.kern.syscall_setuid 3 2.19% 37.23% # number of syscalls executed system.cpu1.kern.syscall_access 5 4.46% 38.39% # number of syscalls executed
system.cpu1.kern.syscall_getuid 3 2.19% 39.42% # number of syscalls executed system.cpu1.kern.syscall_open 22 19.64% 58.04% # number of syscalls executed
system.cpu1.kern.syscall_access 4 2.92% 42.34% # number of syscalls executed system.cpu1.kern.syscall_getgid 2 1.79% 59.82% # number of syscalls executed
system.cpu1.kern.syscall_open 21 15.33% 57.66% # number of syscalls executed system.cpu1.kern.syscall_ioctl 1 0.89% 60.71% # number of syscalls executed
system.cpu1.kern.syscall_getgid 3 2.19% 59.85% # number of syscalls executed system.cpu1.kern.syscall_readlink 1 0.89% 61.61% # number of syscalls executed
system.cpu1.kern.syscall_sigprocmask 2 1.46% 61.31% # number of syscalls executed system.cpu1.kern.syscall_execve 1 0.89% 62.50% # number of syscalls executed
system.cpu1.kern.syscall_ioctl 1 0.73% 62.04% # number of syscalls executed system.cpu1.kern.syscall_mmap 29 25.89% 88.39% # number of syscalls executed
system.cpu1.kern.syscall_execve 2 1.46% 63.50% # number of syscalls executed system.cpu1.kern.syscall_mprotect 9 8.04% 96.43% # number of syscalls executed
system.cpu1.kern.syscall_mmap 32 23.36% 86.86% # number of syscalls executed system.cpu1.kern.syscall_fcntl 2 1.79% 98.21% # number of syscalls executed
system.cpu1.kern.syscall_munmap 1 0.73% 87.59% # number of syscalls executed system.cpu1.kern.syscall_setgid 2 1.79% 100.00% # number of syscalls executed
system.cpu1.kern.syscall_mprotect 10 7.30% 94.89% # number of syscalls executed system.cpu1.not_idle_fraction 0.004619 # Percentage of non-idle cycles
system.cpu1.kern.syscall_dup2 1 0.73% 95.62% # number of syscalls executed system.cpu1.numCycles 17164125 # number of cpu cycles simulated
system.cpu1.kern.syscall_fcntl 2 1.46% 97.08% # number of syscalls executed system.cpu1.num_insts 17162726 # Number of instructions executed
system.cpu1.kern.syscall_setgid 3 2.19% 99.27% # number of syscalls executed system.cpu1.num_refs 5316705 # Number of memory references
system.cpu1.kern.syscall_getrlimit 1 0.73% 100.00% # number of syscalls executed
system.cpu1.not_idle_fraction 0.002194 # Percentage of non-idle cycles
system.cpu1.numCycles 7776377 # number of cpu cycles simulated
system.cpu1.num_insts 7774638 # Number of instructions executed
system.cpu1.num_refs 2025195 # Number of memory references
system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD). system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD).
system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD). system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD). system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD).
system.disk0.dma_write_bytes 2735104 # Number of bytes transfered via DMA writes. system.disk0.dma_write_bytes 2702336 # Number of bytes transfered via DMA writes.
system.disk0.dma_write_full_pages 306 # Number of full page size DMA writes. system.disk0.dma_write_full_pages 302 # Number of full page size DMA writes.
system.disk0.dma_write_txs 412 # Number of DMA write transactions. system.disk0.dma_write_txs 408 # Number of DMA write transactions.
system.disk2.dma_read_bytes 0 # Number of bytes transfered via DMA reads (not PRD). system.disk2.dma_read_bytes 0 # Number of bytes transfered via DMA reads (not PRD).
system.disk2.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD). system.disk2.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk2.dma_read_txs 0 # Number of DMA read transactions (not PRD). system.disk2.dma_read_txs 0 # Number of DMA read transactions (not PRD).

View file
@ -1,6 +1,6 @@
0: system.tsunami.io.rtc: Real-time clock set to Sun Jan 1 00:00:00 2006 0: system.tsunami.io.rtc: Real-time clock set to Sun Jan 1 00:00:00 2006
Listening for console connection on port 3456 Listening for console connection on port 3457
0: system.remote_gdb.listener: listening for remote gdb #0 on port 7000 0: system.remote_gdb.listener: listening for remote gdb #0 on port 7001
0: system.remote_gdb.listener: listening for remote gdb #1 on port 7001 0: system.remote_gdb.listener: listening for remote gdb #1 on port 7002
warn: Entering event queue @ 0. Starting simulation... warn: Entering event queue @ 0. Starting simulation...
warn: 195722: Trying to launch CPU number 1! warn: 195723: Trying to launch CPU number 1!

View file
@ -5,8 +5,8 @@ The Regents of The University of Michigan
All Rights Reserved All Rights Reserved
M5 compiled Oct 5 2006 22:13:02 M5 compiled Oct 8 2006 14:07:02
M5 started Fri Oct 6 00:24:12 2006 M5 started Sun Oct 8 14:07:57 2006
M5 executing on zizzer.eecs.umich.edu M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-atomic-dual tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-atomic-dual command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-atomic-dual tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-atomic-dual
Exiting @ tick 3544247159 because m5_exit instruction encountered Exiting @ tick 3716216351 because m5_exit instruction encountered

View file
@ -75,7 +75,7 @@ side_b=system.membus.port[0]
type=AtomicSimpleCPU type=AtomicSimpleCPU
children=dtb itb children=dtb itb
clock=1 clock=1
cpu_id=-1 cpu_id=0
defer_registration=false defer_registration=false
dtb=system.cpu.dtb dtb=system.cpu.dtb
function_trace=false function_trace=false

View file
@ -90,9 +90,9 @@ max_loads_all_threads=0
progress_interval=0 progress_interval=0
mem=system.physmem mem=system.physmem
system=system system=system
cpu_id=0
itb=system.cpu.itb itb=system.cpu.itb
dtb=system.cpu.dtb dtb=system.cpu.dtb
cpu_id=-1
profile=0 profile=0
clock=1 clock=1
defer_registration=false defer_registration=false

View file
@ -3,7 +3,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
memsize 8000000 pages 4000 memsize 8000000 pages 4000
First free page after ROM 0xFFFFFC0000018000 First free page after ROM 0xFFFFFC0000018000
HWRPB 0xFFFFFC0000018000 l1pt 0xFFFFFC0000040000 l2pt 0xFFFFFC0000042000 l3pt_rpb 0xFFFFFC0000044000 l3pt_kernel 0xFFFFFC0000048000 l2reserv 0xFFFFFC0000046000 HWRPB 0xFFFFFC0000018000 l1pt 0xFFFFFC0000040000 l2pt 0xFFFFFC0000042000 l3pt_rpb 0xFFFFFC0000044000 l3pt_kernel 0xFFFFFC0000048000 l2reserv 0xFFFFFC0000046000
kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC00008064E8, kentry = 0xFFFFFC0000310000, numCPUs = 0x1 kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC0000855718, kentry = 0xFFFFFC0000310000, numCPUs = 0x1
CPU Clock at 2000 MHz IntrClockFrequency=1024 CPU Clock at 2000 MHz IntrClockFrequency=1024
Booting with 1 processor(s) Booting with 1 processor(s)
KSP: 0x20043FE8 PTBR 0x20 KSP: 0x20043FE8 PTBR 0x20
@ -14,28 +14,26 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
ConsoleDispatch at virt 10000658 phys 18658 val FFFFFC00000100A8 ConsoleDispatch at virt 10000658 phys 18658 val FFFFFC00000100A8
unix_boot_mem ends at FFFFFC0000076000 unix_boot_mem ends at FFFFFC0000076000
k_argc = 0 k_argc = 0
jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1028) jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1067)
CallbackFixup 0 18000, t7=FFFFFC0000700000 CallbackFixup 0 18000, t7=FFFFFC000070C000
Linux version 2.6.8.1 (binkertn@ziff.eecs.umich.edu) (gcc version 3.4.3) #36 SMP Mon May 2 19:50:53 EDT 2005 Linux version 2.6.13 (hsul@zed.eecs.umich.edu) (gcc version 3.4.3) #7 SMP Tue Aug 15 14:40:31 EDT 2006
Booting GENERIC on Tsunami variation DP264 using machine vector DP264 from SRM Booting GENERIC on Tsunami variation DP264 using machine vector DP264 from SRM
Major Options: SMP LEGACY_START VERBOSE_MCHECK Major Options: SMP LEGACY_START VERBOSE_MCHECK
Command line: root=/dev/hda1 console=ttyS0 Command line: root=/dev/hda1 console=ttyS0
memcluster 0, usage 1, start 0, end 392 memcluster 0, usage 1, start 0, end 392
memcluster 1, usage 0, start 392, end 16384 memcluster 1, usage 0, start 392, end 16384
freeing pages 1030:16384 freeing pages 1069:16384
reserving pages 1030:1031 reserving pages 1069:1070
SMP: 1 CPUs probed -- cpu_present_mask = 1 SMP: 1 CPUs probed -- cpu_present_mask = 1
Built 1 zonelists Built 1 zonelists
Kernel command line: root=/dev/hda1 console=ttyS0 Kernel command line: root=/dev/hda1 console=ttyS0
PID hash table entries: 1024 (order 10: 16384 bytes) PID hash table entries: 1024 (order: 10, 32768 bytes)
Using epoch = 1900 Using epoch = 1900
Console: colour dummy device 80x25 Console: colour dummy device 80x25
Dentry cache hash table entries: 32768 (order: 5, 262144 bytes) Dentry cache hash table entries: 32768 (order: 5, 262144 bytes)
Inode-cache hash table entries: 16384 (order: 4, 131072 bytes) Inode-cache hash table entries: 16384 (order: 4, 131072 bytes)
Memory: 119072k/131072k available (3058k kernel code, 8680k reserved, 695k data, 480k init) Memory: 118784k/131072k available (3316k kernel code, 8952k reserved, 983k data, 224k init)
Mount-cache hash table entries: 512 (order: 0, 8192 bytes) Mount-cache hash table entries: 512
per-CPU timeslice cutoff: 374.49 usecs.
task migration cache decay timeout: 0 msecs.
SMP mode deactivated. SMP mode deactivated.
Brought up 1 CPUs Brought up 1 CPUs
SMP: Total of 1 processors activated (4002.20 BogoMIPS). SMP: Total of 1 processors activated (4002.20 BogoMIPS).
@ -48,16 +46,23 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
Initializing Cryptographic API Initializing Cryptographic API
rtc: Standard PC (1900) epoch (1900) detected rtc: Standard PC (1900) epoch (1900) detected
Real Time Clock Driver v1.12 Real Time Clock Driver v1.12
Serial: 8250/16550 driver $Revision: 1.90 $ 5 ports, IRQ sharing disabled Serial: 8250/16550 driver $Revision: 1.90 $ 1 ports, IRQ sharing disabled
ttyS0 at I/O 0x3f8 (irq = 4) is a 8250 ttyS0 at I/O 0x3f8 (irq = 4) is a 8250
io scheduler noop registered
io scheduler anticipatory registered
io scheduler deadline registered
io scheduler cfq registered
loop: loaded (max 8 devices) loop: loaded (max 8 devices)
Using anticipatory io scheduler
nbd: registered device at major 43 nbd: registered device at major 43
sinic.c: M5 Simple Integrated NIC driver sinic.c: M5 Simple Integrated NIC driver
ns83820.c: National Semiconductor DP83820 10/100/1000 driver. ns83820.c: National Semiconductor DP83820 10/100/1000 driver.
ns83820: irq bound to CPU 0
eth0: ns83820.c: 0x22c: 00000000, subsystem: 0000:0000 eth0: ns83820.c: 0x22c: 00000000, subsystem: 0000:0000
eth0: enabling optical transceiver eth0: enabling optical transceiver
eth0: ns83820 v0.20: DP83820 v1.3: 00:90:00:00:00:01 io=0x09000000 irq=30 f=sg eth0: using 64 bit addressing.
eth0: ns83820 v0.22: DP83820 v1.3: 00:90:00:00:00:01 io=0x09000000 irq=30 f=h,sg
tun: Universal TUN/TAP device driver, 1.6
tun: (C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>
Uniform Multi-Platform E-IDE driver Revision: 7.00alpha2 Uniform Multi-Platform E-IDE driver Revision: 7.00alpha2
ide: Assuming 33MHz system bus speed for PIO modes; override with idebus=xx ide: Assuming 33MHz system bus speed for PIO modes; override with idebus=xx
PIIX4: IDE controller at PCI slot 0000:00:00.0 PIIX4: IDE controller at PCI slot 0000:00:00.0
@ -70,24 +75,23 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
ide0 at 0x8410-0x8417,0x8422 on irq 31 ide0 at 0x8410-0x8417,0x8422 on irq 31
hda: max request size: 128KiB hda: max request size: 128KiB
hda: 511056 sectors (261 MB), CHS=507/16/63, UDMA(33) hda: 511056 sectors (261 MB), CHS=507/16/63, UDMA(33)
hda: cache flushes not supported
hda: hda1 hda: hda1
hdb: max request size: 128KiB hdb: max request size: 128KiB
hdb: 4177920 sectors (2139 MB), CHS=4144/16/63, UDMA(33) hdb: 4177920 sectors (2139 MB), CHS=4144/16/63, UDMA(33)
hdb: cache flushes not supported
hdb: unknown partition table hdb: unknown partition table
scsi0 : scsi_m5, version 1.73 [20040518], dev_size_mb=8, opts=0x0
Vendor: Linux Model: scsi_m5 Li Rev: 0004
Type: Direct-Access ANSI SCSI revision: 03
SCSI device sda: 16384 512-byte hdwr sectors (8 MB)
SCSI device sda: drive cache: write back
sda: unknown partition table
Attached scsi disk sda at scsi0, channel 0, id 0, lun 0
mice: PS/2 mouse device common for all mice mice: PS/2 mouse device common for all mice
NET: Registered protocol family 2 NET: Registered protocol family 2
IP: routing cache hash table of 1024 buckets, 16Kbytes IP route cache hash table entries: 4096 (order: 2, 32768 bytes)
TCP: Hash tables configured (established 8192 bind 8192) TCP established hash table entries: 16384 (order: 5, 262144 bytes)
ip_conntrack version 2.1 (512 buckets, 4096 max) - 440 bytes per conntrack TCP bind hash table entries: 16384 (order: 5, 262144 bytes)
TCP: Hash tables configured (established 16384 bind 16384)
TCP reno registered
ip_conntrack version 2.1 (512 buckets, 4096 max) - 296 bytes per conntrack
ip_tables: (C) 2000-2002 Netfilter core team ip_tables: (C) 2000-2002 Netfilter core team
arp_tables: (C) 2002 David S. Miller arp_tables: (C) 2002 David S. Miller
TCP bic registered
Initializing IPsec netlink socket Initializing IPsec netlink socket
NET: Registered protocol family 1 NET: Registered protocol family 1
NET: Registered protocol family 17 NET: Registered protocol family 17
@ -96,7 +100,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
802.1Q VLAN Support v1.8 Ben Greear <greearb@candelatech.com> 802.1Q VLAN Support v1.8 Ben Greear <greearb@candelatech.com>
All bugs added by David S. Miller <davem@redhat.com> All bugs added by David S. Miller <davem@redhat.com>
VFS: Mounted root (ext2 filesystem) readonly. VFS: Mounted root (ext2 filesystem) readonly.
Freeing unused kernel memory: 480k freed Freeing unused kernel memory: 224k freed
init started: BusyBox v1.1.0 (2006.08.17-02:54+0000) multi-call binary init started: BusyBox v1.1.0 (2006.08.17-02:54+0000) multi-call binary
mounting filesystems... mounting filesystems...
loading script... loading script...

View file
@ -1,86 +1,86 @@
---------- Begin Simulation Statistics ---------- ---------- Begin Simulation Statistics ----------
host_inst_rate 1121378 # Simulator instruction rate (inst/s) host_inst_rate 1269893 # Simulator instruction rate (inst/s)
host_mem_usage 194272 # Number of bytes of host memory used host_mem_usage 197712 # Number of bytes of host memory used
host_seconds 51.72 # Real time elapsed on the host host_seconds 48.70 # Real time elapsed on the host
host_tick_rate 67313414 # Simulator tick rate (ticks/s) host_tick_rate 74667785 # Simulator tick rate (ticks/s)
sim_freq 2000000000 # Frequency of simulated ticks sim_freq 2000000000 # Frequency of simulated ticks
sim_insts 58001813 # Number of instructions simulated sim_insts 61839827 # Number of instructions simulated
sim_seconds 1.740863 # Number of seconds simulated sim_seconds 1.818060 # Number of seconds simulated
sim_ticks 3481726167 # Number of ticks simulated sim_ticks 3636120569 # Number of ticks simulated
system.cpu.dtb.accesses 2309470 # DTB accesses system.cpu.dtb.accesses 1304498 # DTB accesses
system.cpu.dtb.acv 367 # DTB access violations system.cpu.dtb.acv 367 # DTB access violations
system.cpu.dtb.hits 13711941 # DTB hits system.cpu.dtb.hits 16565944 # DTB hits
system.cpu.dtb.misses 12493 # DTB misses system.cpu.dtb.misses 11425 # DTB misses
system.cpu.dtb.read_accesses 828530 # DTB read accesses system.cpu.dtb.read_accesses 900427 # DTB read accesses
system.cpu.dtb.read_acv 210 # DTB read access violations system.cpu.dtb.read_acv 210 # DTB read access violations
system.cpu.dtb.read_hits 7597829 # DTB read hits system.cpu.dtb.read_hits 10044011 # DTB read hits
system.cpu.dtb.read_misses 10298 # DTB read misses system.cpu.dtb.read_misses 10280 # DTB read misses
system.cpu.dtb.write_accesses 1480940 # DTB write accesses system.cpu.dtb.write_accesses 404071 # DTB write accesses
system.cpu.dtb.write_acv 157 # DTB write access violations system.cpu.dtb.write_acv 157 # DTB write access violations
system.cpu.dtb.write_hits 6114112 # DTB write hits system.cpu.dtb.write_hits 6521933 # DTB write hits
system.cpu.dtb.write_misses 2195 # DTB write misses system.cpu.dtb.write_misses 1145 # DTB write misses
system.cpu.idle_fraction 0.983340 # Percentage of idle cycles system.cpu.idle_fraction 0.982991 # Percentage of idle cycles
system.cpu.itb.accesses 3281346 # ITB accesses system.cpu.itb.accesses 3281310 # ITB accesses
system.cpu.itb.acv 184 # ITB acv system.cpu.itb.acv 184 # ITB acv
system.cpu.itb.hits 3276356 # ITB hits system.cpu.itb.hits 3276320 # ITB hits
system.cpu.itb.misses 4990 # ITB misses system.cpu.itb.misses 4990 # ITB misses
system.cpu.kern.callpal 182718 # number of callpals executed system.cpu.kern.callpal 193942 # number of callpals executed
system.cpu.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed system.cpu.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrmces 1 0.00% 0.00% # number of callpals executed system.cpu.kern.callpal_wrmces 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrfen 1 0.00% 0.00% # number of callpals executed system.cpu.kern.callpal_wrfen 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrvptptr 1 0.00% 0.00% # number of callpals executed system.cpu.kern.callpal_wrvptptr 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_swpctx 1574 0.86% 0.86% # number of callpals executed system.cpu.kern.callpal_swpctx 4207 2.17% 2.17% # number of callpals executed
system.cpu.kern.callpal_tbi 54 0.03% 0.89% # number of callpals executed system.cpu.kern.callpal_tbi 54 0.03% 2.20% # number of callpals executed
system.cpu.kern.callpal_wrent 7 0.00% 0.90% # number of callpals executed system.cpu.kern.callpal_wrent 7 0.00% 2.20% # number of callpals executed
system.cpu.kern.callpal_swpipl 171359 93.78% 94.68% # number of callpals executed system.cpu.kern.callpal_swpipl 176844 91.18% 93.39% # number of callpals executed
system.cpu.kern.callpal_rdps 5159 2.82% 97.50% # number of callpals executed system.cpu.kern.callpal_rdps 6881 3.55% 96.93% # number of callpals executed
system.cpu.kern.callpal_wrkgp 1 0.00% 97.50% # number of callpals executed system.cpu.kern.callpal_wrkgp 1 0.00% 96.94% # number of callpals executed
system.cpu.kern.callpal_wrusp 7 0.00% 97.51% # number of callpals executed system.cpu.kern.callpal_wrusp 7 0.00% 96.94% # number of callpals executed
system.cpu.kern.callpal_rdusp 10 0.01% 97.51% # number of callpals executed system.cpu.kern.callpal_rdusp 9 0.00% 96.94% # number of callpals executed
system.cpu.kern.callpal_whami 2 0.00% 97.51% # number of callpals executed system.cpu.kern.callpal_whami 2 0.00% 96.94% # number of callpals executed
system.cpu.kern.callpal_rti 3829 2.10% 99.61% # number of callpals executed system.cpu.kern.callpal_rti 5214 2.69% 99.63% # number of callpals executed
system.cpu.kern.callpal_callsys 531 0.29% 99.90% # number of callpals executed system.cpu.kern.callpal_callsys 531 0.27% 99.91% # number of callpals executed
system.cpu.kern.callpal_imb 181 0.10% 100.00% # number of callpals executed system.cpu.kern.callpal_imb 181 0.09% 100.00% # number of callpals executed
system.cpu.kern.inst.arm 0 # number of arm instructions executed system.cpu.kern.inst.arm 0 # number of arm instructions executed
system.cpu.kern.inst.hwrei 202783 # number of hwrei instructions executed system.cpu.kern.inst.hwrei 213009 # number of hwrei instructions executed
system.cpu.kern.inst.ivlb 0 # number of ivlb instructions executed system.cpu.kern.inst.ivlb 0 # number of ivlb instructions executed
system.cpu.kern.inst.ivle 0 # number of ivle instructions executed system.cpu.kern.inst.ivle 0 # number of ivle instructions executed
system.cpu.kern.inst.quiesce 1877 # number of quiesce instructions executed system.cpu.kern.inst.quiesce 6282 # number of quiesce instructions executed
system.cpu.kern.ipl_count 177218 # number of times we switched to this ipl system.cpu.kern.ipl_count 184158 # number of times we switched to this ipl
system.cpu.kern.ipl_count_0 74624 42.11% 42.11% # number of times we switched to this ipl system.cpu.kern.ipl_count_0 75390 40.94% 40.94% # number of times we switched to this ipl
system.cpu.kern.ipl_count_21 251 0.14% 42.25% # number of times we switched to this ipl system.cpu.kern.ipl_count_21 245 0.13% 41.07% # number of times we switched to this ipl
system.cpu.kern.ipl_count_22 5425 3.06% 45.31% # number of times we switched to this ipl system.cpu.kern.ipl_count_22 1854 1.01% 42.08% # number of times we switched to this ipl
system.cpu.kern.ipl_count_31 96918 54.69% 100.00% # number of times we switched to this ipl system.cpu.kern.ipl_count_31 106669 57.92% 100.00% # number of times we switched to this ipl
system.cpu.kern.ipl_good 158463 # number of times we switched to this ipl from a different ipl system.cpu.kern.ipl_good 150141 # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_good_0 74570 47.06% 47.06% # number of times we switched to this ipl from a different ipl system.cpu.kern.ipl_good_0 74021 49.30% 49.30% # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_good_21 251 0.16% 47.22% # number of times we switched to this ipl from a different ipl system.cpu.kern.ipl_good_21 245 0.16% 49.46% # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_good_22 5425 3.42% 50.64% # number of times we switched to this ipl from a different ipl system.cpu.kern.ipl_good_22 1854 1.23% 50.70% # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_good_31 78217 49.36% 100.00% # number of times we switched to this ipl from a different ipl system.cpu.kern.ipl_good_31 74021 49.30% 100.00% # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_ticks 3481725752 # number of cycles we spent at this ipl system.cpu.kern.ipl_ticks 3636120154 # number of cycles we spent at this ipl
system.cpu.kern.ipl_ticks_0 3459659082 99.37% 99.37% # number of cycles we spent at this ipl system.cpu.kern.ipl_ticks_0 3601418096 99.05% 99.05% # number of cycles we spent at this ipl
system.cpu.kern.ipl_ticks_21 39982 0.00% 99.37% # number of cycles we spent at this ipl system.cpu.kern.ipl_ticks_21 40474 0.00% 99.05% # number of cycles we spent at this ipl
system.cpu.kern.ipl_ticks_22 930159 0.03% 99.39% # number of cycles we spent at this ipl system.cpu.kern.ipl_ticks_22 159444 0.00% 99.05% # number of cycles we spent at this ipl
system.cpu.kern.ipl_ticks_31 21096529 0.61% 100.00% # number of cycles we spent at this ipl system.cpu.kern.ipl_ticks_31 34502140 0.95% 100.00% # number of cycles we spent at this ipl
system.cpu.kern.ipl_used 0.894170 # fraction of swpipl calls that actually changed the ipl system.cpu.kern.ipl_used 0.815284 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_0 0.999276 # fraction of swpipl calls that actually changed the ipl system.cpu.kern.ipl_used_0 0.981841 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl system.cpu.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl system.cpu.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_31 0.807043 # fraction of swpipl calls that actually changed the ipl system.cpu.kern.ipl_used_31 0.693932 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.mode_good_kernel 1939 system.cpu.kern.mode_good_kernel 1937
system.cpu.kern.mode_good_user 1757 system.cpu.kern.mode_good_user 1757
system.cpu.kern.mode_good_idle 182 system.cpu.kern.mode_good_idle 180
system.cpu.kern.mode_switch_kernel 3320 # number of protection mode switches system.cpu.kern.mode_switch_kernel 5982 # number of protection mode switches
system.cpu.kern.mode_switch_user 1757 # number of protection mode switches system.cpu.kern.mode_switch_user 1757 # number of protection mode switches
system.cpu.kern.mode_switch_idle 2061 # number of protection mode switches system.cpu.kern.mode_switch_idle 2103 # number of protection mode switches
system.cpu.kern.mode_switch_good 0.543289 # fraction of useful protection mode switches system.cpu.kern.mode_switch_good 0.393619 # fraction of useful protection mode switches
system.cpu.kern.mode_switch_good_kernel 0.584036 # fraction of useful protection mode switches system.cpu.kern.mode_switch_good_kernel 0.323805 # fraction of useful protection mode switches
system.cpu.kern.mode_switch_good_user 1 # fraction of useful protection mode switches system.cpu.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
system.cpu.kern.mode_switch_good_idle 0.088307 # fraction of useful protection mode switches system.cpu.kern.mode_switch_good_idle 0.085592 # fraction of useful protection mode switches
system.cpu.kern.mode_ticks_kernel 31887159 0.92% 0.92% # number of ticks spent at the given mode system.cpu.kern.mode_ticks_kernel 54647278 1.50% 1.50% # number of ticks spent at the given mode
system.cpu.kern.mode_ticks_user 3591270 0.10% 1.02% # number of ticks spent at the given mode system.cpu.kern.mode_ticks_user 3591234 0.10% 1.60% # number of ticks spent at the given mode
system.cpu.kern.mode_ticks_idle 3446247321 98.98% 100.00% # number of ticks spent at the given mode system.cpu.kern.mode_ticks_idle 3577881640 98.40% 100.00% # number of ticks spent at the given mode
system.cpu.kern.swap_context 1575 # number of times the context was actually changed system.cpu.kern.swap_context 4208 # number of times the context was actually changed
system.cpu.kern.syscall 329 # number of syscalls executed system.cpu.kern.syscall 329 # number of syscalls executed
system.cpu.kern.syscall_fork 8 2.43% 2.43% # number of syscalls executed system.cpu.kern.syscall_fork 8 2.43% 2.43% # number of syscalls executed
system.cpu.kern.syscall_read 30 9.12% 11.55% # number of syscalls executed system.cpu.kern.syscall_read 30 9.12% 11.55% # number of syscalls executed
@ -112,16 +112,16 @@ system.cpu.kern.syscall_connect 2 0.61% 97.57% # nu
system.cpu.kern.syscall_setgid 4 1.22% 98.78% # number of syscalls executed system.cpu.kern.syscall_setgid 4 1.22% 98.78% # number of syscalls executed
system.cpu.kern.syscall_getrlimit 2 0.61% 99.39% # number of syscalls executed system.cpu.kern.syscall_getrlimit 2 0.61% 99.39% # number of syscalls executed
system.cpu.kern.syscall_setsid 2 0.61% 100.00% # number of syscalls executed system.cpu.kern.syscall_setsid 2 0.61% 100.00% # number of syscalls executed
system.cpu.not_idle_fraction 0.016660 # Percentage of non-idle cycles system.cpu.not_idle_fraction 0.017009 # Percentage of non-idle cycles
system.cpu.numCycles 58006987 # number of cpu cycles simulated system.cpu.numCycles 61845001 # number of cpu cycles simulated
system.cpu.num_insts 58001813 # Number of instructions executed system.cpu.num_insts 61839827 # Number of instructions executed
system.cpu.num_refs 13757191 # Number of memory references system.cpu.num_refs 16814484 # Number of memory references
system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD). system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD).
system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD). system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD). system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD).
system.disk0.dma_write_bytes 2735104 # Number of bytes transfered via DMA writes. system.disk0.dma_write_bytes 2702336 # Number of bytes transfered via DMA writes.
system.disk0.dma_write_full_pages 306 # Number of full page size DMA writes. system.disk0.dma_write_full_pages 302 # Number of full page size DMA writes.
system.disk0.dma_write_txs 412 # Number of DMA write transactions. system.disk0.dma_write_txs 408 # Number of DMA write transactions.
system.disk2.dma_read_bytes 0 # Number of bytes transfered via DMA reads (not PRD). system.disk2.dma_read_bytes 0 # Number of bytes transfered via DMA reads (not PRD).
system.disk2.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD). system.disk2.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk2.dma_read_txs 0 # Number of DMA read transactions (not PRD). system.disk2.dma_read_txs 0 # Number of DMA read transactions (not PRD).

View file
@ -1,4 +1,4 @@
0: system.tsunami.io.rtc: Real-time clock set to Sun Jan 1 00:00:00 2006 0: system.tsunami.io.rtc: Real-time clock set to Sun Jan 1 00:00:00 2006
Listening for console connection on port 3456 Listening for console connection on port 3457
0: system.remote_gdb.listener: listening for remote gdb #0 on port 7000 0: system.remote_gdb.listener: listening for remote gdb #0 on port 7001
warn: Entering event queue @ 0. Starting simulation... warn: Entering event queue @ 0. Starting simulation...

View file
@ -5,8 +5,8 @@ The Regents of The University of Michigan
All Rights Reserved All Rights Reserved
M5 compiled Oct 5 2006 22:13:02 M5 compiled Oct 8 2006 14:07:02
M5 started Fri Oct 6 00:23:19 2006 M5 started Sun Oct 8 14:07:07 2006
M5 executing on zizzer.eecs.umich.edu M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-atomic tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-atomic command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-atomic tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-atomic
Exiting @ tick 3481726167 because m5_exit instruction encountered Exiting @ tick 3636120569 because m5_exit instruction encountered

View file
@ -75,7 +75,7 @@ side_b=system.membus.port[0]
type=TimingSimpleCPU type=TimingSimpleCPU
children=dtb itb children=dtb itb
clock=1 clock=1
cpu_id=-1 cpu_id=0
defer_registration=false defer_registration=false
dtb=system.cpu0.dtb dtb=system.cpu0.dtb
function_trace=false function_trace=false
@ -104,7 +104,7 @@ size=48
type=TimingSimpleCPU type=TimingSimpleCPU
children=dtb itb children=dtb itb
clock=1 clock=1
cpu_id=-1 cpu_id=1
defer_registration=false defer_registration=false
dtb=system.cpu1.dtb dtb=system.cpu1.dtb
function_trace=false function_trace=false

View file
@ -90,9 +90,9 @@ max_loads_all_threads=0
progress_interval=0 progress_interval=0
mem=system.physmem mem=system.physmem
system=system system=system
cpu_id=0
itb=system.cpu0.itb itb=system.cpu0.itb
dtb=system.cpu0.dtb dtb=system.cpu0.dtb
cpu_id=-1
profile=0 profile=0
clock=1 clock=1
defer_registration=false defer_registration=false
@ -118,9 +118,9 @@ max_loads_all_threads=0
progress_interval=0 progress_interval=0
mem=system.physmem mem=system.physmem
system=system system=system
cpu_id=1
itb=system.cpu1.itb itb=system.cpu1.itb
dtb=system.cpu1.dtb dtb=system.cpu1.dtb
cpu_id=-1
profile=0 profile=0
clock=1 clock=1
defer_registration=false defer_registration=false

View file
@ -3,7 +3,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
memsize 8000000 pages 4000 memsize 8000000 pages 4000
First free page after ROM 0xFFFFFC0000018000 First free page after ROM 0xFFFFFC0000018000
HWRPB 0xFFFFFC0000018000 l1pt 0xFFFFFC0000040000 l2pt 0xFFFFFC0000042000 l3pt_rpb 0xFFFFFC0000044000 l3pt_kernel 0xFFFFFC0000048000 l2reserv 0xFFFFFC0000046000 HWRPB 0xFFFFFC0000018000 l1pt 0xFFFFFC0000040000 l2pt 0xFFFFFC0000042000 l3pt_rpb 0xFFFFFC0000044000 l3pt_kernel 0xFFFFFC0000048000 l2reserv 0xFFFFFC0000046000
kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC00008064E8, kentry = 0xFFFFFC0000310000, numCPUs = 0x2 kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC0000855718, kentry = 0xFFFFFC0000310000, numCPUs = 0x2
CPU Clock at 2000 MHz IntrClockFrequency=1024 CPU Clock at 2000 MHz IntrClockFrequency=1024
Booting with 2 processor(s) Booting with 2 processor(s)
KSP: 0x20043FE8 PTBR 0x20 KSP: 0x20043FE8 PTBR 0x20
@ -16,29 +16,27 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
Bootstraping CPU 1 with sp=0xFFFFFC0000076000 Bootstraping CPU 1 with sp=0xFFFFFC0000076000
unix_boot_mem ends at FFFFFC0000078000 unix_boot_mem ends at FFFFFC0000078000
k_argc = 0 k_argc = 0
jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1028) jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1067)
CallbackFixup 0 18000, t7=FFFFFC0000700000 CallbackFixup 0 18000, t7=FFFFFC000070C000
Entering slaveloop for cpu 1 my_rpb=FFFFFC0000018400 Entering slaveloop for cpu 1 my_rpb=FFFFFC0000018400
Linux version 2.6.8.1 (binkertn@ziff.eecs.umich.edu) (gcc version 3.4.3) #36 SMP Mon May 2 19:50:53 EDT 2005 Linux version 2.6.13 (hsul@zed.eecs.umich.edu) (gcc version 3.4.3) #7 SMP Tue Aug 15 14:40:31 EDT 2006
Booting GENERIC on Tsunami variation DP264 using machine vector DP264 from SRM Booting GENERIC on Tsunami variation DP264 using machine vector DP264 from SRM
Major Options: SMP LEGACY_START VERBOSE_MCHECK Major Options: SMP LEGACY_START VERBOSE_MCHECK
Command line: root=/dev/hda1 console=ttyS0 Command line: root=/dev/hda1 console=ttyS0
memcluster 0, usage 1, start 0, end 392 memcluster 0, usage 1, start 0, end 392
memcluster 1, usage 0, start 392, end 16384 memcluster 1, usage 0, start 392, end 16384
freeing pages 1030:16384 freeing pages 1069:16384
reserving pages 1030:1031 reserving pages 1069:1070
SMP: 2 CPUs probed -- cpu_present_mask = 3 SMP: 2 CPUs probed -- cpu_present_mask = 3
Built 1 zonelists Built 1 zonelists
Kernel command line: root=/dev/hda1 console=ttyS0 Kernel command line: root=/dev/hda1 console=ttyS0
PID hash table entries: 1024 (order 10: 16384 bytes) PID hash table entries: 1024 (order: 10, 32768 bytes)
Using epoch = 1900 Using epoch = 1900
Console: colour dummy device 80x25 Console: colour dummy device 80x25
Dentry cache hash table entries: 32768 (order: 5, 262144 bytes) Dentry cache hash table entries: 32768 (order: 5, 262144 bytes)
Inode-cache hash table entries: 16384 (order: 4, 131072 bytes) Inode-cache hash table entries: 16384 (order: 4, 131072 bytes)
Memory: 119072k/131072k available (3058k kernel code, 8680k reserved, 695k data, 480k init) Memory: 118784k/131072k available (3316k kernel code, 8952k reserved, 983k data, 224k init)
Mount-cache hash table entries: 512 (order: 0, 8192 bytes) Mount-cache hash table entries: 512
per-CPU timeslice cutoff: 374.49 usecs.
task migration cache decay timeout: 0 msecs.
SMP starting up secondaries. SMP starting up secondaries.
Slave CPU 1 console command START Slave CPU 1 console command START
SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb FFFFFC0000018400 my_rpb_phys 18400 SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb FFFFFC0000018400 my_rpb_phys 18400
@ -53,16 +51,23 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb
Initializing Cryptographic API Initializing Cryptographic API
rtc: Standard PC (1900) epoch (1900) detected rtc: Standard PC (1900) epoch (1900) detected
Real Time Clock Driver v1.12 Real Time Clock Driver v1.12
Serial: 8250/16550 driver $Revision: 1.90 $ 5 ports, IRQ sharing disabled Serial: 8250/16550 driver $Revision: 1.90 $ 1 ports, IRQ sharing disabled
ttyS0 at I/O 0x3f8 (irq = 4) is a 8250 ttyS0 at I/O 0x3f8 (irq = 4) is a 8250
io scheduler noop registered
io scheduler anticipatory registered
io scheduler deadline registered
io scheduler cfq registered
loop: loaded (max 8 devices) loop: loaded (max 8 devices)
Using anticipatory io scheduler
nbd: registered device at major 43 nbd: registered device at major 43
sinic.c: M5 Simple Integrated NIC driver sinic.c: M5 Simple Integrated NIC driver
ns83820.c: National Semiconductor DP83820 10/100/1000 driver. ns83820.c: National Semiconductor DP83820 10/100/1000 driver.
ns83820: irq bound to CPU 1
eth0: ns83820.c: 0x22c: 00000000, subsystem: 0000:0000 eth0: ns83820.c: 0x22c: 00000000, subsystem: 0000:0000
eth0: enabling optical transceiver eth0: enabling optical transceiver
eth0: ns83820 v0.20: DP83820 v1.3: 00:90:00:00:00:01 io=0x09000000 irq=30 f=sg eth0: using 64 bit addressing.
eth0: ns83820 v0.22: DP83820 v1.3: 00:90:00:00:00:01 io=0x09000000 irq=30 f=h,sg
tun: Universal TUN/TAP device driver, 1.6
tun: (C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>
Uniform Multi-Platform E-IDE driver Revision: 7.00alpha2 Uniform Multi-Platform E-IDE driver Revision: 7.00alpha2
ide: Assuming 33MHz system bus speed for PIO modes; override with idebus=xx ide: Assuming 33MHz system bus speed for PIO modes; override with idebus=xx
PIIX4: IDE controller at PCI slot 0000:00:00.0 PIIX4: IDE controller at PCI slot 0000:00:00.0
@ -75,24 +80,23 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb
ide0 at 0x8410-0x8417,0x8422 on irq 31 ide0 at 0x8410-0x8417,0x8422 on irq 31
hda: max request size: 128KiB hda: max request size: 128KiB
hda: 511056 sectors (261 MB), CHS=507/16/63, UDMA(33) hda: 511056 sectors (261 MB), CHS=507/16/63, UDMA(33)
hda: cache flushes not supported
hda: hda1 hda: hda1
hdb: max request size: 128KiB hdb: max request size: 128KiB
hdb: 4177920 sectors (2139 MB), CHS=4144/16/63, UDMA(33) hdb: 4177920 sectors (2139 MB), CHS=4144/16/63, UDMA(33)
hdb: cache flushes not supported
hdb: unknown partition table hdb: unknown partition table
scsi0 : scsi_m5, version 1.73 [20040518], dev_size_mb=8, opts=0x0
Vendor: Linux Model: scsi_m5 Li Rev: 0004
Type: Direct-Access ANSI SCSI revision: 03
SCSI device sda: 16384 512-byte hdwr sectors (8 MB)
SCSI device sda: drive cache: write back
sda: unknown partition table
Attached scsi disk sda at scsi0, channel 0, id 0, lun 0
mice: PS/2 mouse device common for all mice mice: PS/2 mouse device common for all mice
NET: Registered protocol family 2 NET: Registered protocol family 2
IP: routing cache hash table of 1024 buckets, 16Kbytes IP route cache hash table entries: 4096 (order: 2, 32768 bytes)
TCP: Hash tables configured (established 8192 bind 8192) TCP established hash table entries: 16384 (order: 5, 262144 bytes)
ip_conntrack version 2.1 (512 buckets, 4096 max) - 440 bytes per conntrack TCP bind hash table entries: 16384 (order: 5, 262144 bytes)
TCP: Hash tables configured (established 16384 bind 16384)
TCP reno registered
ip_conntrack version 2.1 (512 buckets, 4096 max) - 296 bytes per conntrack
ip_tables: (C) 2000-2002 Netfilter core team ip_tables: (C) 2000-2002 Netfilter core team
arp_tables: (C) 2002 David S. Miller arp_tables: (C) 2002 David S. Miller
TCP bic registered
Initializing IPsec netlink socket Initializing IPsec netlink socket
NET: Registered protocol family 1 NET: Registered protocol family 1
NET: Registered protocol family 17 NET: Registered protocol family 17
@ -101,7 +105,7 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb
802.1Q VLAN Support v1.8 Ben Greear <greearb@candelatech.com> 802.1Q VLAN Support v1.8 Ben Greear <greearb@candelatech.com>
All bugs added by David S. Miller <davem@redhat.com> All bugs added by David S. Miller <davem@redhat.com>
VFS: Mounted root (ext2 filesystem) readonly. VFS: Mounted root (ext2 filesystem) readonly.
Freeing unused kernel memory: 480k freed Freeing unused kernel memory: 224k freed
init started: BusyBox v1.1.0 (2006.08.17-02:54+0000) multi-call binary init started: BusyBox v1.1.0 (2006.08.17-02:54+0000) multi-call binary
mounting filesystems... mounting filesystems...
loading script... loading script...

View file
@ -1,232 +1,231 @@
---------- Begin Simulation Statistics ---------- ---------- Begin Simulation Statistics ----------
host_inst_rate 825990 # Simulator instruction rate (inst/s) host_inst_rate 779301 # Simulator instruction rate (inst/s)
host_mem_usage 193572 # Number of bytes of host memory used host_mem_usage 197344 # Number of bytes of host memory used
host_seconds 74.01 # Real time elapsed on the host host_seconds 85.22 # Real time elapsed on the host
host_tick_rate 47654938 # Simulator tick rate (ticks/s) host_tick_rate 43826709 # Simulator tick rate (ticks/s)
sim_freq 2000000000 # Frequency of simulated ticks sim_freq 2000000000 # Frequency of simulated ticks
sim_insts 61131962 # Number of instructions simulated sim_insts 66411500 # Number of instructions simulated
sim_seconds 1.763494 # Number of seconds simulated sim_seconds 1.867451 # Number of seconds simulated
sim_ticks 3526987181 # Number of ticks simulated sim_ticks 3734901822 # Number of ticks simulated
system.cpu0.dtb.accesses 1987164 # DTB accesses system.cpu0.dtb.accesses 828318 # DTB accesses
system.cpu0.dtb.acv 291 # DTB access violations system.cpu0.dtb.acv 315 # DTB access violations
system.cpu0.dtb.hits 10431590 # DTB hits system.cpu0.dtb.hits 13279471 # DTB hits
system.cpu0.dtb.misses 9590 # DTB misses system.cpu0.dtb.misses 7094 # DTB misses
system.cpu0.dtb.read_accesses 606328 # DTB read accesses system.cpu0.dtb.read_accesses 572336 # DTB read accesses
system.cpu0.dtb.read_acv 174 # DTB read access violations system.cpu0.dtb.read_acv 200 # DTB read access violations
system.cpu0.dtb.read_hits 5831565 # DTB read hits system.cpu0.dtb.read_hits 8207004 # DTB read hits
system.cpu0.dtb.read_misses 7663 # DTB read misses system.cpu0.dtb.read_misses 6394 # DTB read misses
system.cpu0.dtb.write_accesses 1380836 # DTB write accesses system.cpu0.dtb.write_accesses 255982 # DTB write accesses
system.cpu0.dtb.write_acv 117 # DTB write access violations system.cpu0.dtb.write_acv 115 # DTB write access violations
system.cpu0.dtb.write_hits 4600025 # DTB write hits system.cpu0.dtb.write_hits 5072467 # DTB write hits
system.cpu0.dtb.write_misses 1927 # DTB write misses system.cpu0.dtb.write_misses 700 # DTB write misses
system.cpu0.idle_fraction 0.984514 # Percentage of idle cycles system.cpu0.idle_fraction 0.982495 # Percentage of idle cycles
system.cpu0.itb.accesses 2372045 # ITB accesses system.cpu0.itb.accesses 1888651 # ITB accesses
system.cpu0.itb.acv 143 # ITB acv system.cpu0.itb.acv 166 # ITB acv
system.cpu0.itb.hits 2368331 # ITB hits system.cpu0.itb.hits 1885318 # ITB hits
system.cpu0.itb.misses 3714 # ITB misses system.cpu0.itb.misses 3333 # ITB misses
system.cpu0.kern.callpal 145084 # number of callpals executed system.cpu0.kern.callpal 146866 # number of callpals executed
system.cpu0.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed system.cpu0.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
system.cpu0.kern.callpal_wripir 54 0.04% 0.04% # number of callpals executed system.cpu0.kern.callpal_wripir 507 0.35% 0.35% # number of callpals executed
system.cpu0.kern.callpal_wrmces 1 0.00% 0.04% # number of callpals executed system.cpu0.kern.callpal_wrmces 1 0.00% 0.35% # number of callpals executed
system.cpu0.kern.callpal_wrfen 1 0.00% 0.04% # number of callpals executed system.cpu0.kern.callpal_wrfen 1 0.00% 0.35% # number of callpals executed
system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.04% # number of callpals executed system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.35% # number of callpals executed
system.cpu0.kern.callpal_swpctx 1182 0.81% 0.85% # number of callpals executed system.cpu0.kern.callpal_swpctx 2966 2.02% 2.37% # number of callpals executed
system.cpu0.kern.callpal_tbi 42 0.03% 0.88% # number of callpals executed system.cpu0.kern.callpal_tbi 47 0.03% 2.40% # number of callpals executed
system.cpu0.kern.callpal_wrent 7 0.00% 0.89% # number of callpals executed system.cpu0.kern.callpal_wrent 7 0.00% 2.40% # number of callpals executed
system.cpu0.kern.callpal_swpipl 135050 93.08% 93.97% # number of callpals executed system.cpu0.kern.callpal_swpipl 132441 90.18% 92.58% # number of callpals executed
system.cpu0.kern.callpal_rdps 4795 3.30% 97.28% # number of callpals executed system.cpu0.kern.callpal_rdps 6235 4.25% 96.83% # number of callpals executed
system.cpu0.kern.callpal_wrkgp 1 0.00% 97.28% # number of callpals executed system.cpu0.kern.callpal_wrkgp 1 0.00% 96.83% # number of callpals executed
system.cpu0.kern.callpal_wrusp 5 0.00% 97.28% # number of callpals executed system.cpu0.kern.callpal_wrusp 2 0.00% 96.83% # number of callpals executed
system.cpu0.kern.callpal_rdusp 8 0.01% 97.29% # number of callpals executed system.cpu0.kern.callpal_rdusp 8 0.01% 96.84% # number of callpals executed
system.cpu0.kern.callpal_whami 2 0.00% 97.29% # number of callpals executed system.cpu0.kern.callpal_whami 2 0.00% 96.84% # number of callpals executed
system.cpu0.kern.callpal_rti 3431 2.36% 99.65% # number of callpals executed system.cpu0.kern.callpal_rti 4201 2.86% 99.70% # number of callpals executed
system.cpu0.kern.callpal_callsys 364 0.25% 99.90% # number of callpals executed system.cpu0.kern.callpal_callsys 317 0.22% 99.91% # number of callpals executed
system.cpu0.kern.callpal_imb 139 0.10% 100.00% # number of callpals executed system.cpu0.kern.callpal_imb 128 0.09% 100.00% # number of callpals executed
system.cpu0.kern.inst.arm 0 # number of arm instructions executed system.cpu0.kern.inst.arm 0 # number of arm instructions executed
system.cpu0.kern.inst.hwrei 160926 # number of hwrei instructions executed system.cpu0.kern.inst.hwrei 160336 # number of hwrei instructions executed
system.cpu0.kern.inst.ivlb 0 # number of ivlb instructions executed system.cpu0.kern.inst.ivlb 0 # number of ivlb instructions executed
system.cpu0.kern.inst.ivle 0 # number of ivle instructions executed system.cpu0.kern.inst.ivle 0 # number of ivle instructions executed
system.cpu0.kern.inst.quiesce 1958 # number of quiesce instructions executed system.cpu0.kern.inst.quiesce 6712 # number of quiesce instructions executed
system.cpu0.kern.ipl_count 140584 # number of times we switched to this ipl system.cpu0.kern.ipl_count 139203 # number of times we switched to this ipl
system.cpu0.kern.ipl_count_0 56549 40.22% 40.22% # number of times we switched to this ipl system.cpu0.kern.ipl_count_0 55746 40.05% 40.05% # number of times we switched to this ipl
system.cpu0.kern.ipl_count_21 251 0.18% 40.40% # number of times we switched to this ipl system.cpu0.kern.ipl_count_21 245 0.18% 40.22% # number of times we switched to this ipl
system.cpu0.kern.ipl_count_22 5487 3.90% 44.31% # number of times we switched to this ipl system.cpu0.kern.ipl_count_22 1904 1.37% 41.59% # number of times we switched to this ipl
system.cpu0.kern.ipl_count_30 51 0.04% 44.34% # number of times we switched to this ipl system.cpu0.kern.ipl_count_30 411 0.30% 41.89% # number of times we switched to this ipl
system.cpu0.kern.ipl_count_31 78246 55.66% 100.00% # number of times we switched to this ipl system.cpu0.kern.ipl_count_31 80897 58.11% 100.00% # number of times we switched to this ipl
system.cpu0.kern.ipl_good 122461 # number of times we switched to this ipl from a different ipl system.cpu0.kern.ipl_good 112531 # number of times we switched to this ipl from a different ipl
system.cpu0.kern.ipl_good_0 56518 46.15% 46.15% # number of times we switched to this ipl from a different ipl system.cpu0.kern.ipl_good_0 55191 49.05% 49.05% # number of times we switched to this ipl from a different ipl
system.cpu0.kern.ipl_good_21 251 0.20% 46.36% # number of times we switched to this ipl from a different ipl system.cpu0.kern.ipl_good_21 245 0.22% 49.26% # number of times we switched to this ipl from a different ipl
system.cpu0.kern.ipl_good_22 5487 4.48% 50.84% # number of times we switched to this ipl from a different ipl system.cpu0.kern.ipl_good_22 1904 1.69% 50.95% # number of times we switched to this ipl from a different ipl
system.cpu0.kern.ipl_good_30 51 0.04% 50.88% # number of times we switched to this ipl from a different ipl system.cpu0.kern.ipl_good_30 411 0.37% 51.32% # number of times we switched to this ipl from a different ipl
system.cpu0.kern.ipl_good_31 60154 49.12% 100.00% # number of times we switched to this ipl from a different ipl system.cpu0.kern.ipl_good_31 54780 48.68% 100.00% # number of times we switched to this ipl from a different ipl
system.cpu0.kern.ipl_ticks 3526986735 # number of cycles we spent at this ipl system.cpu0.kern.ipl_ticks 3734378988 # number of cycles we spent at this ipl
system.cpu0.kern.ipl_ticks_0 3501352281 99.27% 99.27% # number of cycles we spent at this ipl system.cpu0.kern.ipl_ticks_0 3696129107 98.98% 98.98% # number of cycles we spent at this ipl
system.cpu0.kern.ipl_ticks_21 53019 0.00% 99.27% # number of cycles we spent at this ipl system.cpu0.kern.ipl_ticks_21 53683 0.00% 98.98% # number of cycles we spent at this ipl
system.cpu0.kern.ipl_ticks_22 1348211 0.04% 99.31% # number of cycles we spent at this ipl system.cpu0.kern.ipl_ticks_22 224672 0.01% 98.98% # number of cycles we spent at this ipl
system.cpu0.kern.ipl_ticks_30 18326 0.00% 99.31% # number of cycles we spent at this ipl system.cpu0.kern.ipl_ticks_30 128598 0.00% 98.99% # number of cycles we spent at this ipl
system.cpu0.kern.ipl_ticks_31 24214898 0.69% 100.00% # number of cycles we spent at this ipl system.cpu0.kern.ipl_ticks_31 37842928 1.01% 100.00% # number of cycles we spent at this ipl
system.cpu0.kern.ipl_used 0.871088 # fraction of swpipl calls that actually changed the ipl system.cpu0.kern.ipl_used 0.808395 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_0 0.999452 # fraction of swpipl calls that actually changed the ipl system.cpu0.kern.ipl_used_0 0.990044 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl system.cpu0.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl system.cpu0.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl system.cpu0.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_31 0.768781 # fraction of swpipl calls that actually changed the ipl system.cpu0.kern.ipl_used_31 0.677157 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.mode_good_kernel 1448 system.cpu0.kern.mode_good_kernel 1095
system.cpu0.kern.mode_good_user 1300 system.cpu0.kern.mode_good_user 1095
system.cpu0.kern.mode_good_idle 148 system.cpu0.kern.mode_good_idle 0
system.cpu0.kern.mode_switch_kernel 2490 # number of protection mode switches system.cpu0.kern.mode_switch_kernel 6633 # number of protection mode switches
system.cpu0.kern.mode_switch_user 1300 # number of protection mode switches system.cpu0.kern.mode_switch_user 1095 # number of protection mode switches
system.cpu0.kern.mode_switch_idle 2110 # number of protection mode switches system.cpu0.kern.mode_switch_idle 0 # number of protection mode switches
system.cpu0.kern.mode_switch_good 0.490847 # fraction of useful protection mode switches system.cpu0.kern.mode_switch_good 0.283385 # fraction of useful protection mode switches
system.cpu0.kern.mode_switch_good_kernel 0.581526 # fraction of useful protection mode switches system.cpu0.kern.mode_switch_good_kernel 0.165084 # fraction of useful protection mode switches
system.cpu0.kern.mode_switch_good_user 1 # fraction of useful protection mode switches system.cpu0.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
system.cpu0.kern.mode_switch_good_idle 0.070142 # fraction of useful protection mode switches system.cpu0.kern.mode_switch_good_idle <err: div-0> # fraction of useful protection mode switches
system.cpu0.kern.mode_ticks_kernel 23256451 0.66% 0.66% # number of ticks spent at the given mode system.cpu0.kern.mode_ticks_kernel 3730045371 99.93% 99.93% # number of ticks spent at the given mode
system.cpu0.kern.mode_ticks_user 3397192 0.10% 0.76% # number of ticks spent at the given mode system.cpu0.kern.mode_ticks_user 2718822 0.07% 100.00% # number of ticks spent at the given mode
system.cpu0.kern.mode_ticks_idle 3500333090 99.24% 100.00% # number of ticks spent at the given mode system.cpu0.kern.mode_ticks_idle 0 0.00% 100.00% # number of ticks spent at the given mode
system.cpu0.kern.swap_context 1183 # number of times the context was actually changed system.cpu0.kern.swap_context 2967 # number of times the context was actually changed
system.cpu0.kern.syscall 231 # number of syscalls executed system.cpu0.kern.syscall 179 # number of syscalls executed
system.cpu0.kern.syscall_fork 6 2.60% 2.60% # number of syscalls executed system.cpu0.kern.syscall_fork 7 3.91% 3.91% # number of syscalls executed
system.cpu0.kern.syscall_read 17 7.36% 9.96% # number of syscalls executed system.cpu0.kern.syscall_read 14 7.82% 11.73% # number of syscalls executed
system.cpu0.kern.syscall_write 4 1.73% 11.69% # number of syscalls executed system.cpu0.kern.syscall_write 4 2.23% 13.97% # number of syscalls executed
system.cpu0.kern.syscall_close 31 13.42% 25.11% # number of syscalls executed system.cpu0.kern.syscall_close 27 15.08% 29.05% # number of syscalls executed
system.cpu0.kern.syscall_chdir 1 0.43% 25.54% # number of syscalls executed system.cpu0.kern.syscall_chdir 1 0.56% 29.61% # number of syscalls executed
system.cpu0.kern.syscall_obreak 11 4.76% 30.30% # number of syscalls executed system.cpu0.kern.syscall_obreak 6 3.35% 32.96% # number of syscalls executed
system.cpu0.kern.syscall_lseek 6 2.60% 32.90% # number of syscalls executed system.cpu0.kern.syscall_lseek 7 3.91% 36.87% # number of syscalls executed
system.cpu0.kern.syscall_getpid 4 1.73% 34.63% # number of syscalls executed system.cpu0.kern.syscall_getpid 4 2.23% 39.11% # number of syscalls executed
system.cpu0.kern.syscall_setuid 2 0.87% 35.50% # number of syscalls executed system.cpu0.kern.syscall_setuid 1 0.56% 39.66% # number of syscalls executed
system.cpu0.kern.syscall_getuid 4 1.73% 37.23% # number of syscalls executed system.cpu0.kern.syscall_getuid 3 1.68% 41.34% # number of syscalls executed
system.cpu0.kern.syscall_access 9 3.90% 41.13% # number of syscalls executed system.cpu0.kern.syscall_access 6 3.35% 44.69% # number of syscalls executed
system.cpu0.kern.syscall_dup 2 0.87% 41.99% # number of syscalls executed system.cpu0.kern.syscall_dup 2 1.12% 45.81% # number of syscalls executed
system.cpu0.kern.syscall_open 42 18.18% 60.17% # number of syscalls executed system.cpu0.kern.syscall_open 30 16.76% 62.57% # number of syscalls executed
system.cpu0.kern.syscall_getgid 4 1.73% 61.90% # number of syscalls executed system.cpu0.kern.syscall_getgid 3 1.68% 64.25% # number of syscalls executed
system.cpu0.kern.syscall_sigprocmask 7 3.03% 64.94% # number of syscalls executed system.cpu0.kern.syscall_sigprocmask 8 4.47% 68.72% # number of syscalls executed
system.cpu0.kern.syscall_ioctl 9 3.90% 68.83% # number of syscalls executed system.cpu0.kern.syscall_ioctl 8 4.47% 73.18% # number of syscalls executed
system.cpu0.kern.syscall_readlink 1 0.43% 69.26% # number of syscalls executed system.cpu0.kern.syscall_execve 5 2.79% 75.98% # number of syscalls executed
system.cpu0.kern.syscall_execve 4 1.73% 71.00% # number of syscalls executed system.cpu0.kern.syscall_mmap 17 9.50% 85.47% # number of syscalls executed
system.cpu0.kern.syscall_mmap 35 15.15% 86.15% # number of syscalls executed system.cpu0.kern.syscall_munmap 3 1.68% 87.15% # number of syscalls executed
system.cpu0.kern.syscall_munmap 2 0.87% 87.01% # number of syscalls executed system.cpu0.kern.syscall_mprotect 4 2.23% 89.39% # number of syscalls executed
system.cpu0.kern.syscall_mprotect 10 4.33% 91.34% # number of syscalls executed system.cpu0.kern.syscall_gethostname 1 0.56% 89.94% # number of syscalls executed
system.cpu0.kern.syscall_gethostname 1 0.43% 91.77% # number of syscalls executed system.cpu0.kern.syscall_dup2 2 1.12% 91.06% # number of syscalls executed
system.cpu0.kern.syscall_dup2 2 0.87% 92.64% # number of syscalls executed system.cpu0.kern.syscall_fcntl 8 4.47% 95.53% # number of syscalls executed
system.cpu0.kern.syscall_fcntl 8 3.46% 96.10% # number of syscalls executed system.cpu0.kern.syscall_socket 2 1.12% 96.65% # number of syscalls executed
system.cpu0.kern.syscall_socket 2 0.87% 96.97% # number of syscalls executed system.cpu0.kern.syscall_connect 2 1.12% 97.77% # number of syscalls executed
system.cpu0.kern.syscall_connect 2 0.87% 97.84% # number of syscalls executed system.cpu0.kern.syscall_setgid 1 0.56% 98.32% # number of syscalls executed
system.cpu0.kern.syscall_setgid 2 0.87% 98.70% # number of syscalls executed system.cpu0.kern.syscall_getrlimit 1 0.56% 98.88% # number of syscalls executed
system.cpu0.kern.syscall_getrlimit 1 0.43% 99.13% # number of syscalls executed system.cpu0.kern.syscall_setsid 2 1.12% 100.00% # number of syscalls executed
system.cpu0.kern.syscall_setsid 2 0.87% 100.00% # number of syscalls executed system.cpu0.not_idle_fraction 0.017505 # Percentage of non-idle cycles
system.cpu0.not_idle_fraction 0.015486 # Percentage of non-idle cycles
system.cpu0.numCycles 0 # number of cpu cycles simulated system.cpu0.numCycles 0 # number of cpu cycles simulated
system.cpu0.num_insts 44155958 # Number of instructions executed system.cpu0.num_insts 52039310 # Number of instructions executed
system.cpu0.num_refs 10463340 # Number of memory references system.cpu0.num_refs 13510641 # Number of memory references
system.cpu1.dtb.accesses 323344 # DTB accesses system.cpu1.dtb.accesses 477045 # DTB accesses
system.cpu1.dtb.acv 82 # DTB access violations system.cpu1.dtb.acv 52 # DTB access violations
system.cpu1.dtb.hits 4234985 # DTB hits system.cpu1.dtb.hits 4567143 # DTB hits
system.cpu1.dtb.misses 2977 # DTB misses system.cpu1.dtb.misses 4359 # DTB misses
system.cpu1.dtb.read_accesses 222873 # DTB read accesses system.cpu1.dtb.read_accesses 328553 # DTB read accesses
system.cpu1.dtb.read_acv 36 # DTB read access violations system.cpu1.dtb.read_acv 10 # DTB read access violations
system.cpu1.dtb.read_hits 2431648 # DTB read hits system.cpu1.dtb.read_hits 2660612 # DTB read hits
system.cpu1.dtb.read_misses 2698 # DTB read misses system.cpu1.dtb.read_misses 3911 # DTB read misses
system.cpu1.dtb.write_accesses 100471 # DTB write accesses system.cpu1.dtb.write_accesses 148492 # DTB write accesses
system.cpu1.dtb.write_acv 46 # DTB write access violations system.cpu1.dtb.write_acv 42 # DTB write access violations
system.cpu1.dtb.write_hits 1803337 # DTB write hits system.cpu1.dtb.write_hits 1906531 # DTB write hits
system.cpu1.dtb.write_misses 279 # DTB write misses system.cpu1.dtb.write_misses 448 # DTB write misses
system.cpu1.idle_fraction 0.993979 # Percentage of idle cycles system.cpu1.idle_fraction 0.994923 # Percentage of idle cycles
system.cpu1.itb.accesses 912010 # ITB accesses system.cpu1.itb.accesses 1392687 # ITB accesses
system.cpu1.itb.acv 41 # ITB acv system.cpu1.itb.acv 18 # ITB acv
system.cpu1.itb.hits 910678 # ITB hits system.cpu1.itb.hits 1391015 # ITB hits
system.cpu1.itb.misses 1332 # ITB misses system.cpu1.itb.misses 1672 # ITB misses
system.cpu1.kern.callpal 57529 # number of callpals executed system.cpu1.kern.callpal 74475 # number of callpals executed
system.cpu1.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed system.cpu1.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
system.cpu1.kern.callpal_wripir 51 0.09% 0.09% # number of callpals executed system.cpu1.kern.callpal_wripir 411 0.55% 0.55% # number of callpals executed
system.cpu1.kern.callpal_wrmces 1 0.00% 0.09% # number of callpals executed system.cpu1.kern.callpal_wrmces 1 0.00% 0.55% # number of callpals executed
system.cpu1.kern.callpal_wrfen 1 0.00% 0.09% # number of callpals executed system.cpu1.kern.callpal_wrfen 1 0.00% 0.56% # number of callpals executed
system.cpu1.kern.callpal_swpctx 451 0.78% 0.88% # number of callpals executed system.cpu1.kern.callpal_swpctx 2106 2.83% 3.38% # number of callpals executed
system.cpu1.kern.callpal_tbi 12 0.02% 0.90% # number of callpals executed system.cpu1.kern.callpal_tbi 6 0.01% 3.39% # number of callpals executed
system.cpu1.kern.callpal_wrent 7 0.01% 0.91% # number of callpals executed system.cpu1.kern.callpal_wrent 7 0.01% 3.40% # number of callpals executed
system.cpu1.kern.callpal_swpipl 54081 94.01% 94.92% # number of callpals executed system.cpu1.kern.callpal_swpipl 65169 87.50% 90.91% # number of callpals executed
system.cpu1.kern.callpal_rdps 368 0.64% 95.56% # number of callpals executed system.cpu1.kern.callpal_rdps 2603 3.50% 94.40% # number of callpals executed
system.cpu1.kern.callpal_wrkgp 1 0.00% 95.56% # number of callpals executed system.cpu1.kern.callpal_wrkgp 1 0.00% 94.40% # number of callpals executed
system.cpu1.kern.callpal_wrusp 2 0.00% 95.56% # number of callpals executed system.cpu1.kern.callpal_wrusp 5 0.01% 94.41% # number of callpals executed
system.cpu1.kern.callpal_rdusp 2 0.00% 95.57% # number of callpals executed system.cpu1.kern.callpal_rdusp 1 0.00% 94.41% # number of callpals executed
system.cpu1.kern.callpal_whami 3 0.01% 95.57% # number of callpals executed system.cpu1.kern.callpal_whami 3 0.00% 94.41% # number of callpals executed
system.cpu1.kern.callpal_rti 2337 4.06% 99.63% # number of callpals executed system.cpu1.kern.callpal_rti 3893 5.23% 99.64% # number of callpals executed
system.cpu1.kern.callpal_callsys 169 0.29% 99.93% # number of callpals executed system.cpu1.kern.callpal_callsys 214 0.29% 99.93% # number of callpals executed
system.cpu1.kern.callpal_imb 41 0.07% 100.00% # number of callpals executed system.cpu1.kern.callpal_imb 52 0.07% 100.00% # number of callpals executed
system.cpu1.kern.callpal_rdunique 1 0.00% 100.00% # number of callpals executed system.cpu1.kern.callpal_rdunique 1 0.00% 100.00% # number of callpals executed
system.cpu1.kern.inst.arm 0 # number of arm instructions executed system.cpu1.kern.inst.arm 0 # number of arm instructions executed
system.cpu1.kern.inst.hwrei 63811 # number of hwrei instructions executed system.cpu1.kern.inst.hwrei 82987 # number of hwrei instructions executed
system.cpu1.kern.inst.ivlb 0 # number of ivlb instructions executed system.cpu1.kern.inst.ivlb 0 # number of ivlb instructions executed
system.cpu1.kern.inst.ivle 0 # number of ivle instructions executed system.cpu1.kern.inst.ivle 0 # number of ivle instructions executed
system.cpu1.kern.inst.quiesce 1898 # number of quiesce instructions executed system.cpu1.kern.inst.quiesce 2512 # number of quiesce instructions executed
system.cpu1.kern.ipl_count 58267 # number of times we switched to this ipl system.cpu1.kern.ipl_count 71472 # number of times we switched to this ipl
system.cpu1.kern.ipl_count_0 25040 42.97% 42.97% # number of times we switched to this ipl system.cpu1.kern.ipl_count_0 27792 38.89% 38.89% # number of times we switched to this ipl
system.cpu1.kern.ipl_count_22 5452 9.36% 52.33% # number of times we switched to this ipl system.cpu1.kern.ipl_count_22 1902 2.66% 41.55% # number of times we switched to this ipl
system.cpu1.kern.ipl_count_30 54 0.09% 52.42% # number of times we switched to this ipl system.cpu1.kern.ipl_count_30 507 0.71% 42.26% # number of times we switched to this ipl
system.cpu1.kern.ipl_count_31 27721 47.58% 100.00% # number of times we switched to this ipl system.cpu1.kern.ipl_count_31 41271 57.74% 100.00% # number of times we switched to this ipl
system.cpu1.kern.ipl_good 57331 # number of times we switched to this ipl from a different ipl system.cpu1.kern.ipl_good 55838 # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_good_0 25007 43.62% 43.62% # number of times we switched to this ipl from a different ipl system.cpu1.kern.ipl_good_0 26968 48.30% 48.30% # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_good_22 5452 9.51% 53.13% # number of times we switched to this ipl from a different ipl system.cpu1.kern.ipl_good_22 1902 3.41% 51.70% # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_good_30 54 0.09% 53.22% # number of times we switched to this ipl from a different ipl system.cpu1.kern.ipl_good_30 507 0.91% 52.61% # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_good_31 26818 46.78% 100.00% # number of times we switched to this ipl from a different ipl system.cpu1.kern.ipl_good_31 26461 47.39% 100.00% # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_ticks 3526422675 # number of cycles we spent at this ipl system.cpu1.kern.ipl_ticks 3734901376 # number of cycles we spent at this ipl
system.cpu1.kern.ipl_ticks_0 3497592433 99.18% 99.18% # number of cycles we spent at this ipl system.cpu1.kern.ipl_ticks_0 3704875983 99.20% 99.20% # number of cycles we spent at this ipl
system.cpu1.kern.ipl_ticks_22 1410084 0.04% 99.22% # number of cycles we spent at this ipl system.cpu1.kern.ipl_ticks_22 224436 0.01% 99.20% # number of cycles we spent at this ipl
system.cpu1.kern.ipl_ticks_30 19740 0.00% 99.22% # number of cycles we spent at this ipl system.cpu1.kern.ipl_ticks_30 162794 0.00% 99.21% # number of cycles we spent at this ipl
system.cpu1.kern.ipl_ticks_31 27400418 0.78% 100.00% # number of cycles we spent at this ipl system.cpu1.kern.ipl_ticks_31 29638163 0.79% 100.00% # number of cycles we spent at this ipl
system.cpu1.kern.ipl_used 0.983936 # fraction of swpipl calls that actually changed the ipl system.cpu1.kern.ipl_used 0.781257 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_0 0.998682 # fraction of swpipl calls that actually changed the ipl system.cpu1.kern.ipl_used_0 0.970351 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl system.cpu1.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl system.cpu1.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_31 0.967425 # fraction of swpipl calls that actually changed the ipl system.cpu1.kern.ipl_used_31 0.641152 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.mode_good_kernel 465 system.cpu1.kern.mode_good_kernel 1094
system.cpu1.kern.mode_good_user 465 system.cpu1.kern.mode_good_user 662
system.cpu1.kern.mode_good_idle 0 system.cpu1.kern.mode_good_idle 432
system.cpu1.kern.mode_switch_kernel 2771 # number of protection mode switches system.cpu1.kern.mode_switch_kernel 2358 # number of protection mode switches
system.cpu1.kern.mode_switch_user 465 # number of protection mode switches system.cpu1.kern.mode_switch_user 662 # number of protection mode switches
system.cpu1.kern.mode_switch_idle 0 # number of protection mode switches system.cpu1.kern.mode_switch_idle 2831 # number of protection mode switches
system.cpu1.kern.mode_switch_good 0.287392 # fraction of useful protection mode switches system.cpu1.kern.mode_switch_good 0.373953 # fraction of useful protection mode switches
system.cpu1.kern.mode_switch_good_kernel 0.167809 # fraction of useful protection mode switches system.cpu1.kern.mode_switch_good_kernel 0.463953 # fraction of useful protection mode switches
system.cpu1.kern.mode_switch_good_user 1 # fraction of useful protection mode switches system.cpu1.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
system.cpu1.kern.mode_switch_good_idle no value # fraction of useful protection mode switches system.cpu1.kern.mode_switch_good_idle 0.152596 # fraction of useful protection mode switches
system.cpu1.kern.mode_ticks_kernel 3525066043 99.96% 99.96% # number of ticks spent at the given mode system.cpu1.kern.mode_ticks_kernel 13374855 0.36% 0.36% # number of ticks spent at the given mode
system.cpu1.kern.mode_ticks_user 1294184 0.04% 100.00% # number of ticks spent at the given mode system.cpu1.kern.mode_ticks_user 1967356 0.05% 0.41% # number of ticks spent at the given mode
system.cpu1.kern.mode_ticks_idle 0 0.00% 100.00% # number of ticks spent at the given mode system.cpu1.kern.mode_ticks_idle 3719559163 99.59% 100.00% # number of ticks spent at the given mode
system.cpu1.kern.swap_context 452 # number of times the context was actually changed system.cpu1.kern.swap_context 2107 # number of times the context was actually changed
system.cpu1.kern.syscall 98 # number of syscalls executed system.cpu1.kern.syscall 150 # number of syscalls executed
system.cpu1.kern.syscall_fork 2 2.04% 2.04% # number of syscalls executed system.cpu1.kern.syscall_fork 1 0.67% 0.67% # number of syscalls executed
system.cpu1.kern.syscall_read 13 13.27% 15.31% # number of syscalls executed system.cpu1.kern.syscall_read 16 10.67% 11.33% # number of syscalls executed
system.cpu1.kern.syscall_close 12 12.24% 27.55% # number of syscalls executed system.cpu1.kern.syscall_close 16 10.67% 22.00% # number of syscalls executed
system.cpu1.kern.syscall_chmod 1 1.02% 28.57% # number of syscalls executed system.cpu1.kern.syscall_chmod 1 0.67% 22.67% # number of syscalls executed
system.cpu1.kern.syscall_obreak 4 4.08% 32.65% # number of syscalls executed system.cpu1.kern.syscall_obreak 9 6.00% 28.67% # number of syscalls executed
system.cpu1.kern.syscall_lseek 4 4.08% 36.73% # number of syscalls executed system.cpu1.kern.syscall_lseek 3 2.00% 30.67% # number of syscalls executed
system.cpu1.kern.syscall_getpid 2 2.04% 38.78% # number of syscalls executed system.cpu1.kern.syscall_getpid 2 1.33% 32.00% # number of syscalls executed
system.cpu1.kern.syscall_setuid 2 2.04% 40.82% # number of syscalls executed system.cpu1.kern.syscall_setuid 3 2.00% 34.00% # number of syscalls executed
system.cpu1.kern.syscall_getuid 2 2.04% 42.86% # number of syscalls executed system.cpu1.kern.syscall_getuid 3 2.00% 36.00% # number of syscalls executed
system.cpu1.kern.syscall_access 2 2.04% 44.90% # number of syscalls executed system.cpu1.kern.syscall_access 5 3.33% 39.33% # number of syscalls executed
system.cpu1.kern.syscall_open 13 13.27% 58.16% # number of syscalls executed system.cpu1.kern.syscall_open 25 16.67% 56.00% # number of syscalls executed
system.cpu1.kern.syscall_getgid 2 2.04% 60.20% # number of syscalls executed system.cpu1.kern.syscall_getgid 3 2.00% 58.00% # number of syscalls executed
system.cpu1.kern.syscall_sigprocmask 3 3.06% 63.27% # number of syscalls executed system.cpu1.kern.syscall_sigprocmask 2 1.33% 59.33% # number of syscalls executed
system.cpu1.kern.syscall_ioctl 1 1.02% 64.29% # number of syscalls executed system.cpu1.kern.syscall_ioctl 2 1.33% 60.67% # number of syscalls executed
system.cpu1.kern.syscall_execve 3 3.06% 67.35% # number of syscalls executed system.cpu1.kern.syscall_readlink 1 0.67% 61.33% # number of syscalls executed
system.cpu1.kern.syscall_mmap 19 19.39% 86.73% # number of syscalls executed system.cpu1.kern.syscall_execve 2 1.33% 62.67% # number of syscalls executed
system.cpu1.kern.syscall_munmap 1 1.02% 87.76% # number of syscalls executed system.cpu1.kern.syscall_mmap 37 24.67% 87.33% # number of syscalls executed
system.cpu1.kern.syscall_mprotect 6 6.12% 93.88% # number of syscalls executed system.cpu1.kern.syscall_mprotect 12 8.00% 95.33% # number of syscalls executed
system.cpu1.kern.syscall_dup2 1 1.02% 94.90% # number of syscalls executed system.cpu1.kern.syscall_dup2 1 0.67% 96.00% # number of syscalls executed
system.cpu1.kern.syscall_fcntl 2 2.04% 96.94% # number of syscalls executed system.cpu1.kern.syscall_fcntl 2 1.33% 97.33% # number of syscalls executed
system.cpu1.kern.syscall_setgid 2 2.04% 98.98% # number of syscalls executed system.cpu1.kern.syscall_setgid 3 2.00% 99.33% # number of syscalls executed
system.cpu1.kern.syscall_getrlimit 1 1.02% 100.00% # number of syscalls executed system.cpu1.kern.syscall_getrlimit 1 0.67% 100.00% # number of syscalls executed
system.cpu1.not_idle_fraction 0.006021 # Percentage of non-idle cycles system.cpu1.not_idle_fraction 0.005077 # Percentage of non-idle cycles
system.cpu1.numCycles 0 # number of cpu cycles simulated system.cpu1.numCycles 0 # number of cpu cycles simulated
system.cpu1.num_insts 16976004 # Number of instructions executed system.cpu1.num_insts 14372190 # Number of instructions executed
system.cpu1.num_refs 4251312 # Number of memory references system.cpu1.num_refs 4596339 # Number of memory references
system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD). system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD).
system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD). system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD). system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD).
system.disk0.dma_write_bytes 2735104 # Number of bytes transfered via DMA writes. system.disk0.dma_write_bytes 2702336 # Number of bytes transfered via DMA writes.
system.disk0.dma_write_full_pages 306 # Number of full page size DMA writes. system.disk0.dma_write_full_pages 302 # Number of full page size DMA writes.
system.disk0.dma_write_txs 412 # Number of DMA write transactions. system.disk0.dma_write_txs 408 # Number of DMA write transactions.
system.disk2.dma_read_bytes 0 # Number of bytes transfered via DMA reads (not PRD). system.disk2.dma_read_bytes 0 # Number of bytes transfered via DMA reads (not PRD).
system.disk2.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD). system.disk2.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk2.dma_read_txs 0 # Number of DMA read transactions (not PRD). system.disk2.dma_read_txs 0 # Number of DMA read transactions (not PRD).
@@ -1,6 +1,6 @@
0: system.tsunami.io.rtc: Real-time clock set to Sun Jan 1 00:00:00 2006 0: system.tsunami.io.rtc: Real-time clock set to Sun Jan 1 00:00:00 2006
Listening for console connection on port 3456 Listening for console connection on port 3457
0: system.remote_gdb.listener: listening for remote gdb #0 on port 7000 0: system.remote_gdb.listener: listening for remote gdb #0 on port 7001
0: system.remote_gdb.listener: listening for remote gdb #1 on port 7001 0: system.remote_gdb.listener: listening for remote gdb #1 on port 7002
warn: Entering event queue @ 0. Starting simulation... warn: Entering event queue @ 0. Starting simulation...
warn: 271342: Trying to launch CPU number 1! warn: 271343: Trying to launch CPU number 1!
@@ -5,8 +5,8 @@ The Regents of The University of Michigan
All Rights Reserved All Rights Reserved
M5 compiled Oct 5 2006 22:13:02 M5 compiled Oct 8 2006 14:07:02
M5 started Fri Oct 6 00:26:09 2006 M5 started Sun Oct 8 14:10:09 2006
M5 executing on zizzer.eecs.umich.edu M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-timing-dual tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-timing-dual command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-timing-dual tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-timing-dual
Exiting @ tick 3526987181 because m5_exit instruction encountered Exiting @ tick 3734901822 because m5_exit instruction encountered
@@ -75,7 +75,7 @@ side_b=system.membus.port[0]
type=TimingSimpleCPU type=TimingSimpleCPU
children=dtb itb children=dtb itb
clock=1 clock=1
cpu_id=-1 cpu_id=0
defer_registration=false defer_registration=false
dtb=system.cpu.dtb dtb=system.cpu.dtb
function_trace=false function_trace=false
@@ -90,9 +90,9 @@ max_loads_all_threads=0
progress_interval=0 progress_interval=0
mem=system.physmem mem=system.physmem
system=system system=system
cpu_id=0
itb=system.cpu.itb itb=system.cpu.itb
dtb=system.cpu.dtb dtb=system.cpu.dtb
cpu_id=-1
profile=0 profile=0
clock=1 clock=1
defer_registration=false defer_registration=false
@@ -3,7 +3,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
memsize 8000000 pages 4000 memsize 8000000 pages 4000
First free page after ROM 0xFFFFFC0000018000 First free page after ROM 0xFFFFFC0000018000
HWRPB 0xFFFFFC0000018000 l1pt 0xFFFFFC0000040000 l2pt 0xFFFFFC0000042000 l3pt_rpb 0xFFFFFC0000044000 l3pt_kernel 0xFFFFFC0000048000 l2reserv 0xFFFFFC0000046000 HWRPB 0xFFFFFC0000018000 l1pt 0xFFFFFC0000040000 l2pt 0xFFFFFC0000042000 l3pt_rpb 0xFFFFFC0000044000 l3pt_kernel 0xFFFFFC0000048000 l2reserv 0xFFFFFC0000046000
kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC00008064E8, kentry = 0xFFFFFC0000310000, numCPUs = 0x1 kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC0000855718, kentry = 0xFFFFFC0000310000, numCPUs = 0x1
CPU Clock at 2000 MHz IntrClockFrequency=1024 CPU Clock at 2000 MHz IntrClockFrequency=1024
Booting with 1 processor(s) Booting with 1 processor(s)
KSP: 0x20043FE8 PTBR 0x20 KSP: 0x20043FE8 PTBR 0x20
@@ -14,28 +14,26 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
ConsoleDispatch at virt 10000658 phys 18658 val FFFFFC00000100A8 ConsoleDispatch at virt 10000658 phys 18658 val FFFFFC00000100A8
unix_boot_mem ends at FFFFFC0000076000 unix_boot_mem ends at FFFFFC0000076000
k_argc = 0 k_argc = 0
jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1028) jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1067)
CallbackFixup 0 18000, t7=FFFFFC0000700000 CallbackFixup 0 18000, t7=FFFFFC000070C000
Linux version 2.6.8.1 (binkertn@ziff.eecs.umich.edu) (gcc version 3.4.3) #36 SMP Mon May 2 19:50:53 EDT 2005 Linux version 2.6.13 (hsul@zed.eecs.umich.edu) (gcc version 3.4.3) #7 SMP Tue Aug 15 14:40:31 EDT 2006
Booting GENERIC on Tsunami variation DP264 using machine vector DP264 from SRM Booting GENERIC on Tsunami variation DP264 using machine vector DP264 from SRM
Major Options: SMP LEGACY_START VERBOSE_MCHECK Major Options: SMP LEGACY_START VERBOSE_MCHECK
Command line: root=/dev/hda1 console=ttyS0 Command line: root=/dev/hda1 console=ttyS0
memcluster 0, usage 1, start 0, end 392 memcluster 0, usage 1, start 0, end 392
memcluster 1, usage 0, start 392, end 16384 memcluster 1, usage 0, start 392, end 16384
freeing pages 1030:16384 freeing pages 1069:16384
reserving pages 1030:1031 reserving pages 1069:1070
SMP: 1 CPUs probed -- cpu_present_mask = 1 SMP: 1 CPUs probed -- cpu_present_mask = 1
Built 1 zonelists Built 1 zonelists
Kernel command line: root=/dev/hda1 console=ttyS0 Kernel command line: root=/dev/hda1 console=ttyS0
PID hash table entries: 1024 (order 10: 16384 bytes) PID hash table entries: 1024 (order: 10, 32768 bytes)
Using epoch = 1900 Using epoch = 1900
Console: colour dummy device 80x25 Console: colour dummy device 80x25
Dentry cache hash table entries: 32768 (order: 5, 262144 bytes) Dentry cache hash table entries: 32768 (order: 5, 262144 bytes)
Inode-cache hash table entries: 16384 (order: 4, 131072 bytes) Inode-cache hash table entries: 16384 (order: 4, 131072 bytes)
Memory: 119072k/131072k available (3058k kernel code, 8680k reserved, 695k data, 480k init) Memory: 118784k/131072k available (3316k kernel code, 8952k reserved, 983k data, 224k init)
Mount-cache hash table entries: 512 (order: 0, 8192 bytes) Mount-cache hash table entries: 512
per-CPU timeslice cutoff: 374.49 usecs.
task migration cache decay timeout: 0 msecs.
SMP mode deactivated. SMP mode deactivated.
Brought up 1 CPUs Brought up 1 CPUs
SMP: Total of 1 processors activated (4002.20 BogoMIPS). SMP: Total of 1 processors activated (4002.20 BogoMIPS).
@@ -48,16 +46,23 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
Initializing Cryptographic API Initializing Cryptographic API
rtc: Standard PC (1900) epoch (1900) detected rtc: Standard PC (1900) epoch (1900) detected
Real Time Clock Driver v1.12 Real Time Clock Driver v1.12
Serial: 8250/16550 driver $Revision: 1.90 $ 5 ports, IRQ sharing disabled Serial: 8250/16550 driver $Revision: 1.90 $ 1 ports, IRQ sharing disabled
ttyS0 at I/O 0x3f8 (irq = 4) is a 8250 ttyS0 at I/O 0x3f8 (irq = 4) is a 8250
io scheduler noop registered
io scheduler anticipatory registered
io scheduler deadline registered
io scheduler cfq registered
loop: loaded (max 8 devices) loop: loaded (max 8 devices)
Using anticipatory io scheduler
nbd: registered device at major 43 nbd: registered device at major 43
sinic.c: M5 Simple Integrated NIC driver sinic.c: M5 Simple Integrated NIC driver
ns83820.c: National Semiconductor DP83820 10/100/1000 driver. ns83820.c: National Semiconductor DP83820 10/100/1000 driver.
ns83820: irq bound to CPU 0
eth0: ns83820.c: 0x22c: 00000000, subsystem: 0000:0000 eth0: ns83820.c: 0x22c: 00000000, subsystem: 0000:0000
eth0: enabling optical transceiver eth0: enabling optical transceiver
eth0: ns83820 v0.20: DP83820 v1.3: 00:90:00:00:00:01 io=0x09000000 irq=30 f=sg eth0: using 64 bit addressing.
eth0: ns83820 v0.22: DP83820 v1.3: 00:90:00:00:00:01 io=0x09000000 irq=30 f=h,sg
tun: Universal TUN/TAP device driver, 1.6
tun: (C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>
Uniform Multi-Platform E-IDE driver Revision: 7.00alpha2 Uniform Multi-Platform E-IDE driver Revision: 7.00alpha2
ide: Assuming 33MHz system bus speed for PIO modes; override with idebus=xx ide: Assuming 33MHz system bus speed for PIO modes; override with idebus=xx
PIIX4: IDE controller at PCI slot 0000:00:00.0 PIIX4: IDE controller at PCI slot 0000:00:00.0
@@ -70,24 +75,23 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
ide0 at 0x8410-0x8417,0x8422 on irq 31 ide0 at 0x8410-0x8417,0x8422 on irq 31
hda: max request size: 128KiB hda: max request size: 128KiB
hda: 511056 sectors (261 MB), CHS=507/16/63, UDMA(33) hda: 511056 sectors (261 MB), CHS=507/16/63, UDMA(33)
hda: cache flushes not supported
hda: hda1 hda: hda1
hdb: max request size: 128KiB hdb: max request size: 128KiB
hdb: 4177920 sectors (2139 MB), CHS=4144/16/63, UDMA(33) hdb: 4177920 sectors (2139 MB), CHS=4144/16/63, UDMA(33)
hdb: cache flushes not supported
hdb: unknown partition table hdb: unknown partition table
scsi0 : scsi_m5, version 1.73 [20040518], dev_size_mb=8, opts=0x0
Vendor: Linux Model: scsi_m5 Li Rev: 0004
Type: Direct-Access ANSI SCSI revision: 03
SCSI device sda: 16384 512-byte hdwr sectors (8 MB)
SCSI device sda: drive cache: write back
sda: unknown partition table
Attached scsi disk sda at scsi0, channel 0, id 0, lun 0
mice: PS/2 mouse device common for all mice mice: PS/2 mouse device common for all mice
NET: Registered protocol family 2 NET: Registered protocol family 2
IP: routing cache hash table of 1024 buckets, 16Kbytes IP route cache hash table entries: 4096 (order: 2, 32768 bytes)
TCP: Hash tables configured (established 8192 bind 8192) TCP established hash table entries: 16384 (order: 5, 262144 bytes)
ip_conntrack version 2.1 (512 buckets, 4096 max) - 440 bytes per conntrack TCP bind hash table entries: 16384 (order: 5, 262144 bytes)
TCP: Hash tables configured (established 16384 bind 16384)
TCP reno registered
ip_conntrack version 2.1 (512 buckets, 4096 max) - 296 bytes per conntrack
ip_tables: (C) 2000-2002 Netfilter core team ip_tables: (C) 2000-2002 Netfilter core team
arp_tables: (C) 2002 David S. Miller arp_tables: (C) 2002 David S. Miller
TCP bic registered
Initializing IPsec netlink socket Initializing IPsec netlink socket
NET: Registered protocol family 1 NET: Registered protocol family 1
NET: Registered protocol family 17 NET: Registered protocol family 17
@@ -96,7 +100,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
802.1Q VLAN Support v1.8 Ben Greear <greearb@candelatech.com> 802.1Q VLAN Support v1.8 Ben Greear <greearb@candelatech.com>
All bugs added by David S. Miller <davem@redhat.com> All bugs added by David S. Miller <davem@redhat.com>
VFS: Mounted root (ext2 filesystem) readonly. VFS: Mounted root (ext2 filesystem) readonly.
Freeing unused kernel memory: 480k freed Freeing unused kernel memory: 224k freed
init started: BusyBox v1.1.0 (2006.08.17-02:54+0000) multi-call binary init started: BusyBox v1.1.0 (2006.08.17-02:54+0000) multi-call binary
mounting filesystems... mounting filesystems...
loading script... loading script...
@@ -1,86 +1,86 @@
---------- Begin Simulation Statistics ---------- ---------- Begin Simulation Statistics ----------
host_inst_rate 820839 # Simulator instruction rate (inst/s) host_inst_rate 778282 # Simulator instruction rate (inst/s)
host_mem_usage 193264 # Number of bytes of host memory used host_mem_usage 196900 # Number of bytes of host memory used
host_seconds 70.65 # Real time elapsed on the host host_seconds 79.42 # Real time elapsed on the host
host_tick_rate 49454399 # Simulator tick rate (ticks/s) host_tick_rate 45984556 # Simulator tick rate (ticks/s)
sim_freq 2000000000 # Frequency of simulated ticks sim_freq 2000000000 # Frequency of simulated ticks
sim_insts 57989043 # Number of instructions simulated sim_insts 61806956 # Number of instructions simulated
sim_seconds 1.746889 # Number of seconds simulated sim_seconds 1.825933 # Number of seconds simulated
sim_ticks 3493777466 # Number of ticks simulated sim_ticks 3651865694 # Number of ticks simulated
system.cpu.dtb.accesses 2309470 # DTB accesses system.cpu.dtb.accesses 1304498 # DTB accesses
system.cpu.dtb.acv 367 # DTB access violations system.cpu.dtb.acv 367 # DTB access violations
system.cpu.dtb.hits 13707871 # DTB hits system.cpu.dtb.hits 16557993 # DTB hits
system.cpu.dtb.misses 12493 # DTB misses system.cpu.dtb.misses 11425 # DTB misses
system.cpu.dtb.read_accesses 828530 # DTB read accesses system.cpu.dtb.read_accesses 900427 # DTB read accesses
system.cpu.dtb.read_acv 210 # DTB read access violations system.cpu.dtb.read_acv 210 # DTB read access violations
system.cpu.dtb.read_hits 7595606 # DTB read hits system.cpu.dtb.read_hits 10039007 # DTB read hits
system.cpu.dtb.read_misses 10298 # DTB read misses system.cpu.dtb.read_misses 10280 # DTB read misses
system.cpu.dtb.write_accesses 1480940 # DTB write accesses system.cpu.dtb.write_accesses 404071 # DTB write accesses
system.cpu.dtb.write_acv 157 # DTB write access violations system.cpu.dtb.write_acv 157 # DTB write access violations
system.cpu.dtb.write_hits 6112265 # DTB write hits system.cpu.dtb.write_hits 6518986 # DTB write hits
system.cpu.dtb.write_misses 2195 # DTB write misses system.cpu.dtb.write_misses 1145 # DTB write misses
system.cpu.idle_fraction 0.979465 # Percentage of idle cycles system.cpu.idle_fraction 0.978522 # Percentage of idle cycles
system.cpu.itb.accesses 3281347 # ITB accesses system.cpu.itb.accesses 3281311 # ITB accesses
system.cpu.itb.acv 184 # ITB acv system.cpu.itb.acv 184 # ITB acv
system.cpu.itb.hits 3276357 # ITB hits system.cpu.itb.hits 3276321 # ITB hits
system.cpu.itb.misses 4990 # ITB misses system.cpu.itb.misses 4990 # ITB misses
system.cpu.kern.callpal 182454 # number of callpals executed system.cpu.kern.callpal 194059 # number of callpals executed
system.cpu.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed system.cpu.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrmces 1 0.00% 0.00% # number of callpals executed system.cpu.kern.callpal_wrmces 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrfen 1 0.00% 0.00% # number of callpals executed system.cpu.kern.callpal_wrfen 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrvptptr 1 0.00% 0.00% # number of callpals executed system.cpu.kern.callpal_wrvptptr 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_swpctx 1571 0.86% 0.86% # number of callpals executed system.cpu.kern.callpal_swpctx 4207 2.17% 2.17% # number of callpals executed
system.cpu.kern.callpal_tbi 54 0.03% 0.89% # number of callpals executed system.cpu.kern.callpal_tbi 54 0.03% 2.20% # number of callpals executed
system.cpu.kern.callpal_wrent 7 0.00% 0.90% # number of callpals executed system.cpu.kern.callpal_wrent 7 0.00% 2.20% # number of callpals executed
system.cpu.kern.callpal_swpipl 171092 93.77% 94.67% # number of callpals executed system.cpu.kern.callpal_swpipl 176948 91.18% 93.38% # number of callpals executed
system.cpu.kern.callpal_rdps 5160 2.83% 97.50% # number of callpals executed system.cpu.kern.callpal_rdps 6887 3.55% 96.93% # number of callpals executed
system.cpu.kern.callpal_wrkgp 1 0.00% 97.50% # number of callpals executed system.cpu.kern.callpal_wrkgp 1 0.00% 96.93% # number of callpals executed
system.cpu.kern.callpal_wrusp 7 0.00% 97.50% # number of callpals executed system.cpu.kern.callpal_wrusp 7 0.00% 96.94% # number of callpals executed
system.cpu.kern.callpal_rdusp 10 0.01% 97.51% # number of callpals executed system.cpu.kern.callpal_rdusp 9 0.00% 96.94% # number of callpals executed
system.cpu.kern.callpal_whami 2 0.00% 97.51% # number of callpals executed system.cpu.kern.callpal_whami 2 0.00% 96.94% # number of callpals executed
system.cpu.kern.callpal_rti 3834 2.10% 99.61% # number of callpals executed system.cpu.kern.callpal_rti 5221 2.69% 99.63% # number of callpals executed
system.cpu.kern.callpal_callsys 531 0.29% 99.90% # number of callpals executed system.cpu.kern.callpal_callsys 531 0.27% 99.91% # number of callpals executed
system.cpu.kern.callpal_imb 181 0.10% 100.00% # number of callpals executed system.cpu.kern.callpal_imb 181 0.09% 100.00% # number of callpals executed
system.cpu.kern.inst.arm 0 # number of arm instructions executed system.cpu.kern.inst.arm 0 # number of arm instructions executed
system.cpu.kern.inst.hwrei 202524 # number of hwrei instructions executed system.cpu.kern.inst.hwrei 213133 # number of hwrei instructions executed
system.cpu.kern.inst.ivlb 0 # number of ivlb instructions executed system.cpu.kern.inst.ivlb 0 # number of ivlb instructions executed
system.cpu.kern.inst.ivle 0 # number of ivle instructions executed system.cpu.kern.inst.ivle 0 # number of ivle instructions executed
system.cpu.kern.inst.quiesce 1876 # number of quiesce instructions executed system.cpu.kern.inst.quiesce 6280 # number of quiesce instructions executed
system.cpu.kern.ipl_count 176961 # number of times we switched to this ipl system.cpu.kern.ipl_count 184276 # number of times we switched to this ipl
system.cpu.kern.ipl_count_0 74471 42.08% 42.08% # number of times we switched to this ipl system.cpu.kern.ipl_count_0 75422 40.93% 40.93% # number of times we switched to this ipl
system.cpu.kern.ipl_count_21 251 0.14% 42.23% # number of times we switched to this ipl system.cpu.kern.ipl_count_21 245 0.13% 41.06% # number of times we switched to this ipl
system.cpu.kern.ipl_count_22 5439 3.07% 45.30% # number of times we switched to this ipl system.cpu.kern.ipl_count_22 1861 1.01% 42.07% # number of times we switched to this ipl
system.cpu.kern.ipl_count_31 96800 54.70% 100.00% # number of times we switched to this ipl system.cpu.kern.ipl_count_31 106748 57.93% 100.00% # number of times we switched to this ipl
system.cpu.kern.ipl_good 158180 # number of times we switched to this ipl from a different ipl system.cpu.kern.ipl_good 150212 # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_good_0 74417 47.05% 47.05% # number of times we switched to this ipl from a different ipl system.cpu.kern.ipl_good_0 74053 49.30% 49.30% # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_good_21 251 0.16% 47.20% # number of times we switched to this ipl from a different ipl system.cpu.kern.ipl_good_21 245 0.16% 49.46% # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_good_22 5439 3.44% 50.64% # number of times we switched to this ipl from a different ipl system.cpu.kern.ipl_good_22 1861 1.24% 50.70% # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_good_31 78073 49.36% 100.00% # number of times we switched to this ipl from a different ipl system.cpu.kern.ipl_good_31 74053 49.30% 100.00% # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_ticks 3493777020 # number of cycles we spent at this ipl system.cpu.kern.ipl_ticks 3651865248 # number of cycles we spent at this ipl
system.cpu.kern.ipl_ticks_0 3466334940 99.21% 99.21% # number of cycles we spent at this ipl system.cpu.kern.ipl_ticks_0 3611061665 98.88% 98.88% # number of cycles we spent at this ipl
system.cpu.kern.ipl_ticks_21 53019 0.00% 99.22% # number of cycles we spent at this ipl system.cpu.kern.ipl_ticks_21 53683 0.00% 98.88% # number of cycles we spent at this ipl
system.cpu.kern.ipl_ticks_22 1268195 0.04% 99.25% # number of cycles we spent at this ipl system.cpu.kern.ipl_ticks_22 219598 0.01% 98.89% # number of cycles we spent at this ipl
system.cpu.kern.ipl_ticks_31 26120866 0.75% 100.00% # number of cycles we spent at this ipl system.cpu.kern.ipl_ticks_31 40530302 1.11% 100.00% # number of cycles we spent at this ipl
system.cpu.kern.ipl_used 0.893869 # fraction of swpipl calls that actually changed the ipl system.cpu.kern.ipl_used 0.815147 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_0 0.999275 # fraction of swpipl calls that actually changed the ipl system.cpu.kern.ipl_used_0 0.981849 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl system.cpu.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl system.cpu.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_31 0.806539 # fraction of swpipl calls that actually changed the ipl system.cpu.kern.ipl_used_31 0.693718 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.mode_good_kernel 1938 system.cpu.kern.mode_good_kernel 1935
system.cpu.kern.mode_good_user 1757 system.cpu.kern.mode_good_user 1755
system.cpu.kern.mode_good_idle 181 system.cpu.kern.mode_good_idle 180
system.cpu.kern.mode_switch_kernel 3323 # number of protection mode switches system.cpu.kern.mode_switch_kernel 5988 # number of protection mode switches
system.cpu.kern.mode_switch_user 1757 # number of protection mode switches system.cpu.kern.mode_switch_user 1755 # number of protection mode switches
system.cpu.kern.mode_switch_idle 2060 # number of protection mode switches system.cpu.kern.mode_switch_idle 2104 # number of protection mode switches
system.cpu.kern.mode_switch_good 0.542857 # fraction of useful protection mode switches system.cpu.kern.mode_switch_good 0.393013 # fraction of useful protection mode switches
system.cpu.kern.mode_switch_good_kernel 0.583208 # fraction of useful protection mode switches system.cpu.kern.mode_switch_good_kernel 0.323146 # fraction of useful protection mode switches
system.cpu.kern.mode_switch_good_user 1 # fraction of useful protection mode switches system.cpu.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
system.cpu.kern.mode_switch_good_idle 0.087864 # fraction of useful protection mode switches system.cpu.kern.mode_switch_good_idle 0.085551 # fraction of useful protection mode switches
system.cpu.kern.mode_ticks_kernel 39254786 1.12% 1.12% # number of ticks spent at the given mode system.cpu.kern.mode_ticks_kernel 58882589 1.61% 1.61% # number of ticks spent at the given mode
system.cpu.kern.mode_ticks_user 4685669 0.13% 1.26% # number of ticks spent at the given mode system.cpu.kern.mode_ticks_user 4685612 0.13% 1.74% # number of ticks spent at the given mode
system.cpu.kern.mode_ticks_idle 3449836563 98.74% 100.00% # number of ticks spent at the given mode system.cpu.kern.mode_ticks_idle 3588297045 98.26% 100.00% # number of ticks spent at the given mode
system.cpu.kern.swap_context 1572 # number of times the context was actually changed system.cpu.kern.swap_context 4208 # number of times the context was actually changed
system.cpu.kern.syscall 329 # number of syscalls executed system.cpu.kern.syscall 329 # number of syscalls executed
system.cpu.kern.syscall_fork 8 2.43% 2.43% # number of syscalls executed system.cpu.kern.syscall_fork 8 2.43% 2.43% # number of syscalls executed
system.cpu.kern.syscall_read 30 9.12% 11.55% # number of syscalls executed system.cpu.kern.syscall_read 30 9.12% 11.55% # number of syscalls executed
@@ -112,16 +112,16 @@ system.cpu.kern.syscall_connect 2 0.61% 97.57% # nu
system.cpu.kern.syscall_setgid 4 1.22% 98.78% # number of syscalls executed system.cpu.kern.syscall_setgid 4 1.22% 98.78% # number of syscalls executed
system.cpu.kern.syscall_getrlimit 2 0.61% 99.39% # number of syscalls executed system.cpu.kern.syscall_getrlimit 2 0.61% 99.39% # number of syscalls executed
system.cpu.kern.syscall_setsid 2 0.61% 100.00% # number of syscalls executed system.cpu.kern.syscall_setsid 2 0.61% 100.00% # number of syscalls executed
system.cpu.not_idle_fraction 0.020535 # Percentage of non-idle cycles system.cpu.not_idle_fraction 0.021478 # Percentage of non-idle cycles
system.cpu.numCycles 0 # number of cpu cycles simulated system.cpu.numCycles 0 # number of cpu cycles simulated
system.cpu.num_insts 57989043 # Number of instructions executed system.cpu.num_insts 61806956 # Number of instructions executed
system.cpu.num_refs 13753099 # Number of memory references system.cpu.num_refs 16806539 # Number of memory references
system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD). system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD).
system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD). system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD). system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD).
system.disk0.dma_write_bytes 2735104 # Number of bytes transfered via DMA writes. system.disk0.dma_write_bytes 2702336 # Number of bytes transfered via DMA writes.
system.disk0.dma_write_full_pages 306 # Number of full page size DMA writes. system.disk0.dma_write_full_pages 302 # Number of full page size DMA writes.
system.disk0.dma_write_txs 412 # Number of DMA write transactions. system.disk0.dma_write_txs 408 # Number of DMA write transactions.
system.disk2.dma_read_bytes 0 # Number of bytes transfered via DMA reads (not PRD). system.disk2.dma_read_bytes 0 # Number of bytes transfered via DMA reads (not PRD).
system.disk2.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD). system.disk2.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk2.dma_read_txs 0 # Number of DMA read transactions (not PRD). system.disk2.dma_read_txs 0 # Number of DMA read transactions (not PRD).
@ -130,9 +130,9 @@ system.disk2.dma_write_full_pages 1 # Nu
system.disk2.dma_write_txs 1 # Number of DMA write transactions. system.disk2.dma_write_txs 1 # Number of DMA write transactions.
system.tsunami.ethernet.coalescedRxDesc <err: div-0> # average number of RxDesc's coalesced into each post system.tsunami.ethernet.coalescedRxDesc <err: div-0> # average number of RxDesc's coalesced into each post
system.tsunami.ethernet.coalescedRxIdle <err: div-0> # average number of RxIdle's coalesced into each post system.tsunami.ethernet.coalescedRxIdle <err: div-0> # average number of RxIdle's coalesced into each post
system.tsunami.ethernet.coalescedRxOk <err: div-0> # average number of RxOk's coalesced into each post system.tsunami.ethernet.coalescedRxOk no value # average number of RxOk's coalesced into each post
system.tsunami.ethernet.coalescedRxOrn <err: div-0> # average number of RxOrn's coalesced into each post system.tsunami.ethernet.coalescedRxOrn <err: div-0> # average number of RxOrn's coalesced into each post
system.tsunami.ethernet.coalescedSwi <err: div-0> # average number of Swi's coalesced into each post system.tsunami.ethernet.coalescedSwi no value # average number of Swi's coalesced into each post
system.tsunami.ethernet.coalescedTotal <err: div-0> # average number of interrupts coalesced into each post system.tsunami.ethernet.coalescedTotal <err: div-0> # average number of interrupts coalesced into each post
system.tsunami.ethernet.coalescedTxDesc <err: div-0> # average number of TxDesc's coalesced into each post system.tsunami.ethernet.coalescedTxDesc <err: div-0> # average number of TxDesc's coalesced into each post
system.tsunami.ethernet.coalescedTxIdle <err: div-0> # average number of TxIdle's coalesced into each post system.tsunami.ethernet.coalescedTxIdle <err: div-0> # average number of TxIdle's coalesced into each post

View file

@ -1,4 +1,4 @@
0: system.tsunami.io.rtc: Real-time clock set to Sun Jan 1 00:00:00 2006
-Listening for console connection on port 3456
-0: system.remote_gdb.listener: listening for remote gdb #0 on port 7000
+Listening for console connection on port 3457
+0: system.remote_gdb.listener: listening for remote gdb #0 on port 7001
warn: Entering event queue @ 0. Starting simulation...

View file

@ -5,8 +5,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 5 2006 22:13:02
-M5 started Fri Oct 6 00:24:58 2006
+M5 compiled Oct 8 2006 14:07:02
+M5 started Sun Oct 8 14:08:49 2006
M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-timing tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-timing
-Exiting @ tick 3493777466 because m5_exit instruction encountered
+Exiting @ tick 3651865694 because m5_exit instruction encountered

View file

@ -56,6 +56,7 @@ physmem=system.physmem
type=AtomicSimpleCPU
children=workload
clock=1
+cpu_id=0
defer_registration=false
function_trace=false
function_trace_start=0

View file

@ -36,6 +36,7 @@ max_loads_all_threads=0
progress_interval=0
mem=system.physmem
system=system
+cpu_id=0
workload=system.cpu.workload
clock=1
defer_registration=false

View file

@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 1393697 # Simulator instruction rate (inst/s)
+host_inst_rate 1432213 # Simulator instruction rate (inst/s)
host_mem_usage 147652 # Number of bytes of host memory used
-host_seconds 0.36 # Real time elapsed on the host
-host_tick_rate 1391995 # Simulator tick rate (ticks/s)
+host_seconds 0.35 # Real time elapsed on the host
+host_tick_rate 1430432 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 500000 # Number of instructions simulated
sim_seconds 0.000000 # Number of seconds simulated

View file

@ -7,8 +7,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 7 2006 11:12:49
-M5 started Sat Oct 7 11:13:17 2006
+M5 compiled Oct 8 2006 14:00:39
+M5 started Sun Oct 8 14:00:58 2006
M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/20.eio-short/alpha/eio/simple-atomic tests/run.py quick/20.eio-short/alpha/eio/simple-atomic
Exiting @ tick 499999 because a thread reached the max instruction count

View file

@ -56,6 +56,7 @@ physmem=system.physmem
type=TimingSimpleCPU
children=dcache icache l2cache toL2Bus workload
clock=1
+cpu_id=0
defer_registration=false
function_trace=false
function_trace_start=0

View file

@ -75,6 +75,7 @@ max_loads_all_threads=0
progress_interval=0
mem=system.cpu.dcache
system=system
+cpu_id=0
workload=system.cpu.workload
clock=1
defer_registration=false

View file

@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 618043 # Simulator instruction rate (inst/s)
-host_mem_usage 159232 # Number of bytes of host memory used
-host_seconds 0.81 # Real time elapsed on the host
-host_tick_rate 843177 # Simulator tick rate (ticks/s)
+host_inst_rate 598582 # Simulator instruction rate (inst/s)
+host_mem_usage 159216 # Number of bytes of host memory used
+host_seconds 0.84 # Real time elapsed on the host
+host_tick_rate 816632 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 500000 # Number of instructions simulated
sim_seconds 0.000001 # Number of seconds simulated
@ -115,7 +115,7 @@ system.cpu.icache.no_allocate_misses 0 # Nu
system.cpu.icache.overall_accesses 500000 # number of overall (read+write) accesses
system.cpu.icache.overall_avg_miss_latency 3 # average overall miss latency
system.cpu.icache.overall_avg_mshr_miss_latency 2 # average overall mshr miss latency
-system.cpu.icache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
+system.cpu.icache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
system.cpu.icache.overall_hits 499597 # number of overall hits
system.cpu.icache.overall_miss_latency 1209 # number of overall miss cycles
system.cpu.icache.overall_miss_rate 0.000806 # miss rate for overall accesses

View file

@ -7,8 +7,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 7 2006 12:38:12
-M5 started Sat Oct 7 12:38:52 2006
+M5 compiled Oct 8 2006 14:00:39
+M5 started Sun Oct 8 14:00:59 2006
M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/20.eio-short/alpha/eio/simple-timing tests/run.py quick/20.eio-short/alpha/eio/simple-timing
Exiting @ tick 682488 because a thread reached the max instruction count