Merge zizzer.eecs.umich.edu:/z/m5/Bitkeeper/newmem

into  zizzer.eecs.umich.edu:/tmp/newmem

--HG--
extra : convert_revision : 162876cb1ad96ca7ca6a2e0f549c98b29e5a8d2d
Ali Saidi 2007-05-13 04:48:42 -04:00
commit 404a91265e
33 changed files with 494 additions and 300 deletions

View file

@ -1,4 +1,4 @@
# Copyright (c) 2006 The Regents of The University of Michigan
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@ -32,7 +32,7 @@ from m5.objects import *
class L1Cache(BaseCache):
assoc = 2
block_size = 64
latency = 1
latency = '1ns'
mshrs = 10
tgts_per_mshr = 5
protocol = CoherenceProtocol(protocol='moesi')
@ -40,7 +40,7 @@ class L1Cache(BaseCache):
class L2Cache(BaseCache):
assoc = 8
block_size = 64
latency = 10
latency = '10ns'
mshrs = 20
tgts_per_mshr = 12

View file

@ -61,7 +61,7 @@ def makeLinuxAlphaSystem(mem_mode, mdesc = None):
self.readfile = mdesc.script()
self.iobus = Bus(bus_id=0)
self.membus = Bus(bus_id=1)
self.bridge = Bridge(fix_partial_write_b=True)
self.bridge = Bridge(fix_partial_write_b=True, delay='50ns', nack_delay='4ns')
self.physmem = PhysicalMemory(range = AddrRange(mdesc.mem()))
self.bridge.side_a = self.iobus.port
self.bridge.side_b = self.membus.port
@ -94,7 +94,7 @@ def makeSparcSystem(mem_mode, mdesc = None):
self.readfile = mdesc.script()
self.iobus = Bus(bus_id=0)
self.membus = Bus(bus_id=1)
self.bridge = Bridge()
self.bridge = Bridge(fix_partial_write_b=True, delay='50ns', nack_delay='4ns')
self.t1000 = T1000()
self.t1000.attachOnChipIO(self.membus)
self.t1000.attachIO(self.iobus)

View file

@ -96,8 +96,9 @@ inline
T
insertBits(T val, int first, int last, B bit_val)
{
T t_bit_val = bit_val;
T bmask = mask(first - last + 1) << last;
return ((bit_val << last) & bmask) | (val & ~bmask);
return ((t_bit_val << last) & bmask) | (val & ~bmask);
}
/**

View file

@ -2094,9 +2094,13 @@ class UnaryNode : public Node
return vresult;
}
Result total() const {
Op op;
return op(l->total());
Result total() const
{
const VResult &vec = this->result();
Result total = 0;
for (int i = 0; i < size(); i++)
total += vec[i];
return total;
}
virtual size_t size() const { return l->size(); }
@ -2149,9 +2153,13 @@ class BinaryNode : public Node
return vresult;
}
Result total() const {
Op op;
return op(l->total(), r->total());
Result total() const
{
const VResult &vec = this->result();
Result total = 0;
for (int i = 0; i < size(); i++)
total += vec[i];
return total;
}
virtual size_t size() const {

View file

@ -727,12 +727,8 @@ IGbE::RxDescCache::pktComplete()
if (igbe->regs.rdtr.delay()) {
DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n",
igbe->regs.rdtr.delay() * igbe->intClock());
if (igbe->rdtrEvent.scheduled())
igbe->rdtrEvent.reschedule(curTick + igbe->regs.rdtr.delay() *
igbe->intClock());
else
igbe->rdtrEvent.schedule(curTick + igbe->regs.rdtr.delay() *
igbe->intClock());
igbe->rdtrEvent.reschedule(curTick + igbe->regs.rdtr.delay() *
igbe->intClock(),true);
}
if (igbe->regs.radv.idv() && igbe->regs.rdtr.delay()) {
@ -895,6 +891,7 @@ IGbE::TxDescCache::pktComplete()
pktPtr = NULL;
DPRINTF(EthernetDesc, "Partial Packet Descriptor Done\n");
enableSm();
return;
}
@ -946,12 +943,8 @@ IGbE::TxDescCache::pktComplete()
DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
if (igbe->regs.tidv.idv()) {
DPRINTF(EthernetDesc, "setting tidv\n");
if (igbe->tidvEvent.scheduled())
igbe->tidvEvent.reschedule(curTick + igbe->regs.tidv.idv() *
igbe->intClock());
else
igbe->tidvEvent.schedule(curTick + igbe->regs.tidv.idv() *
igbe->intClock());
igbe->tidvEvent.reschedule(curTick + igbe->regs.tidv.idv() *
igbe->intClock(), true);
}
if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
@ -979,6 +972,7 @@ IGbE::TxDescCache::pktComplete()
DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
writeback((igbe->cacheBlockSize()-1)>>4);
}
enableSm();
igbe->checkDrain();
}
@ -1158,6 +1152,8 @@ IGbE::txStateMachine()
return;
}
DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
txTick = false;
}
bool

View file

@ -111,10 +111,7 @@ DmaPort::recvTiming(PacketPtr pkt)
else if (backoffTime < device->maxBackoffDelay)
backoffTime <<= 1;
if (backoffEvent.scheduled())
backoffEvent.reschedule(curTick + backoffTime);
else
backoffEvent.schedule(curTick + backoffTime);
backoffEvent.reschedule(curTick + backoffTime, true);
DPRINTF(DMA, "Backoff time set to %d ticks\n", backoffTime);

View file

@ -2310,10 +2310,7 @@ NSGigE::transferDone()
DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
if (txEvent.scheduled())
txEvent.reschedule(curTick + cycles(1));
else
txEvent.schedule(curTick + cycles(1));
txEvent.reschedule(curTick + cycles(1), true);
}
bool

View file

@ -1199,10 +1199,7 @@ Device::transferDone()
DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
if (txEvent.scheduled())
txEvent.reschedule(curTick + cycles(1));
else
txEvent.schedule(curTick + cycles(1));
txEvent.reschedule(curTick + cycles(1), true);
}
bool

View file

@ -171,8 +171,9 @@ bool
Bus::recvTiming(PacketPtr pkt)
{
Port *port;
DPRINTF(Bus, "recvTiming: packet src %d dest %d addr 0x%x cmd %s\n",
pkt->getSrc(), pkt->getDest(), pkt->getAddr(), pkt->cmdString());
DPRINTF(Bus, "recvTiming: packet src %d dest %d addr 0x%x cmd %s result %d\n",
pkt->getSrc(), pkt->getDest(), pkt->getAddr(), pkt->cmdString(),
pkt->result);
BusPort *pktPort;
if (pkt->getSrc() == defaultId)
@ -272,20 +273,14 @@ Bus::recvRetry(int id)
retryList.pop_front();
inRetry = false;
if (id != -1) {
//Bring tickNextIdle up to the present
while (tickNextIdle < curTick)
tickNextIdle += clock;
//Burn a cycle for the missed grant.
//Bring tickNextIdle up to the present
while (tickNextIdle < curTick)
tickNextIdle += clock;
if (!busIdle.scheduled()) {
busIdle.schedule(tickNextIdle);
} else {
busIdle.reschedule(tickNextIdle);
}
} // id != -1
//Burn a cycle for the missed grant.
tickNextIdle += clock;
busIdle.reschedule(tickNextIdle, true);
}
}
//If we weren't able to drain before, we might be able to now.

View file

@ -134,7 +134,6 @@ BEGIN_DECLARE_SIM_OBJECT_PARAMS(BaseCache)
Param<bool> prefetch_cache_check_push;
Param<bool> prefetch_use_cpu_id;
Param<bool> prefetch_data_accesses_only;
Param<int> hit_latency;
END_DECLARE_SIM_OBJECT_PARAMS(BaseCache)
@ -190,8 +189,7 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache)
INIT_PARAM_DFLT(prefetch_policy, "Type of prefetcher to use", "none"),
INIT_PARAM_DFLT(prefetch_cache_check_push, "Check if in cache on push or pop of prefetch queue", true),
INIT_PARAM_DFLT(prefetch_use_cpu_id, "Use the CPU ID to separate calculations of prefetches", true),
INIT_PARAM_DFLT(prefetch_data_accesses_only, "Only prefetch on data not on instruction accesses", false),
INIT_PARAM_DFLT(hit_latency, "Hit Latency for a successful access", 1)
INIT_PARAM_DFLT(prefetch_data_accesses_only, "Only prefetch on data not on instruction accesses", false)
END_INIT_SIM_OBJECT_PARAMS(BaseCache)
@ -211,7 +209,7 @@ END_INIT_SIM_OBJECT_PARAMS(BaseCache)
BUILD_NULL_PREFETCHER(TAGS); \
} \
Cache<TAGS, c>::Params params(tags, mq, coh, base_params, \
pf, prefetch_access, hit_latency, \
pf, prefetch_access, latency, \
true, \
store_compressed, \
adaptive_compression, \

View file

@ -81,7 +81,6 @@ SimObject('m5/objects/Ethernet.py')
SimObject('m5/objects/FUPool.py')
SimObject('m5/objects/FastCPU.py')
#SimObject('m5/objects/FreebsdSystem.py')
SimObject('m5/objects/FullCPU.py')
SimObject('m5/objects/FuncUnit.py')
SimObject('m5/objects/FuncUnitConfig.py')
SimObject('m5/objects/FunctionalMemory.py')
@ -97,7 +96,6 @@ SimObject('m5/objects/O3CPU.py')
SimObject('m5/objects/OzoneCPU.py')
SimObject('m5/objects/Pci.py')
SimObject('m5/objects/PhysicalMemory.py')
SimObject('m5/objects/PipeTrace.py')
SimObject('m5/objects/Platform.py')
SimObject('m5/objects/Process.py')
SimObject('m5/objects/Repl.py')

View file

@ -9,7 +9,7 @@ class BaseCache(MemObject):
"Use an adaptive compression scheme")
assoc = Param.Int("associativity")
block_size = Param.Int("block size in bytes")
latency = Param.Int("Latency")
latency = Param.Latency("Latency")
compressed_bus = Param.Bool(False,
"This cache connects to a compressed memory")
compression_latency = Param.Latency('0ns',
@ -59,6 +59,5 @@ class BaseCache(MemObject):
"Use the CPU ID to seperate calculations of prefetches")
prefetch_data_accesses_only = Param.Bool(False,
"Only prefetch on data not on instruction accesses")
hit_latency = Param.Int(1,"Hit Latency of the cache")
cpu_side = Port("Port on side closer to CPU")
mem_side = Port("Port on side closer to MEM")
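For context on the two changes in this file: latency is now a Param.Latency, so configs supply it as a time string (as the example configs earlier in this diff already do), and the separate hit_latency parameter is removed; the cache builder hunk above now passes the single latency value where it previously passed hit_latency. A minimal sketch of a config under the new parameter (the class name is illustrative):

from m5.objects import *

class MyL1(BaseCache):
    assoc = 2
    block_size = 64
    latency = '1ns'       # a time string now, rather than a bare integer
    mshrs = 10
    tgts_per_mshr = 5
    # hit_latency = 1     # no longer a parameter; covered by 'latency'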

View file

@ -5,9 +5,12 @@ class Bridge(MemObject):
type = 'Bridge'
side_a = Port('Side A port')
side_b = Port('Side B port')
queue_size_a = Param.Int(16, "The number of requests to buffer")
queue_size_b = Param.Int(16, "The number of requests to buffer")
req_size_a = Param.Int(16, "The number of requests to buffer")
req_size_b = Param.Int(16, "The number of requests to buffer")
resp_size_a = Param.Int(16, "The number of responses to buffer")
resp_size_b = Param.Int(16, "The number of responses to buffer")
delay = Param.Latency('0ns', "The latency of this bridge")
nack_delay = Param.Latency('0ns', "The latency of this bridge to nack a packet")
write_ack = Param.Bool(False, "Should this bridge ack writes")
fix_partial_write_a = Param.Bool(False, "Should this bridge fixup partial block writes")
fix_partial_write_b = Param.Bool(False, "Should this bridge fixup partial block writes")
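The single queue_size pair is split here into per-direction request and response queues, and the forwarding and nack latencies become time parameters. A minimal sketch of instantiating the bridge with the values FSConfig uses above (the variable name is illustrative):

bridge = Bridge(req_size_a=16, resp_size_a=16,
                req_size_b=16, resp_size_b=16,
                delay='50ns', nack_delay='4ns',
                fix_partial_write_b=True)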

View file

@ -19,6 +19,12 @@ class DmaDevice(PioDevice):
type = 'DmaDevice'
abstract = True
dma = Port(Self.pio.peerObj.port, "DMA port")
min_backoff_delay = Param.Latency('4ns',
"min time between a nack packet being received and the next request made by the device")
max_backoff_delay = Param.Latency('10us',
"max time between a nack packet being received and the next request made by the device")
class IsaFake(BasicPioDevice):
type = 'IsaFake'
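The two new DmaDevice parameters bound the exponential backoff implemented in the io_device.cc hunk above, where backoffTime doubles after each nack up to maxBackoffDelay. A minimal sketch of overriding them on one DmaDevice subclass, NSGigE, which shows up in the reference config.ini files later in this diff (at the 2 GHz tick rate used there the defaults '4ns' and '10us' appear as 8 and 20000 ticks):

nic = NSGigE(min_backoff_delay='4ns',   # wait at least 4ns after a nack
             max_backoff_delay='10us')  # cap the doubled backoff at 10us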

View file

@ -348,7 +348,7 @@ class UdpPort(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True
class Percent(CheckedInt): cxx_type = 'int'; min = 0; max = 100
class Float(ParamValue, float):
pass
cxx_type = 'double'
class MemorySize(CheckedInt):
cxx_type = 'uint64_t'

View file

@ -210,7 +210,8 @@ class Event : public Serializable, public FastAlloc
void schedule(Tick t);
/// Reschedule the event with the current priority
void reschedule(Tick t);
// always parameter means to schedule if not already scheduled
void reschedule(Tick t, bool always = false);
/// Remove the event from the current schedule
void deschedule();
@ -402,16 +403,22 @@ Event::deschedule()
}
inline void
Event::reschedule(Tick t)
Event::reschedule(Tick t, bool always)
{
assert(scheduled());
clearFlags(Squashed);
assert(scheduled() || always);
#if TRACING_ON
when_scheduled = curTick;
#endif
_when = t;
queue->reschedule(this);
if (scheduled()) {
clearFlags(Squashed);
queue->reschedule(this);
} else {
setFlags(Scheduled);
queue->schedule(this);
}
}
inline void

View file

@ -87,10 +87,7 @@ namespace PseudoInst
Tick resume = curTick + Clock::Int::ns * ns;
if (quiesceEvent->scheduled())
quiesceEvent->reschedule(resume);
else
quiesceEvent->schedule(resume);
quiesceEvent->reschedule(resume, true);
DPRINTF(Quiesce, "%s: quiesceNs(%d) until %d\n",
tc->getCpuPtr()->name(), ns, resume);
@ -110,10 +107,7 @@ namespace PseudoInst
Tick resume = curTick + tc->getCpuPtr()->cycles(cycles);
if (quiesceEvent->scheduled())
quiesceEvent->reschedule(resume);
else
quiesceEvent->schedule(resume);
quiesceEvent->reschedule(resume, true);
DPRINTF(Quiesce, "%s: quiesceCycles(%d) until %d\n",
tc->getCpuPtr()->name(), cycles, resume);

View file

@ -34,7 +34,7 @@ from m5.objects import *
# ====================
class L1(BaseCache):
latency = 1
latency = '1ns'
block_size = 64
mshrs = 12
tgts_per_mshr = 8
@ -46,7 +46,7 @@ class L1(BaseCache):
class L2(BaseCache):
block_size = 64
latency = 10
latency = '10ns'
mshrs = 92
tgts_per_mshr = 16
write_buffers = 8

View file

@ -35,7 +35,7 @@ m5.AddToPath('../configs/common')
# ====================
class L1(BaseCache):
latency = 1
latency = '1ns'
block_size = 64
mshrs = 4
tgts_per_mshr = 8
@ -47,7 +47,7 @@ class L1(BaseCache):
class L2(BaseCache):
block_size = 64
latency = 100
latency = '10ns'
mshrs = 92
tgts_per_mshr = 16
write_buffers = 8

View file

@ -33,7 +33,7 @@ m5.AddToPath('../configs/common')
class MyCache(BaseCache):
assoc = 2
block_size = 64
latency = 1
latency = '1ns'
mshrs = 10
tgts_per_mshr = 5

View file

@ -34,7 +34,7 @@ from m5.objects import *
# ====================
class L1(BaseCache):
latency = 1
latency = '1ns'
block_size = 64
mshrs = 4
tgts_per_mshr = 8
@ -46,7 +46,7 @@ class L1(BaseCache):
class L2(BaseCache):
block_size = 64
latency = 100
latency = '10ns'
mshrs = 92
tgts_per_mshr = 16
write_buffers = 8

View file

@ -34,7 +34,7 @@ from m5.objects import *
# ====================
class L1(BaseCache):
latency = 1
latency = '1ns'
block_size = 64
mshrs = 4
tgts_per_mshr = 8
@ -46,7 +46,7 @@ class L1(BaseCache):
class L2(BaseCache):
block_size = 64
latency = 100
latency = '10ns'
mshrs = 92
tgts_per_mshr = 16
write_buffers = 8

View file

@ -32,13 +32,13 @@ from m5.objects import *
class MyCache(BaseCache):
assoc = 2
block_size = 64
latency = 1
latency = '1ns'
mshrs = 10
tgts_per_mshr = 5
cpu = TimingSimpleCPU(cpu_id=0)
cpu.addTwoLevelCacheHierarchy(MyCache(size = '128kB'), MyCache(size = '256kB'),
MyCache(size = '2MB'))
MyCache(size = '2MB', latency='10ns'))
system = System(cpu = cpu,
physmem = PhysicalMemory(),
membus = Bus())

View file

@ -31,12 +31,49 @@ from m5.objects import *
m5.AddToPath('../configs/common')
import FSConfig
# --------------------
# Base L1 Cache
# ====================
class L1(BaseCache):
latency = '1ns'
block_size = 64
mshrs = 4
tgts_per_mshr = 8
protocol = CoherenceProtocol(protocol='moesi')
# ----------------------
# Base L2 Cache
# ----------------------
class L2(BaseCache):
block_size = 64
latency = '10ns'
mshrs = 92
tgts_per_mshr = 16
write_buffers = 8
#cpu
cpus = [ AtomicSimpleCPU(cpu_id=i) for i in xrange(2) ]
#the system
system = FSConfig.makeLinuxAlphaSystem('atomic')
system.cpu = cpus
#create the l1/l2 bus
system.toL2Bus = Bus()
#connect up the l2 cache
system.l2c = L2(size='4MB', assoc=8)
system.l2c.cpu_side = system.toL2Bus.port
system.l2c.mem_side = system.membus.port
#connect up the cpu and l1s
for c in cpus:
c.connectMemPorts(system.membus)
c.addPrivateSplitL1Caches(L1(size = '32kB', assoc = 1),
L1(size = '32kB', assoc = 4))
# connect cpu level-1 caches to shared level-2 cache
c.connectMemPorts(system.toL2Bus)
c.clock = '2GHz'
root = Root(system=system)
m5.ticks.setGlobalFrequency('2GHz')
m5.ticks.setGlobalFrequency('1THz')
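A note on the setGlobalFrequency change just above: the global tick frequency moves from 2GHz to 1THz, and it is what the new time-string parameters are resolved against. A minimal sketch of the arithmetic, assuming a straight ticks-per-second conversion:

import m5
m5.ticks.setGlobalFrequency('1THz')   # 1 tick = 1 ps
# '1ns'  -> 1,000 ticks   (would be 2 ticks at '2GHz')
# '10ns' -> 10,000 ticks  (would be 20 ticks at '2GHz')
# '50ns' -> 50,000 ticks  (the bridge delay in FSConfig above)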

View file

@ -31,10 +31,49 @@ from m5.objects import *
m5.AddToPath('../configs/common')
import FSConfig
# --------------------
# Base L1 Cache
# ====================
class L1(BaseCache):
latency = '1ns'
block_size = 64
mshrs = 4
tgts_per_mshr = 8
protocol = CoherenceProtocol(protocol='moesi')
# ----------------------
# Base L2 Cache
# ----------------------
class L2(BaseCache):
block_size = 64
latency = '10ns'
mshrs = 92
tgts_per_mshr = 16
write_buffers = 8
#cpu
cpu = AtomicSimpleCPU(cpu_id=0)
#the system
system = FSConfig.makeLinuxAlphaSystem('atomic')
system.cpu = cpu
cpu.connectMemPorts(system.membus)
#create the l1/l2 bus
system.toL2Bus = Bus()
#connect up the l2 cache
system.l2c = L2(size='4MB', assoc=8)
system.l2c.cpu_side = system.toL2Bus.port
system.l2c.mem_side = system.membus.port
#connect up the cpu and l1s
cpu.addPrivateSplitL1Caches(L1(size = '32kB', assoc = 1),
L1(size = '32kB', assoc = 4))
# connect cpu level-1 caches to shared level-2 cache
cpu.connectMemPorts(system.toL2Bus)
cpu.clock = '2GHz'
root = Root(system=system)
m5.ticks.setGlobalFrequency('2GHz')
m5.ticks.setGlobalFrequency('1THz')

View file

@ -31,11 +31,51 @@ from m5.objects import *
m5.AddToPath('../configs/common')
import FSConfig
# --------------------
# Base L1 Cache
# ====================
class L1(BaseCache):
latency = '1ns'
block_size = 64
mshrs = 4
tgts_per_mshr = 8
protocol = CoherenceProtocol(protocol='moesi')
# ----------------------
# Base L2 Cache
# ----------------------
class L2(BaseCache):
block_size = 64
latency = '10ns'
mshrs = 92
tgts_per_mshr = 16
write_buffers = 8
#cpu
cpus = [ TimingSimpleCPU(cpu_id=i) for i in xrange(2) ]
#the system
system = FSConfig.makeLinuxAlphaSystem('timing')
system.cpu = cpus
#create the l1/l2 bus
system.toL2Bus = Bus()
#connect up the l2 cache
system.l2c = L2(size='4MB', assoc=8)
system.l2c.cpu_side = system.toL2Bus.port
system.l2c.mem_side = system.membus.port
#connect up the cpu and l1s
for c in cpus:
c.connectMemPorts(system.membus)
c.addPrivateSplitL1Caches(L1(size = '32kB', assoc = 1),
L1(size = '32kB', assoc = 4))
# connect cpu level-1 caches to shared level-2 cache
c.connectMemPorts(system.toL2Bus)
c.clock = '2GHz'
root = Root(system=system)
m5.ticks.setGlobalFrequency('2GHz')
m5.ticks.setGlobalFrequency('1THz')

View file

@ -31,10 +31,50 @@ from m5.objects import *
m5.AddToPath('../configs/common')
import FSConfig
# --------------------
# Base L1 Cache
# ====================
class L1(BaseCache):
latency = '1ns'
block_size = 64
mshrs = 4
tgts_per_mshr = 8
protocol = CoherenceProtocol(protocol='moesi')
# ----------------------
# Base L2 Cache
# ----------------------
class L2(BaseCache):
block_size = 64
latency = '10ns'
mshrs = 92
tgts_per_mshr = 16
write_buffers = 8
#cpu
cpu = TimingSimpleCPU(cpu_id=0)
#the system
system = FSConfig.makeLinuxAlphaSystem('timing')
system.cpu = cpu
cpu.connectMemPorts(system.membus)
#create the l1/l2 bus
system.toL2Bus = Bus()
#connect up the l2 cache
system.l2c = L2(size='4MB', assoc=8)
system.l2c.cpu_side = system.toL2Bus.port
system.l2c.mem_side = system.membus.port
#connect up the cpu and l1s
cpu.addPrivateSplitL1Caches(L1(size = '32kB', assoc = 1),
L1(size = '32kB', assoc = 4))
# connect cpu level-1 caches to shared level-2 cache
cpu.connectMemPorts(system.toL2Bus)
cpu.clock = '2GHz'
root = Root(system=system)
m5.ticks.setGlobalFrequency('2GHz')
m5.ticks.setGlobalFrequency('1THz')

View file

@ -8,11 +8,11 @@ type=LinuxAlphaSystem
children=bridge cpu0 cpu1 disk0 disk2 intrctrl iobus membus physmem sim_console simple_disk tsunami
boot_cpu_frequency=1
boot_osflags=root=/dev/hda1 console=ttyS0
console=/dist/m5/system/binaries/console
console=/Users/ali/work/system/binaries/console
init_param=0
kernel=/dist/m5/system/binaries/vmlinux
kernel=/Users/ali/work/system/binaries/vmlinux
mem_mode=timing
pal=/dist/m5/system/binaries/ts_osfpal
pal=/Users/ali/work/system/binaries/ts_osfpal
physmem=system.physmem
readfile=tests/halt.sh
symbolfile=
@ -22,8 +22,13 @@ system_type=34
[system.bridge]
type=Bridge
delay=0
queue_size_a=16
queue_size_b=16
fix_partial_write_a=false
fix_partial_write_b=true
nack_delay=0
req_size_a=16
req_size_b=16
resp_size_a=16
resp_size_b=16
write_ack=false
side_a=system.iobus.port[0]
side_b=system.membus.port[0]
@ -108,7 +113,7 @@ table_size=65536
[system.disk0.image.child]
type=RawDiskImage
image_file=/dist/m5/system/disks/linux-latest.img
image_file=/Users/ali/work/system/disks/linux-latest.img
read_only=true
[system.disk2]
@ -127,7 +132,7 @@ table_size=65536
[system.disk2.image.child]
type=RawDiskImage
image_file=/dist/m5/system/disks/linux-bigswap2.img
image_file=/Users/ali/work/system/disks/linux-bigswap2.img
read_only=true
[system.intrctrl]
@ -136,6 +141,7 @@ sys=system
[system.iobus]
type=Bus
block_size=64
bus_id=0
clock=2
responder_set=true
@ -146,6 +152,7 @@ port=system.bridge.side_a system.tsunami.cchip.pio system.tsunami.pchip.pio syst
[system.membus]
type=Bus
children=responder
block_size=64
bus_id=1
clock=2
responder_set=false
@ -193,7 +200,7 @@ system=system
[system.simple_disk.disk]
type=RawDiskImage
image_file=/dist/m5/system/disks/linux-latest.img
image_file=/Users/ali/work/system/disks/linux-latest.img
read_only=true
[system.tsunami]
@ -242,6 +249,8 @@ dma_write_delay=0
dma_write_factor=0
hardware_address=00:90:00:00:00:01
intr_delay=20000
max_backoff_delay=20000
min_backoff_delay=8
pci_bus=0
pci_dev=1
pci_func=0
@ -614,6 +623,8 @@ children=configdata
config_latency=40
configdata=system.tsunami.ide.configdata
disks=system.disk0 system.disk2
max_backoff_delay=20000
min_backoff_delay=8
pci_bus=0
pci_dev=0
pci_func=0

View file

@ -14,9 +14,9 @@ type=LinuxAlphaSystem
boot_cpu_frequency=1
physmem=system.physmem
mem_mode=timing
kernel=/dist/m5/system/binaries/vmlinux
console=/dist/m5/system/binaries/console
pal=/dist/m5/system/binaries/ts_osfpal
kernel=/Users/ali/work/system/binaries/vmlinux
console=/Users/ali/work/system/binaries/console
pal=/Users/ali/work/system/binaries/ts_osfpal
boot_osflags=root=/dev/hda1 console=ttyS0
readfile=tests/halt.sh
symbolfile=
@ -30,6 +30,7 @@ bus_id=1
clock=2
width=64
responder_set=false
block_size=64
[system.intrctrl]
type=IntrControl
@ -57,14 +58,19 @@ system=system
[system.bridge]
type=Bridge
queue_size_a=16
queue_size_b=16
req_size_a=16
req_size_b=16
resp_size_a=16
resp_size_b=16
delay=0
nack_delay=0
write_ack=false
fix_partial_write_a=false
fix_partial_write_b=true
[system.disk0.image.child]
type=RawDiskImage
image_file=/dist/m5/system/disks/linux-latest.img
image_file=/Users/ali/work/system/disks/linux-latest.img
read_only=true
[system.disk0.image]
@ -82,7 +88,7 @@ delay=2000
[system.disk2.image.child]
type=RawDiskImage
image_file=/dist/m5/system/disks/linux-bigswap2.img
image_file=/Users/ali/work/system/disks/linux-bigswap2.img
read_only=true
[system.disk2.image]
@ -162,7 +168,7 @@ function_trace_start=0
[system.simple_disk.disk]
type=RawDiskImage
image_file=/dist/m5/system/disks/linux-latest.img
image_file=/Users/ali/work/system/disks/linux-latest.img
read_only=true
[system.simple_disk]
@ -510,6 +516,8 @@ BAR5Size=0
type=NSGigE
system=system
platform=system.tsunami
min_backoff_delay=8
max_backoff_delay=20000
configdata=system.tsunami.ethernet.configdata
pci_bus=0
pci_dev=1
@ -632,6 +640,8 @@ BAR5Size=0
type=IdeController
system=system
platform=system.tsunami
min_backoff_delay=8
max_backoff_delay=20000
configdata=system.tsunami.ide.configdata
pci_bus=0
pci_dev=0
@ -646,4 +656,5 @@ bus_id=0
clock=2
width=64
responder_set=true
block_size=64

View file

@ -1,89 +1,88 @@
---------- Begin Simulation Statistics ----------
host_inst_rate 176514 # Simulator instruction rate (inst/s)
host_mem_usage 193420 # Number of bytes of host memory used
host_seconds 369.13 # Real time elapsed on the host
host_tick_rate 10780504 # Simulator tick rate (ticks/s)
host_inst_rate 159511 # Simulator instruction rate (inst/s)
host_seconds 408.44 # Real time elapsed on the host
host_tick_rate 9737848 # Simulator tick rate (ticks/s)
sim_freq 2000000000 # Frequency of simulated ticks
sim_insts 65155632 # Number of instructions simulated
sim_seconds 1.989678 # Number of seconds simulated
sim_ticks 3979356760 # Number of ticks simulated
system.cpu0.dtb.accesses 676537 # DTB accesses
sim_insts 65151264 # Number of instructions simulated
sim_seconds 1.988681 # Number of seconds simulated
sim_ticks 3977362808 # Number of ticks simulated
system.cpu0.dtb.accesses 676531 # DTB accesses
system.cpu0.dtb.acv 306 # DTB access violations
system.cpu0.dtb.hits 12789393 # DTB hits
system.cpu0.dtb.misses 8263 # DTB misses
system.cpu0.dtb.read_accesses 494246 # DTB read accesses
system.cpu0.dtb.hits 12726999 # DTB hits
system.cpu0.dtb.misses 8261 # DTB misses
system.cpu0.dtb.read_accesses 494241 # DTB read accesses
system.cpu0.dtb.read_acv 184 # DTB read access violations
system.cpu0.dtb.read_hits 7941036 # DTB read hits
system.cpu0.dtb.read_misses 7535 # DTB read misses
system.cpu0.dtb.write_accesses 182291 # DTB write accesses
system.cpu0.dtb.read_hits 7906690 # DTB read hits
system.cpu0.dtb.read_misses 7534 # DTB read misses
system.cpu0.dtb.write_accesses 182290 # DTB write accesses
system.cpu0.dtb.write_acv 122 # DTB write access violations
system.cpu0.dtb.write_hits 4848357 # DTB write hits
system.cpu0.dtb.write_misses 728 # DTB write misses
system.cpu0.idle_fraction 0.930790 # Percentage of idle cycles
system.cpu0.itb.accesses 3420080 # ITB accesses
system.cpu0.dtb.write_hits 4820309 # DTB write hits
system.cpu0.dtb.write_misses 727 # DTB write misses
system.cpu0.idle_fraction 0.930953 # Percentage of idle cycles
system.cpu0.itb.accesses 3412195 # ITB accesses
system.cpu0.itb.acv 161 # ITB acv
system.cpu0.itb.hits 3416243 # ITB hits
system.cpu0.itb.misses 3837 # ITB misses
system.cpu0.kern.callpal 143414 # number of callpals executed
system.cpu0.itb.hits 3408362 # ITB hits
system.cpu0.itb.misses 3833 # ITB misses
system.cpu0.kern.callpal 142550 # number of callpals executed
system.cpu0.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
system.cpu0.kern.callpal_wripir 566 0.39% 0.40% # number of callpals executed
system.cpu0.kern.callpal_wripir 572 0.40% 0.40% # number of callpals executed
system.cpu0.kern.callpal_wrmces 1 0.00% 0.40% # number of callpals executed
system.cpu0.kern.callpal_wrfen 1 0.00% 0.40% # number of callpals executed
system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.40% # number of callpals executed
system.cpu0.kern.callpal_swpctx 2893 2.02% 2.41% # number of callpals executed
system.cpu0.kern.callpal_tbi 47 0.03% 2.45% # number of callpals executed
system.cpu0.kern.callpal_wrent 7 0.00% 2.45% # number of callpals executed
system.cpu0.kern.callpal_swpipl 128466 89.58% 92.03% # number of callpals executed
system.cpu0.kern.callpal_rdps 6699 4.67% 96.70% # number of callpals executed
system.cpu0.kern.callpal_wrkgp 1 0.00% 96.70% # number of callpals executed
system.cpu0.kern.callpal_wrusp 3 0.00% 96.70% # number of callpals executed
system.cpu0.kern.callpal_rdusp 8 0.01% 96.71% # number of callpals executed
system.cpu0.kern.callpal_whami 2 0.00% 96.71% # number of callpals executed
system.cpu0.kern.callpal_rti 4216 2.94% 99.65% # number of callpals executed
system.cpu0.kern.callpal_swpctx 2878 2.02% 2.42% # number of callpals executed
system.cpu0.kern.callpal_tbi 47 0.03% 2.46% # number of callpals executed
system.cpu0.kern.callpal_wrent 7 0.00% 2.46% # number of callpals executed
system.cpu0.kern.callpal_swpipl 127700 89.58% 92.04% # number of callpals executed
system.cpu0.kern.callpal_rdps 6611 4.64% 96.68% # number of callpals executed
system.cpu0.kern.callpal_wrkgp 1 0.00% 96.68% # number of callpals executed
system.cpu0.kern.callpal_wrusp 3 0.00% 96.68% # number of callpals executed
system.cpu0.kern.callpal_rdusp 8 0.01% 96.69% # number of callpals executed
system.cpu0.kern.callpal_whami 2 0.00% 96.69% # number of callpals executed
system.cpu0.kern.callpal_rti 4215 2.96% 99.65% # number of callpals executed
system.cpu0.kern.callpal_callsys 355 0.25% 99.90% # number of callpals executed
system.cpu0.kern.callpal_imb 147 0.10% 100.00% # number of callpals executed
system.cpu0.kern.inst.arm 0 # number of arm instructions executed
system.cpu0.kern.inst.hwrei 158606 # number of hwrei instructions executed
system.cpu0.kern.inst.quiesce 6630 # number of quiesce instructions executed
system.cpu0.kern.ipl_count 135306 # number of times we switched to this ipl
system.cpu0.kern.ipl_count_0 54074 39.96% 39.96% # number of times we switched to this ipl
system.cpu0.kern.ipl_count_21 131 0.10% 40.06% # number of times we switched to this ipl
system.cpu0.kern.ipl_count_22 2010 1.49% 41.55% # number of times we switched to this ipl
system.cpu0.kern.ipl_count_30 482 0.36% 41.90% # number of times we switched to this ipl
system.cpu0.kern.ipl_count_31 78609 58.10% 100.00% # number of times we switched to this ipl
system.cpu0.kern.ipl_good 109457 # number of times we switched to this ipl from a different ipl
system.cpu0.kern.ipl_good_0 53658 49.02% 49.02% # number of times we switched to this ipl from a different ipl
system.cpu0.kern.inst.hwrei 157735 # number of hwrei instructions executed
system.cpu0.kern.inst.quiesce 6620 # number of quiesce instructions executed
system.cpu0.kern.ipl_count 134538 # number of times we switched to this ipl
system.cpu0.kern.ipl_count_0 53716 39.93% 39.93% # number of times we switched to this ipl
system.cpu0.kern.ipl_count_21 131 0.10% 40.02% # number of times we switched to this ipl
system.cpu0.kern.ipl_count_22 2009 1.49% 41.52% # number of times we switched to this ipl
system.cpu0.kern.ipl_count_30 482 0.36% 41.88% # number of times we switched to this ipl
system.cpu0.kern.ipl_count_31 78200 58.12% 100.00% # number of times we switched to this ipl
system.cpu0.kern.ipl_good 108740 # number of times we switched to this ipl from a different ipl
system.cpu0.kern.ipl_good_0 53300 49.02% 49.02% # number of times we switched to this ipl from a different ipl
system.cpu0.kern.ipl_good_21 131 0.12% 49.14% # number of times we switched to this ipl from a different ipl
system.cpu0.kern.ipl_good_22 2010 1.84% 50.98% # number of times we switched to this ipl from a different ipl
system.cpu0.kern.ipl_good_30 482 0.44% 51.42% # number of times we switched to this ipl from a different ipl
system.cpu0.kern.ipl_good_31 53176 48.58% 100.00% # number of times we switched to this ipl from a different ipl
system.cpu0.kern.ipl_ticks 3978541594 # number of cycles we spent at this ipl
system.cpu0.kern.ipl_ticks_0 3845416172 96.65% 96.65% # number of cycles we spent at this ipl
system.cpu0.kern.ipl_ticks_21 119304 0.00% 96.66% # number of cycles we spent at this ipl
system.cpu0.kern.ipl_ticks_22 1874808 0.05% 96.70% # number of cycles we spent at this ipl
system.cpu0.kern.ipl_ticks_30 1202656 0.03% 96.73% # number of cycles we spent at this ipl
system.cpu0.kern.ipl_ticks_31 129928654 3.27% 100.00% # number of cycles we spent at this ipl
system.cpu0.kern.ipl_used 0.808959 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_0 0.992307 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_good_22 2009 1.85% 50.98% # number of times we switched to this ipl from a different ipl
system.cpu0.kern.ipl_good_30 482 0.44% 51.43% # number of times we switched to this ipl from a different ipl
system.cpu0.kern.ipl_good_31 52818 48.57% 100.00% # number of times we switched to this ipl from a different ipl
system.cpu0.kern.ipl_ticks 3976579702 # number of cycles we spent at this ipl
system.cpu0.kern.ipl_ticks_0 3843619308 96.66% 96.66% # number of cycles we spent at this ipl
system.cpu0.kern.ipl_ticks_21 123584 0.00% 96.66% # number of cycles we spent at this ipl
system.cpu0.kern.ipl_ticks_22 1873872 0.05% 96.71% # number of cycles we spent at this ipl
system.cpu0.kern.ipl_ticks_30 1201752 0.03% 96.74% # number of cycles we spent at this ipl
system.cpu0.kern.ipl_ticks_31 129761186 3.26% 100.00% # number of cycles we spent at this ipl
system.cpu0.kern.ipl_used 0.808247 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_0 0.992256 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_31 0.676462 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.mode_good_kernel 1189
system.cpu0.kern.mode_good_user 1189
system.cpu0.kern.ipl_used_31 0.675422 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.mode_good_kernel 1193
system.cpu0.kern.mode_good_user 1193
system.cpu0.kern.mode_good_idle 0
system.cpu0.kern.mode_switch_kernel 6717 # number of protection mode switches
system.cpu0.kern.mode_switch_user 1189 # number of protection mode switches
system.cpu0.kern.mode_switch_kernel 6700 # number of protection mode switches
system.cpu0.kern.mode_switch_user 1193 # number of protection mode switches
system.cpu0.kern.mode_switch_idle 0 # number of protection mode switches
system.cpu0.kern.mode_switch_good 0.300784 # fraction of useful protection mode switches
system.cpu0.kern.mode_switch_good_kernel 0.177014 # fraction of useful protection mode switches
system.cpu0.kern.mode_switch_good 0.302293 # fraction of useful protection mode switches
system.cpu0.kern.mode_switch_good_kernel 0.178060 # fraction of useful protection mode switches
system.cpu0.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
system.cpu0.kern.mode_switch_good_idle <err: div-0> # fraction of useful protection mode switches
system.cpu0.kern.mode_ticks_kernel 3967314670 99.76% 99.76% # number of ticks spent at the given mode
system.cpu0.kern.mode_ticks_user 9570844 0.24% 100.00% # number of ticks spent at the given mode
system.cpu0.kern.mode_ticks_kernel 3965295376 99.76% 99.76% # number of ticks spent at the given mode
system.cpu0.kern.mode_ticks_user 9600934 0.24% 100.00% # number of ticks spent at the given mode
system.cpu0.kern.mode_ticks_idle 0 0.00% 100.00% # number of ticks spent at the given mode
system.cpu0.kern.swap_context 2894 # number of times the context was actually changed
system.cpu0.kern.swap_context 2879 # number of times the context was actually changed
system.cpu0.kern.syscall 216 # number of syscalls executed
system.cpu0.kern.syscall_2 7 3.24% 3.24% # number of syscalls executed
system.cpu0.kern.syscall_3 18 8.33% 11.57% # number of syscalls executed
@ -115,82 +114,82 @@ system.cpu0.kern.syscall_98 2 0.93% 97.69% # nu
system.cpu0.kern.syscall_132 2 0.93% 98.61% # number of syscalls executed
system.cpu0.kern.syscall_144 1 0.46% 99.07% # number of syscalls executed
system.cpu0.kern.syscall_147 2 0.93% 100.00% # number of syscalls executed
system.cpu0.not_idle_fraction 0.069210 # Percentage of non-idle cycles
system.cpu0.numCycles 3978541834 # number of cpu cycles simulated
system.cpu0.num_insts 50446812 # Number of instructions executed
system.cpu0.num_refs 13021282 # Number of memory references
system.cpu1.dtb.accesses 346250 # DTB accesses
system.cpu0.not_idle_fraction 0.069047 # Percentage of non-idle cycles
system.cpu0.numCycles 3976579942 # number of cpu cycles simulated
system.cpu0.num_insts 50252314 # Number of instructions executed
system.cpu0.num_refs 12958725 # Number of memory references
system.cpu1.dtb.accesses 346252 # DTB accesses
system.cpu1.dtb.acv 67 # DTB access violations
system.cpu1.dtb.hits 4679272 # DTB hits
system.cpu1.dtb.misses 3343 # DTB misses
system.cpu1.dtb.read_accesses 235842 # DTB read accesses
system.cpu1.dtb.hits 4740996 # DTB hits
system.cpu1.dtb.misses 3345 # DTB misses
system.cpu1.dtb.read_accesses 235843 # DTB read accesses
system.cpu1.dtb.read_acv 26 # DTB read access violations
system.cpu1.dtb.read_hits 2672655 # DTB read hits
system.cpu1.dtb.read_misses 2917 # DTB read misses
system.cpu1.dtb.write_accesses 110408 # DTB write accesses
system.cpu1.dtb.read_hits 2707487 # DTB read hits
system.cpu1.dtb.read_misses 2918 # DTB read misses
system.cpu1.dtb.write_accesses 110409 # DTB write accesses
system.cpu1.dtb.write_acv 41 # DTB write access violations
system.cpu1.dtb.write_hits 2006617 # DTB write hits
system.cpu1.dtb.write_misses 426 # DTB write misses
system.cpu1.idle_fraction 0.974905 # Percentage of idle cycles
system.cpu1.itb.accesses 2089153 # ITB accesses
system.cpu1.dtb.write_hits 2033509 # DTB write hits
system.cpu1.dtb.write_misses 427 # DTB write misses
system.cpu1.idle_fraction 0.974578 # Percentage of idle cycles
system.cpu1.itb.accesses 2097175 # ITB accesses
system.cpu1.itb.acv 23 # ITB acv
system.cpu1.itb.hits 2087881 # ITB hits
system.cpu1.itb.hits 2095903 # ITB hits
system.cpu1.itb.misses 1272 # ITB misses
system.cpu1.kern.callpal 80102 # number of callpals executed
system.cpu1.kern.callpal 80960 # number of callpals executed
system.cpu1.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
system.cpu1.kern.callpal_wripir 482 0.60% 0.60% # number of callpals executed
system.cpu1.kern.callpal_wrmces 1 0.00% 0.60% # number of callpals executed
system.cpu1.kern.callpal_wrfen 1 0.00% 0.61% # number of callpals executed
system.cpu1.kern.callpal_swpctx 2276 2.84% 3.45% # number of callpals executed
system.cpu1.kern.callpal_tbi 7 0.01% 3.46% # number of callpals executed
system.cpu1.kern.callpal_wrent 7 0.01% 3.46% # number of callpals executed
system.cpu1.kern.callpal_swpipl 70820 88.41% 91.88% # number of callpals executed
system.cpu1.kern.callpal_rdps 2215 2.77% 94.64% # number of callpals executed
system.cpu1.kern.callpal_wrkgp 1 0.00% 94.64% # number of callpals executed
system.cpu1.kern.callpal_wrusp 4 0.00% 94.65% # number of callpals executed
system.cpu1.kern.callpal_rdusp 1 0.00% 94.65% # number of callpals executed
system.cpu1.kern.callpal_whami 3 0.00% 94.65% # number of callpals executed
system.cpu1.kern.callpal_rti 4087 5.10% 99.76% # number of callpals executed
system.cpu1.kern.callpal_wrfen 1 0.00% 0.60% # number of callpals executed
system.cpu1.kern.callpal_swpctx 2289 2.83% 3.43% # number of callpals executed
system.cpu1.kern.callpal_tbi 7 0.01% 3.44% # number of callpals executed
system.cpu1.kern.callpal_wrent 7 0.01% 3.44% # number of callpals executed
system.cpu1.kern.callpal_swpipl 71572 88.40% 91.85% # number of callpals executed
system.cpu1.kern.callpal_rdps 2303 2.84% 94.69% # number of callpals executed
system.cpu1.kern.callpal_wrkgp 1 0.00% 94.69% # number of callpals executed
system.cpu1.kern.callpal_wrusp 4 0.00% 94.70% # number of callpals executed
system.cpu1.kern.callpal_rdusp 1 0.00% 94.70% # number of callpals executed
system.cpu1.kern.callpal_whami 3 0.00% 94.70% # number of callpals executed
system.cpu1.kern.callpal_rti 4092 5.05% 99.76% # number of callpals executed
system.cpu1.kern.callpal_callsys 162 0.20% 99.96% # number of callpals executed
system.cpu1.kern.callpal_imb 33 0.04% 100.00% # number of callpals executed
system.cpu1.kern.callpal_rdunique 1 0.00% 100.00% # number of callpals executed
system.cpu1.kern.inst.arm 0 # number of arm instructions executed
system.cpu1.kern.inst.hwrei 87377 # number of hwrei instructions executed
system.cpu1.kern.inst.quiesce 2792 # number of quiesce instructions executed
system.cpu1.kern.ipl_count 77476 # number of times we switched to this ipl
system.cpu1.kern.ipl_count_0 30110 38.86% 38.86% # number of times we switched to this ipl
system.cpu1.kern.ipl_count_22 2002 2.58% 41.45% # number of times we switched to this ipl
system.cpu1.kern.ipl_count_30 566 0.73% 42.18% # number of times we switched to this ipl
system.cpu1.kern.ipl_count_31 44798 57.82% 100.00% # number of times we switched to this ipl
system.cpu1.kern.ipl_good 60300 # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_good_0 29149 48.34% 48.34% # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_good_22 2002 3.32% 51.66% # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_good_30 566 0.94% 52.60% # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_good_31 28583 47.40% 100.00% # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_ticks 3979354976 # number of cycles we spent at this ipl
system.cpu1.kern.ipl_ticks_0 3857760682 96.94% 96.94% # number of cycles we spent at this ipl
system.cpu1.kern.ipl_ticks_22 1872502 0.05% 96.99% # number of cycles we spent at this ipl
system.cpu1.kern.ipl_ticks_30 1446416 0.04% 97.03% # number of cycles we spent at this ipl
system.cpu1.kern.ipl_ticks_31 118275376 2.97% 100.00% # number of cycles we spent at this ipl
system.cpu1.kern.ipl_used 0.778306 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_0 0.968084 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.inst.hwrei 88242 # number of hwrei instructions executed
system.cpu1.kern.inst.quiesce 2815 # number of quiesce instructions executed
system.cpu1.kern.ipl_count 78238 # number of times we switched to this ipl
system.cpu1.kern.ipl_count_0 30461 38.93% 38.93% # number of times we switched to this ipl
system.cpu1.kern.ipl_count_22 2001 2.56% 41.49% # number of times we switched to this ipl
system.cpu1.kern.ipl_count_30 572 0.73% 42.22% # number of times we switched to this ipl
system.cpu1.kern.ipl_count_31 45204 57.78% 100.00% # number of times we switched to this ipl
system.cpu1.kern.ipl_good 61001 # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_good_0 29500 48.36% 48.36% # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_good_22 2001 3.28% 51.64% # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_good_30 572 0.94% 52.58% # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_good_31 28928 47.42% 100.00% # number of times we switched to this ipl from a different ipl
system.cpu1.kern.ipl_ticks 3977361024 # number of cycles we spent at this ipl
system.cpu1.kern.ipl_ticks_0 3855399740 96.93% 96.93% # number of cycles we spent at this ipl
system.cpu1.kern.ipl_ticks_22 1871566 0.05% 96.98% # number of cycles we spent at this ipl
system.cpu1.kern.ipl_ticks_30 1461344 0.04% 97.02% # number of cycles we spent at this ipl
system.cpu1.kern.ipl_ticks_31 118628374 2.98% 100.00% # number of cycles we spent at this ipl
system.cpu1.kern.ipl_used 0.779685 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_0 0.968451 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_31 0.638042 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.mode_good_kernel 1051
system.cpu1.kern.mode_good_user 561
system.cpu1.kern.mode_good_idle 490
system.cpu1.kern.mode_switch_kernel 2388 # number of protection mode switches
system.cpu1.kern.mode_switch_user 561 # number of protection mode switches
system.cpu1.kern.mode_switch_idle 3025 # number of protection mode switches
system.cpu1.kern.mode_switch_good 0.351858 # fraction of useful protection mode switches
system.cpu1.kern.mode_switch_good_kernel 0.440117 # fraction of useful protection mode switches
system.cpu1.kern.ipl_used_31 0.639943 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.mode_good_kernel 1058
system.cpu1.kern.mode_good_user 562
system.cpu1.kern.mode_good_idle 496
system.cpu1.kern.mode_switch_kernel 2397 # number of protection mode switches
system.cpu1.kern.mode_switch_user 562 # number of protection mode switches
system.cpu1.kern.mode_switch_idle 3035 # number of protection mode switches
system.cpu1.kern.mode_switch_good 0.353020 # fraction of useful protection mode switches
system.cpu1.kern.mode_switch_good_kernel 0.441385 # fraction of useful protection mode switches
system.cpu1.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
system.cpu1.kern.mode_switch_good_idle 0.161983 # fraction of useful protection mode switches
system.cpu1.kern.mode_ticks_kernel 62784640 1.58% 1.58% # number of ticks spent at the given mode
system.cpu1.kern.mode_ticks_user 5748262 0.14% 1.72% # number of ticks spent at the given mode
system.cpu1.kern.mode_ticks_idle 3910822066 98.28% 100.00% # number of ticks spent at the given mode
system.cpu1.kern.swap_context 2277 # number of times the context was actually changed
system.cpu1.kern.mode_switch_good_idle 0.163427 # fraction of useful protection mode switches
system.cpu1.kern.mode_ticks_kernel 64032120 1.61% 1.61% # number of ticks spent at the given mode
system.cpu1.kern.mode_ticks_user 5754658 0.14% 1.75% # number of ticks spent at the given mode
system.cpu1.kern.mode_ticks_idle 3907574238 98.25% 100.00% # number of ticks spent at the given mode
system.cpu1.kern.swap_context 2290 # number of times the context was actually changed
system.cpu1.kern.syscall 110 # number of syscalls executed
system.cpu1.kern.syscall_2 1 0.91% 0.91% # number of syscalls executed
system.cpu1.kern.syscall_3 12 10.91% 11.82% # number of syscalls executed
@ -213,10 +212,10 @@ system.cpu1.kern.syscall_90 1 0.91% 95.45% # nu
system.cpu1.kern.syscall_92 2 1.82% 97.27% # number of syscalls executed
system.cpu1.kern.syscall_132 2 1.82% 99.09% # number of syscalls executed
system.cpu1.kern.syscall_144 1 0.91% 100.00% # number of syscalls executed
system.cpu1.not_idle_fraction 0.025095 # Percentage of non-idle cycles
system.cpu1.numCycles 3979356760 # number of cpu cycles simulated
system.cpu1.num_insts 14708820 # Number of instructions executed
system.cpu1.num_refs 4709061 # Number of memory references
system.cpu1.not_idle_fraction 0.025422 # Percentage of non-idle cycles
system.cpu1.numCycles 3977362808 # number of cpu cycles simulated
system.cpu1.num_insts 14898950 # Number of instructions executed
system.cpu1.num_refs 4770935 # Number of memory references
system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD).
system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD).

View file

@ -8,11 +8,11 @@ type=LinuxAlphaSystem
children=bridge cpu disk0 disk2 intrctrl iobus membus physmem sim_console simple_disk tsunami
boot_cpu_frequency=1
boot_osflags=root=/dev/hda1 console=ttyS0
console=/dist/m5/system/binaries/console
console=/Users/ali/work/system/binaries/console
init_param=0
kernel=/dist/m5/system/binaries/vmlinux
kernel=/Users/ali/work/system/binaries/vmlinux
mem_mode=timing
pal=/dist/m5/system/binaries/ts_osfpal
pal=/Users/ali/work/system/binaries/ts_osfpal
physmem=system.physmem
readfile=tests/halt.sh
symbolfile=
@ -22,8 +22,13 @@ system_type=34
[system.bridge]
type=Bridge
delay=0
queue_size_a=16
queue_size_b=16
fix_partial_write_a=false
fix_partial_write_b=true
nack_delay=0
req_size_a=16
req_size_b=16
resp_size_a=16
resp_size_b=16
write_ack=false
side_a=system.iobus.port[0]
side_b=system.membus.port[0]
@ -76,7 +81,7 @@ table_size=65536
[system.disk0.image.child]
type=RawDiskImage
image_file=/dist/m5/system/disks/linux-latest.img
image_file=/Users/ali/work/system/disks/linux-latest.img
read_only=true
[system.disk2]
@ -95,7 +100,7 @@ table_size=65536
[system.disk2.image.child]
type=RawDiskImage
image_file=/dist/m5/system/disks/linux-bigswap2.img
image_file=/Users/ali/work/system/disks/linux-bigswap2.img
read_only=true
[system.intrctrl]
@ -104,6 +109,7 @@ sys=system
[system.iobus]
type=Bus
block_size=64
bus_id=0
clock=2
responder_set=true
@ -114,6 +120,7 @@ port=system.bridge.side_a system.tsunami.cchip.pio system.tsunami.pchip.pio syst
[system.membus]
type=Bus
children=responder
block_size=64
bus_id=1
clock=2
responder_set=false
@ -161,7 +168,7 @@ system=system
[system.simple_disk.disk]
type=RawDiskImage
image_file=/dist/m5/system/disks/linux-latest.img
image_file=/Users/ali/work/system/disks/linux-latest.img
read_only=true
[system.tsunami]
@ -210,6 +217,8 @@ dma_write_delay=0
dma_write_factor=0
hardware_address=00:90:00:00:00:01
intr_delay=20000
max_backoff_delay=20000
min_backoff_delay=8
pci_bus=0
pci_dev=1
pci_func=0
@ -582,6 +591,8 @@ children=configdata
config_latency=40
configdata=system.tsunami.ide.configdata
disks=system.disk0 system.disk2
max_backoff_delay=20000
min_backoff_delay=8
pci_bus=0
pci_dev=0
pci_func=0

View file

@ -14,9 +14,9 @@ type=LinuxAlphaSystem
boot_cpu_frequency=1
physmem=system.physmem
mem_mode=timing
kernel=/dist/m5/system/binaries/vmlinux
console=/dist/m5/system/binaries/console
pal=/dist/m5/system/binaries/ts_osfpal
kernel=/Users/ali/work/system/binaries/vmlinux
console=/Users/ali/work/system/binaries/console
pal=/Users/ali/work/system/binaries/ts_osfpal
boot_osflags=root=/dev/hda1 console=ttyS0
readfile=tests/halt.sh
symbolfile=
@ -30,6 +30,7 @@ bus_id=1
clock=2
width=64
responder_set=false
block_size=64
[system.intrctrl]
type=IntrControl
@ -57,14 +58,19 @@ system=system
[system.bridge]
type=Bridge
queue_size_a=16
queue_size_b=16
req_size_a=16
req_size_b=16
resp_size_a=16
resp_size_b=16
delay=0
nack_delay=0
write_ack=false
fix_partial_write_a=false
fix_partial_write_b=true
[system.disk0.image.child]
type=RawDiskImage
image_file=/dist/m5/system/disks/linux-latest.img
image_file=/Users/ali/work/system/disks/linux-latest.img
read_only=true
[system.disk0.image]
@ -82,7 +88,7 @@ delay=2000
[system.disk2.image.child]
type=RawDiskImage
image_file=/dist/m5/system/disks/linux-bigswap2.img
image_file=/Users/ali/work/system/disks/linux-bigswap2.img
read_only=true
[system.disk2.image]
@ -100,7 +106,7 @@ delay=2000
[system.simple_disk.disk]
type=RawDiskImage
image_file=/dist/m5/system/disks/linux-latest.img
image_file=/Users/ali/work/system/disks/linux-latest.img
read_only=true
[system.simple_disk]
@ -479,6 +485,8 @@ BAR5Size=0
type=NSGigE
system=system
platform=system.tsunami
min_backoff_delay=8
max_backoff_delay=20000
configdata=system.tsunami.ethernet.configdata
pci_bus=0
pci_dev=1
@ -601,6 +609,8 @@ BAR5Size=0
type=IdeController
system=system
platform=system.tsunami
min_backoff_delay=8
max_backoff_delay=20000
configdata=system.tsunami.ide.configdata
pci_bus=0
pci_dev=0
@ -615,4 +625,5 @@ bus_id=0
clock=2
width=64
responder_set=true
block_size=64

View file

@ -1,39 +1,38 @@
---------- Begin Simulation Statistics ----------
host_inst_rate 261150 # Simulator instruction rate (inst/s)
host_mem_usage 193084 # Number of bytes of host memory used
host_seconds 230.08 # Real time elapsed on the host
host_tick_rate 16884971 # Simulator tick rate (ticks/s)
host_inst_rate 233672 # Simulator instruction rate (inst/s)
host_seconds 257.14 # Real time elapsed on the host
host_tick_rate 15108417 # Simulator tick rate (ticks/s)
sim_freq 2000000000 # Frequency of simulated ticks
sim_insts 60085806 # Number of instructions simulated
sim_insts 60085488 # Number of instructions simulated
sim_seconds 1.942464 # Number of seconds simulated
sim_ticks 3884928812 # Number of ticks simulated
system.cpu.dtb.accesses 1020801 # DTB accesses
system.cpu.dtb.accesses 1020784 # DTB accesses
system.cpu.dtb.acv 367 # DTB access violations
system.cpu.dtb.hits 16070687 # DTB hits
system.cpu.dtb.misses 11476 # DTB misses
system.cpu.dtb.read_accesses 728869 # DTB read accesses
system.cpu.dtb.hits 16070353 # DTB hits
system.cpu.dtb.misses 11466 # DTB misses
system.cpu.dtb.read_accesses 728853 # DTB read accesses
system.cpu.dtb.read_acv 210 # DTB read access violations
system.cpu.dtb.read_hits 9714773 # DTB read hits
system.cpu.dtb.read_misses 10333 # DTB read misses
system.cpu.dtb.write_accesses 291932 # DTB write accesses
system.cpu.dtb.read_hits 9714571 # DTB read hits
system.cpu.dtb.read_misses 10324 # DTB read misses
system.cpu.dtb.write_accesses 291931 # DTB write accesses
system.cpu.dtb.write_acv 157 # DTB write access violations
system.cpu.dtb.write_hits 6355914 # DTB write hits
system.cpu.dtb.write_misses 1143 # DTB write misses
system.cpu.dtb.write_hits 6355782 # DTB write hits
system.cpu.dtb.write_misses 1142 # DTB write misses
system.cpu.idle_fraction 0.921526 # Percentage of idle cycles
system.cpu.itb.accesses 4986026 # ITB accesses
system.cpu.itb.accesses 4985698 # ITB accesses
system.cpu.itb.acv 184 # ITB acv
system.cpu.itb.hits 4981016 # ITB hits
system.cpu.itb.hits 4980688 # ITB hits
system.cpu.itb.misses 5010 # ITB misses
system.cpu.kern.callpal 193489 # number of callpals executed
system.cpu.kern.callpal 193483 # number of callpals executed
system.cpu.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrmces 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrfen 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrvptptr 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_swpctx 4146 2.14% 2.14% # number of callpals executed
system.cpu.kern.callpal_swpctx 4144 2.14% 2.14% # number of callpals executed
system.cpu.kern.callpal_tbi 54 0.03% 2.17% # number of callpals executed
system.cpu.kern.callpal_wrent 7 0.00% 2.18% # number of callpals executed
system.cpu.kern.callpal_swpipl 176515 91.23% 93.40% # number of callpals executed
system.cpu.kern.callpal_swpipl 176511 91.23% 93.40% # number of callpals executed
system.cpu.kern.callpal_rdps 6861 3.55% 96.95% # number of callpals executed
system.cpu.kern.callpal_wrkgp 1 0.00% 96.95% # number of callpals executed
system.cpu.kern.callpal_wrusp 7 0.00% 96.95% # number of callpals executed
@ -43,42 +42,42 @@ system.cpu.kern.callpal_rti 5187 2.68% 99.64% # nu
system.cpu.kern.callpal_callsys 515 0.27% 99.91% # number of callpals executed
system.cpu.kern.callpal_imb 181 0.09% 100.00% # number of callpals executed
system.cpu.kern.inst.arm 0 # number of arm instructions executed
system.cpu.kern.inst.hwrei 212621 # number of hwrei instructions executed
system.cpu.kern.inst.quiesce 6152 # number of quiesce instructions executed
system.cpu.kern.ipl_count 183796 # number of times we switched to this ipl
system.cpu.kern.ipl_count_0 75070 40.84% 40.84% # number of times we switched to this ipl
system.cpu.kern.inst.hwrei 212605 # number of hwrei instructions executed
system.cpu.kern.inst.quiesce 6153 # number of quiesce instructions executed
system.cpu.kern.ipl_count 183792 # number of times we switched to this ipl
system.cpu.kern.ipl_count_0 75069 40.84% 40.84% # number of times we switched to this ipl
system.cpu.kern.ipl_count_21 131 0.07% 40.92% # number of times we switched to this ipl
system.cpu.kern.ipl_count_22 1962 1.07% 41.98% # number of times we switched to this ipl
system.cpu.kern.ipl_count_31 106633 58.02% 100.00% # number of times we switched to this ipl
system.cpu.kern.ipl_good 149499 # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_good_0 73703 49.30% 49.30% # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_count_31 106630 58.02% 100.00% # number of times we switched to this ipl
system.cpu.kern.ipl_good 149497 # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_good_0 73702 49.30% 49.30% # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_good_21 131 0.09% 49.39% # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_good_22 1962 1.31% 50.70% # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_good_31 73703 49.30% 100.00% # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_good_31 73702 49.30% 100.00% # number of times we switched to this ipl from a different ipl
system.cpu.kern.ipl_ticks 3884927028 # number of cycles we spent at this ipl
system.cpu.kern.ipl_ticks_0 3757862392 96.73% 96.73% # number of cycles we spent at this ipl
system.cpu.kern.ipl_ticks_0 3757863794 96.73% 96.73% # number of cycles we spent at this ipl
system.cpu.kern.ipl_ticks_21 112456 0.00% 96.73% # number of cycles we spent at this ipl
system.cpu.kern.ipl_ticks_22 918216 0.02% 96.76% # number of cycles we spent at this ipl
system.cpu.kern.ipl_ticks_31 126033964 3.24% 100.00% # number of cycles we spent at this ipl
system.cpu.kern.ipl_used 0.813396 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_ticks_31 126032562 3.24% 100.00% # number of cycles we spent at this ipl
system.cpu.kern.ipl_used 0.813403 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_0 0.981790 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_31 0.691184 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.mode_good_kernel 1898
system.cpu.kern.mode_good_user 1744
system.cpu.kern.mode_good_idle 154
system.cpu.kern.mode_switch_kernel 5934 # number of protection mode switches
system.cpu.kern.mode_switch_user 1744 # number of protection mode switches
system.cpu.kern.mode_switch_idle 2065 # number of protection mode switches
system.cpu.kern.mode_switch_good 0.389613 # fraction of useful protection mode switches
system.cpu.kern.mode_switch_good_kernel 0.319852 # fraction of useful protection mode switches
system.cpu.kern.ipl_used_31 0.691194 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.mode_good_kernel 1895
system.cpu.kern.mode_good_user 1742
system.cpu.kern.mode_good_idle 153
system.cpu.kern.mode_switch_kernel 5935 # number of protection mode switches
system.cpu.kern.mode_switch_user 1742 # number of protection mode switches
system.cpu.kern.mode_switch_idle 2062 # number of protection mode switches
system.cpu.kern.mode_switch_good 0.389157 # fraction of useful protection mode switches
system.cpu.kern.mode_switch_good_kernel 0.319292 # fraction of useful protection mode switches
system.cpu.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
system.cpu.kern.mode_switch_good_idle 0.074576 # fraction of useful protection mode switches
system.cpu.kern.mode_ticks_kernel 112858396 2.91% 2.91% # number of ticks spent at the given mode
system.cpu.kern.mode_ticks_user 15210848 0.39% 3.30% # number of ticks spent at the given mode
system.cpu.kern.mode_ticks_idle 3756857776 96.70% 100.00% # number of ticks spent at the given mode
system.cpu.kern.swap_context 4147 # number of times the context was actually changed
system.cpu.kern.mode_switch_good_idle 0.074200 # fraction of useful protection mode switches
system.cpu.kern.mode_ticks_kernel 112890486 2.91% 2.91% # number of ticks spent at the given mode
system.cpu.kern.mode_ticks_user 15209884 0.39% 3.30% # number of ticks spent at the given mode
system.cpu.kern.mode_ticks_idle 3756826650 96.70% 100.00% # number of ticks spent at the given mode
system.cpu.kern.swap_context 4145 # number of times the context was actually changed
system.cpu.kern.syscall 326 # number of syscalls executed
system.cpu.kern.syscall_2 8 2.45% 2.45% # number of syscalls executed
system.cpu.kern.syscall_3 30 9.20% 11.66% # number of syscalls executed
@ -112,8 +111,8 @@ system.cpu.kern.syscall_144 2 0.61% 99.39% # nu
system.cpu.kern.syscall_147 2 0.61% 100.00% # number of syscalls executed
system.cpu.not_idle_fraction 0.078474 # Percentage of non-idle cycles
system.cpu.numCycles 3884928812 # number of cpu cycles simulated
system.cpu.num_insts 60085806 # Number of instructions executed
system.cpu.num_refs 16318611 # Number of memory references
system.cpu.num_insts 60085488 # Number of instructions executed
system.cpu.num_refs 16318244 # Number of memory references
system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD).
system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD).