Merge from head.

--HG--
extra : convert_revision : c5d045208e521f0bc9e47484a380917e27066798
Steve Reinhardt 2007-07-15 23:22:04 -04:00
commit d5bb145590
109 changed files with 3033 additions and 6538 deletions

.hgtags
View file

@ -1,16 +0,0 @@
6b99127531fd692ff0f202e327d0826ed2bfcf5f m5_1.0_beta1
1a40e60270c11ec24f11c783e70367e2740cdc56 m5_1.0_beta1
069849384988e553b6edae71ecaf1fb6e918d738 m5_1.0_beta2
4cfa92eca35d532b339507f1c631e1986d87b258 m5_1.0_tutorial
ffe1942d845c67fb3fd04692420c9433638eba13 m5_1.0_web
af8bf5e4e85ba1a773183cc3f6c43bcdf0ce146a m5_1.1
1c0eeb0dae9b6a2a5479faf3ab52fb1ed0ce703f m5_1.1
c486924ed90eb6805e8cf44ddee5ad5435c79051 m5_1.1
01e679b66ca9474f10f8f96d391693adf76fc73a m5_1.1
2608cd7def85c9fdc84251295c8023fab990d530 m5_1.1
cdd48642d9bf584bd445b40abec9e7f934a5900b m5_1.1
8d690c7c2efced99f7991b7ace56d769bae7cfdd m5_2.0_beta1
d83885ad2b41777c97b94882aa8f07e761e55ac1 m5_2.0_beta1_patch1
1906dcace7c27b2153bfb95ca1218660e1cc1f70 m5_2.0_beta2
b174ae14f007ba0c341f8df77d36f57f48369cc8 m5_2.0_beta2
91a9ac67662aa3a79315ade29b17a85961fecd88 m5_2.0_beta3

View file

@ -35,7 +35,6 @@ class L1Cache(BaseCache):
latency = '1ns'
mshrs = 10
tgts_per_mshr = 5
protocol = CoherenceProtocol(protocol='moesi')
class L2Cache(BaseCache):
assoc = 8

View file

@ -33,14 +33,55 @@ m5.AddToPath('../common')
parser = optparse.OptionParser()
parser.add_option("--caches", action="store_true")
parser.add_option("-t", "--timing", action="store_true")
parser.add_option("-m", "--maxtick", type="int")
parser.add_option("-l", "--maxloads", default = "1000000000000", type="int")
parser.add_option("-n", "--numtesters", default = "8", type="int")
parser.add_option("-p", "--protocol",
default="moesi",
help="The coherence protocol to use for the L1'a (i.e. MOESI, MOSI)")
parser.add_option("-a", "--atomic", action="store_true",
help="Use atomic (non-timing) mode")
parser.add_option("-b", "--blocking", action="store_true",
help="Use blocking caches")
parser.add_option("-l", "--maxloads", metavar="N", default=0,
help="Stop after N loads")
parser.add_option("-m", "--maxtick", type="int", default=m5.MaxTick,
metavar="T",
help="Stop after T ticks")
#
# The "tree" specification is a colon-separated list of one or more
# integers. The first integer is the number of caches/testers
# connected directly to main memory. The last integer in the list is
# the number of testers associated with the uppermost level of memory
# (L1 cache, if there are caches, or main memory if no caches). Thus
# if there is only one integer, there are no caches, and the integer
# specifies the number of testers connected directly to main memory.
# The other integers (if any) specify the number of caches at each
# level of the hierarchy in between.
#
# Examples:
#
# "2:1" Two caches connected to memory with a single tester behind each
# (single-level hierarchy, two testers total)
#
# "2:2:1" Two-level hierarchy, 2 L1s behind each of 2 L2s, 4 testers total
#
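# Worked example (illustrative, derived from the parsing code below):
# "2:2:1" parses into [2, 2, 1], i.e. two L2-level caches on the memory
# bus, two L1s behind each L2, and one tester per L1; reduce(lambda
# x,y: x*y, [2, 2, 1]) then gives the 4 testers total noted above.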
parser.add_option("-t", "--treespec", type="string", default="8:1",
help="Colon-separated multilevel tree specification, "
"see script comments for details "
"[default: %default]")
parser.add_option("--force-bus", action="store_true",
help="Use bus between levels even with single cache")
parser.add_option("-f", "--functional", type="int", default=0,
metavar="PCT",
help="Target percentage of functional accesses "
"[default: %default]")
parser.add_option("-u", "--uncacheable", type="int", default=0,
metavar="PCT",
help="Target percentage of uncacheable accesses "
"[default: %default]")
parser.add_option("--progress", type="int", default=1000,
metavar="NLOADS",
help="Progress message interval "
"[default: %default]")
(options, args) = parser.parse_args()
@ -48,74 +89,92 @@ if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
# --------------------
# Base L1 Cache
# --------------------
block_size = 64
class L1(BaseCache):
latency = '1ns'
block_size = 64
mshrs = 12
tgts_per_mshr = 8
protocol = CoherenceProtocol(protocol=options.protocol)
try:
treespec = [int(x) for x in options.treespec.split(':')]
numtesters = reduce(lambda x,y: x*y, treespec)
except:
print "Error parsing treespec option"
sys.exit(1)
# ----------------------
# Base L2 Cache
# ----------------------
if numtesters > block_size:
print "Error: Number of testers limited to %s because of false sharing" \
% (block_size)
sys.exit(1)
class L2(BaseCache):
block_size = 64
latency = '10ns'
mshrs = 92
tgts_per_mshr = 16
write_buffers = 8
if len(treespec) < 1:
print "Error parsing treespec"
sys.exit(1)
#MAX CORES IS 8 with the false sharing method
if options.numtesters > 8:
print "Error: Number of testers limited to 8 because of false sharing"
sys.exit(1)
# define prototype L1 cache
proto_l1 = BaseCache(size = '32kB', assoc = 4, block_size = block_size,
latency = '1ns', tgts_per_mshr = 8)
cpus = [ MemTest(atomic=not options.timing, max_loads=options.maxloads,
percent_functional=50, percent_uncacheable=10,
progress_interval=1000)
for i in xrange(options.numtesters) ]
if options.blocking:
proto_l1.mshrs = 1
else:
proto_l1.mshrs = 8
# build a list of prototypes, one for each level of treespec, starting
# at the end (last entry is tester objects)
prototypes = [ MemTest(atomic=options.atomic, max_loads=options.maxloads,
percent_functional=options.functional,
percent_uncacheable=options.uncacheable,
progress_interval=options.progress) ]
# next comes L1 cache, if any
if len(treespec) > 1:
prototypes.insert(0, proto_l1)
# now add additional cache levels (if any) by scaling L1 params
while len(prototypes) < len(treespec):
# clone previous level and update params
prev = prototypes[0]
next = prev()
next.size = prev.size * 4
next.latency = prev.latency * 10
next.assoc = prev.assoc * 2
prototypes.insert(0, next)
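# Illustrative result: for treespec "2:2:1" the loop above leaves
# prototypes == [L2-like clone (128kB, 8-way, 10ns), proto_l1 (32kB,
# 4-way, 1ns), MemTest]; make_level() below instantiates prototypes[i]
# 'fanout' times at treespec level i.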
# system simulated
system = System(cpu = cpus, funcmem = PhysicalMemory(),
physmem = PhysicalMemory(latency = "50ns"), membus = Bus(clock="500MHz", width=16))
system = System(funcmem = PhysicalMemory(),
physmem = PhysicalMemory(latency = "100ns"))
# l2cache & bus
if options.caches:
system.toL2Bus = Bus(clock="500MHz", width=16)
system.l2c = L2(size='64kB', assoc=8)
system.l2c.cpu_side = system.toL2Bus.port
# connect l2c to membus
system.l2c.mem_side = system.membus.port
# add L1 caches
for cpu in cpus:
if options.caches:
cpu.l1c = L1(size = '32kB', assoc = 4)
cpu.test = cpu.l1c.cpu_side
cpu.l1c.mem_side = system.toL2Bus.port
else:
cpu.test = system.membus.port
system.funcmem.port = cpu.functional
# connect memory to membus
system.physmem.port = system.membus.port
def make_level(spec, prototypes, attach_obj, attach_port):
fanout = spec[0]
parent = attach_obj # use attach obj as config parent too
if len(spec) > 1 and (fanout > 1 or options.force_bus):
new_bus = Bus(clock="500MHz", width=16)
new_bus.port = getattr(attach_obj, attach_port)
parent.cpu_side_bus = new_bus
attach_obj = new_bus
attach_port = "port"
objs = [prototypes[0]() for i in xrange(fanout)]
if len(spec) > 1:
# we just built caches, more levels to go
parent.cache = objs
for cache in objs:
cache.mem_side = getattr(attach_obj, attach_port)
make_level(spec[1:], prototypes[1:], cache, "cpu_side")
else:
# we just built the MemTest objects
parent.cpu = objs
for t in objs:
t.test = getattr(attach_obj, attach_port)
t.functional = system.funcmem.port
make_level(treespec, prototypes, system.physmem, "port")
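# Descriptive sketch of the recursion: each make_level() call peels off
# one treespec level, inserts a bus when there are lower levels and
# fanout > 1 (or --force-bus is given), instantiates 'fanout' clones of
# that level's prototype, and recurses; the base case wires the MemTest
# objects to the uppermost cache port (or straight to memory for a
# one-level spec).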
# -----------------------
# run simulation
# -----------------------
root = Root( system = system )
if options.timing:
root.system.mem_mode = 'timing'
else:
if options.atomic:
root.system.mem_mode = 'atomic'
else:
root.system.mem_mode = 'timing'
# Not much point in this being higher than the L1 latency
m5.ticks.setGlobalFrequency('1ns')
@ -124,9 +183,6 @@ m5.ticks.setGlobalFrequency('1ns')
m5.instantiate(root)
# simulate until program terminates
if options.maxtick:
exit_event = m5.simulate(options.maxtick)
else:
exit_event = m5.simulate(10000000000000)
exit_event = m5.simulate(options.maxtick)
print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()

View file

@ -1,4 +1,4 @@
# Copyright (c) 2006 The Regents of The University of Michigan
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@ -51,9 +51,6 @@ parser.add_option("-n", "--numcpus",
parser.add_option("-f", "--frequency",
default = "1GHz",
help="Frequency of each CPU")
parser.add_option("-p", "--protocol",
default="moesi",
help="The coherence protocol to use for the L1'a (i.e. MOESI, MOSI)")
parser.add_option("--l1size",
default = "32kB")
parser.add_option("--l1latency",
@ -141,7 +138,6 @@ class L1(BaseCache):
block_size = 64
mshrs = 12
tgts_per_mshr = 8
protocol = CoherenceProtocol(protocol=options.protocol)
# ----------------------
# Base L2 Cache Definition

View file

@ -48,9 +48,6 @@ parser.add_option("-n", "--numcpus",
parser.add_option("-f", "--frequency",
default = "1GHz",
help="Frequency of each CPU")
parser.add_option("-p", "--protocol",
default="moesi",
help="The coherence protocol to use for the L1'a (i.e. MOESI, MOSI)")
parser.add_option("--l1size",
default = "32kB")
parser.add_option("--l1latency",
@ -162,7 +159,6 @@ class L1(BaseCache):
block_size = 64
mshrs = 12
tgts_per_mshr = 8
protocol = CoherenceProtocol(protocol=options.protocol)
# ----------------------
# Base L2 Cache Definition

View file

@ -357,7 +357,7 @@ MiscRegFile::CP0Event::process()
const char *
MiscRegFile::CP0Event::description()
{
return "Coprocessor-0 event";
return "Coprocessor-0";
}
void

View file

@ -1023,7 +1023,7 @@ doMmuReadError:
panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
(uint32_t)asi, va);
}
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return tc->getCpuPtr()->cycles(1);
}
@ -1268,7 +1268,7 @@ doMmuWriteError:
panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
(uint32_t)pkt->req->getAsi(), pkt->getAddr(), data);
}
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return tc->getCpuPtr()->cycles(1);
}

View file

@ -47,6 +47,7 @@ baseFlags = [
'BusBridge',
'Cache',
'CachePort',
'CacheRepl',
'Chains',
'Checker',
'Clock',
@ -128,6 +129,7 @@ baseFlags = [
'Mbox',
'MemDepUnit',
'MemoryAccess',
'MemTest',
'MipsPRA',
'O3CPU',
'OzoneCPU',

View file

@ -91,7 +91,7 @@ CPUProgressEvent::process()
const char *
CPUProgressEvent::description()
{
return "CPU Progress event";
return "CPU Progress";
}
#if FULL_SYSTEM

View file

@ -33,7 +33,7 @@ from m5 import build_env
class MemTest(SimObject):
type = 'MemTest'
max_loads = Param.Counter("number of loads to execute")
max_loads = Param.Counter(0, "number of loads to execute")
atomic = Param.Bool(False, "Execute tester in atomic mode? (or timing)\n")
memory_size = Param.Int(65536, "memory size")
percent_dest_unaligned = Param.Percent(50,

View file

@ -64,7 +64,9 @@ MemTest::CpuPort::recvTiming(PacketPtr pkt)
Tick
MemTest::CpuPort::recvAtomic(PacketPtr pkt)
{
panic("MemTest doesn't expect recvAtomic callback!");
// must be snoop upcall
assert(pkt->isRequest());
assert(pkt->getDest() == Packet::Broadcast);
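// (descriptive note) the tester keeps no cached state, so a snooped
// request needs no further handling here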
return curTick;
}
@ -102,7 +104,6 @@ void
MemTest::sendPkt(PacketPtr pkt) {
if (atomic) {
cachePort.sendAtomic(pkt);
pkt->makeAtomicResponse();
completeRequest(pkt);
}
else if (!cachePort.sendTiming(pkt)) {
@ -165,8 +166,6 @@ MemTest::MemTest(const string &name,
tickEvent.schedule(0);
id = TESTER_ALLOCATOR++;
if (TESTER_ALLOCATOR > 8)
panic("False sharing memtester only allows up to 8 testers");
accessRetry = false;
}
@ -194,29 +193,25 @@ MemTest::init()
// memory should be 0; no need to initialize them.
}
static void
printData(ostream &os, uint8_t *data, int nbytes)
{
os << hex << setfill('0');
// assume little-endian: print bytes from highest address to lowest
for (uint8_t *dp = data + nbytes - 1; dp >= data; --dp) {
os << setw(2) << (unsigned)*dp;
}
os << dec;
}
void
MemTest::completeRequest(PacketPtr pkt)
{
Request *req = pkt->req;
DPRINTF(MemTest, "completing %s at address %x (blk %x)\n",
pkt->isWrite() ? "write" : "read",
req->getPaddr(), blockAddr(req->getPaddr()));
MemTestSenderState *state =
dynamic_cast<MemTestSenderState *>(pkt->senderState);
uint8_t *data = state->data;
uint8_t *pkt_data = pkt->getPtr<uint8_t>();
Request *req = pkt->req;
//Remove the address from the list of outstanding
std::set<unsigned>::iterator removeAddr = outstandingAddrs.find(req->getPaddr());
std::set<unsigned>::iterator removeAddr =
outstandingAddrs.find(req->getPaddr());
assert(removeAddr != outstandingAddrs.end());
outstandingAddrs.erase(removeAddr);
@ -224,15 +219,10 @@ MemTest::completeRequest(PacketPtr pkt)
case MemCmd::ReadResp:
if (memcmp(pkt_data, data, pkt->getSize()) != 0) {
cerr << name() << ": on read of 0x" << hex << req->getPaddr()
<< " (0x" << hex << blockAddr(req->getPaddr()) << ")"
<< "@ cycle " << dec << curTick
<< ", cache returns 0x";
printData(cerr, pkt_data, pkt->getSize());
cerr << ", expected 0x";
printData(cerr, data, pkt->getSize());
cerr << endl;
fatal("");
panic("%s: read of %x (blk %x) @ cycle %d "
"returns %x, expected %x\n", name(),
req->getPaddr(), blockAddr(req->getPaddr()), curTick,
*pkt_data, *data);
}
numReads++;
@ -244,40 +234,18 @@ MemTest::completeRequest(PacketPtr pkt)
nextProgressMessage += progressInterval;
}
if (numReads >= maxLoads)
exitSimLoop("Maximum number of loads reached!");
if (maxLoads != 0 && numReads >= maxLoads)
exitSimLoop("maximum number of loads reached");
break;
case MemCmd::WriteResp:
numWritesStat++;
break;
/*
case Copy:
//Also remove dest from outstanding list
removeAddr = outstandingAddrs.find(req->dest);
assert(removeAddr != outstandingAddrs.end());
outstandingAddrs.erase(removeAddr);
numCopiesStat++;
break;
*/
default:
panic("invalid command %s (%d)", pkt->cmdString(), pkt->cmd.toInt());
}
if (blockAddr(req->getPaddr()) == traceBlockAddr) {
cerr << name() << ": completed "
<< (pkt->isWrite() ? "write" : "read")
<< " access of "
<< dec << pkt->getSize() << " bytes at address 0x"
<< hex << req->getPaddr()
<< " (0x" << hex << blockAddr(req->getPaddr()) << ")"
<< ", value = 0x";
printData(cerr, pkt_data, pkt->getSize());
cerr << " @ cycle " << dec << curTick;
cerr << endl;
}
noResponseCycles = 0;
delete state;
delete [] data;
@ -333,7 +301,7 @@ MemTest::tick()
//mem tester
//We can eliminate the lower bits of the offset, and then use the id
//to offset within the blks
offset &= ~63; //Not the low order bits
offset = blockAddr(offset);
offset += id;
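// (illustrative) with 64-byte blocks, blockAddr() clears the low six
// bits and adding the tester id gives each tester a private byte
// inside the block; this is why the tester count is capped at block_size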
access_size = 0;
@ -359,31 +327,26 @@ MemTest::tick()
if (cmd < percentReads) {
// read
//For now we only allow one outstanding request per address per tester
//This means we assume CPU does write forwarding to reads that alias something
//in the cpu store buffer.
// For now we only allow one outstanding request per address
// per tester. This means we assume the CPU does write forwarding
// to reads that alias something in the CPU store buffer.
if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
delete [] result;
delete req;
return;
}
else outstandingAddrs.insert(paddr);
outstandingAddrs.insert(paddr);
// ***** NOTE FOR RON: I'm not sure how to access checkMem. - Kevin
funcPort.readBlob(req->getPaddr(), result, req->getSize());
if (blockAddr(paddr) == traceBlockAddr) {
cerr << name()
<< ": initiating read "
<< ((probe) ? "probe of " : "access of ")
<< dec << req->getSize() << " bytes from addr 0x"
<< hex << paddr
<< " (0x" << hex << blockAddr(paddr) << ")"
<< " at cycle "
<< dec << curTick << endl;
}
DPRINTF(MemTest,
"initiating read at address %x (blk %x) expecting %x\n",
req->getPaddr(), blockAddr(req->getPaddr()), *result);
PacketPtr pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
pkt->setSrc(0);
pkt->dataDynamicArray(new uint8_t[req->getSize()]);
MemTestSenderState *state = new MemTestSenderState(result);
pkt->senderState = state;
@ -393,37 +356,27 @@ MemTest::tick()
pkt->makeAtomicResponse();
completeRequest(pkt);
} else {
// req->completionEvent = new MemCompleteEvent(req, result, this);
sendPkt(pkt);
}
} else {
// write
//For now we only allow one outstanding request per address per tester
//This means we assume CPU does write forwarding to reads that alias something
//in the cpu store buffer.
// For now we only allow one outstanding request per address
// per tester. This means we assume the CPU does write forwarding
// to reads that alias something in the CPU store buffer.
if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
delete [] result;
delete req;
return;
}
else outstandingAddrs.insert(paddr);
outstandingAddrs.insert(paddr);
DPRINTF(MemTest, "initiating write at address %x (blk %x) value %x\n",
req->getPaddr(), blockAddr(req->getPaddr()), data & 0xff);
/*
if (blockAddr(req->getPaddr()) == traceBlockAddr) {
cerr << name() << ": initiating write "
<< ((probe)?"probe of ":"access of ")
<< dec << req->getSize() << " bytes (value = 0x";
printData(cerr, data_pkt->getPtr(), req->getSize());
cerr << ") to addr 0x"
<< hex << req->getPaddr()
<< " (0x" << hex << blockAddr(req->getPaddr()) << ")"
<< " at cycle "
<< dec << curTick << endl;
}
*/
PacketPtr pkt = new Packet(req, MemCmd::WriteReq, Packet::Broadcast);
pkt->setSrc(0);
uint8_t *pkt_data = new uint8_t[req->getSize()];
pkt->dataDynamicArray(pkt_data);
memcpy(pkt_data, &data, req->getSize());
@ -437,54 +390,9 @@ MemTest::tick()
pkt->makeAtomicResponse();
completeRequest(pkt);
} else {
// req->completionEvent = new MemCompleteEvent(req, NULL, this);
sendPkt(pkt);
}
}
/* else {
// copy
unsigned source_align = random() % 100;
unsigned dest_align = random() % 100;
unsigned offset2 = random() % size;
Addr source = ((base) ? baseAddr1 : baseAddr2) + offset;
Addr dest = ((base) ? baseAddr2 : baseAddr1) + offset2;
if (outstandingAddrs.find(source) != outstandingAddrs.end()) return;
else outstandingAddrs.insert(source);
if (outstandingAddrs.find(dest) != outstandingAddrs.end()) return;
else outstandingAddrs.insert(dest);
if (source_align >= percentSourceUnaligned) {
source = blockAddr(source);
}
if (dest_align >= percentDestUnaligned) {
dest = blockAddr(dest);
}
req->cmd = Copy;
req->flags &= ~UNCACHEABLE;
req->paddr = source;
req->dest = dest;
delete [] req->data;
req->data = new uint8_t[blockSize];
req->size = blockSize;
if (source == traceBlockAddr || dest == traceBlockAddr) {
cerr << name()
<< ": initiating copy of "
<< dec << req->size << " bytes from addr 0x"
<< hex << source
<< " (0x" << hex << blockAddr(source) << ")"
<< " to addr 0x"
<< hex << dest
<< " (0x" << hex << blockAddr(dest) << ")"
<< " at cycle "
<< dec << curTick << endl;
}*
cacheInterface->access(req);
uint8_t result[blockSize];
checkMem->access(Read, source, &result, blockSize);
checkMem->access(Write, dest, &result, blockSize);
}
*/
}
void

View file

@ -35,8 +35,6 @@
#include <set>
#include "base/statistics.hh"
//#include "mem/functional/functional.hh"
//#include "mem/mem_interface.hh"
#include "sim/eventq.hh"
#include "sim/sim_exit.hh"
#include "sim/sim_object.hh"
@ -50,9 +48,6 @@ class MemTest : public MemObject
public:
MemTest(const std::string &name,
// MemInterface *_cache_interface,
// PhysicalMemory *main_mem,
// PhysicalMemory *check_mem,
unsigned _memorySize,
unsigned _percentReads,
unsigned _percentFunctional,
@ -85,13 +80,13 @@ class MemTest : public MemObject
TickEvent(MemTest *c)
: Event(&mainEventQueue, CPU_Tick_Pri), cpu(c) {}
void process() {cpu->tick();}
virtual const char *description() { return "tick event"; }
virtual const char *description() { return "MemTest tick"; }
};
TickEvent tickEvent;
class CpuPort : public Port
{
MemTest *memtest;
public:
@ -116,7 +111,7 @@ class MemTest : public MemObject
virtual void getDeviceAddressRanges(AddrRangeList &resp,
bool &snoop)
{ resp.clear(); snoop = true; }
{ resp.clear(); snoop = false; }
};
CpuPort cachePort;
@ -136,12 +131,7 @@ class MemTest : public MemObject
uint8_t *data;
};
// Request *dataReq;
PacketPtr retryPkt;
// MemInterface *cacheInterface;
// PhysicalMemory *mainMem;
// PhysicalMemory *checkMem;
// SimpleThread *thread;
bool accessRetry;

View file

@ -67,7 +67,7 @@ template <class Impl>
const char *
DefaultCommit<Impl>::TrapEvent::description()
{
return "Trap event";
return "Trap";
}
template <class Impl>

View file

@ -83,7 +83,7 @@ template <class Impl>
const char *
FullO3CPU<Impl>::TickEvent::description()
{
return "FullO3CPU tick event";
return "FullO3CPU tick";
}
template <class Impl>
@ -112,7 +112,7 @@ template <class Impl>
const char *
FullO3CPU<Impl>::ActivateThreadEvent::description()
{
return "FullO3CPU \"Activate Thread\" event";
return "FullO3CPU \"Activate Thread\"";
}
template <class Impl>
@ -144,7 +144,7 @@ template <class Impl>
const char *
FullO3CPU<Impl>::DeallocateContextEvent::description()
{
return "FullO3CPU \"Deallocate Context\" event";
return "FullO3CPU \"Deallocate Context\"";
}
template <class Impl>

View file

@ -628,12 +628,6 @@ DefaultFetch<Impl>::fetchCacheLine(Addr fetch_PC, Fault &ret_fault, unsigned tid
// Now do the timing access to see whether or not the instruction
// exists within the cache.
if (!icachePort->sendTiming(data_pkt)) {
if (data_pkt->result == Packet::BadAddress) {
fault = TheISA::genMachineCheckFault();
delete mem_req;
memReq[tid] = NULL;
warn("Bad address!\n");
}
assert(retryPkt == NULL);
assert(retryTid == -1);
DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid);

View file

@ -60,7 +60,7 @@ template <class Impl>
const char *
InstructionQueue<Impl>::FUCompletion::description()
{
return "Functional unit completion event";
return "Functional unit completion";
}
template <class Impl>

View file

@ -84,9 +84,10 @@ LSQ<Impl>::DcachePort::recvTiming(PacketPtr pkt)
lsq->thread[pkt->req->getThreadNum()].completeDataAccess(pkt);
}
else {
//else it is a coherence request, maybe you need to do something
warn("Recieved a coherence request (Invalidate?), 03CPU doesn't"
"update LSQ for these\n");
// must be a snoop
// @TODO someday may need to process invalidations in LSQ here
// to provide stronger consistency model
}
return true;
}

View file

@ -643,7 +643,10 @@ LSQUnit<Impl>::read(Request *req, T &data, int load_idx)
// if the cache is not blocked, do cache access
if (!lsq->cacheBlocked()) {
PacketPtr data_pkt =
new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
new Packet(req,
(req->isLocked() ?
MemCmd::LoadLockedReq : MemCmd::ReadReq),
Packet::Broadcast);
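// (descriptive note) locked (LL) loads are now tagged LoadLockedReq so
// the memory system can track the reservation; the store side below
// issues StoreCondReq for the matching locked store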
data_pkt->dataStatic(load_inst->memData);
LSQSenderState *state = new LSQSenderState;
@ -653,8 +656,6 @@ LSQUnit<Impl>::read(Request *req, T &data, int load_idx)
data_pkt->senderState = state;
if (!dcachePort->sendTiming(data_pkt)) {
Packet::Result result = data_pkt->result;
// Delete state and data packet because a load retry
// initiates a pipeline restart; it does not retry.
delete state;
@ -663,10 +664,6 @@ LSQUnit<Impl>::read(Request *req, T &data, int load_idx)
req = NULL;
if (result == Packet::BadAddress) {
return TheISA::genMachineCheckFault();
}
// If the access didn't succeed, tell the LSQ by setting
// the retry thread id.
lsq->setRetryTid(lsqID);

View file

@ -69,7 +69,7 @@ template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description()
{
return "Store writeback event";
return "Store writeback";
}
template<class Impl>
@ -647,7 +647,9 @@ LSQUnit<Impl>::writebackStores()
memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());
MemCmd command = req->isSwap() ? MemCmd::SwapReq : MemCmd::WriteReq;
MemCmd command =
req->isSwap() ? MemCmd::SwapReq :
(req->isLocked() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
PacketPtr data_pkt = new Packet(req, command,
Packet::Broadcast);
data_pkt->dataStatic(inst->memData);
@ -690,9 +692,6 @@ LSQUnit<Impl>::writebackStores()
}
if (!dcachePort->sendTiming(data_pkt)) {
if (data_pkt->result == Packet::BadAddress) {
panic("LSQ sent out a bad address for a completed store!");
}
// Need to handle becoming blocked on a store.
DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], will"
"retry later\n",
@ -844,26 +843,6 @@ LSQUnit<Impl>::storePostSend(PacketPtr pkt)
#endif
}
if (pkt->result != Packet::Success) {
DPRINTF(LSQUnit,"D-Cache Write Miss on idx:%i!\n",
storeWBIdx);
DPRINTF(Activity, "Active st accessing mem miss [sn:%lli]\n",
storeQueue[storeWBIdx].inst->seqNum);
//mshrSeqNums.push_back(storeQueue[storeWBIdx].inst->seqNum);
//DPRINTF(LSQUnit, "Added MSHR. count = %i\n",mshrSeqNums.size());
// @todo: Increment stat here.
} else {
DPRINTF(LSQUnit,"D-Cache: Write Hit on idx:%i !\n",
storeWBIdx);
DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
storeQueue[storeWBIdx].inst->seqNum);
}
incrStIdx(storeWBIdx);
}
@ -952,9 +931,6 @@ LSQUnit<Impl>::recvRetry()
assert(retryPkt != NULL);
if (dcachePort->sendTiming(retryPkt)) {
if (retryPkt->result == Packet::BadAddress) {
panic("LSQ sent out a bad address for a completed store!");
}
storePostSend(retryPkt);
retryPkt = NULL;
isStoreBlocked = false;

View file

@ -583,7 +583,7 @@ template<class Impl>
const char *
BackEnd<Impl>::LdWritebackEvent::description()
{
return "Load writeback event";
return "Load writeback";
}
@ -603,7 +603,7 @@ template <class Impl>
const char *
BackEnd<Impl>::DCacheCompletionEvent::description()
{
return "Cache completion event";
return "Cache completion";
}
template <class Impl>

View file

@ -84,7 +84,7 @@ template <class Impl>
const char *
OzoneCPU<Impl>::TickEvent::description()
{
return "OzoneCPU tick event";
return "OzoneCPU tick";
}
template <class Impl>

View file

@ -540,5 +540,5 @@ template <class Impl>
const char *
InorderBackEnd<Impl>::DCacheCompletionEvent::description()
{
return "DCache completion event";
return "DCache completion";
}

View file

@ -64,7 +64,7 @@ template <class Impl>
const char *
InstQueue<Impl>::FUCompletion::description()
{
return "Functional unit completion event";
return "Functional unit completion";
}
#endif
template <class Impl>

View file

@ -62,7 +62,7 @@ template <class Impl>
const char *
OzoneLSQ<Impl>::StoreCompletionEvent::description()
{
return "LSQ store completion event";
return "LSQ store completion";
}
template <class Impl>

View file

@ -121,7 +121,7 @@ template <class Impl>
const char *
LWBackEnd<Impl>::TrapEvent::description()
{
return "Trap event";
return "Trap";
}
template <class Impl>

View file

@ -632,7 +632,11 @@ OzoneLWLSQ<Impl>::read(RequestPtr req, T &data, int load_idx)
DPRINTF(OzoneLSQ, "Doing timing access for inst PC %#x\n",
inst->readPC());
PacketPtr data_pkt = new Packet(req, Packet::ReadReq, Packet::Broadcast);
PacketPtr data_pkt =
new Packet(req,
(req->isLocked() ?
MemCmd::LoadLockedReq : MemCmd::ReadReq),
Packet::Broadcast);
data_pkt->dataStatic(inst->memData);
LSQSenderState *state = new LSQSenderState;
@ -661,16 +665,6 @@ OzoneLWLSQ<Impl>::read(RequestPtr req, T &data, int load_idx)
cpu->lockFlag = true;
}
if (data_pkt->result != Packet::Success) {
DPRINTF(OzoneLSQ, "OzoneLSQ: D-cache miss!\n");
DPRINTF(Activity, "Activity: ld accessing mem miss [sn:%lli]\n",
inst->seqNum);
} else {
DPRINTF(OzoneLSQ, "OzoneLSQ: D-cache hit!\n");
DPRINTF(Activity, "Activity: ld accessing mem hit [sn:%lli]\n",
inst->seqNum);
}
return NoFault;
}

View file

@ -57,7 +57,7 @@ template<class Impl>
const char *
OzoneLWLSQ<Impl>::WritebackEvent::description()
{
return "Store writeback event";
return "Store writeback";
}
template <class Impl>
@ -587,7 +587,10 @@ OzoneLWLSQ<Impl>::writebackStores()
memcpy(inst->memData, (uint8_t *)&(*sq_it).data,
req->getSize());
PacketPtr data_pkt = new Packet(req, Packet::WriteReq, Packet::Broadcast);
MemCmd command =
req->isSwap() ? MemCmd::SwapReq :
(req->isLocked() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
PacketPtr data_pkt = new Packet(req, command, Packet::Broadcast);
data_pkt->dataStatic(inst->memData);
LSQSenderState *state = new LSQSenderState;
@ -853,24 +856,6 @@ OzoneLWLSQ<Impl>::storePostSend(PacketPtr pkt, DynInstPtr &inst)
}
#endif
}
if (pkt->result != Packet::Success) {
DPRINTF(OzoneLSQ,"D-Cache Write Miss!\n");
DPRINTF(Activity, "Active st accessing mem miss [sn:%lli]\n",
inst->seqNum);
//mshrSeqNums.push_back(storeQueue[storeWBIdx].inst->seqNum);
//DPRINTF(OzoneLWLSQ, "Added MSHR. count = %i\n",mshrSeqNums.size());
// @todo: Increment stat here.
} else {
DPRINTF(OzoneLSQ,"D-Cache: Write Hit!\n");
DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
inst->seqNum);
}
}
template <class Impl>

View file

@ -47,5 +47,5 @@ EndQuiesceEvent::process()
const char*
EndQuiesceEvent::description()
{
return "End Quiesce Event.";
return "End Quiesce";
}

View file

@ -57,7 +57,7 @@ AtomicSimpleCPU::TickEvent::process()
const char *
AtomicSimpleCPU::TickEvent::description()
{
return "AtomicSimpleCPU tick event";
return "AtomicSimpleCPU tick";
}
Port *
@ -148,23 +148,9 @@ AtomicSimpleCPU::AtomicSimpleCPU(Params *p)
icachePort.snoopRangeSent = false;
dcachePort.snoopRangeSent = false;
ifetch_req = new Request();
ifetch_req->setThreadContext(p->cpu_id, 0); // Add thread ID if we add MT
ifetch_pkt = new Packet(ifetch_req, MemCmd::ReadReq, Packet::Broadcast);
ifetch_pkt->dataStatic(&inst);
data_read_req = new Request();
data_read_req->setThreadContext(p->cpu_id, 0); // Add thread ID here too
data_read_pkt = new Packet(data_read_req, MemCmd::ReadReq,
Packet::Broadcast);
data_read_pkt->dataStatic(&dataReg);
data_write_req = new Request();
data_write_req->setThreadContext(p->cpu_id, 0); // Add thread ID here too
data_write_pkt = new Packet(data_write_req, MemCmd::WriteReq,
Packet::Broadcast);
data_swap_pkt = new Packet(data_write_req, MemCmd::SwapReq,
Packet::Broadcast);
ifetch_req.setThreadContext(p->cpu_id, 0); // Add thread ID if we add MT
data_read_req.setThreadContext(p->cpu_id, 0); // Add thread ID here too
data_write_req.setThreadContext(p->cpu_id, 0); // Add thread ID here too
}
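// (descriptive note) the preallocated ifetch/data packets are gone;
// read(), write(), and tick() below now build a short-lived Packet on
// the stack per access, so only the Request members remain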
@ -282,9 +268,7 @@ Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
// use the CPU's statically allocated read request and packet objects
Request *req = data_read_req;
PacketPtr pkt = data_read_pkt;
Request *req = &data_read_req;
req->setVirt(0, addr, sizeof(T), flags, thread->readPC());
if (traceData) {
@ -296,19 +280,18 @@ AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
// Now do the access.
if (fault == NoFault) {
pkt->reinitFromRequest();
Packet pkt =
Packet(req,
req->isLocked() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
Packet::Broadcast);
pkt.dataStatic(&data);
if (req->isMmapedIpr())
dcache_latency = TheISA::handleIprRead(thread->getTC(),pkt);
dcache_latency = TheISA::handleIprRead(thread->getTC(), &pkt);
else
dcache_latency = dcachePort.sendAtomic(pkt);
dcache_latency = dcachePort.sendAtomic(&pkt);
dcache_access = true;
#if !defined(NDEBUG)
if (pkt->result != Packet::Success)
panic("Unable to find responder for address pa = %#X va = %#X\n",
pkt->req->getPaddr(), pkt->req->getVaddr());
#endif
data = pkt->get<T>();
assert(!pkt.isError());
if (req->isLocked()) {
TheISA::handleLockedRead(thread, req);
@ -378,16 +361,9 @@ Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
// use the CPU's statically allocated write request and packet objects
Request *req = data_write_req;
PacketPtr pkt;
Request *req = &data_write_req;
req->setVirt(0, addr, sizeof(T), flags, thread->readPC());
if (req->isSwap())
pkt = data_swap_pkt;
else
pkt = data_write_pkt;
if (traceData) {
traceData->setAddr(addr);
}
@ -397,40 +373,40 @@ AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
// Now do the access.
if (fault == NoFault) {
MemCmd cmd = MemCmd::WriteReq; // default
bool do_access = true; // flag to suppress cache access
if (req->isLocked()) {
cmd = MemCmd::StoreCondReq;
do_access = TheISA::handleLockedWrite(thread, req);
} else if (req->isSwap()) {
cmd = MemCmd::SwapReq;
if (req->isCondSwap()) {
assert(res);
req->setExtraData(*res);
}
}
if (req->isCondSwap()) {
assert(res);
req->setExtraData(*res);
}
if (do_access) {
pkt->reinitFromRequest();
pkt->dataStatic(&data);
Packet pkt = Packet(req, cmd, Packet::Broadcast);
pkt.dataStatic(&data);
if (req->isMmapedIpr()) {
dcache_latency = TheISA::handleIprWrite(thread->getTC(), pkt);
dcache_latency = TheISA::handleIprWrite(thread->getTC(), &pkt);
} else {
data = htog(data);
dcache_latency = dcachePort.sendAtomic(pkt);
dcache_latency = dcachePort.sendAtomic(&pkt);
}
dcache_access = true;
assert(!pkt.isError());
#if !defined(NDEBUG)
if (pkt->result != Packet::Success)
panic("Unable to find responder for address pa = %#X va = %#X\n",
pkt->req->getPaddr(), pkt->req->getVaddr());
#endif
if (req->isSwap()) {
assert(res);
*res = pkt.get<T>();
}
}
if (req->isSwap()) {
assert(res);
*res = pkt->get<T>();
} else if (res) {
if (res && !req->isSwap()) {
*res = req->getExtraData();
}
}
@ -513,7 +489,7 @@ AtomicSimpleCPU::tick()
if (!curStaticInst || !curStaticInst->isDelayedCommit())
checkForInterrupts();
Fault fault = setupFetchRequest(ifetch_req);
Fault fault = setupFetchRequest(&ifetch_req);
if (fault == NoFault) {
Tick icache_latency = 0;
@ -524,9 +500,11 @@ AtomicSimpleCPU::tick()
//if(predecoder.needMoreBytes())
//{
icache_access = true;
ifetch_pkt->reinitFromRequest();
Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
Packet::Broadcast);
ifetch_pkt.dataStatic(&inst);
icache_latency = icachePort.sendAtomic(ifetch_pkt);
icache_latency = icachePort.sendAtomic(&ifetch_pkt);
// ifetch_req is initialized to read the instruction directly
// into the CPU object's inst field.
//}

View file

@ -121,13 +121,9 @@ class AtomicSimpleCPU : public BaseSimpleCPU
};
DcachePort dcachePort;
Request *ifetch_req;
PacketPtr ifetch_pkt;
Request *data_read_req;
PacketPtr data_read_pkt;
Request *data_write_req;
PacketPtr data_write_pkt;
PacketPtr data_swap_pkt;
Request ifetch_req;
Request data_read_req;
Request data_write_req;
bool dcache_access;
Tick dcache_latency;

View file

@ -131,9 +131,6 @@ class BaseSimpleCPU : public BaseCPU
// The predecoder
TheISA::Predecoder predecoder;
// Static data storage
TheISA::LargestRead dataReg;
StaticInstPtr curStaticInst;
StaticInstPtr curMacroStaticInst;

View file

@ -260,7 +260,10 @@ TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
// Now do the access.
if (fault == NoFault) {
PacketPtr pkt =
new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
new Packet(req,
(req->isLocked() ?
MemCmd::LoadLockedReq : MemCmd::ReadReq),
Packet::Broadcast);
pkt->dataDynamic<T>(new T);
if (!dcachePort.sendTiming(pkt)) {
@ -350,23 +353,26 @@ TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
// Now do the access.
if (fault == NoFault) {
assert(dcache_pkt == NULL);
if (req->isSwap())
dcache_pkt = new Packet(req, MemCmd::SwapReq, Packet::Broadcast);
else
dcache_pkt = new Packet(req, MemCmd::WriteReq, Packet::Broadcast);
dcache_pkt->allocate();
dcache_pkt->set(data);
MemCmd cmd = MemCmd::WriteReq; // default
bool do_access = true; // flag to suppress cache access
if (req->isLocked()) {
cmd = MemCmd::StoreCondReq;
do_access = TheISA::handleLockedWrite(thread, req);
} else if (req->isSwap()) {
cmd = MemCmd::SwapReq;
if (req->isCondSwap()) {
assert(res);
req->setExtraData(*res);
}
}
if (req->isCondSwap()) {
assert(res);
req->setExtraData(*res);
}
// Note: need to allocate dcache_pkt even if do_access is
// false, as it's used unconditionally to call completeAcc().
assert(dcache_pkt == NULL);
dcache_pkt = new Packet(req, cmd, Packet::Broadcast);
dcache_pkt->allocate();
dcache_pkt->set(data);
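// (descriptive note) for a store-conditional, do_access is false when
// handleLockedWrite() reports the reservation was lost; the packet
// built above still exists so completeAcc() can observe the outcome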
if (do_access) {
if (!dcachePort.sendTiming(dcache_pkt)) {
@ -501,7 +507,7 @@ TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
// received a response from the icache: execute the received
// instruction
assert(pkt->result == Packet::Success);
assert(!pkt->isError());
assert(_status == IcacheWaitResponse);
_status = Running;
@ -569,7 +575,7 @@ TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
return true;
}
else if (pkt->result == Packet::Nacked) {
else if (pkt->wasNacked()) {
assert(cpu->_status == IcacheWaitResponse);
pkt->reinitNacked();
if (!sendTiming(pkt)) {
@ -600,7 +606,7 @@ TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
// received a response from the dcache: complete the load or store
// instruction
assert(pkt->result == Packet::Success);
assert(!pkt->isError());
assert(_status == DcacheWaitResponse);
_status = Running;
@ -609,7 +615,7 @@ TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
if (pkt->isRead() && pkt->req->isLocked()) {
if (pkt->isRead() && pkt->isLocked()) {
TheISA::handleLockedRead(thread, pkt->req);
}
@ -663,7 +669,7 @@ TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
return true;
}
else if (pkt->result == Packet::Nacked) {
else if (pkt->wasNacked()) {
assert(cpu->_status == DcacheWaitResponse);
pkt->reinitNacked();
if (!sendTiming(pkt)) {

View file

@ -101,7 +101,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
TickEvent(TimingSimpleCPU *_cpu)
:Event(&mainEventQueue), cpu(_cpu) {}
const char *description() { return "Timing CPU clock event"; }
const char *description() { return "Timing CPU tick"; }
void schedule(PacketPtr _pkt, Tick t);
};
@ -127,7 +127,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
ITickEvent(TimingSimpleCPU *_cpu)
: TickEvent(_cpu) {}
void process();
const char *description() { return "Timing CPU clock event"; }
const char *description() { return "Timing CPU icache tick"; }
};
ITickEvent tickEvent;
@ -155,7 +155,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
DTickEvent(TimingSimpleCPU *_cpu)
: TickEvent(_cpu) {}
void process();
const char *description() { return "Timing CPU clock event"; }
const char *description() { return "Timing CPU dcache tick"; }
};
DTickEvent tickEvent;

View file

@ -207,7 +207,7 @@ OptCPU::TickEvent::process()
const char *
OptCPU::TickEvent::description()
{
return "OptCPU tick event";
return "OptCPU tick";
}

View file

@ -148,7 +148,7 @@ TraceCPU::TickEvent::process()
const char *
TraceCPU::TickEvent::description()
{
return "TraceCPU tick event";
return "TraceCPU tick";
}

View file

@ -102,7 +102,6 @@ AlphaConsole::read(PacketPtr pkt)
* machine dependent address swizzle is required?
*/
assert(pkt->result == Packet::Unknown);
assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
Addr daddr = pkt->getAddr() - pioAddr;
@ -130,7 +129,7 @@ AlphaConsole::read(PacketPtr pkt)
/* Old console code read in everything as a 32bit int
* we now break that for better error checking.
*/
pkt->result = Packet::BadAddress;
pkt->setBadAddress();
}
DPRINTF(AlphaConsole, "read: offset=%#x val=%#x\n", daddr,
pkt->get<uint32_t>());
@ -187,17 +186,15 @@ AlphaConsole::read(PacketPtr pkt)
pkt->get<uint64_t>());
break;
default:
pkt->result = Packet::BadAddress;
pkt->setBadAddress();
}
if (pkt->result == Packet::Unknown)
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}
Tick
AlphaConsole::write(PacketPtr pkt)
{
assert(pkt->result == Packet::Unknown);
assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
Addr daddr = pkt->getAddr() - pioAddr;
@ -245,7 +242,7 @@ AlphaConsole::write(PacketPtr pkt)
panic("Unknown 64bit access, %#x\n", daddr);
}
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}

View file

@ -78,7 +78,6 @@ TsunamiCChip::read(PacketPtr pkt)
{
DPRINTF(Tsunami, "read va=%#x size=%d\n", pkt->getAddr(), pkt->getSize());
assert(pkt->result == Packet::Unknown);
assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
Addr regnum = (pkt->getAddr() - pioAddr) >> 6;
@ -181,7 +180,7 @@ TsunamiCChip::read(PacketPtr pkt)
DPRINTF(Tsunami, "Tsunami CChip: read regnum=%#x size=%d data=%lld\n",
regnum, pkt->getSize(), pkt->get<uint64_t>());
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}
@ -365,7 +364,7 @@ TsunamiCChip::write(PacketPtr pkt)
panic("default in cchip read reached, accessing 0x%x\n");
} // switch(regnum)
} // not BIG_TSUNAMI write
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}

View file

@ -461,7 +461,6 @@ TsunamiIO::frequency() const
Tick
TsunamiIO::read(PacketPtr pkt)
{
assert(pkt->result == Packet::Unknown);
assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
Addr daddr = pkt->getAddr() - pioAddr;
@ -520,14 +519,13 @@ TsunamiIO::read(PacketPtr pkt)
} else {
panic("I/O Read - invalid size - va %#x size %d\n", pkt->getAddr(), pkt->getSize());
}
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}
Tick
TsunamiIO::write(PacketPtr pkt)
{
assert(pkt->result == Packet::Unknown);
assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
Addr daddr = pkt->getAddr() - pioAddr;
@ -600,7 +598,7 @@ TsunamiIO::write(PacketPtr pkt)
panic("I/O Write - va%#x size %d data %#x\n", pkt->getAddr(), pkt->getSize(), pkt->get<uint8_t>());
}
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}

View file

@ -71,7 +71,6 @@ TsunamiPChip::TsunamiPChip(Params *p)
Tick
TsunamiPChip::read(PacketPtr pkt)
{
assert(pkt->result == Packet::Unknown);
assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
pkt->allocate();
@ -145,7 +144,7 @@ TsunamiPChip::read(PacketPtr pkt)
default:
panic("Default in PChip Read reached reading 0x%x\n", daddr);
}
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}
@ -153,7 +152,6 @@ TsunamiPChip::read(PacketPtr pkt)
Tick
TsunamiPChip::write(PacketPtr pkt)
{
assert(pkt->result == Packet::Unknown);
assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
Addr daddr = (pkt->getAddr() - pioAddr) >> 6;
@ -224,7 +222,7 @@ TsunamiPChip::write(PacketPtr pkt)
} // uint64_t
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}

View file

@ -89,7 +89,7 @@ class EtherTap : public EtherInt
TxEvent(EtherTap *_tap)
: Event(&mainEventQueue), tap(_tap) {}
void process() { tap->retransmit(); }
virtual const char *description() { return "retransmit event"; }
virtual const char *description() { return "EtherTap retransmit"; }
};
friend class TxEvent;

View file

@ -271,7 +271,7 @@ IGbE::read(PacketPtr pkt)
pkt->set<uint32_t>(0);
};
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}
@ -543,7 +543,7 @@ IGbE::write(PacketPtr pkt)
panic("Write request to unknown register number: %#x\n", daddr);
};
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}

View file

@ -232,8 +232,10 @@ Tick
IdeController::readConfig(PacketPtr pkt)
{
int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
if (offset < PCI_DEVICE_SPECIFIC)
return PciDev::readConfig(pkt);
if (offset < PCI_DEVICE_SPECIFIC) {
return PciDev::readConfig(pkt);
}
assert(offset >= IDE_CTRL_CONF_START && (offset + 1) <= IDE_CTRL_CONF_END);
pkt->allocate();
@ -295,9 +297,8 @@ IdeController::readConfig(PacketPtr pkt)
default:
panic("invalid access size(?) for PCI configspace!\n");
}
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return configDelay;
}
@ -361,6 +362,7 @@ IdeController::writeConfig(PacketPtr pkt)
default:
panic("invalid access size(?) for PCI configspace!\n");
}
pkt->makeAtomicResponse();
}
/* Trap command register writes and enable IO/BM as appropriate as well as
@ -403,7 +405,6 @@ IdeController::writeConfig(PacketPtr pkt)
bm_enabled = false;
break;
}
pkt->result = Packet::Success;
return configDelay;
}
@ -423,7 +424,7 @@ IdeController::read(PacketPtr pkt)
parseAddr(pkt->getAddr(), offset, channel, reg_type);
if (!io_enabled) {
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}
@ -490,7 +491,7 @@ IdeController::read(PacketPtr pkt)
DPRINTF(IdeCtrl, "read from offset: %#x size: %#x data: %#x\n",
offset, pkt->getSize(), pkt->get<uint32_t>());
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}
@ -506,7 +507,7 @@ IdeController::write(PacketPtr pkt)
parseAddr(pkt->getAddr(), offset, channel, reg_type);
if (!io_enabled) {
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
DPRINTF(IdeCtrl, "io not enabled\n");
return pioDelay;
}
@ -514,7 +515,7 @@ IdeController::write(PacketPtr pkt)
switch (reg_type) {
case BMI_BLOCK:
if (!bm_enabled) {
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}
@ -673,7 +674,7 @@ IdeController::write(PacketPtr pkt)
offset, pkt->getSize(), pkt->get<uint32_t>());
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}

View file

@ -100,9 +100,7 @@ DmaPort::DmaPort(DmaDevice *dev, System *s)
bool
DmaPort::recvTiming(PacketPtr pkt)
{
if (pkt->result == Packet::Nacked) {
if (pkt->wasNacked()) {
DPRINTF(DMA, "Received nacked %s addr %#x\n",
pkt->cmdString(), pkt->getAddr());

View file

@ -56,7 +56,6 @@ IsaFake::IsaFake(Params *p)
Tick
IsaFake::read(PacketPtr pkt)
{
assert(pkt->result == Packet::Unknown);
if (params()->warnAccess != "")
warn("Device %s accessed by read to address %#x size=%d\n",
@ -64,7 +63,7 @@ IsaFake::read(PacketPtr pkt)
if (params()->retBadAddr) {
DPRINTF(Tsunami, "read to bad address va=%#x size=%d\n",
pkt->getAddr(), pkt->getSize());
pkt->result = Packet::BadAddress;
pkt->setBadAddress();
} else {
assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
DPRINTF(Tsunami, "read va=%#x size=%d\n",
@ -85,7 +84,7 @@ IsaFake::read(PacketPtr pkt)
default:
panic("invalid access size!\n");
}
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
}
return pioDelay;
}
@ -117,7 +116,7 @@ IsaFake::write(PacketPtr pkt)
if (params()->retBadAddr) {
DPRINTF(Tsunami, "write to bad address va=%#x size=%d \n",
pkt->getAddr(), pkt->getSize());
pkt->result = Packet::BadAddress;
pkt->setBadAddress();
} else {
DPRINTF(Tsunami, "write - va=%#x size=%d \n",
pkt->getAddr(), pkt->getSize());
@ -140,7 +139,7 @@ IsaFake::write(PacketPtr pkt)
panic("invalid access size!\n");
}
}
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
}
return pioDelay;
}

View file

@ -487,7 +487,7 @@ NSGigE::writeConfig(PacketPtr pkt)
ioEnable = false;
break;
}
pkt->result = Packet::Success;
return configDelay;
}
@ -519,7 +519,7 @@ NSGigE::read(PacketPtr pkt)
// doesn't actually DEPEND upon their values
// MIB are just hardware stats keepers
pkt->set<uint32_t>(0);
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
} else if (daddr > 0x3FC)
panic("Something is messed up!\n");
@ -715,7 +715,7 @@ NSGigE::read(PacketPtr pkt)
DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
daddr, reg, reg);
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}
@ -1122,7 +1122,7 @@ NSGigE::write(PacketPtr pkt)
} else {
panic("Invalid Request Size");
}
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}

View file

@ -54,7 +54,6 @@ PciConfigAll::PciConfigAll(Params *p)
Tick
PciConfigAll::read(PacketPtr pkt)
{
assert(pkt->result == Packet::Unknown);
pkt->allocate();
@ -74,14 +73,13 @@ PciConfigAll::read(PacketPtr pkt)
default:
panic("invalid access size(?) for PCI configspace!\n");
}
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return params()->pio_delay;
}
Tick
PciConfigAll::write(PacketPtr pkt)
{
assert(pkt->result == Packet::Unknown);
panic("Attempting to write to config space on non-existant device\n");
M5_DUMMY_RETURN
}

View file

@ -68,7 +68,6 @@ PciDev::PciConfigPort::PciConfigPort(PciDev *dev, int busid, int devid,
Tick
PciDev::PciConfigPort::recvAtomic(PacketPtr pkt)
{
assert(pkt->result == Packet::Unknown);
assert(pkt->getAddr() >= configAddr &&
pkt->getAddr() < configAddr + PCI_CONFIG_SIZE);
return pkt->isRead() ? device->readConfig(pkt) : device->writeConfig(pkt);
@ -156,7 +155,7 @@ PciDev::readConfig(PacketPtr pkt)
default:
panic("invalid access size(?) for PCI configspace!\n");
}
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return configDelay;
}
@ -283,9 +282,8 @@ PciDev::writeConfig(PacketPtr pkt)
default:
panic("invalid access size(?) for PCI configspace!\n");
}
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return configDelay;
}
void

View file

@ -74,7 +74,6 @@ DumbTOD::DumbTOD(Params *p)
Tick
DumbTOD::read(PacketPtr pkt)
{
assert(pkt->result == Packet::Unknown);
assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
assert(pkt->getSize() == 8);
@ -82,7 +81,7 @@ DumbTOD::read(PacketPtr pkt)
pkt->set(todTime);
todTime += 1000;
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}

View file

@ -72,7 +72,6 @@ Iob::Iob(Params *p)
Tick
Iob::read(PacketPtr pkt)
{
assert(pkt->result == Packet::Unknown);
if (pkt->getAddr() >= iobManAddr && pkt->getAddr() < iobManAddr + iobManSize)
readIob(pkt);
@ -81,7 +80,7 @@ Iob::read(PacketPtr pkt)
else
panic("Invalid address reached Iob\n");
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}
@ -176,7 +175,7 @@ Iob::write(PacketPtr pkt)
panic("Invalid address reached Iob\n");
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}

View file

@ -61,7 +61,6 @@ MmDisk::read(PacketPtr pkt)
uint32_t d32;
uint64_t d64;
assert(pkt->result == Packet::Unknown);
assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
accessAddr = pkt->getAddr() - pioAddr;
@ -101,7 +100,7 @@ MmDisk::read(PacketPtr pkt)
panic("Invalid access size\n");
}
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}
@ -115,7 +114,6 @@ MmDisk::write(PacketPtr pkt)
uint32_t d32;
uint64_t d64;
assert(pkt->result == Packet::Unknown);
assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
accessAddr = pkt->getAddr() - pioAddr;
@ -157,7 +155,7 @@ MmDisk::write(PacketPtr pkt)
panic("Invalid access size\n");
}
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}

View file

@ -58,7 +58,7 @@ Uart8250::IntrEvent::IntrEvent(Uart8250 *u, int bit)
const char *
Uart8250::IntrEvent::description()
{
return "uart interrupt delay event";
return "uart interrupt delay";
}
void
@ -111,7 +111,6 @@ Uart8250::Uart8250(Params *p)
Tick
Uart8250::read(PacketPtr pkt)
{
assert(pkt->result == Packet::Unknown);
assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
assert(pkt->getSize() == 1);
@ -186,7 +185,7 @@ Uart8250::read(PacketPtr pkt)
/* uint32_t d32 = *data;
DPRINTF(Uart, "Register read to register %#x returned %#x\n", daddr, d32);
*/
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}
@ -194,7 +193,6 @@ Tick
Uart8250::write(PacketPtr pkt)
{
assert(pkt->result == Packet::Unknown);
assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
assert(pkt->getSize() == 1);
@ -272,7 +270,7 @@ Uart8250::write(PacketPtr pkt)
panic("Tried to access a UART port that doesn't exist\n");
break;
}
pkt->result = Packet::Success;
pkt->makeAtomicResponse();
return pioDelay;
}

View file

@ -112,10 +112,6 @@ Bridge::BridgePort::reqQueueFull()
bool
Bridge::BridgePort::recvTiming(PacketPtr pkt)
{
if (!(pkt->flags & SNOOP_COMMIT))
return true;
DPRINTF(BusBridge, "recvTiming: src %d dest %d addr 0x%x\n",
pkt->getSrc(), pkt->getDest(), pkt->getAddr());
@ -125,14 +121,13 @@ Bridge::BridgePort::recvTiming(PacketPtr pkt)
otherPort->sendQueue.size(), otherPort->queuedRequests,
otherPort->outstandingResponses);
if (pkt->isRequest() && otherPort->reqQueueFull() && pkt->result !=
Packet::Nacked) {
if (pkt->isRequest() && otherPort->reqQueueFull() && !pkt->wasNacked()) {
DPRINTF(BusBridge, "Remote queue full, nacking\n");
nackRequest(pkt);
return true;
}
if (pkt->needsResponse() && pkt->result != Packet::Nacked)
if (pkt->needsResponse() && !pkt->wasNacked())
if (respQueueFull()) {
DPRINTF(BusBridge, "Local queue full, no space for response, nacking\n");
DPRINTF(BusBridge, "queue size: %d outreq: %d outstanding resp: %d\n",
@ -153,7 +148,7 @@ void
Bridge::BridgePort::nackRequest(PacketPtr pkt)
{
// Nack the packet
pkt->result = Packet::Nacked;
pkt->setNacked();
pkt->setDest(pkt->getSrc());
//put it on the list to send
@ -198,7 +193,7 @@ Bridge::BridgePort::nackRequest(PacketPtr pkt)
void
Bridge::BridgePort::queueForSendTiming(PacketPtr pkt)
{
if (pkt->isResponse() || pkt->result == Packet::Nacked) {
if (pkt->isResponse() || pkt->wasNacked()) {
// This is a response for a request we forwarded earlier. The
// corresponding PacketBuffer should be stored in the packet's
// senderState field.
@ -210,7 +205,7 @@ Bridge::BridgePort::queueForSendTiming(PacketPtr pkt)
// Check if this packet was expecting a response and it's a nacked
// packet, in which case we will never be seeing it
if (buf->expectResponse && pkt->result == Packet::Nacked)
if (buf->expectResponse && pkt->wasNacked())
--outstandingResponses;
DPRINTF(BusBridge, "response, new dest %d\n", pkt->getDest());
@ -218,7 +213,7 @@ Bridge::BridgePort::queueForSendTiming(PacketPtr pkt)
}
if (pkt->isRequest() && pkt->result != Packet::Nacked) {
if (pkt->isRequest() && !pkt->wasNacked()) {
++queuedRequests;
}
@ -248,11 +243,9 @@ Bridge::BridgePort::trySend()
PacketPtr pkt = buf->pkt;
pkt->flags &= ~SNOOP_COMMIT; // Clear it if it was set
// Ugly! @todo When multilevel coherence works this will be removed
if (pkt->cmd == MemCmd::WriteInvalidateReq && fixPartialWrite &&
pkt->result != Packet::Nacked) {
!pkt->wasNacked()) {
PacketPtr funcPkt = new Packet(pkt->req, MemCmd::WriteReq,
Packet::Broadcast);
funcPkt->dataStatic(pkt->getPtr<uint8_t>());
@ -265,7 +258,7 @@ Bridge::BridgePort::trySend()
buf->origSrc, pkt->getDest(), pkt->getAddr());
bool wasReq = pkt->isRequest();
bool wasNacked = pkt->result == Packet::Nacked;
bool wasNacked = pkt->wasNacked();
if (sendTiming(pkt)) {
// send successful
@ -340,17 +333,14 @@ void
Bridge::BridgePort::recvFunctional(PacketPtr pkt)
{
std::list<PacketBuffer*>::iterator i;
bool pktContinue = true;
for (i = sendQueue.begin(); i != sendQueue.end(); ++i) {
if (pkt->intersect((*i)->pkt)) {
pktContinue &= fixPacket(pkt, (*i)->pkt);
}
if (pkt->checkFunctional((*i)->pkt))
return;
}
if (pktContinue) {
otherPort->sendFunctional(pkt);
}
// fall through if pkt still not satisfied
otherPort->sendFunctional(pkt);
}
/** Function called by the port when the bus is receiving a status change.*/
@ -365,6 +355,8 @@ Bridge::BridgePort::getDeviceAddressRanges(AddrRangeList &resp,
bool &snoop)
{
otherPort->getPeerAddressRanges(resp, snoop);
// we don't allow snooping across bridges
snoop = false;
}
BEGIN_DECLARE_SIM_OBJECT_PARAMS(Bridge)

View file

@ -86,7 +86,7 @@ class Bridge : public MemObject
expectResponse(_pkt->needsResponse() && !nack)
{
if (!pkt->isResponse() && !nack && pkt->result != Packet::Nacked)
if (!pkt->isResponse() && !nack && !pkt->wasNacked())
pkt->senderState = this;
}
@ -146,7 +146,7 @@ class Bridge : public MemObject
virtual void process() { port->trySend(); }
virtual const char *description() { return "bridge send event"; }
virtual const char *description() { return "bridge send"; }
};
SendEvent sendEvent;

View file

@ -33,7 +33,7 @@
* Definition of a bus object.
*/
#include <algorithm>
#include <limits>
#include "base/misc.hh"
@ -172,7 +172,7 @@ void Bus::occupyBus(PacketPtr pkt)
bool
Bus::recvTiming(PacketPtr pkt)
{
Port *port;
int port_id;
DPRINTF(Bus, "recvTiming: packet src %d dest %d addr 0x%x cmd %s\n",
pkt->getSrc(), pkt->getDest(), pkt->getAddr(), pkt->cmdString());
@ -183,8 +183,10 @@ Bus::recvTiming(PacketPtr pkt)
// If the bus is busy, or other devices are in line ahead of the current
// one, put this device on the retry list.
if (tickNextIdle > curTick ||
(retryList.size() && (!inRetry || pktPort != retryList.front()))) {
if (!pkt->isExpressSnoop() &&
(tickNextIdle > curTick ||
(retryList.size() && (!inRetry || pktPort != retryList.front()))))
{
addToRetryList(pktPort);
DPRINTF(Bus, "recvTiming: Bus is busy, returning false\n");
return false;
@ -195,43 +197,30 @@ Bus::recvTiming(PacketPtr pkt)
// Make sure to clear the snoop commit flag so it doesn't think an
// access has been handled twice.
if (dest == Packet::Broadcast) {
port = findPort(pkt->getAddr(), pkt->getSrc());
pkt->flags &= ~SNOOP_COMMIT;
if (timingSnoop(pkt, port ? port : interfaces[pkt->getSrc()])) {
bool success;
port_id = findPort(pkt->getAddr());
timingSnoop(pkt, interfaces[port_id]);
pkt->flags |= SNOOP_COMMIT;
success = timingSnoop(pkt, port ? port : interfaces[pkt->getSrc()]);
assert(success);
if (pkt->flags & SATISFIED) {
//Cache-Cache transfer occurring
if (inRetry) {
retryList.front()->onRetryList(false);
retryList.pop_front();
inRetry = false;
}
occupyBus(pkt);
DPRINTF(Bus, "recvTiming: Packet sucessfully sent\n");
return true;
if (pkt->memInhibitAsserted()) {
//Cache-Cache transfer occurring
if (inRetry) {
retryList.front()->onRetryList(false);
retryList.pop_front();
inRetry = false;
}
} else {
//Snoop didn't succeed
DPRINTF(Bus, "Adding1 a retry to RETRY list %d\n",
pktPort->getId());
addToRetryList(pktPort);
return false;
occupyBus(pkt);
DPRINTF(Bus, "recvTiming: Packet sucessfully sent\n");
return true;
}
} else {
assert(dest >= 0 && dest < maxId);
assert(dest != pkt->getSrc()); // catch infinite loops
port = interfaces[dest];
port_id = dest;
}
occupyBus(pkt);
if (port) {
if (port->sendTiming(pkt)) {
if (port_id != pkt->getSrc()) {
if (interfaces[port_id]->sendTiming(pkt)) {
// Packet was successfully sent. Return true.
// Also take care of retries
if (inRetry) {
@ -291,8 +280,8 @@ Bus::recvRetry(int id)
}
}
Port *
Bus::findPort(Addr addr, int id)
int
Bus::findPort(Addr addr)
{
/* An interval tree would be a better way to do this. --ali. */
int dest_id = -1;
@ -307,7 +296,7 @@ Bus::findPort(Addr addr, int id)
iter != defaultRange.end(); iter++) {
if (*iter == addr) {
DPRINTF(Bus, " found addr %#llx on default\n", addr);
return defaultPort;
return defaultId;
}
}
@ -318,39 +307,11 @@ Bus::findPort(Addr addr, int id)
DPRINTF(Bus, "Unable to find destination for addr: %#llx, will use "
"default port", addr);
return defaultPort;
return defaultId;
}
}
// we shouldn't be sending this back to where it came from
// do the snoop access and then we should terminate
// the cyclical call.
if (dest_id == id)
return 0;
return interfaces[dest_id];
}
Tick
Bus::atomicSnoop(PacketPtr pkt, Port *responder)
{
Tick response_time = 0;
for (SnoopIter s_iter = snoopPorts.begin();
s_iter != snoopPorts.end();
s_iter++) {
BusPort *p = *s_iter;
if (p != responder && p->getId() != pkt->getSrc()) {
Tick response = p->sendAtomic(pkt);
if (response) {
assert(!response_time); //Multiple responders
response_time = response;
}
}
}
return response_time;
return dest_id;
}
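The "interval tree" remark above suggests replacing findPort()'s linear scan with an ordered range lookup; a hypothetical sketch of that idea (not part of this commit), using std::map:
    #include <map>
    #include <stdint.h>
    #include <utility>
    typedef uint64_t Addr;
    // Range table keyed by range start; value = (range end, port id).
    typedef std::map<Addr, std::pair<Addr, int> > RangeMap;
    // O(log n) replacement for the linear scan: find the last range that
    // starts at or below addr, then check that addr falls inside it.
    int findPortFast(const RangeMap &ranges, Addr addr, int defaultId)
    {
        RangeMap::const_iterator i = ranges.upper_bound(addr);
        if (i == ranges.begin())
            return defaultId;   // no range starts at or below addr
        --i;
        if (addr <= i->second.first)
            return i->second.second;
        return defaultId;
    }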
void
@ -360,6 +321,8 @@ Bus::functionalSnoop(PacketPtr pkt, Port *responder)
// id after each
int src_id = pkt->getSrc();
assert(pkt->isRequest()); // hasn't already been satisfied
for (SnoopIter s_iter = snoopPorts.begin();
s_iter != snoopPorts.end();
s_iter++) {
@ -367,7 +330,7 @@ Bus::functionalSnoop(PacketPtr pkt, Port *responder)
if (p != responder && p->getId() != src_id) {
p->sendFunctional(pkt);
}
if (pkt->result == Packet::Success) {
if (pkt->isResponse()) {
break;
}
pkt->setSrc(src_id);
@ -400,21 +363,64 @@ Bus::recvAtomic(PacketPtr pkt)
DPRINTF(Bus, "recvAtomic: packet src %d dest %d addr 0x%x cmd %s\n",
pkt->getSrc(), pkt->getDest(), pkt->getAddr(), pkt->cmdString());
assert(pkt->getDest() == Packet::Broadcast);
pkt->flags |= SNOOP_COMMIT;
assert(pkt->isRequest());
// Assume one bus cycle in order to get through. This may have
// some clock skew issues yet again...
pkt->finishTime = curTick + clock;
// Variables for recording original command and snoop response (if
// any)... if a snooper responds, we will need to restore
// original command so that additional snoops can take place
// properly
MemCmd orig_cmd = pkt->cmd;
MemCmd snoop_response_cmd = MemCmd::InvalidCmd;
Tick snoop_response_latency = 0;
int orig_src = pkt->getSrc();
Port *port = findPort(pkt->getAddr(), pkt->getSrc());
Tick snoopTime = atomicSnoop(pkt, port ? port : interfaces[pkt->getSrc()]);
int target_port_id = findPort(pkt->getAddr());
Port *target_port = interfaces[target_port_id];
if (snoopTime)
return snoopTime; //Snoop satisfies it
else if (port)
return port->sendAtomic(pkt);
else
return 0;
SnoopIter s_end = snoopPorts.end();
for (SnoopIter s_iter = snoopPorts.begin(); s_iter != s_end; s_iter++) {
BusPort *p = *s_iter;
// same port should not have both target addresses and snooping
assert(p != target_port);
if (p->getId() != pkt->getSrc()) {
Tick latency = p->sendAtomic(pkt);
if (pkt->isResponse()) {
// response from snoop agent
assert(pkt->cmd != orig_cmd);
assert(pkt->memInhibitAsserted());
// should only happen once
assert(snoop_response_cmd == MemCmd::InvalidCmd);
// save response state
snoop_response_cmd = pkt->cmd;
snoop_response_latency = latency;
// restore original packet state for remaining snoopers
pkt->cmd = orig_cmd;
pkt->setSrc(orig_src);
pkt->setDest(Packet::Broadcast);
}
}
}
Tick response_latency = 0;
// we can get requests sent up from the memory side of the bus for
// snooping... don't send them back down!
if (target_port_id != pkt->getSrc()) {
response_latency = target_port->sendAtomic(pkt);
}
// if we got a response from a snooper, restore it here
if (snoop_response_cmd != MemCmd::InvalidCmd) {
// no one else should have responded
assert(!pkt->isResponse());
assert(pkt->cmd == orig_cmd);
pkt->cmd = snoop_response_cmd;
response_latency = snoop_response_latency;
}
// why do we have this packet field and the return value both???
pkt->finishTime = curTick + response_latency;
return response_latency;
}
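The save/restore logic above maintains an invariant worth spelling out: at most one snooper may turn the packet into a response, and the packet must be restored to its original request form before each remaining snooper sees it. A stripped-down, compilable sketch of the same pattern (hypothetical types, for illustration only):
    #include <cassert>
    #include <cstddef>
    #include <vector>
    enum Cmd { ReadReq, ReadResp, InvalidCmd };
    struct Pkt { Cmd cmd; int src; };
    struct Snooper {
        virtual long sendAtomic(Pkt &p) = 0;
        virtual ~Snooper() {}
    };
    // Snoop every port; at most one may respond, and the packet is restored
    // to its original request form before the next snooper sees it.
    long snoopAll(std::vector<Snooper *> &snoopers, Pkt &pkt)
    {
        Cmd orig_cmd = pkt.cmd;
        int orig_src = pkt.src;
        Cmd resp_cmd = InvalidCmd;
        long resp_lat = 0;
        for (std::size_t i = 0; i < snoopers.size(); ++i) {
            long lat = snoopers[i]->sendAtomic(pkt);
            if (pkt.cmd != orig_cmd) {          // this snooper responded
                assert(resp_cmd == InvalidCmd); // only one responder allowed
                resp_cmd = pkt.cmd;
                resp_lat = lat;
                pkt.cmd = orig_cmd;             // restore for remaining snoopers
                pkt.src = orig_src;
            }
        }
        if (resp_cmd != InvalidCmd)
            pkt.cmd = resp_cmd;                 // reinstate the saved response
        return resp_lat;
    }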
/** Function called by the port when the bus is receiving a Functional
@ -425,13 +431,13 @@ Bus::recvFunctional(PacketPtr pkt)
DPRINTF(Bus, "recvFunctional: packet src %d dest %d addr 0x%x cmd %s\n",
pkt->getSrc(), pkt->getDest(), pkt->getAddr(), pkt->cmdString());
assert(pkt->getDest() == Packet::Broadcast);
pkt->flags |= SNOOP_COMMIT;
Port* port = findPort(pkt->getAddr(), pkt->getSrc());
functionalSnoop(pkt, port ? port : interfaces[pkt->getSrc()]);
int port_id = findPort(pkt->getAddr());
Port *port = interfaces[port_id];
functionalSnoop(pkt, port);
// If the snooping found what we were looking for, we're done.
if (pkt->result != Packet::Success && port) {
// If the snooping hasn't found what we were looking for, keep going.
if (!pkt->isResponse() && port_id != pkt->getSrc()) {
port->sendFunctional(pkt);
}
}

View file

@ -176,14 +176,9 @@ class Bus : public MemObject
/** Find which port connected to this bus (if any) should be given a packet
* with this address.
* @param addr Address to find port for.
* @param id Id of the port this packet was received from (to prevent
* loops)
* @return pointer to port that the packet should be sent out of.
* @return id of port that the packet should be sent out of.
*/
Port *findPort(Addr addr, int id);
/** Snoop all relevant ports atomically. */
Tick atomicSnoop(PacketPtr pkt, Port* responder);
int findPort(Addr addr);
/** Snoop all relevant ports functionally. */
void functionalSnoop(PacketPtr pkt, Port *responder);

View file

@ -51,7 +51,6 @@ class BaseCache(MemObject):
mshrs = Param.Int("number of MSHRs (max outstanding requests)")
prioritizeRequests = Param.Bool(False,
"always service demand misses first")
protocol = Param.CoherenceProtocol(NULL, "coherence protocol to use")
repl = Param.Repl(NULL, "replacement policy")
size = Param.MemorySize("capacity in bytes")
split = Param.Bool(False, "whether or not this cache is split")

View file

@ -40,28 +40,35 @@
using namespace std;
BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
bool _isCpuSide)
: Port(_name, _cache), cache(_cache), isCpuSide(_isCpuSide)
BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache)
: SimpleTimingPort(_name, _cache), cache(_cache), otherPort(NULL),
blocked(false), mustSendRetry(false)
{
}
BaseCache::BaseCache(const std::string &name, Params &params)
: MemObject(name),
mshrQueue(params.numMSHRs, 4, MSHRQueue_MSHRs),
writeBuffer(params.numWriteBuffers, params.numMSHRs+1000,
MSHRQueue_WriteBuffer),
blkSize(params.blkSize),
hitLatency(params.hitLatency),
numTarget(params.numTargets),
blocked(0),
noTargetMSHR(NULL),
missCount(params.maxMisses),
drainEvent(NULL)
{
blocked = false;
waitingOnRetry = false;
//Start ports at null; if more than one is created we should panic
//cpuSidePort = NULL;
//memSidePort = NULL;
}
void
BaseCache::CachePort::recvStatusChange(Port::Status status)
{
cache->recvStatusChange(status, isCpuSide);
}
void
BaseCache::CachePort::getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
{
cache->getAddressRanges(resp, snoop, isCpuSide);
if (status == Port::RangeChange) {
otherPort->sendStatusChange(Port::RangeChange);
}
}
int
@ -70,136 +77,25 @@ BaseCache::CachePort::deviceBlockSize()
return cache->getBlockSize();
}
bool
BaseCache::CachePort::checkFunctional(PacketPtr pkt)
{
//Check storage here first
list<PacketPtr>::iterator i = drainList.begin();
list<PacketPtr>::iterator iend = drainList.end();
bool notDone = true;
while (i != iend && notDone) {
PacketPtr target = *i;
// If the target contains data, and it overlaps the
// probed request, need to update data
if (target->intersect(pkt)) {
DPRINTF(Cache, "Functional %s access to blk_addr %x intersects a drain\n",
pkt->cmdString(), pkt->getAddr() & ~(cache->getBlockSize() - 1));
notDone = fixPacket(pkt, target);
}
i++;
}
//Also check the response not yet ready to be on the list
std::list<std::pair<Tick,PacketPtr> >::iterator j = transmitList.begin();
std::list<std::pair<Tick,PacketPtr> >::iterator jend = transmitList.end();
while (j != jend && notDone) {
PacketPtr target = j->second;
// If the target contains data, and it overlaps the
// probed request, need to update data
if (target->intersect(pkt)) {
DPRINTF(Cache, "Functional %s access to blk_addr %x intersects a response\n",
pkt->cmdString(), pkt->getAddr() & ~(cache->getBlockSize() - 1));
notDone = fixDelayedResponsePacket(pkt, target);
}
j++;
}
return notDone;
}
void
BaseCache::CachePort::checkAndSendFunctional(PacketPtr pkt)
{
bool notDone = checkFunctional(pkt);
if (notDone)
checkFunctional(pkt);
if (!pkt->isResponse())
sendFunctional(pkt);
}
void
BaseCache::CachePort::recvRetry()
bool
BaseCache::CachePort::recvRetryCommon()
{
PacketPtr pkt;
assert(waitingOnRetry);
if (!drainList.empty()) {
DPRINTF(CachePort, "%s attempting to send a retry for response (%i waiting)\n"
, name(), drainList.size());
//We have some responses to drain first
pkt = drainList.front();
drainList.pop_front();
if (sendTiming(pkt)) {
DPRINTF(CachePort, "%s sucessful in sending a retry for"
"response (%i still waiting)\n", name(), drainList.size());
if (!drainList.empty() ||
!isCpuSide && cache->doMasterRequest() ||
isCpuSide && cache->doSlaveRequest()) {
DPRINTF(CachePort, "%s has more responses/requests\n", name());
new BaseCache::RequestEvent(this, curTick + 1);
}
waitingOnRetry = false;
}
else {
drainList.push_front(pkt);
}
// Check if we're done draining once this list is empty
if (drainList.empty())
cache->checkDrain();
}
else if (!isCpuSide)
{
DPRINTF(CachePort, "%s attempting to send a retry for MSHR\n", name());
if (!cache->doMasterRequest()) {
//This can happen if I am the owner of a block and see an upgrade
//while the block was in my WB Buffers. I just remove the
//wb and de-assert the masterRequest
waitingOnRetry = false;
return;
}
pkt = cache->getPacket();
MSHR* mshr = (MSHR*) pkt->senderState;
//Copy the packet, it may be modified/destroyed elsewhere
PacketPtr copyPkt = new Packet(*pkt);
copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
mshr->pkt = copyPkt;
bool success = sendTiming(pkt);
DPRINTF(CachePort, "Address %x was %s in sending the timing request\n",
pkt->getAddr(), success ? "succesful" : "unsuccesful");
waitingOnRetry = !success;
if (waitingOnRetry) {
DPRINTF(CachePort, "%s now waiting on a retry\n", name());
}
cache->sendResult(pkt, mshr, success);
if (success && cache->doMasterRequest())
{
DPRINTF(CachePort, "%s has more requests\n", name());
//Still more to issue, rerequest in 1 cycle
new BaseCache::RequestEvent(this, curTick + 1);
}
}
else
{
assert(cache->doSlaveRequest());
//pkt = cache->getCoherencePacket();
//We save the packet, no reordering on CSHRS
pkt = cache->getCoherencePacket();
MSHR* cshr = (MSHR*)pkt->senderState;
bool success = sendTiming(pkt);
cache->sendCoherenceResult(pkt, cshr, success);
waitingOnRetry = !success;
if (success && cache->doSlaveRequest())
{
DPRINTF(CachePort, "%s has more requests\n", name());
//Still more to issue, rerequest in 1 cycle
new BaseCache::RequestEvent(this, curTick + 1);
}
}
if (waitingOnRetry) DPRINTF(CachePort, "%s STILL Waiting on retry\n", name());
else DPRINTF(CachePort, "%s no longer waiting on retry\n", name());
return;
waitingOnRetry = false;
return false;
}
void
BaseCache::CachePort::setBlocked()
{
@ -220,147 +116,12 @@ BaseCache::CachePort::clearBlocked()
{
DPRINTF(Cache, "Cache Sending Retry\n");
mustSendRetry = false;
sendRetry();
SendRetryEvent *ev = new SendRetryEvent(this, true);
// @TODO: need to find a better time (next bus cycle?)
ev->schedule(curTick + 1);
}
}
BaseCache::RequestEvent::RequestEvent(CachePort *_cachePort, Tick when)
: Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort)
{
this->setFlags(AutoDelete);
schedule(when);
}
void
BaseCache::RequestEvent::process()
{
if (cachePort->waitingOnRetry) return;
//We have some responses to drain first
if (!cachePort->drainList.empty()) {
DPRINTF(CachePort, "%s trying to drain a response\n", cachePort->name());
if (cachePort->sendTiming(cachePort->drainList.front())) {
DPRINTF(CachePort, "%s drains a response succesfully\n", cachePort->name());
cachePort->drainList.pop_front();
if (!cachePort->drainList.empty() ||
!cachePort->isCpuSide && cachePort->cache->doMasterRequest() ||
cachePort->isCpuSide && cachePort->cache->doSlaveRequest()) {
DPRINTF(CachePort, "%s still has outstanding bus reqs\n", cachePort->name());
this->schedule(curTick + 1);
}
}
else {
cachePort->waitingOnRetry = true;
DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
}
}
else if (!cachePort->isCpuSide)
{ //MSHR
DPRINTF(CachePort, "%s trying to send a MSHR request\n", cachePort->name());
if (!cachePort->cache->doMasterRequest()) {
//This can happen if I am the owner of a block and see an upgrade
//while the block was in my WB Buffers. I just remove the
//wb and de-assert the masterRequest
return;
}
PacketPtr pkt = cachePort->cache->getPacket();
MSHR* mshr = (MSHR*) pkt->senderState;
//Copy the packet, it may be modified/destroyed elsewhere
PacketPtr copyPkt = new Packet(*pkt);
copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
mshr->pkt = copyPkt;
bool success = cachePort->sendTiming(pkt);
DPRINTF(CachePort, "Address %x was %s in sending the timing request\n",
pkt->getAddr(), success ? "succesful" : "unsuccesful");
cachePort->waitingOnRetry = !success;
if (cachePort->waitingOnRetry) {
DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
}
cachePort->cache->sendResult(pkt, mshr, success);
if (success && cachePort->cache->doMasterRequest())
{
DPRINTF(CachePort, "%s still more MSHR requests to send\n",
cachePort->name());
//Still more to issue, rerequest in 1 cycle
this->schedule(curTick+1);
}
}
else
{
//CSHR
assert(cachePort->cache->doSlaveRequest());
PacketPtr pkt = cachePort->cache->getCoherencePacket();
MSHR* cshr = (MSHR*) pkt->senderState;
bool success = cachePort->sendTiming(pkt);
cachePort->cache->sendCoherenceResult(pkt, cshr, success);
cachePort->waitingOnRetry = !success;
if (cachePort->waitingOnRetry)
DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
if (success && cachePort->cache->doSlaveRequest())
{
DPRINTF(CachePort, "%s still more CSHR requests to send\n",
cachePort->name());
//Still more to issue, rerequest in 1 cycle
this->schedule(curTick+1);
}
}
}
const char *
BaseCache::RequestEvent::description()
{
return "Cache request event";
}
BaseCache::ResponseEvent::ResponseEvent(CachePort *_cachePort)
: Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort)
{
}
void
BaseCache::ResponseEvent::process()
{
assert(cachePort->transmitList.size());
assert(cachePort->transmitList.front().first <= curTick);
PacketPtr pkt = cachePort->transmitList.front().second;
cachePort->transmitList.pop_front();
if (!cachePort->transmitList.empty()) {
Tick time = cachePort->transmitList.front().first;
schedule(time <= curTick ? curTick+1 : time);
}
if (pkt->flags & NACKED_LINE)
pkt->result = Packet::Nacked;
else
pkt->result = Packet::Success;
pkt->makeTimingResponse();
DPRINTF(CachePort, "%s attempting to send a response\n", cachePort->name());
if (!cachePort->drainList.empty() || cachePort->waitingOnRetry) {
//Already have a list, just append
cachePort->drainList.push_back(pkt);
DPRINTF(CachePort, "%s appending response onto drain list\n", cachePort->name());
}
else if (!cachePort->sendTiming(pkt)) {
//It failed, save it to list of drain events
DPRINTF(CachePort, "%s now waiting for a retry\n", cachePort->name());
cachePort->drainList.push_back(pkt);
cachePort->waitingOnRetry = true;
}
// Check if we're done draining once this list is empty
if (cachePort->drainList.empty() && cachePort->transmitList.empty())
cachePort->cache->checkDrain();
}
const char *
BaseCache::ResponseEvent::description()
{
return "Cache response event";
}
void
BaseCache::init()
@ -370,6 +131,7 @@ BaseCache::init()
cpuSidePort->sendStatusChange(Port::RangeChange);
}
void
BaseCache::regStats()
{
@ -388,20 +150,29 @@ BaseCache::regStats()
;
}
// These macros make it easier to sum the right subset of commands and
// to change the subset of commands that are considered "demand" vs
// "non-demand"
#define SUM_DEMAND(s) \
(s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::ReadExReq])
// should writebacks be included here? prior code was inconsistent...
#define SUM_NON_DEMAND(s) \
(s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq])
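As a concrete illustration of what these macros sum (plain arrays standing in for the Stats vectors, with assumed command indices):
    #include <stdint.h>
    enum Cmd { ReadReq, WriteReq, ReadExReq, SoftPFReq, HardPFReq, NUM_CMDS };
    #define SUM_DEMAND(s) (s[ReadReq] + s[WriteReq] + s[ReadExReq])
    #define SUM_NON_DEMAND(s) (s[SoftPFReq] + s[HardPFReq])
    // demand = CPU reads/writes; overall adds prefetch traffic on top,
    // mirroring the demandHits/overallHits formulas below.
    uint64_t demand(const uint64_t c[NUM_CMDS]) { return SUM_DEMAND(c); }
    uint64_t overall(const uint64_t c[NUM_CMDS])
    {
        return demand(c) + SUM_NON_DEMAND(c);
    }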
demandHits
.name(name() + ".demand_hits")
.desc("number of demand (read+write) hits")
.flags(total)
;
demandHits = hits[MemCmd::ReadReq] + hits[MemCmd::WriteReq];
demandHits = SUM_DEMAND(hits);
overallHits
.name(name() + ".overall_hits")
.desc("number of overall hits")
.flags(total)
;
overallHits = demandHits + hits[MemCmd::SoftPFReq] + hits[MemCmd::HardPFReq]
+ hits[MemCmd::Writeback];
overallHits = demandHits + SUM_NON_DEMAND(hits);
// Miss statistics
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
@ -421,15 +192,14 @@ BaseCache::regStats()
.desc("number of demand (read+write) misses")
.flags(total)
;
demandMisses = misses[MemCmd::ReadReq] + misses[MemCmd::WriteReq];
demandMisses = SUM_DEMAND(misses);
overallMisses
.name(name() + ".overall_misses")
.desc("number of overall misses")
.flags(total)
;
overallMisses = demandMisses + misses[MemCmd::SoftPFReq] +
misses[MemCmd::HardPFReq] + misses[MemCmd::Writeback];
overallMisses = demandMisses + SUM_NON_DEMAND(misses);
// Miss latency statistics
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
@ -449,15 +219,14 @@ BaseCache::regStats()
.desc("number of demand (read+write) miss cycles")
.flags(total)
;
demandMissLatency = missLatency[MemCmd::ReadReq] + missLatency[MemCmd::WriteReq];
demandMissLatency = SUM_DEMAND(missLatency);
overallMissLatency
.name(name() + ".overall_miss_latency")
.desc("number of overall miss cycles")
.flags(total)
;
overallMissLatency = demandMissLatency + missLatency[MemCmd::SoftPFReq] +
missLatency[MemCmd::HardPFReq];
overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
// access formulas
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
@ -580,17 +349,284 @@ BaseCache::regStats()
.desc("number of cache copies performed")
;
writebacks
.init(maxThreadsPerCPU)
.name(name() + ".writebacks")
.desc("number of writebacks")
.flags(total)
;
// MSHR statistics
// MSHR hit statistics
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
mshr_hits[access_idx]
.init(maxThreadsPerCPU)
.name(name() + "." + cstr + "_mshr_hits")
.desc("number of " + cstr + " MSHR hits")
.flags(total | nozero | nonan)
;
}
demandMshrHits
.name(name() + ".demand_mshr_hits")
.desc("number of demand (read+write) MSHR hits")
.flags(total)
;
demandMshrHits = SUM_DEMAND(mshr_hits);
overallMshrHits
.name(name() + ".overall_mshr_hits")
.desc("number of overall MSHR hits")
.flags(total)
;
overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
// MSHR miss statistics
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
mshr_misses[access_idx]
.init(maxThreadsPerCPU)
.name(name() + "." + cstr + "_mshr_misses")
.desc("number of " + cstr + " MSHR misses")
.flags(total | nozero | nonan)
;
}
demandMshrMisses
.name(name() + ".demand_mshr_misses")
.desc("number of demand (read+write) MSHR misses")
.flags(total)
;
demandMshrMisses = SUM_DEMAND(mshr_misses);
overallMshrMisses
.name(name() + ".overall_mshr_misses")
.desc("number of overall MSHR misses")
.flags(total)
;
overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
// MSHR miss latency statistics
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
mshr_miss_latency[access_idx]
.init(maxThreadsPerCPU)
.name(name() + "." + cstr + "_mshr_miss_latency")
.desc("number of " + cstr + " MSHR miss cycles")
.flags(total | nozero | nonan)
;
}
demandMshrMissLatency
.name(name() + ".demand_mshr_miss_latency")
.desc("number of demand (read+write) MSHR miss cycles")
.flags(total)
;
demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
overallMshrMissLatency
.name(name() + ".overall_mshr_miss_latency")
.desc("number of overall MSHR miss cycles")
.flags(total)
;
overallMshrMissLatency =
demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
// MSHR uncacheable statistics
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
mshr_uncacheable[access_idx]
.init(maxThreadsPerCPU)
.name(name() + "." + cstr + "_mshr_uncacheable")
.desc("number of " + cstr + " MSHR uncacheable")
.flags(total | nozero | nonan)
;
}
overallMshrUncacheable
.name(name() + ".overall_mshr_uncacheable_misses")
.desc("number of overall MSHR uncacheable misses")
.flags(total)
;
overallMshrUncacheable =
SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
// MSHR miss latency statistics
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
mshr_uncacheable_lat[access_idx]
.init(maxThreadsPerCPU)
.name(name() + "." + cstr + "_mshr_uncacheable_latency")
.desc("number of " + cstr + " MSHR uncacheable cycles")
.flags(total | nozero | nonan)
;
}
overallMshrUncacheableLatency
.name(name() + ".overall_mshr_uncacheable_latency")
.desc("number of overall MSHR uncacheable cycles")
.flags(total)
;
overallMshrUncacheableLatency =
SUM_DEMAND(mshr_uncacheable_lat) +
SUM_NON_DEMAND(mshr_uncacheable_lat);
#if 0
// MSHR access formulas
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
mshrAccesses[access_idx]
.name(name() + "." + cstr + "_mshr_accesses")
.desc("number of " + cstr + " mshr accesses(hits+misses)")
.flags(total | nozero | nonan)
;
mshrAccesses[access_idx] =
mshr_hits[access_idx] + mshr_misses[access_idx]
+ mshr_uncacheable[access_idx];
}
demandMshrAccesses
.name(name() + ".demand_mshr_accesses")
.desc("number of demand (read+write) mshr accesses")
.flags(total | nozero | nonan)
;
demandMshrAccesses = demandMshrHits + demandMshrMisses;
overallMshrAccesses
.name(name() + ".overall_mshr_accesses")
.desc("number of overall (read+write) mshr accesses")
.flags(total | nozero | nonan)
;
overallMshrAccesses = overallMshrHits + overallMshrMisses
+ overallMshrUncacheable;
#endif
// MSHR miss rate formulas
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
mshrMissRate[access_idx]
.name(name() + "." + cstr + "_mshr_miss_rate")
.desc("mshr miss rate for " + cstr + " accesses")
.flags(total | nozero | nonan)
;
mshrMissRate[access_idx] =
mshr_misses[access_idx] / accesses[access_idx];
}
demandMshrMissRate
.name(name() + ".demand_mshr_miss_rate")
.desc("mshr miss rate for demand accesses")
.flags(total)
;
demandMshrMissRate = demandMshrMisses / demandAccesses;
overallMshrMissRate
.name(name() + ".overall_mshr_miss_rate")
.desc("mshr miss rate for overall accesses")
.flags(total)
;
overallMshrMissRate = overallMshrMisses / overallAccesses;
// mshrMiss latency formulas
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
avgMshrMissLatency[access_idx]
.name(name() + "." + cstr + "_avg_mshr_miss_latency")
.desc("average " + cstr + " mshr miss latency")
.flags(total | nozero | nonan)
;
avgMshrMissLatency[access_idx] =
mshr_miss_latency[access_idx] / mshr_misses[access_idx];
}
demandAvgMshrMissLatency
.name(name() + ".demand_avg_mshr_miss_latency")
.desc("average overall mshr miss latency")
.flags(total)
;
demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
overallAvgMshrMissLatency
.name(name() + ".overall_avg_mshr_miss_latency")
.desc("average overall mshr miss latency")
.flags(total)
;
overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
// mshrUncacheable latency formulas
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
avgMshrUncacheableLatency[access_idx]
.name(name() + "." + cstr + "_avg_mshr_uncacheable_latency")
.desc("average " + cstr + " mshr uncacheable latency")
.flags(total | nozero | nonan)
;
avgMshrUncacheableLatency[access_idx] =
mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
}
overallAvgMshrUncacheableLatency
.name(name() + ".overall_avg_mshr_uncacheable_latency")
.desc("average overall mshr uncacheable latency")
.flags(total)
;
overallAvgMshrUncacheableLatency = overallMshrUncacheableLatency / overallMshrUncacheable;
mshr_cap_events
.init(maxThreadsPerCPU)
.name(name() + ".mshr_cap_events")
.desc("number of times MSHR cap was activated")
.flags(total)
;
//software prefetching stats
soft_prefetch_mshr_full
.init(maxThreadsPerCPU)
.name(name() + ".soft_prefetch_mshr_full")
.desc("number of mshr full events for SW prefetching instrutions")
.flags(total)
;
mshr_no_allocate_misses
.name(name() +".no_allocate_misses")
.desc("Number of misses that were no-allocate")
;
}
unsigned int
BaseCache::drain(Event *de)
{
int count = memSidePort->drain(de) + cpuSidePort->drain(de);
// Set status
if (!canDrain()) {
if (count != 0) {
drainEvent = de;
changeState(SimObject::Draining);
return 1;
return count;
}
changeState(SimObject::Drained);

View file

@ -26,6 +26,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
* Steve Reinhardt
* Ron Dreslinski
*/
/**
@ -39,37 +41,19 @@
#include <vector>
#include <string>
#include <list>
#include <algorithm>
#include <inttypes.h>
#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "mem/cache/miss/mshr_queue.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
#include "mem/tport.hh"
#include "mem/request.hh"
#include "sim/eventq.hh"
/**
* Reasons for Caches to be Blocked.
*/
enum BlockedCause{
Blocked_NoMSHRs,
Blocked_NoTargets,
Blocked_NoWBBuffers,
Blocked_Coherence,
NUM_BLOCKED_CAUSES
};
/**
* Reasons for cache to request a bus.
*/
enum RequestCause{
Request_MSHR,
Request_WB,
Request_Coherence,
Request_PF
};
#include "sim/sim_exit.hh"
class MSHR;
/**
@ -77,91 +61,133 @@ class MSHR;
*/
class BaseCache : public MemObject
{
class CachePort : public Port
/**
* Indexes to enumerate the MSHR queues.
*/
enum MSHRQueueIndex {
MSHRQueue_MSHRs,
MSHRQueue_WriteBuffer
};
/**
* Reasons for caches to be blocked.
*/
enum BlockedCause {
Blocked_NoMSHRs = MSHRQueue_MSHRs,
Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
Blocked_NoTargets,
NUM_BLOCKED_CAUSES
};
public:
/**
* Reasons for cache to request a bus.
*/
enum RequestCause {
Request_MSHR = MSHRQueue_MSHRs,
Request_WB = MSHRQueue_WriteBuffer,
Request_PF,
NUM_REQUEST_CAUSES
};
private:
class CachePort : public SimpleTimingPort
{
public:
BaseCache *cache;
protected:
CachePort(const std::string &_name, BaseCache *_cache, bool _isCpuSide);
virtual void recvStatusChange(Status status);
CachePort(const std::string &_name, BaseCache *_cache);
virtual void getDeviceAddressRanges(AddrRangeList &resp,
bool &snoop);
virtual void recvStatusChange(Status status);
virtual int deviceBlockSize();
virtual void recvRetry();
bool recvRetryCommon();
typedef EventWrapper<Port, &Port::sendRetry>
SendRetryEvent;
public:
void setOtherPort(CachePort *_otherPort) { otherPort = _otherPort; }
void setBlocked();
void clearBlocked();
bool checkFunctional(PacketPtr pkt);
void checkAndSendFunctional(PacketPtr pkt);
bool canDrain() { return drainList.empty() && transmitList.empty(); }
CachePort *otherPort;
bool blocked;
bool mustSendRetry;
bool isCpuSide;
void requestBus(RequestCause cause, Tick time)
{
DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause);
if (!waitingOnRetry) {
schedSendEvent(time);
}
}
bool waitingOnRetry;
std::list<PacketPtr> drainList;
std::list<std::pair<Tick,PacketPtr> > transmitList;
};
struct RequestEvent : public Event
{
CachePort *cachePort;
RequestEvent(CachePort *_cachePort, Tick when);
void process();
const char *description();
};
struct ResponseEvent : public Event
{
CachePort *cachePort;
ResponseEvent(CachePort *_cachePort);
void process();
const char *description();
void respond(PacketPtr pkt, Tick time) {
schedSendTiming(pkt, time);
}
};
public: //Made public so coherence can get at it.
CachePort *cpuSidePort;
CachePort *memSidePort;
ResponseEvent *sendEvent;
ResponseEvent *memSendEvent;
protected:
private:
void recvStatusChange(Port::Status status, bool isCpuSide)
/** Miss status registers */
MSHRQueue mshrQueue;
/** Write/writeback buffer */
MSHRQueue writeBuffer;
MSHR *allocateBufferInternal(MSHRQueue *mq, Addr addr, int size,
PacketPtr pkt, Tick time, bool requestBus)
{
if (status == Port::RangeChange){
if (!isCpuSide) {
cpuSidePort->sendStatusChange(Port::RangeChange);
}
else {
memSidePort->sendStatusChange(Port::RangeChange);
}
MSHR *mshr = mq->allocate(addr, size, pkt, time, order++);
if (mq->isFull()) {
setBlocked((BlockedCause)mq->index);
}
if (requestBus) {
requestMemSideBus((RequestCause)mq->index, time);
}
return mshr;
}
void markInServiceInternal(MSHR *mshr)
{
MSHRQueue *mq = mshr->queue;
bool wasFull = mq->isFull();
mq->markInService(mshr);
if (wasFull && !mq->isFull()) {
clearBlocked((BlockedCause)mq->index);
}
}
virtual PacketPtr getPacket() = 0;
/** Block size of this cache */
const int blkSize;
virtual PacketPtr getCoherencePacket() = 0;
/**
* The latency of a hit in this device.
*/
int hitLatency;
virtual void sendResult(PacketPtr &pkt, MSHR* mshr, bool success) = 0;
/** The number of targets for each MSHR. */
const int numTarget;
virtual void sendCoherenceResult(PacketPtr &pkt, MSHR* mshr, bool success) = 0;
/** Increasing order number assigned to each incoming request. */
uint64_t order;
/**
* Bit vector of the blocking reasons for the access path.
@ -169,29 +195,11 @@ class BaseCache : public MemObject
*/
uint8_t blocked;
/**
* Bit vector for the blocking reasons for the snoop path.
* @sa #BlockedCause
*/
uint8_t blockedSnoop;
/**
* Bit vector for the outstanding requests for the master interface.
*/
uint8_t masterRequests;
/**
* Bit vector for the outstanding requests for the slave interface.
*/
uint8_t slaveRequests;
protected:
/** Stores time the cache blocked for statistics. */
Tick blockedCycle;
/** Block size of this cache */
const int blkSize;
/** Pointer to the MSHR that has no targets. */
MSHR *noTargetMSHR;
/** The number of misses to trigger an exit event. */
Counter missCount;
@ -265,6 +273,73 @@ class BaseCache : public MemObject
/** The number of cache copies performed. */
Stats::Scalar<> cacheCopies;
/** Number of blocks written back per thread. */
Stats::Vector<> writebacks;
/** Number of misses that hit in the MSHRs per command and thread. */
Stats::Vector<> mshr_hits[MemCmd::NUM_MEM_CMDS];
/** Demand misses that hit in the MSHRs. */
Stats::Formula demandMshrHits;
/** Total number of misses that hit in the MSHRs. */
Stats::Formula overallMshrHits;
/** Number of misses that miss in the MSHRs, per command and thread. */
Stats::Vector<> mshr_misses[MemCmd::NUM_MEM_CMDS];
/** Demand misses that miss in the MSHRs. */
Stats::Formula demandMshrMisses;
/** Total number of misses that miss in the MSHRs. */
Stats::Formula overallMshrMisses;
/** Number of uncacheable accesses handled by the MSHRs, per command and thread. */
Stats::Vector<> mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
/** Total number of uncacheable accesses handled by the MSHRs. */
Stats::Formula overallMshrUncacheable;
/** Total cycle latency of each MSHR miss, per command and thread. */
Stats::Vector<> mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
/** Total cycle latency of demand MSHR misses. */
Stats::Formula demandMshrMissLatency;
/** Total cycle latency of overall MSHR misses. */
Stats::Formula overallMshrMissLatency;
/** Total cycle latency of each uncacheable MSHR access, per command and thread. */
Stats::Vector<> mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
/** Total cycle latency of overall uncacheable MSHR accesses. */
Stats::Formula overallMshrUncacheableLatency;
/** The total number of MSHR accesses per command and thread. */
Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
/** The total number of demand MSHR accesses. */
Stats::Formula demandMshrAccesses;
/** The total number of MSHR accesses. */
Stats::Formula overallMshrAccesses;
/** The miss rate in the MSHRs per command and thread. */
Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
/** The demand miss rate in the MSHRs. */
Stats::Formula demandMshrMissRate;
/** The overall miss rate in the MSHRs. */
Stats::Formula overallMshrMissRate;
/** The average latency of an MSHR miss, per command and thread. */
Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
/** The average latency of a demand MSHR miss. */
Stats::Formula demandAvgMshrMissLatency;
/** The average overall latency of an MSHR miss. */
Stats::Formula overallAvgMshrMissLatency;
/** The average latency of an uncacheable MSHR access, per command and thread. */
Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
/** The average overall latency of an uncacheable MSHR access. */
Stats::Formula overallAvgMshrUncacheableLatency;
/** The number of times a thread hit its MSHR cap. */
Stats::Vector<> mshr_cap_events;
/** The number of times software prefetches caused the MSHR to block. */
Stats::Vector<> soft_prefetch_mshr_full;
Stats::Scalar<> mshr_no_allocate_misses;
/**
* @}
*/
@ -279,12 +354,13 @@ class BaseCache : public MemObject
class Params
{
public:
/** List of address ranges of this cache. */
std::vector<Range<Addr> > addrRange;
/** The hit latency for this cache. */
int hitLatency;
/** The block size of this cache. */
int blkSize;
int numMSHRs;
int numTargets;
int numWriteBuffers;
/**
* The maximum number of misses this cache should handle before
* ending the simulation.
@ -294,10 +370,12 @@ class BaseCache : public MemObject
/**
* Construct an instance of this parameter class.
*/
Params(std::vector<Range<Addr> > addr_range,
int hit_latency, int _blkSize, Counter max_misses)
: addrRange(addr_range), hitLatency(hit_latency), blkSize(_blkSize),
maxMisses(max_misses)
Params(int _hitLatency, int _blkSize,
int _numMSHRs, int _numTargets, int _numWriteBuffers,
Counter _maxMisses)
: hitLatency(_hitLatency), blkSize(_blkSize),
numMSHRs(_numMSHRs), numTargets(_numTargets),
numWriteBuffers(_numWriteBuffers), maxMisses(_maxMisses)
{
}
};
@ -309,20 +387,10 @@ class BaseCache : public MemObject
* of this cache.
* @param params The parameter object for this BaseCache.
*/
BaseCache(const std::string &name, Params &params)
: MemObject(name), blocked(0), blockedSnoop(0), masterRequests(0),
slaveRequests(0), blkSize(params.blkSize),
missCount(params.maxMisses), drainEvent(NULL)
{
//Start ports at null if more than one is created we should panic
cpuSidePort = NULL;
memSidePort = NULL;
}
BaseCache(const std::string &name, Params &params);
~BaseCache()
{
delete sendEvent;
delete memSendEvent;
}
virtual void init();
@ -336,6 +404,35 @@ class BaseCache : public MemObject
return blkSize;
}
Addr blockAlign(Addr addr) const { return (addr & ~(blkSize - 1)); }
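For example, with a 64-byte block, blockAlign() simply clears the low six address bits; a quick standalone check (assuming blkSize is a power of two):
    #include <cassert>
    #include <stdint.h>
    typedef uint64_t Addr;
    int main()
    {
        const int blkSize = 64;                 // must be a power of two
        Addr addr = 0x1234;
        Addr aligned = addr & ~(Addr)(blkSize - 1);
        assert(aligned == 0x1200);              // low six bits cleared
        return 0;
    }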
MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool requestBus)
{
return allocateBufferInternal(&mshrQueue,
blockAlign(pkt->getAddr()), blkSize,
pkt, time, requestBus);
}
MSHR *allocateBuffer(PacketPtr pkt, Tick time, bool requestBus)
{
MSHRQueue *mq = NULL;
if (pkt->isWrite() && !pkt->isRead()) {
/**
* @todo Add write merging here.
*/
mq = &writeBuffer;
} else {
mq = &mshrQueue;
}
return allocateBufferInternal(mq, pkt->getAddr(), pkt->getSize(),
pkt, time, requestBus);
}
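The write-buffer-versus-MSHR selection above can be isolated as a small, testable rule; a toy sketch (hypothetical types, mirroring only the queue-selection logic):
    #include <cassert>
    // Toy packet type standing in for the real Packet class.
    struct Pkt { bool write; bool read; };
    enum Queue { MSHRQueueSel, WriteBufferSel };
    Queue selectQueue(const Pkt &pkt)
    {
        // Pure writes (e.g. writebacks) go to the write buffer; anything
        // that needs data back (reads, read-modify-write) needs an MSHR.
        if (pkt.write && !pkt.read)
            return WriteBufferSel;
        return MSHRQueueSel;
    }
    int main()
    {
        Pkt writeback = { true, false };
        Pkt read = { false, true };
        Pkt swap = { true, true };      // atomic read-modify-write
        assert(selectQueue(writeback) == WriteBufferSel);
        assert(selectQueue(read) == MSHRQueueSel);
        assert(selectQueue(swap) == MSHRQueueSel);
        return 0;
    }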
/**
* Returns true if the cache is blocked for accesses.
*/
@ -344,14 +441,6 @@ class BaseCache : public MemObject
return blocked != 0;
}
/**
* Returns true if the cache is blocked for snoops.
*/
bool isBlockedForSnoop()
{
return blockedSnoop != 0;
}
/**
* Marks the access path of the cache as blocked for the given cause. This
* also sets the blocked flag in the slave interface.
@ -363,32 +452,10 @@ class BaseCache : public MemObject
if (blocked == 0) {
blocked_causes[cause]++;
blockedCycle = curTick;
cpuSidePort->setBlocked();
}
int old_state = blocked;
if (!(blocked & flag)) {
//Wasn't already blocked for this cause
blocked |= flag;
DPRINTF(Cache,"Blocking for cause %s\n", cause);
if (!old_state)
cpuSidePort->setBlocked();
}
}
/**
* Marks the snoop path of the cache as blocked for the given cause. This
* also sets the blocked flag in the master interface.
* @param cause The reason to block the snoop path.
*/
void setBlockedForSnoop(BlockedCause cause)
{
uint8_t flag = 1 << cause;
uint8_t old_state = blockedSnoop;
if (!(blockedSnoop & flag)) {
//Wasn't already blocked for this cause
blockedSnoop |= flag;
if (!old_state)
memSidePort->setBlocked();
}
blocked |= flag;
DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
}
/**
@ -401,33 +468,18 @@ class BaseCache : public MemObject
void clearBlocked(BlockedCause cause)
{
uint8_t flag = 1 << cause;
DPRINTF(Cache,"Unblocking for cause %s, causes left=%i\n",
cause, blocked);
if (blocked & flag)
{
blocked &= ~flag;
if (!isBlocked()) {
blocked_cycles[cause] += curTick - blockedCycle;
DPRINTF(Cache,"Unblocking from all causes\n");
cpuSidePort->clearBlocked();
}
}
if (blockedSnoop & flag)
{
blockedSnoop &= ~flag;
if (!isBlockedForSnoop()) {
memSidePort->clearBlocked();
}
blocked &= ~flag;
DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
if (blocked == 0) {
blocked_cycles[cause] += curTick - blockedCycle;
cpuSidePort->clearBlocked();
}
}
/**
* True if the master bus should be requested.
* @return True if there are outstanding requests for the master bus.
*/
bool doMasterRequest()
Tick nextMSHRReadyTime()
{
return masterRequests != 0;
return std::min(mshrQueue.nextMSHRReadyTime(),
writeBuffer.nextMSHRReadyTime());
}
/**
@ -435,269 +487,40 @@ class BaseCache : public MemObject
* @param cause The reason for the request.
* @param time The time to make the request.
*/
void setMasterRequest(RequestCause cause, Tick time)
void requestMemSideBus(RequestCause cause, Tick time)
{
if (!doMasterRequest() && !memSidePort->waitingOnRetry)
{
new RequestEvent(memSidePort, time);
}
uint8_t flag = 1<<cause;
masterRequests |= flag;
memSidePort->requestBus(cause, time);
}
/**
* Clear the master bus request for the given cause.
* @param cause The request reason to clear.
*/
void clearMasterRequest(RequestCause cause)
void deassertMemSideBusRequest(RequestCause cause)
{
uint8_t flag = 1<<cause;
masterRequests &= ~flag;
checkDrain();
}
/**
* Return true if the slave bus should be requested.
* @return True if there are outstanding requests for the slave bus.
*/
bool doSlaveRequest()
{
return slaveRequests != 0;
}
/**
* Request the slave bus for the given reason and time.
* @param cause The reason for the request.
* @param time The time to make the request.
*/
void setSlaveRequest(RequestCause cause, Tick time)
{
if (!doSlaveRequest() && !cpuSidePort->waitingOnRetry)
{
new RequestEvent(cpuSidePort, time);
}
uint8_t flag = 1<<cause;
slaveRequests |= flag;
}
/**
* Clear the slave bus request for the given reason.
* @param cause The request reason to clear.
*/
void clearSlaveRequest(RequestCause cause)
{
uint8_t flag = 1<<cause;
slaveRequests &= ~flag;
checkDrain();
}
/**
* Send a response to the slave interface.
* @param pkt The request being responded to.
* @param time The time the response is ready.
*/
void respond(PacketPtr pkt, Tick time)
{
assert(time >= curTick);
if (pkt->needsResponse()) {
/* CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
reqCpu->schedule(time);
*/
if (cpuSidePort->transmitList.empty()) {
assert(!sendEvent->scheduled());
sendEvent->schedule(time);
cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
(time,pkt));
return;
}
// something is on the list and this belongs at the end
if (time >= cpuSidePort->transmitList.back().first) {
cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
(time,pkt));
return;
}
// Something is on the list and this belongs somewhere else
std::list<std::pair<Tick,PacketPtr> >::iterator i =
cpuSidePort->transmitList.begin();
std::list<std::pair<Tick,PacketPtr> >::iterator end =
cpuSidePort->transmitList.end();
bool done = false;
while (i != end && !done) {
if (time < i->first) {
if (i == cpuSidePort->transmitList.begin()) {
//Inserting at beginning, reschedule
sendEvent->reschedule(time);
}
cpuSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>
(time,pkt));
done = true;
}
i++;
}
}
else {
if (pkt->cmd != MemCmd::UpgradeReq)
{
delete pkt->req;
delete pkt;
}
}
}
/**
* Send a response to the slave interface and calculate miss latency.
* @param pkt The request to respond to.
* @param time The time the response is ready.
*/
void respondToMiss(PacketPtr pkt, Tick time)
{
assert(time >= curTick);
if (!pkt->req->isUncacheable()) {
missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
time - pkt->time;
}
if (pkt->needsResponse()) {
/* CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
reqCpu->schedule(time);
*/
if (cpuSidePort->transmitList.empty()) {
assert(!sendEvent->scheduled());
sendEvent->schedule(time);
cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
(time,pkt));
return;
}
// something is on the list and this belongs at the end
if (time >= cpuSidePort->transmitList.back().first) {
cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
(time,pkt));
return;
}
// Something is on the list and this belongs somewhere else
std::list<std::pair<Tick,PacketPtr> >::iterator i =
cpuSidePort->transmitList.begin();
std::list<std::pair<Tick,PacketPtr> >::iterator end =
cpuSidePort->transmitList.end();
bool done = false;
while (i != end && !done) {
if (time < i->first) {
if (i == cpuSidePort->transmitList.begin()) {
//Inserting at beginning, reschedule
sendEvent->reschedule(time);
}
cpuSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>
(time,pkt));
done = true;
}
i++;
}
}
else {
if (pkt->cmd != MemCmd::UpgradeReq)
{
delete pkt->req;
delete pkt;
}
}
}
/**
* Supplies the data if cache-to-cache transfers are enabled.
* @param pkt The bus transaction to fulfill.
*/
void respondToSnoop(PacketPtr pkt, Tick time)
{
assert(time >= curTick);
assert (pkt->needsResponse());
/* CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
reqMem->schedule(time);
*/
if (memSidePort->transmitList.empty()) {
assert(!memSendEvent->scheduled());
memSendEvent->schedule(time);
memSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
(time,pkt));
return;
}
// something is on the list and this belongs at the end
if (time >= memSidePort->transmitList.back().first) {
memSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
(time,pkt));
return;
}
// Something is on the list and this belongs somewhere else
std::list<std::pair<Tick,PacketPtr> >::iterator i =
memSidePort->transmitList.begin();
std::list<std::pair<Tick,PacketPtr> >::iterator end =
memSidePort->transmitList.end();
bool done = false;
while (i != end && !done) {
if (time < i->first) {
if (i == memSidePort->transmitList.begin()) {
//Inserting at beginning, reschedule
memSendEvent->reschedule(time);
}
memSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>(time,pkt));
done = true;
}
i++;
}
}
/**
* Notification from master interface that an address range changed. Nothing
* to do for a cache.
*/
void rangeChange() {}
void getAddressRanges(AddrRangeList &resp, bool &snoop, bool isCpuSide)
{
if (isCpuSide)
{
bool dummy;
memSidePort->getPeerAddressRanges(resp, dummy);
}
else
{
//This is where snoops get updated
AddrRangeList dummy;
snoop = true;
}
// obsolete!!
assert(false);
// memSidePort->deassertBusRequest(cause);
// checkDrain();
}
virtual unsigned int drain(Event *de);
void checkDrain()
{
if (drainEvent && canDrain()) {
drainEvent->process();
changeState(SimObject::Drained);
// Clear the drain event
drainEvent = NULL;
}
}
bool canDrain()
{
if (doMasterRequest() || doSlaveRequest()) {
return false;
} else if (memSidePort && !memSidePort->canDrain()) {
return false;
} else if (cpuSidePort && !cpuSidePort->canDrain()) {
return false;
}
return true;
}
virtual bool inCache(Addr addr) = 0;
virtual bool inMissQueue(Addr addr) = 0;
void incMissCount(PacketPtr pkt)
{
misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
if (missCount) {
--missCount;
if (missCount == 0)
exitSimLoop("A cache reached the maximum miss count");
}
}
};
#endif //__BASE_CACHE_HH__
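The deleted respond()/respondToMiss()/respondToSnoop() bodies above all hand-rolled the same time-ordered insertion into a port's transmitList; the new code delegates this to SimpleTimingPort::schedSendTiming (see the respond() wrapper earlier in this file). A minimal sketch of what that helper is assumed to do:
    #include <list>
    #include <utility>
    typedef long long Tick;
    struct Packet;
    typedef Packet *PacketPtr;
    // Hypothetical stand-alone version of the time-ordered insert that
    // SimpleTimingPort::schedSendTiming is assumed to perform.
    void schedSendTiming(std::list<std::pair<Tick, PacketPtr> > &transmitList,
                         Tick when, PacketPtr pkt)
    {
        if (transmitList.empty() || when >= transmitList.back().first) {
            // Common case: belongs at (or after) the tail.
            transmitList.push_back(std::make_pair(when, pkt));
            return;
        }
        // Walk to the first entry scheduled later than 'when' and insert
        // before it, keeping the list sorted by send time. The real port
        // would also (re)schedule its send event for the front entry.
        std::list<std::pair<Tick, PacketPtr> >::iterator i =
            transmitList.begin();
        while (i != transmitList.end() && when >= i->first)
            ++i;
        transmitList.insert(i, std::make_pair(when, pkt));
    }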

View file

@ -58,12 +58,6 @@
#include "mem/cache/tags/split_lifo.hh"
#endif
#include "mem/cache/miss/miss_queue.hh"
#include "mem/cache/miss/blocking_buffer.hh"
#include "mem/cache/coherence/uni_coherence.hh"
#include "mem/cache/coherence/simple_coherence.hh"
#include "mem/cache/cache_impl.hh"
// Template Instantiations
@ -71,28 +65,23 @@
#if defined(USE_CACHE_FALRU)
template class Cache<FALRU, SimpleCoherence>;
template class Cache<FALRU, UniCoherence>;
template class Cache<FALRU>;
#endif
#if defined(USE_CACHE_IIC)
template class Cache<IIC, SimpleCoherence>;
template class Cache<IIC, UniCoherence>;
template class Cache<IIC>;
#endif
#if defined(USE_CACHE_LRU)
template class Cache<LRU, SimpleCoherence>;
template class Cache<LRU, UniCoherence>;
template class Cache<LRU>;
#endif
#if defined(USE_CACHE_SPLIT)
template class Cache<Split, SimpleCoherence>;
template class Cache<Split, UniCoherence>;
template class Cache<Split>;
#endif
#if defined(USE_CACHE_SPLIT_LIFO)
template class Cache<SplitLIFO, SimpleCoherence>;
template class Cache<SplitLIFO, UniCoherence>;
template class Cache<SplitLIFO>;
#endif
#endif //DOXYGEN_SHOULD_SKIP_THIS

309
src/mem/cache/cache.hh
View file

@ -28,6 +28,7 @@
* Authors: Erik Hallnor
* Dave Greene
* Steve Reinhardt
* Ron Dreslinski
*/
/**
@ -38,26 +39,23 @@
#ifndef __CACHE_HH__
#define __CACHE_HH__
#include "base/compression/base.hh"
#include "base/misc.hh" // fatal, panic, and warn
#include "cpu/smt.hh" // SMT_MAX_THREADS
#include "mem/cache/base_cache.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/miss_buffer.hh"
#include "mem/cache/miss/mshr.hh"
#include "sim/eventq.hh"
//Forward declaration
class MSHR;
class BasePrefetcher;
/**
* A template-policy based cache. The behavior of the cache can be altered by
* supplying different template policies. TagStore handles all tag and data
* storage @sa TagStore. Buffering handles all misses and writes/writebacks
* @sa MissQueue. Coherence handles all coherence policy details @sa
* UniCoherence, SimpleMultiCoherence.
* storage @sa TagStore.
*/
template <class TagStore, class Coherence>
template <class TagStore>
class Cache : public BaseCache
{
public:
@ -74,15 +72,18 @@ class Cache : public BaseCache
{
public:
CpuSidePort(const std::string &_name,
Cache<TagStore,Coherence> *_cache);
Cache<TagStore> *_cache);
// BaseCache::CachePort just has a BaseCache *; this function
// lets us get back the type info we lost when we stored the
// cache pointer there.
Cache<TagStore,Coherence> *myCache() {
return static_cast<Cache<TagStore,Coherence> *>(cache);
Cache<TagStore> *myCache() {
return static_cast<Cache<TagStore> *>(cache);
}
virtual void getDeviceAddressRanges(AddrRangeList &resp,
bool &snoop);
virtual bool recvTiming(PacketPtr pkt);
virtual Tick recvAtomic(PacketPtr pkt);
@ -94,65 +95,42 @@ class Cache : public BaseCache
{
public:
MemSidePort(const std::string &_name,
Cache<TagStore,Coherence> *_cache);
Cache<TagStore> *_cache);
// BaseCache::CachePort just has a BaseCache *; this function
// lets us get back the type info we lost when we stored the
// cache pointer there.
Cache<TagStore,Coherence> *myCache() {
return static_cast<Cache<TagStore,Coherence> *>(cache);
Cache<TagStore> *myCache() {
return static_cast<Cache<TagStore> *>(cache);
}
void sendPacket();
void processSendEvent();
virtual void getDeviceAddressRanges(AddrRangeList &resp,
bool &snoop);
virtual bool recvTiming(PacketPtr pkt);
virtual void recvRetry();
virtual Tick recvAtomic(PacketPtr pkt);
virtual void recvFunctional(PacketPtr pkt);
typedef EventWrapper<MemSidePort, &MemSidePort::processSendEvent>
SendEvent;
};
/** Tag and data Storage */
TagStore *tags;
/** Miss and Writeback handler */
MissBuffer *missQueue;
/** Coherence protocol. */
Coherence *coherence;
/** Prefetcher */
BasePrefetcher *prefetcher;
/**
* The clock ratio of the outgoing bus.
* Used for calculating critical word first.
*/
int busRatio;
/**
* The bus width in bytes of the outgoing bus.
* Used for calculating critical word first.
*/
int busWidth;
/**
* The latency of a hit in this device.
*/
int hitLatency;
/**
* A permanent memory request used to generate invalidations;
* appended to a target list to invalidate a block.
*/
PacketPtr invalidatePkt;
Request *invalidateReq;
/**
* Policy class for performing compression.
*/
CompressionAlgorithm *compressionAlg;
/**
* The block size of this cache. Set to the value in the Tags object.
*/
const int16_t blkSize;
/** Temporary cache block for occasional transitory use */
BlkType *tempBlock;
/**
* Whether this cache should allocate a block on a line-sized write miss.
@ -161,50 +139,6 @@ class Cache : public BaseCache
const bool prefetchMiss;
/**
* Whether the data can be stored in a compressed form.
*/
const bool storeCompressed;
/**
* Do we need to compress blocks on writebacks (i.e. because
* writeback bus is compressed but storage is not)?
*/
const bool compressOnWriteback;
/**
* The latency of a compression operation.
*/
const int16_t compLatency;
/**
* Should we use an adaptive compression scheme.
*/
const bool adaptiveCompression;
/**
* Do writebacks need to be compressed (i.e. because writeback bus
* is compressed), whether or not they're already compressed for
* storage.
*/
const bool writebackCompressed;
/**
* Compare the internal block data to the fast access block data.
* @param blk The cache block to check.
* @return True if the data is the same.
*/
bool verifyData(BlkType *blk);
/**
* Update the internal data of the block. The data to write is assumed to
* be in the fast access data.
* @param blk The block with the data to update.
* @param writebacks A list to store any generated writebacks.
* @param compress_block True if we should compress this block
*/
void updateData(BlkType *blk, PacketList &writebacks, bool compress_block);
/**
* Handle a replacement for the given request.
* @param blk A pointer to the block, usually NULL
@ -212,7 +146,7 @@ class Cache : public BaseCache
* @param new_state The new state of the block.
* @param writebacks A list to store any generated writebacks.
*/
BlkType* doReplacement(BlkType *blk, PacketPtr &pkt,
BlkType* doReplacement(BlkType *blk, PacketPtr pkt,
CacheBlk::State new_state, PacketList &writebacks);
/**
@ -224,59 +158,39 @@ class Cache : public BaseCache
* @return Pointer to the cache block touched by the request. NULL if it
* was a miss.
*/
BlkType* handleAccess(PacketPtr &pkt, int & lat,
PacketList & writebacks, bool update = true);
bool access(PacketPtr pkt, BlkType *&blk, int &lat);
/**
* Handle the Compare and Swap operation for SPARC.
*/
void cmpAndSwap(BlkType *blk, PacketPtr &pkt);
/**
* Populates a cache block and handles all outstanding requests for the
* satisfied fill request. This version takes an MSHR pointer and uses its
* request to fill the cache block, while responding to its targets.
* @param blk The cache block if it already exists.
* @param mshr The MSHR that contains the fill data and targets to satisfy.
* @param new_state The state of the new cache block.
* @param writebacks List for any writebacks that need to be performed.
* @return Pointer to the new cache block.
*/
BlkType* handleFill(BlkType *blk, MSHR * mshr, CacheBlk::State new_state,
PacketList & writebacks, PacketPtr pkt);
void cmpAndSwap(BlkType *blk, PacketPtr pkt);
/**
* Populates a cache block and handles all outstanding requests for the
* satisfied fill request. This version takes two memory requests. One
* contains the fill data, the other is an optional target to satisfy.
* Used for Cache::probe.
* @param blk The cache block if it already exists.
* @param pkt The memory request with the fill data.
* @param new_state The state of the new cache block.
* @param blk The cache block if it already exists.
* @param writebacks List for any writebacks that need to be performed.
* @param target The memory request to perform after the fill.
* @return Pointer to the new cache block.
*/
BlkType* handleFill(BlkType *blk, PacketPtr &pkt,
CacheBlk::State new_state,
PacketList & writebacks, PacketPtr target = NULL);
BlkType *handleFill(PacketPtr pkt, BlkType *blk,
PacketList &writebacks);
/**
* Sets the blk to the new state and handles the given request.
* @param blk The cache block being snooped.
* @param new_state The new coherence state for the block.
* @param pkt The request to satisfy
*/
void handleSnoop(BlkType *blk, CacheBlk::State new_state,
PacketPtr &pkt);
void satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk);
bool satisfyMSHR(MSHR *mshr, PacketPtr pkt, BlkType *blk);
void doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
bool already_copied);
/**
* Sets the blk to the new state.
* @param blk The cache block being snooped.
* @param new_state The new coherence state for the block.
*/
void handleSnoop(BlkType *blk, CacheBlk::State new_state);
void handleSnoop(PacketPtr pkt, BlkType *blk,
bool is_timing, bool is_deferred);
/**
* Create a writeback request for the given block.
@ -291,44 +205,23 @@ class Cache : public BaseCache
{
public:
TagStore *tags;
MissBuffer *missQueue;
Coherence *coherence;
BaseCache::Params baseParams;
BasePrefetcher*prefetcher;
bool prefetchAccess;
int hitLatency;
CompressionAlgorithm *compressionAlg;
const int16_t blkSize;
const bool doFastWrites;
const bool prefetchMiss;
const bool storeCompressed;
const bool compressOnWriteback;
const int16_t compLatency;
const bool adaptiveCompression;
const bool writebackCompressed;
Params(TagStore *_tags, MissBuffer *mq, Coherence *coh,
Params(TagStore *_tags,
BaseCache::Params params,
BasePrefetcher *_prefetcher,
bool prefetch_access, int hit_latency,
bool do_fast_writes,
bool store_compressed, bool adaptive_compression,
bool writeback_compressed,
CompressionAlgorithm *_compressionAlg, int comp_latency,
bool prefetch_miss)
: tags(_tags), missQueue(mq), coherence(coh),
: tags(_tags),
baseParams(params),
prefetcher(_prefetcher), prefetchAccess(prefetch_access),
hitLatency(hit_latency),
compressionAlg(_compressionAlg),
blkSize(_tags->getBlockSize()),
doFastWrites(do_fast_writes),
prefetchMiss(prefetch_miss),
storeCompressed(store_compressed),
compressOnWriteback(!store_compressed && writeback_compressed),
compLatency(comp_latency),
adaptiveCompression(adaptive_compression),
writebackCompressed(writeback_compressed)
prefetchMiss(prefetch_miss)
{
}
};
@ -339,8 +232,6 @@ class Cache : public BaseCache
virtual Port *getPort(const std::string &if_name, int idx = -1);
virtual void deletePortRefs(Port *p);
virtual void recvStatusChange(Port::Status status, bool isCpuSide);
void regStats();
/**
@ -348,98 +239,90 @@ class Cache : public BaseCache
* @param pkt The request to perform.
* @return The result of the access.
*/
bool access(PacketPtr &pkt);
bool timingAccess(PacketPtr pkt);
/**
* Selects a request to send on the bus.
* @return The memory request to service.
* Performs the access specified by the request.
* @param pkt The request to perform.
* @return The result of the access.
*/
virtual PacketPtr getPacket();
Tick atomicAccess(PacketPtr pkt);
/**
* Was the request sent successfully?
* @param pkt The request.
* @param success True if the request was sent successfully.
* Performs the access specified by the request.
* @param pkt The request to perform.
* @return The result of the access.
*/
virtual void sendResult(PacketPtr &pkt, MSHR* mshr, bool success);
/**
* Was the CSHR request sent successfully?
* @param pkt The request.
* @param success True if the request was sent successfully.
*/
virtual void sendCoherenceResult(PacketPtr &pkt, MSHR* cshr, bool success);
void functionalAccess(PacketPtr pkt, CachePort *otherSidePort);
/**
* Handles a response (cache line fill/write ack) from the bus.
* @param pkt The request being responded to.
*/
void handleResponse(PacketPtr &pkt);
/**
* Selects a coherence message to forward to lower levels of the hierarchy.
* @return The coherence message to forward.
*/
virtual PacketPtr getCoherencePacket();
void handleResponse(PacketPtr pkt);
/**
* Snoops bus transactions to maintain coherence.
* @param pkt The current bus transaction.
*/
void snoop(PacketPtr &pkt);
void snoopTiming(PacketPtr pkt);
void snoopResponse(PacketPtr &pkt);
/**
* Snoop for the provided request in the cache and return the estimated
* time of completion.
* @param pkt The memory request to snoop
* @return The estimated completion time.
*/
Tick snoopAtomic(PacketPtr pkt);
/**
* Squash all requests associated with the specified thread.
* Intended for use by the I-cache.
* @param threadNum The thread to squash.
*/
void squash(int threadNum)
{
missQueue->squash(threadNum);
}
void squash(int threadNum);
/**
* Selects an outstanding request to service.
* @return The request to service, NULL if none found.
*/
PacketPtr getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
bool needsExclusive);
MSHR *getNextMSHR();
PacketPtr getTimingPacket();
/**
* Marks a request as in service (sent on the bus). This can have side
* effects, since storage for no-response commands is deallocated once they
* are successfully sent.
* @param pkt The request that was sent on the bus.
*/
void markInService(MSHR *mshr);
/**
* Perform the given writeback request.
* @param pkt The writeback request.
*/
void doWriteback(PacketPtr pkt);
/**
* Return whether there are any outstanding misses.
*/
bool outstandingMisses() const
{
return mshrQueue.allocated != 0;
}
/**
* Return the number of outstanding misses in a Cache.
* Default returns 0.
*
* @retval unsigned The number of misses still outstanding.
*/
unsigned outstandingMisses() const
{
return missQueue->getMisses();
}
CacheBlk *findBlock(Addr addr) {
return tags->findBlock(addr);
}
/**
* Perform the access specified in the request and return the estimated
* time of completion. This function either updates the hierarchy state
* or just performs the access wherever the data is found, depending on
* the update flag.
* @param pkt The memory request to satisfy
* @param update If true, update the hierarchy, otherwise just perform the
* request.
* @return The estimated completion time.
*/
Tick probe(PacketPtr &pkt, bool update, CachePort * otherSidePort);
/**
* Snoop for the provided request in the cache and return the estimated
* time of completion.
* @todo Can a snoop probe not change state?
* @param pkt The memory request to satisfy
* @param update If true, update the hierarchy, otherwise just perform the
* request.
* @return The estimated completion time.
*/
Tick snoopProbe(PacketPtr &pkt);
bool inCache(Addr addr) {
return (tags->findBlock(addr) != 0);
}
bool inMissQueue(Addr addr) {
return (missQueue->findMSHR(addr) != 0);
return (mshrQueue.findMatch(addr) != 0);
}
};
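
The rewritten interface above splits cache access into three styles. A timing access is split-transaction: the call only reports whether the packet was accepted, and the response arrives later as an event. An atomic access completes in-line and hands back an estimated latency. A functional access just moves data, with no timing side effects at all. Below is a minimal sketch of that split, using hypothetical stand-in types rather than the simulator's actual port plumbing:

#include <cstdint>
#include <cstdio>

using Tick = uint64_t;
struct Packet {};  // stand-in for the real packet type

struct CacheSketch {
    // Timing: return immediately; only report whether the packet was
    // accepted. The data/response arrives later via a scheduled event.
    bool timingAccess(Packet *pkt) { (void)pkt; return true; }

    // Atomic: the whole access (including any fills) completes in-line,
    // and the estimated latency is returned to the caller.
    Tick atomicAccess(Packet *pkt) { (void)pkt; return 3; }

    // Functional: read or write the data with no timing effects,
    // checking this cache and whatever lies beyond the other-side port.
    void functionalAccess(Packet *pkt) { (void)pkt; }
};

int main() {
    CacheSketch c;
    Packet p;
    std::printf("timing accepted=%d, atomic latency=%llu ticks\n",
                c.timingAccess(&p),
                (unsigned long long)c.atomicAccess(&p));
    c.functionalAccess(&p);
    return 0;
}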

View file

@ -39,6 +39,7 @@
#include "sim/core.hh" // for Tick
#include "arch/isa_traits.hh" // for Addr
#include "mem/packet.hh"
#include "mem/request.hh"
/**
@ -51,8 +52,6 @@ enum CacheBlkStatusBits {
BlkWritable = 0x02,
/** dirty (modified) */
BlkDirty = 0x04,
/** compressed */
BlkCompressed = 0x08,
/** block was referenced */
BlkReferenced = 0x10,
/** block was a hardware prefetch yet unaccessed*/
@ -174,20 +173,11 @@ class CacheBlk
* Check to see if a block has been written.
* @return True if the block is dirty.
*/
bool isModified() const
bool isDirty() const
{
return (status & BlkDirty) != 0;
}
/**
* Check to see if this block contains compressed data.
* @return True if the block's data is compressed.
*/
bool isCompressed() const
{
return (status & BlkCompressed) != 0;
}
/**
* Check if this block has been referenced.
* @return True if the block has been referenced.
@ -213,10 +203,10 @@ class CacheBlk
* redundant records on the list, but that's OK, as they'll all
* get blown away at the next store.
*/
void trackLoadLocked(Request *req)
void trackLoadLocked(PacketPtr pkt)
{
assert(req->isLocked());
lockList.push_front(Lock(req));
assert(pkt->isLocked());
lockList.push_front(Lock(pkt->req));
}
/**
@ -230,9 +220,10 @@ class CacheBlk
* @return True if write should proceed, false otherwise. Returns
* false only in the case of a failed store conditional.
*/
bool checkWrite(Request *req)
bool checkWrite(PacketPtr pkt)
{
if (req->isLocked()) {
Request *req = pkt->req;
if (pkt->isLocked()) {
// it's a store conditional... have to check for matching
// load locked.
bool success = false;
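
(The checkWrite excerpt is cut short by the hunk above.) As a standalone illustration of what these two functions implement — the diff only changes them to take a PacketPtr and pull the Request out of it — here is a minimal sketch of the load-locked/store-conditional bookkeeping, with illustrative names:

#include <list>

struct Request { int contextId; };  // stand-in for the real Request

struct LockEntry {
    int contextId;  // which context holds the load-locked reservation
    explicit LockEntry(Request *req) : contextId(req->contextId) {}
    bool matches(Request *req) const { return contextId == req->contextId; }
};

struct BlockSketch {
    std::list<LockEntry> lockList;  // newest reservations at the front

    // Load-locked: record the requester. Redundant records are harmless
    // because every store clears the whole list.
    void trackLoadLocked(Request *req) { lockList.emplace_front(req); }

    // Store (conditional or not): a store conditional succeeds only if a
    // matching reservation exists; either way, all reservations die.
    bool checkWrite(Request *req, bool isLocked) {
        bool success = !isLocked;  // ordinary stores always proceed
        for (auto &l : lockList)
            if (isLocked && l.matches(req)) { success = true; break; }
        lockList.clear();          // stores blow away all outstanding locks
        return success;
    }
};

int main() {
    BlockSketch blk;
    Request r{1};
    blk.trackLoadLocked(&r);                  // LL by context 1
    return blk.checkWrite(&r, true) ? 0 : 1;  // SC by context 1 succeeds
}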

View file

@ -42,7 +42,6 @@
#include "mem/cache/base_cache.hh"
#include "mem/cache/cache.hh"
#include "mem/bus.hh"
#include "mem/cache/coherence/coherence_protocol.hh"
#include "sim/builder.hh"
// Tag Templates
@ -66,18 +65,6 @@
#include "mem/cache/tags/split_lifo.hh"
#endif
// Compression Templates
#include "base/compression/null_compression.hh"
#include "base/compression/lzss_compression.hh"
// MissQueue Templates
#include "mem/cache/miss/miss_queue.hh"
#include "mem/cache/miss/blocking_buffer.hh"
// Coherence Templates
#include "mem/cache/coherence/uni_coherence.hh"
#include "mem/cache/coherence/simple_coherence.hh"
//Prefetcher Headers
#if defined(USE_GHB)
#include "mem/cache/prefetch/ghb_prefetcher.hh"
@ -105,16 +92,11 @@ BEGIN_DECLARE_SIM_OBJECT_PARAMS(BaseCache)
Param<int> tgts_per_mshr;
Param<int> write_buffers;
Param<bool> prioritizeRequests;
SimObjectParam<CoherenceProtocol *> protocol;
Param<Addr> trace_addr;
Param<int> hash_delay;
#if defined(USE_CACHE_IIC)
SimObjectParam<Repl *> repl;
#endif
Param<bool> compressed_bus;
Param<bool> store_compressed;
Param<bool> adaptive_compression;
Param<int> compression_latency;
Param<int> subblock_size;
Param<Counter> max_miss_count;
VectorParam<Range<Addr> > addr_range;
@ -149,23 +131,12 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache)
INIT_PARAM_DFLT(write_buffers, "number of write buffers", 8),
INIT_PARAM_DFLT(prioritizeRequests, "always service demand misses first",
false),
INIT_PARAM_DFLT(protocol, "coherence protocol to use in the cache", NULL),
INIT_PARAM_DFLT(trace_addr, "address to trace", 0),
INIT_PARAM_DFLT(hash_delay, "time in cycles of hash access",1),
#if defined(USE_CACHE_IIC)
INIT_PARAM_DFLT(repl, "replacement policy",NULL),
#endif
INIT_PARAM_DFLT(compressed_bus,
"This cache connects to a compressed memory",
false),
INIT_PARAM_DFLT(store_compressed, "Store compressed data in the cache",
false),
INIT_PARAM_DFLT(adaptive_compression, "Use an adaptive compression scheme",
false),
INIT_PARAM_DFLT(compression_latency,
"Latency in cycles of compression algorithm",
0),
INIT_PARAM_DFLT(subblock_size,
"Size of subblock in IIC used for compression",
0),
@ -193,7 +164,7 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache)
END_INIT_SIM_OBJECT_PARAMS(BaseCache)
#define BUILD_CACHE(TAGS, tags, c) \
#define BUILD_CACHE(TAGS, tags) \
do { \
BasePrefetcher *pf; \
if (pf_policy == "tagged") { \
@ -208,16 +179,12 @@ END_INIT_SIM_OBJECT_PARAMS(BaseCache)
else { \
BUILD_NULL_PREFETCHER(TAGS); \
} \
Cache<TAGS, c>::Params params(tags, mq, coh, base_params, \
pf, prefetch_access, latency, \
true, \
store_compressed, \
adaptive_compression, \
compressed_bus, \
compAlg, compression_latency, \
prefetch_miss); \
Cache<TAGS, c> *retval = \
new Cache<TAGS, c>(getInstanceName(), params); \
Cache<TAGS>::Params params(tags, base_params, \
pf, prefetch_access, latency, \
true, \
prefetch_miss); \
Cache<TAGS> *retval = \
new Cache<TAGS>(getInstanceName(), params); \
return retval; \
} while (0)
@ -225,90 +192,72 @@ END_INIT_SIM_OBJECT_PARAMS(BaseCache)
panic("%s not compiled into M5", x); \
} while (0)
#define BUILD_COMPRESSED_CACHE(TAGS, tags, c) \
do { \
CompressionAlgorithm *compAlg; \
if (compressed_bus || store_compressed) { \
compAlg = new LZSSCompression(); \
} else { \
compAlg = new NullCompression(); \
} \
BUILD_CACHE(TAGS, tags, c); \
} while (0)
#if defined(USE_CACHE_FALRU)
#define BUILD_FALRU_CACHE(c) do { \
#define BUILD_FALRU_CACHE do { \
FALRU *tags = new FALRU(block_size, size, latency); \
BUILD_COMPRESSED_CACHE(FALRU, tags, c); \
BUILD_CACHE(FALRU, tags); \
} while (0)
#else
#define BUILD_FALRU_CACHE(c) BUILD_CACHE_PANIC("falru cache")
#define BUILD_FALRU_CACHE BUILD_CACHE_PANIC("falru cache")
#endif
#if defined(USE_CACHE_LRU)
#define BUILD_LRU_CACHE(c) do { \
#define BUILD_LRU_CACHE do { \
LRU *tags = new LRU(numSets, block_size, assoc, latency); \
BUILD_COMPRESSED_CACHE(LRU, tags, c); \
BUILD_CACHE(LRU, tags); \
} while (0)
#else
#define BUILD_LRU_CACHE(c) BUILD_CACHE_PANIC("lru cache")
#define BUILD_LRU_CACHE BUILD_CACHE_PANIC("lru cache")
#endif
#if defined(USE_CACHE_SPLIT)
#define BUILD_SPLIT_CACHE(c) do { \
#define BUILD_SPLIT_CACHE do { \
Split *tags = new Split(numSets, block_size, assoc, split_size, lifo, \
two_queue, latency); \
BUILD_COMPRESSED_CACHE(Split, tags, c); \
BUILD_CACHE(Split, tags); \
} while (0)
#else
#define BUILD_SPLIT_CACHE(c) BUILD_CACHE_PANIC("split cache")
#define BUILD_SPLIT_CACHE BUILD_CACHE_PANIC("split cache")
#endif
#if defined(USE_CACHE_SPLIT_LIFO)
#define BUILD_SPLIT_LIFO_CACHE(c) do { \
#define BUILD_SPLIT_LIFO_CACHE do { \
SplitLIFO *tags = new SplitLIFO(block_size, size, assoc, \
latency, two_queue, -1); \
BUILD_COMPRESSED_CACHE(SplitLIFO, tags, c); \
BUILD_CACHE(SplitLIFO, tags); \
} while (0)
#else
#define BUILD_SPLIT_LIFO_CACHE(c) BUILD_CACHE_PANIC("lifo cache")
#define BUILD_SPLIT_LIFO_CACHE BUILD_CACHE_PANIC("lifo cache")
#endif
#if defined(USE_CACHE_IIC)
#define BUILD_IIC_CACHE(c) do { \
#define BUILD_IIC_CACHE do { \
IIC *tags = new IIC(iic_params); \
BUILD_COMPRESSED_CACHE(IIC, tags, c); \
BUILD_CACHE(IIC, tags); \
} while (0)
#else
#define BUILD_IIC_CACHE(c) BUILD_CACHE_PANIC("iic")
#define BUILD_IIC_CACHE BUILD_CACHE_PANIC("iic")
#endif
#define BUILD_CACHES(c) do { \
#define BUILD_CACHES do { \
if (repl == NULL) { \
if (numSets == 1) { \
BUILD_FALRU_CACHE(c); \
BUILD_FALRU_CACHE; \
} else { \
if (split == true) { \
BUILD_SPLIT_CACHE(c); \
BUILD_SPLIT_CACHE; \
} else if (lifo == true) { \
BUILD_SPLIT_LIFO_CACHE(c); \
BUILD_SPLIT_LIFO_CACHE; \
} else { \
BUILD_LRU_CACHE(c); \
BUILD_LRU_CACHE; \
} \
} \
} else { \
BUILD_IIC_CACHE(c); \
BUILD_IIC_CACHE; \
} \
} while (0)
#define BUILD_COHERENCE(b) do { \
if (protocol == NULL) { \
UniCoherence *coh = new UniCoherence(); \
BUILD_CACHES(UniCoherence); \
} else { \
SimpleCoherence *coh = new SimpleCoherence(protocol); \
BUILD_CACHES(SimpleCoherence); \
} \
} while (0)
#if defined(USE_TAGGED)
@ -375,8 +324,9 @@ CREATE_SIM_OBJECT(BaseCache)
}
// Build BaseCache param object
BaseCache::Params base_params(addr_range, latency,
block_size, max_miss_count);
BaseCache::Params base_params(latency, block_size,
mshrs, tgts_per_mshr, write_buffers,
max_miss_count);
//Warnings about prefetcher policy
if (pf_policy == "none" && (prefetch_miss || prefetch_access)) {
@ -414,14 +364,7 @@ CREATE_SIM_OBJECT(BaseCache)
const void *repl = NULL;
#endif
if (mshrs == 1 /*|| out_bus->doEvents() == false*/) {
BlockingBuffer *mq = new BlockingBuffer(true);
BUILD_COHERENCE(BlockingBuffer);
} else {
MissQueue *mq = new MissQueue(mshrs, tgts_per_mshr, write_buffers,
true, prefetch_miss);
BUILD_COHERENCE(MissQueue);
}
BUILD_CACHES;
return NULL;
}

File diff suppressed because it is too large

View file

@ -1,8 +0,0 @@
from m5.SimObject import SimObject
from m5.params import *
class Coherence(Enum): vals = ['uni', 'msi', 'mesi', 'mosi', 'moesi']
class CoherenceProtocol(SimObject):
type = 'CoherenceProtocol'
do_upgrades = Param.Bool(True, "use upgrade transactions?")
protocol = Param.Coherence("name of coherence protocol")

View file

@ -1,37 +0,0 @@
# -*- mode:python -*-
# Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
Import('*')
SimObject('CoherenceProtocol.py')
Source('coherence_protocol.cc')
Source('uni_coherence.cc')

View file

@ -1,495 +0,0 @@
/*
* Copyright (c) 2002-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
* Steve Reinhardt
* Ron Dreslinski
*/
/**
* @file
* Definitions of CoherenceProtocol.
*/
#include <string>
#include "base/misc.hh"
#include "mem/cache/miss/mshr.hh"
#include "mem/cache/cache.hh"
#include "mem/cache/coherence/coherence_protocol.hh"
#include "sim/builder.hh"
using namespace std;
CoherenceProtocol::StateTransition::StateTransition()
: busCmd(MemCmd::InvalidCmd), newState(-1), snoopFunc(invalidTransition)
{
}
void
CoherenceProtocol::regStats()
{
// Even though we count all the possible transitions in the
// requestCount and snoopCount arrays, most of these are invalid,
// so we just select the interesting ones to print here.
requestCount[Invalid][MemCmd::ReadReq]
.name(name() + ".read_invalid")
.desc("read misses to invalid blocks")
;
requestCount[Invalid][MemCmd::WriteReq]
.name(name() +".write_invalid")
.desc("write misses to invalid blocks")
;
requestCount[Invalid][MemCmd::SoftPFReq]
.name(name() +".swpf_invalid")
.desc("soft prefetch misses to invalid blocks")
;
requestCount[Invalid][MemCmd::HardPFReq]
.name(name() +".hwpf_invalid")
.desc("hard prefetch misses to invalid blocks")
;
requestCount[Shared][MemCmd::WriteReq]
.name(name() + ".write_shared")
.desc("write misses to shared blocks")
;
requestCount[Owned][MemCmd::WriteReq]
.name(name() + ".write_owned")
.desc("write misses to owned blocks")
;
snoopCount[Shared][MemCmd::ReadReq]
.name(name() + ".snoop_read_shared")
.desc("read snoops on shared blocks")
;
snoopCount[Shared][MemCmd::ReadExReq]
.name(name() + ".snoop_readex_shared")
.desc("readEx snoops on shared blocks")
;
snoopCount[Shared][MemCmd::UpgradeReq]
.name(name() + ".snoop_upgrade_shared")
.desc("upgrade snoops on shared blocks")
;
snoopCount[Modified][MemCmd::ReadReq]
.name(name() + ".snoop_read_modified")
.desc("read snoops on modified blocks")
;
snoopCount[Modified][MemCmd::ReadExReq]
.name(name() + ".snoop_readex_modified")
.desc("readEx snoops on modified blocks")
;
snoopCount[Owned][MemCmd::ReadReq]
.name(name() + ".snoop_read_owned")
.desc("read snoops on owned blocks")
;
snoopCount[Owned][MemCmd::ReadExReq]
.name(name() + ".snoop_readex_owned")
.desc("readEx snoops on owned blocks")
;
snoopCount[Owned][MemCmd::UpgradeReq]
.name(name() + ".snoop_upgrade_owned")
.desc("upgrade snoops on owned blocks")
;
snoopCount[Exclusive][MemCmd::ReadReq]
.name(name() + ".snoop_read_exclusive")
.desc("read snoops on exclusive blocks")
;
snoopCount[Exclusive][MemCmd::ReadExReq]
.name(name() + ".snoop_readex_exclusive")
.desc("readEx snoops on exclusive blocks")
;
snoopCount[Shared][MemCmd::InvalidateReq]
.name(name() + ".snoop_inv_shared")
.desc("Invalidate snoops on shared blocks")
;
snoopCount[Owned][MemCmd::InvalidateReq]
.name(name() + ".snoop_inv_owned")
.desc("Invalidate snoops on owned blocks")
;
snoopCount[Exclusive][MemCmd::InvalidateReq]
.name(name() + ".snoop_inv_exclusive")
.desc("Invalidate snoops on exclusive blocks")
;
snoopCount[Modified][MemCmd::InvalidateReq]
.name(name() + ".snoop_inv_modified")
.desc("Invalidate snoops on modified blocks")
;
snoopCount[Invalid][MemCmd::InvalidateReq]
.name(name() + ".snoop_inv_invalid")
.desc("Invalidate snoops on invalid blocks")
;
snoopCount[Shared][MemCmd::WriteInvalidateReq]
.name(name() + ".snoop_writeinv_shared")
.desc("WriteInvalidate snoops on shared blocks")
;
snoopCount[Owned][MemCmd::WriteInvalidateReq]
.name(name() + ".snoop_writeinv_owned")
.desc("WriteInvalidate snoops on owned blocks")
;
snoopCount[Exclusive][MemCmd::WriteInvalidateReq]
.name(name() + ".snoop_writeinv_exclusive")
.desc("WriteInvalidate snoops on exclusive blocks")
;
snoopCount[Modified][MemCmd::WriteInvalidateReq]
.name(name() + ".snoop_writeinv_modified")
.desc("WriteInvalidate snoops on modified blocks")
;
snoopCount[Invalid][MemCmd::WriteInvalidateReq]
.name(name() + ".snoop_writeinv_invalid")
.desc("WriteInvalidate snoops on invalid blocks")
;
}
bool
CoherenceProtocol::invalidateTrans(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk, MSHR *mshr,
CacheBlk::State & new_state)
{
// invalidate the block
new_state = (blk->status & ~stateMask) | Invalid;
return false;
}
bool
CoherenceProtocol::supplyTrans(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk,
MSHR *mshr,
CacheBlk::State & new_state)
{
return true;
}
bool
CoherenceProtocol::supplyAndGotoSharedTrans(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk,
MSHR *mshr,
CacheBlk::State & new_state)
{
new_state = (blk->status & ~stateMask) | Shared;
pkt->flags |= SHARED_LINE;
return supplyTrans(cache, pkt, blk, mshr, new_state);
}
bool
CoherenceProtocol::supplyAndGotoOwnedTrans(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk,
MSHR *mshr,
CacheBlk::State & new_state)
{
new_state = (blk->status & ~stateMask) | Owned;
pkt->flags |= SHARED_LINE;
return supplyTrans(cache, pkt, blk, mshr, new_state);
}
bool
CoherenceProtocol::supplyAndInvalidateTrans(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk,
MSHR *mshr,
CacheBlk::State & new_state)
{
new_state = (blk->status & ~stateMask) | Invalid;
return supplyTrans(cache, pkt, blk, mshr, new_state);
}
bool
CoherenceProtocol::assertShared(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk,
MSHR *mshr,
CacheBlk::State & new_state)
{
new_state = (blk->status & ~stateMask) | Shared;
pkt->flags |= SHARED_LINE;
return false;
}
CoherenceProtocol::CoherenceProtocol(const string &name,
const string &protocol,
const bool doUpgrades)
: SimObject(name)
{
// Python should catch this, but in case it doesn't...
if (!(protocol == "msi" || protocol == "mesi" ||
protocol == "mosi" || protocol == "moesi")) {
fatal("CoherenceProtocol: unrecognized protocol %s\n", protocol);
}
bool hasOwned = (protocol == "mosi" || protocol == "moesi");
bool hasExclusive = (protocol == "mesi" || protocol == "moesi");
if (hasOwned && !doUpgrades) {
fatal("CoherenceProtocol: ownership protocols require upgrade "
"transactions\n(write miss on owned block generates ReadExcl, "
"which will clobber dirty block)\n");
}
// set up a few shortcuts to save typing & visual clutter
typedef MemCmd MC;
StateTransition (&tt)[stateMax+1][MC::NUM_MEM_CMDS] = transitionTable;
MC::Command writeToSharedCmd =
doUpgrades ? MC::UpgradeReq : MC::ReadExReq;
MC::Command writeToSharedResp =
doUpgrades ? MC::UpgradeReq : MC::ReadExResp;
// Note that all transitions by default cause a panic.
// Override the valid transitions with the appropriate actions here.
//
// ----- incoming requests: specify outgoing bus request -----
//
tt[Invalid][MC::ReadReq].onRequest(MC::ReadReq);
// we only support write allocate right now
tt[Invalid][MC::WriteReq].onRequest(MC::ReadExReq);
tt[Invalid][MC::SwapReq].onRequest(MC::ReadExReq);
tt[Shared][MC::WriteReq].onRequest(writeToSharedCmd);
tt[Shared][MC::SwapReq].onRequest(writeToSharedCmd);
if (hasOwned) {
tt[Owned][MC::WriteReq].onRequest(writeToSharedCmd);
tt[Owned][MC::SwapReq].onRequest(writeToSharedCmd);
}
// Prefetching causes a read
tt[Invalid][MC::SoftPFReq].onRequest(MC::ReadReq);
tt[Invalid][MC::HardPFReq].onRequest(MC::ReadReq);
//
// ----- on response to given request: specify new state -----
//
tt[Invalid][MC::ReadExResp].onResponse(Modified);
tt[Shared][writeToSharedResp].onResponse(Modified);
// Go to Exclusive state on read response if we have one (will
// move into shared if the shared line is asserted in the
// getNewState function)
//
// originally had this as:
// tt[Invalid][MC::ReadResp].onResponse(hasExclusive ? Exclusive: Shared);
// ...but for some reason that caused a link error...
if (hasExclusive) {
tt[Invalid][MC::ReadResp].onResponse(Exclusive);
} else {
tt[Invalid][MC::ReadResp].onResponse(Shared);
}
if (hasOwned) {
tt[Owned][writeToSharedResp].onResponse(Modified);
}
//
// ----- bus snoop transition functions -----
//
tt[Invalid][MC::ReadReq].onSnoop(nullTransition);
tt[Invalid][MC::ReadExReq].onSnoop(nullTransition);
tt[Invalid][MC::InvalidateReq].onSnoop(invalidateTrans);
tt[Invalid][MC::WriteInvalidateReq].onSnoop(invalidateTrans);
tt[Shared][MC::ReadReq].onSnoop(hasExclusive
? assertShared : nullTransition);
tt[Shared][MC::ReadExReq].onSnoop(invalidateTrans);
tt[Shared][MC::InvalidateReq].onSnoop(invalidateTrans);
tt[Shared][MC::WriteInvalidateReq].onSnoop(invalidateTrans);
if (doUpgrades) {
tt[Invalid][MC::UpgradeReq].onSnoop(nullTransition);
tt[Shared][MC::UpgradeReq].onSnoop(invalidateTrans);
}
tt[Modified][MC::ReadExReq].onSnoop(supplyAndInvalidateTrans);
tt[Modified][MC::ReadReq].onSnoop(hasOwned
? supplyAndGotoOwnedTrans
: supplyAndGotoSharedTrans);
tt[Modified][MC::InvalidateReq].onSnoop(invalidateTrans);
tt[Modified][MC::WriteInvalidateReq].onSnoop(invalidateTrans);
if (hasExclusive) {
tt[Exclusive][MC::ReadReq].onSnoop(assertShared);
tt[Exclusive][MC::ReadExReq].onSnoop(invalidateTrans);
tt[Exclusive][MC::InvalidateReq].onSnoop(invalidateTrans);
tt[Exclusive][MC::WriteInvalidateReq].onSnoop(invalidateTrans);
}
if (hasOwned) {
tt[Owned][MC::ReadReq].onSnoop(supplyAndGotoOwnedTrans);
tt[Owned][MC::ReadExReq].onSnoop(supplyAndInvalidateTrans);
tt[Owned][MC::UpgradeReq].onSnoop(invalidateTrans);
tt[Owned][MC::InvalidateReq].onSnoop(invalidateTrans);
tt[Owned][MC::WriteInvalidateReq].onSnoop(invalidateTrans);
}
// @todo add in hardware prefetch to this list
}
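
To make the table concrete, here is a toy re-creation of the same idea — small enums instead of the simulator's status-bit encodings, populated with just the transitions traced below. With upgrades enabled, a write miss to a Shared block issues an UpgradeReq on the bus, and the response (indexed by the same command, as writeToSharedResp is above) moves the block to Modified:

#include <cassert>
#include <cstdio>

enum State { Invalid, Shared, Exclusive, Owned, Modified, NStates };
enum Cmd   { ReadReq, WriteReq, ReadExReq, UpgradeReq, ReadResp, NCmds };

int main() {
    Cmd   onRequest[NStates][NCmds]  = {};  // bus command per (state, cmd)
    State onResponse[NStates][NCmds] = {};  // new state per (state, resp)

    // tt[Shared][MC::WriteReq].onRequest(writeToSharedCmd):
    onRequest[Shared][WriteReq] = UpgradeReq;
    // tt[Shared][writeToSharedResp].onResponse(Modified):
    onResponse[Shared][UpgradeReq] = Modified;

    Cmd bus = onRequest[Shared][WriteReq];   // write miss on a Shared block
    assert(bus == UpgradeReq);               // -> upgrade, not ReadEx
    State next = onResponse[Shared][bus];    // upgrade acknowledged
    assert(next == Modified);                // block now writable and dirty
    std::printf("Shared + WriteReq -> UpgradeReq -> Modified\n");
    return 0;
}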
MemCmd
CoherenceProtocol::getBusCmd(MemCmd cmdIn, CacheBlk::State state,
MSHR *mshr)
{
state &= stateMask;
int cmd_idx = cmdIn.toInt();
assert(0 <= state && state <= stateMax);
assert(0 <= cmd_idx && cmd_idx < MemCmd::NUM_MEM_CMDS);
MemCmd::Command cmdOut = transitionTable[state][cmd_idx].busCmd;
assert(cmdOut != MemCmd::InvalidCmd);
++requestCount[state][cmd_idx];
return cmdOut;
}
CacheBlk::State
CoherenceProtocol::getNewState(PacketPtr &pkt, CacheBlk::State oldState)
{
CacheBlk::State state = oldState & stateMask;
int cmd_idx = pkt->cmdToIndex();
assert(0 <= state && state <= stateMax);
assert(0 <= cmd_idx && cmd_idx < MemCmd::NUM_MEM_CMDS);
CacheBlk::State newState = transitionTable[state][cmd_idx].newState;
//Check if it's exclusive and the shared line was asserted,
//then goto shared instead
if (newState == Exclusive && (pkt->flags & SHARED_LINE)) {
newState = Shared;
}
assert(newState != -1);
//Make sure not to lose any other state information
newState = (oldState & ~stateMask) | newState;
return newState;
}
bool
CoherenceProtocol::handleBusRequest(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk,
MSHR *mshr,
CacheBlk::State & new_state)
{
if (blk == NULL) {
// nothing to do if we don't have a block
return false;
}
CacheBlk::State state = blk->status & stateMask;
int cmd_idx = pkt->cmdToIndex();
assert(0 <= state && state <= stateMax);
assert(0 <= cmd_idx && cmd_idx < MemCmd::NUM_MEM_CMDS);
// assert(mshr == NULL); // can't currently handle outstanding requests
//Check first for an MSHR, and also ensure, if there is one, that it is not in service
assert(!mshr || mshr->inService == 0);
++snoopCount[state][cmd_idx];
bool ret = transitionTable[state][cmd_idx].snoopFunc(cache, pkt, blk, mshr,
new_state);
return ret;
}
bool
CoherenceProtocol::nullTransition(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk, MSHR *mshr,
CacheBlk::State & new_state)
{
// do nothing
if (blk)
new_state = blk->status;
return false;
}
bool
CoherenceProtocol::invalidTransition(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk, MSHR *mshr,
CacheBlk::State & new_state)
{
panic("Invalid transition");
return false;
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
BEGIN_DECLARE_SIM_OBJECT_PARAMS(CoherenceProtocol)
Param<string> protocol;
Param<bool> do_upgrades;
END_DECLARE_SIM_OBJECT_PARAMS(CoherenceProtocol)
BEGIN_INIT_SIM_OBJECT_PARAMS(CoherenceProtocol)
INIT_PARAM(protocol, "name of coherence protocol"),
INIT_PARAM_DFLT(do_upgrades, "use upgrade transactions?", true)
END_INIT_SIM_OBJECT_PARAMS(CoherenceProtocol)
CREATE_SIM_OBJECT(CoherenceProtocol)
{
return new CoherenceProtocol(getInstanceName(), protocol,
do_upgrades);
}
REGISTER_SIM_OBJECT("CoherenceProtocol", CoherenceProtocol)
#endif // DOXYGEN_SHOULD_SKIP_THIS

View file

@ -1,257 +0,0 @@
/*
* Copyright (c) 2002-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
* Ron Dreslinski
* Steve Reinhardt
*/
/**
* @file
* Declaration of CoherenceProtocol, a basic coherence policy.
*/
#ifndef __COHERENCE_PROTOCOL_HH__
#define __COHERENCE_PROTOCOL_HH__
#include <string>
#include "sim/sim_object.hh"
#include "mem/packet.hh"
#include "mem/cache/cache_blk.hh"
#include "base/statistics.hh"
class BaseCache;
class MSHR;
/**
* A simple coherence policy for the memory hierarchy. Currently implements
* MSI, MESI, and MOESI protocols.
*/
class CoherenceProtocol : public SimObject
{
public:
/**
* Construct and initialize this policy.
* @param name The name of this policy.
* @param protocol The string representation of the protocol to use.
* @param doUpgrades True if bus upgrades should be used.
*/
CoherenceProtocol(const std::string &name, const std::string &protocol,
const bool doUpgrades);
/**
* Destructor.
*/
virtual ~CoherenceProtocol() {};
/**
* Register statistics
*/
virtual void regStats();
/**
* Get the proper bus command for the given command and status.
* @param cmd The request's command.
* @param status The current state of the cache block.
* @param mshr The MSHR matching the request.
* @return The proper bus command, as determined by the protocol.
*/
MemCmd getBusCmd(MemCmd cmd, CacheBlk::State status,
MSHR *mshr = NULL);
/**
* Return the proper state given the current state and the bus response.
* @param pkt The bus response.
* @param oldState The current block state.
* @return The new state.
*/
CacheBlk::State getNewState(PacketPtr &pkt,
CacheBlk::State oldState);
/**
* Handle snooped bus requests.
* @param cache The cache that snooped the request.
* @param pkt The snooped bus request.
* @param blk The cache block corresponding to the request, if any.
* @param mshr The MSHR corresponding to the request, if any.
* @param new_state The new coherence state of the block.
* @return True if the request should be satisfied locally.
*/
bool handleBusRequest(BaseCache *cache, PacketPtr &pkt, CacheBlk *blk,
MSHR *mshr, CacheBlk::State &new_state);
protected:
/** Snoop function type. */
typedef bool (*SnoopFuncType)(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
//
// Standard snoop transition functions
//
/**
* Do nothing transition.
*/
static bool nullTransition(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
/**
* Invalid transition, basically panic.
*/
static bool invalidTransition(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
/**
* Invalidate block, move to Invalid state.
*/
static bool invalidateTrans(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
/**
* Supply data, no state transition.
*/
static bool supplyTrans(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
/**
* Supply data and go to Shared state.
*/
static bool supplyAndGotoSharedTrans(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
/**
* Supply data and go to Owned state.
*/
static bool supplyAndGotoOwnedTrans(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
/**
* Invalidate block, supply data, and go to Invalid state.
*/
static bool supplyAndInvalidateTrans(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
/**
* Assert the shared line for a block that is shared/exclusive.
*/
static bool assertShared(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
/**
* Definition of protocol state transitions.
*/
class StateTransition
{
friend class CoherenceProtocol;
/** The bus command of this transition. */
Packet::Command busCmd;
/** The state to transition to. */
int newState;
/** The snoop function for this transition. */
SnoopFuncType snoopFunc;
/**
* Constructor, defaults to invalid transition.
*/
StateTransition();
/**
* Initialize bus command.
* @param cmd The bus command to use.
*/
void onRequest(Packet::Command cmd)
{
busCmd = cmd;
}
/**
* Set the transition state.
* @param s The new state.
*/
void onResponse(CacheBlk::State s)
{
newState = s;
}
/**
* Initialize the snoop function.
* @param f The new snoop function.
*/
void onSnoop(SnoopFuncType f)
{
snoopFunc = f;
}
};
friend class CoherenceProtocol::StateTransition;
/** Mask to select status bits relevant to coherence protocol. */
static const int stateMask = BlkValid | BlkWritable | BlkDirty;
/** The Modified (M) state. */
static const int Modified = BlkValid | BlkWritable | BlkDirty;
/** The Owned (O) state. */
static const int Owned = BlkValid | BlkDirty;
/** The Exclusive (E) state. */
static const int Exclusive = BlkValid | BlkWritable;
/** The Shared (S) state. */
static const int Shared = BlkValid;
/** The Invalid (I) state. */
static const int Invalid = 0;
/**
* Maximum state encoding value (used to size transition lookup
* table). Could be more than number of states, depends on
* encoding of status bits.
*/
static const int stateMax = stateMask;
/**
* The table of all possible transitions, organized by starting state and
* request command.
*/
StateTransition transitionTable[stateMax+1][MemCmd::NUM_MEM_CMDS];
/**
* @addtogroup CoherenceStatistics
* @{
*/
/**
* State accesses from parent cache.
*/
Stats::Scalar<> requestCount[stateMax+1][MemCmd::NUM_MEM_CMDS];
/**
* State accesses from snooped requests.
*/
Stats::Scalar<> snoopCount[stateMax+1][MemCmd::NUM_MEM_CMDS];
/**
* @}
*/
};
#endif // __COHERENCE_PROTOCOL_HH__
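
The header's state encodings reward a quick worked example: each MOESI state is a combination of the block status bits, and getNewState's (oldState & ~stateMask) | newState rewrites only those bits, so unrelated status (like BlkReferenced) survives a transition. A sketch under the assumption that BlkValid is 0x01 — not shown in the hunk above, but implied by the 0x02/0x04 progression:

#include <cstdio>

enum { BlkValid = 0x01, BlkWritable = 0x02, BlkDirty = 0x04,
       BlkReferenced = 0x10 };

int main() {
    const int stateMask = BlkValid | BlkWritable | BlkDirty;  // 0x07
    const int Modified  = BlkValid | BlkWritable | BlkDirty;  // M = 0x07
    const int Owned     = BlkValid | BlkDirty;                // O = 0x05
    const int Exclusive = BlkValid | BlkWritable;             // E = 0x03
    const int Shared    = BlkValid;                           // S = 0x01

    // A shared, referenced block is upgraded to Modified: only the
    // coherence bits change; the referenced bit is untouched.
    int status    = Shared | BlkReferenced;            // 0x11
    int newStatus = (status & ~stateMask) | Modified;  // 0x17
    std::printf("0x%02x -> 0x%02x\n", status, newStatus);
    (void)Owned; (void)Exclusive;
    return 0;
}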

View file

@ -1,180 +0,0 @@
/*
* Copyright (c) 2003-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
* Ron Dreslinski
*/
/**
* @file
* Declaration of a simple coherence policy.
*/
#ifndef __SIMPLE_COHERENCE_HH__
#define __SIMPLE_COHERENCE_HH__
#include <string>
#include "mem/packet.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/mshr_queue.hh"
#include "mem/cache/coherence/coherence_protocol.hh"
class BaseCache;
/**
* A simple MP coherence policy. This policy assumes an atomic bus and only one
* level of cache.
*/
class SimpleCoherence
{
protected:
/** Pointer to the parent cache. */
BaseCache *cache;
/** Pointer to the coherence protocol. */
CoherenceProtocol *protocol;
public:
/**
* Construct and initialize this coherence policy.
* @param _protocol The coherence protocol to use.
*/
SimpleCoherence(CoherenceProtocol *_protocol)
: protocol(_protocol)
{
}
/**
* Set the pointer to the parent cache.
* @param _cache The parent cache.
*/
void setCache(BaseCache *_cache)
{
cache = _cache;
}
/**
* Register statistics.
* @param name The name to prepend to stat descriptions.
*/
void regStats(const std::string &name)
{
}
/**
* This policy does not forward invalidates, return NULL.
* @return NULL.
*/
PacketPtr getPacket()
{
return NULL;
}
/**
* Was the CSHR request sent successfully?
* @param pkt The request.
* @param success True if the request was sent successfully.
*/
void sendResult(PacketPtr &pkt, MSHR* cshr, bool success)
{
//Don't do coherence
return;
}
/**
* Return the proper state given the current state and the bus response.
* @param pkt The bus response.
* @param current The current block state.
* @return The new state.
*/
CacheBlk::State getNewState(PacketPtr &pkt, CacheBlk::State current)
{
return protocol->getNewState(pkt, current);
}
/**
* Handle snooped bus requests.
* @param pkt The snooped bus request.
* @param blk The cache block corresponding to the request, if any.
* @param mshr The MSHR corresponding to the request, if any.
* @param new_state Return the new state for the block.
*/
bool handleBusRequest(PacketPtr &pkt, CacheBlk *blk, MSHR *mshr,
CacheBlk::State &new_state)
{
// assert(mshr == NULL);
//Got rid of; there could be an MSHR, but it can't be in service
if (blk != NULL)
{
if (pkt->cmd != MemCmd::Writeback) {
return protocol->handleBusRequest(cache, pkt, blk, mshr,
new_state);
}
else { //It is a writeback, must be ownership protocol, just keep state
new_state = blk->status;
}
}
return false;
}
/**
* Get the proper bus command for the given command and status.
* @param cmd The request's command.
* @param state The current state of the cache block.
* @return The proper bus command, as determined by the protocol.
*/
MemCmd getBusCmd(MemCmd cmd,
CacheBlk::State state)
{
if (cmd == MemCmd::Writeback) return MemCmd::Writeback;
return protocol->getBusCmd(cmd, state);
}
/**
* Return true if this coherence policy can handle fast cache writes.
*/
bool allowFastWrites() { return false; }
bool hasProtocol() { return true; }
bool propogateInvalidate(PacketPtr pkt, bool isTiming)
{
//For now we do nothing; assumes simple coherence is the top level of the cache
return false;
}
};
#endif //__SIMPLE_COHERENCE_HH__

View file

@ -1,135 +0,0 @@
/*
* Copyright (c) 2003-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
*/
#include "mem/cache/coherence/uni_coherence.hh"
#include "mem/cache/base_cache.hh"
#include "base/trace.hh"
using namespace std;
UniCoherence::UniCoherence()
: cshrs(50)
{
}
PacketPtr
UniCoherence::getPacket()
{
PacketPtr pkt = cshrs.getReq();
return pkt;
}
void
UniCoherence::sendResult(PacketPtr &pkt, MSHR* cshr, bool success)
{
if (success)
{
bool unblock = cshrs.isFull();
// cshrs.markInService(cshr);
delete pkt->req;
cshrs.deallocate(cshr);
if (!cshrs.havePending()) {
cache->clearSlaveRequest(Request_Coherence);
}
if (unblock) {
//since CSHRs are always used as buffers, should always get rid of one
assert(!cshrs.isFull());
cache->clearBlocked(Blocked_Coherence);
}
}
}
/**
* @todo add support for returning slave requests, not doing them here.
*/
bool
UniCoherence::handleBusRequest(PacketPtr &pkt, CacheBlk *blk, MSHR *mshr,
CacheBlk::State &new_state)
{
new_state = 0;
if (pkt->isInvalidate()) {
DPRINTF(Cache, "snoop inval on blk %x (blk ptr %x)\n",
pkt->getAddr(), blk);
}
else if (blk) {
new_state = blk->status;
if (pkt->isRead()) {
DPRINTF(Cache, "Uni-coherence snoops a read that hit in itself"
". Should satisfy the packet\n");
return true; //Satisfy Reads if we can
}
}
return false;
}
bool
UniCoherence::propogateInvalidate(PacketPtr pkt, bool isTiming)
{
if (pkt->isInvalidate()) {
/* Temp Fix for now, forward all invalidates up as functional accesses */
if (isTiming) {
// Forward to other caches
Request* req = new Request(pkt->req->getPaddr(), pkt->getSize(), 0);
PacketPtr tmp = new Packet(req, MemCmd::InvalidateReq, -1);
cshrs.allocate(tmp);
cache->setSlaveRequest(Request_Coherence, curTick);
if (cshrs.isFull())
cache->setBlockedForSnoop(Blocked_Coherence);
}
else {
PacketPtr tmp = new Packet(pkt->req, MemCmd::InvalidateReq, -1);
cache->cpuSidePort->sendAtomic(tmp);
delete tmp;
}
/**/
/* PacketPtr tmp = new Packet(pkt->req, MemCmd::InvalidateReq, -1);
cache->cpuSidePort->sendFunctional(tmp);
delete tmp;
*/
}
if (pkt->isRead()) {
/*For now we will see if someone above us has the data by
doing a functional access on reads. Fix this later */
PacketPtr tmp = new Packet(pkt->req, MemCmd::ReadReq, -1);
tmp->allocate();
cache->cpuSidePort->sendFunctional(tmp);
bool hit = (tmp->result == Packet::Success);
if (hit) {
memcpy(pkt->getPtr<uint8_t>(), tmp->getPtr<uint8_t>(),
pkt->getSize());
DPRINTF(Cache, "Uni-coherence snoops a read that hit in L1\n");
}
delete tmp;
return hit;
}
return false;
}
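
The read-snoop path above falls back on a functional probe toward the CPU side: build a read for the same block, send it up, and if an upper-level cache has the data, copy it into the snooping packet and report the snoop satisfied. A minimal sketch of that pattern with stand-in types (the real code reuses the snooped request and checks tmp->result):

#include <cstring>
#include <cstdint>
#include <vector>

struct PacketSketch {
    std::vector<uint8_t> data;
    bool success = false;
    uint8_t *ptr() { return data.data(); }
    size_t size() const { return data.size(); }
};

struct CpuSidePort {
    // Pretend an upper-level cache answers the functional read.
    void sendFunctional(PacketSketch &p) {
        std::memset(p.ptr(), 0xAB, p.size());
        p.success = true;
    }
};

// Returns true if the snoop was satisfied by someone above us.
bool snoopReadViaFunctional(CpuSidePort &cpuSide, PacketSketch &snoop) {
    PacketSketch probe;
    probe.data.resize(snoop.size());   // tmp->allocate()
    cpuSide.sendFunctional(probe);     // ask the caches above
    if (probe.success)                 // hit up there: supply the data
        std::memcpy(snoop.ptr(), probe.ptr(), snoop.size());
    return probe.success;
}

int main() {
    CpuSidePort port;
    PacketSketch snoop;
    snoop.data.resize(64);             // one cache block
    return snoopReadViaFunctional(port, snoop) ? 0 : 1;
}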

View file

@ -1,146 +0,0 @@
/*
* Copyright (c) 2003-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
*/
#ifndef __UNI_COHERENCE_HH__
#define __UNI_COHERENCE_HH__
#include "base/trace.hh"
#include "base/misc.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/mshr_queue.hh"
#include "mem/packet.hh"
class BaseCache;
class UniCoherence
{
protected:
/** Buffers to hold forwarded invalidates. */
MSHRQueue cshrs;
/** Pointer to the parent cache. */
BaseCache *cache;
public:
/**
* Construct and initialize this coherence policy.
*/
UniCoherence();
/**
* Set the pointer to the parent cache.
* @param _cache The parent cache.
*/
void setCache(BaseCache *_cache)
{
cache = _cache;
}
/**
* Register statistics.
* @param name The name to prepend to stat descriptions.
*/
void regStats(const std::string &name)
{
}
/**
* Return Read.
* @param cmd The request's command.
* @param state The current state of the cache block.
* @return The proper bus command, as determined by the protocol.
* @todo Make changes so writebacks don't get here.
*/
MemCmd getBusCmd(MemCmd cmd, CacheBlk::State state)
{
if (cmd == MemCmd::HardPFReq && state)
warn("Trying to issue a prefetch to a block we already have\n");
if (cmd == MemCmd::Writeback)
return MemCmd::Writeback;
return MemCmd::ReadReq;
}
/**
* Just return readable and writeable.
* @param pkt The bus response.
* @param current The current block state.
* @return The new state.
*/
CacheBlk::State getNewState(PacketPtr &pkt, CacheBlk::State current)
{
if (pkt->senderState) //Blocking Buffers don't get mshrs
{
if (((MSHR *)(pkt->senderState))->originalCmd == MemCmd::HardPFReq) {
DPRINTF(HWPrefetch, "Marking a hardware prefetch as such in the state\n");
return BlkHWPrefetched | BlkValid | BlkWritable;
}
else {
return BlkValid | BlkWritable;
}
}
//@todo What about prefetching with blocking buffers
else
return BlkValid | BlkWritable;
}
/**
* Return outstanding invalidate to forward.
* @return The next invalidate to forward to lower levels of cache.
*/
PacketPtr getPacket();
/**
* Was the CSHR request sent successfully?
* @param pkt The request.
* @param success True if the request was sent successfully.
*/
void sendResult(PacketPtr &pkt, MSHR* cshr, bool success);
/**
* Handle snooped bus requests.
* @param pkt The snooped bus request.
* @param blk The cache block corresponding to the request, if any.
* @param mshr The MSHR corresponding to the request, if any.
* @param new_state The new coherence state of the block.
* @return True if the request should be satisfied locally.
*/
bool handleBusRequest(PacketPtr &pkt, CacheBlk *blk, MSHR *mshr,
CacheBlk::State &new_state);
/**
* Return true if this coherence policy can handle fast cache writes.
*/
bool allowFastWrites() { return true; }
bool hasProtocol() { return false; }
bool propogateInvalidate(PacketPtr pkt, bool isTiming);
};
#endif //__UNI_COHERENCE_HH__

View file

@ -30,8 +30,5 @@
Import('*')
Source('blocking_buffer.cc')
Source('miss_buffer.cc')
Source('miss_queue.cc')
Source('mshr.cc')
Source('mshr_queue.cc')

View file

@ -1,245 +0,0 @@
/*
* Copyright (c) 2003-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
*/
/**
* @file
* Definitions of a simple buffer for a blocking cache.
*/
#include <cstring>
#include "mem/cache/base_cache.hh"
#include "mem/cache/miss/blocking_buffer.hh"
#include "mem/cache/prefetch/base_prefetcher.hh"
#include "mem/request.hh"
/**
* @todo Move writebacks into shared BaseBuffer class.
*/
void
BlockingBuffer::regStats(const std::string &name)
{
MissBuffer::regStats(name);
}
void
BlockingBuffer::handleMiss(PacketPtr &pkt, int blk_size, Tick time)
{
Addr blk_addr = pkt->getAddr() & ~(Addr)(blk_size - 1);
if (pkt->isWrite() && (pkt->req->isUncacheable() || !writeAllocate ||
!pkt->needsResponse())) {
if (!pkt->needsResponse()) {
wb.allocateAsBuffer(pkt);
} else {
wb.allocate(pkt->cmd, blk_addr, blk_size, pkt);
}
std::memcpy(wb.pkt->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(), blk_size);
cache->setBlocked(Blocked_NoWBBuffers);
cache->setMasterRequest(Request_WB, time);
return;
}
if (!pkt->needsResponse()) {
miss.allocateAsBuffer(pkt);
} else {
miss.allocate(pkt->cmd, blk_addr, blk_size, pkt);
}
if (!pkt->req->isUncacheable()) {
miss.pkt->flags |= CACHE_LINE_FILL;
}
cache->setBlocked(Blocked_NoMSHRs);
cache->setMasterRequest(Request_MSHR, time);
}
PacketPtr
BlockingBuffer::getPacket()
{
if (miss.pkt && !miss.inService) {
return miss.pkt;
}
return wb.pkt;
}
void
BlockingBuffer::setBusCmd(PacketPtr &pkt, MemCmd cmd)
{
MSHR *mshr = (MSHR*) pkt->senderState;
mshr->originalCmd = pkt->cmd;
if (pkt->isCacheFill())
pkt->cmdOverride(cmd);
}
void
BlockingBuffer::restoreOrigCmd(PacketPtr &pkt)
{
pkt->cmdOverride(((MSHR*)(pkt->senderState))->originalCmd);
}
void
BlockingBuffer::markInService(PacketPtr &pkt, MSHR* mshr)
{
if (!pkt->isCacheFill() && pkt->isWrite()) {
// Forwarding a write/writeback, don't need to change
// the command
assert(mshr == &wb);
cache->clearMasterRequest(Request_WB);
if (!pkt->needsResponse()) {
assert(wb.getNumTargets() == 0);
wb.deallocate();
cache->clearBlocked(Blocked_NoWBBuffers);
} else {
wb.inService = true;
}
} else {
assert(mshr == &miss);
cache->clearMasterRequest(Request_MSHR);
if (!pkt->needsResponse()) {
assert(miss.getNumTargets() == 0);
miss.deallocate();
cache->clearBlocked(Blocked_NoMSHRs);
} else {
//mark in service
miss.inService = true;
}
}
}
void
BlockingBuffer::handleResponse(PacketPtr &pkt, Tick time)
{
if (pkt->isCacheFill()) {
// targets were handled in the cache tags
assert((MSHR*)pkt->senderState == &miss);
miss.deallocate();
cache->clearBlocked(Blocked_NoMSHRs);
} else {
if (((MSHR*)(pkt->senderState))->hasTargets()) {
// Should only have 1 target if we had any
assert(((MSHR*)(pkt->senderState))->getNumTargets() == 1);
PacketPtr target = ((MSHR*)(pkt->senderState))->getTarget();
((MSHR*)(pkt->senderState))->popTarget();
if (pkt->isRead()) {
std::memcpy(target->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(), target->getSize());
}
cache->respond(target, time);
assert(!((MSHR*)(pkt->senderState))->hasTargets());
}
if (pkt->isWrite()) {
assert(((MSHR*)(pkt->senderState)) == &wb);
wb.deallocate();
cache->clearBlocked(Blocked_NoWBBuffers);
} else {
miss.deallocate();
cache->clearBlocked(Blocked_NoMSHRs);
}
}
}
void
BlockingBuffer::squash(int threadNum)
{
if (miss.threadNum == threadNum) {
PacketPtr target = miss.getTarget();
miss.popTarget();
assert(0/*target->req->getThreadNum()*/ == threadNum);
target = NULL;
assert(!miss.hasTargets());
miss.ntargets=0;
if (!miss.inService) {
miss.deallocate();
cache->clearBlocked(Blocked_NoMSHRs);
cache->clearMasterRequest(Request_MSHR);
}
}
}
void
BlockingBuffer::doWriteback(Addr addr,
int size, uint8_t *data, bool compressed)
{
// Generate request
Request * req = new Request(addr, size, 0);
PacketPtr pkt = new Packet(req, MemCmd::Writeback, -1);
pkt->allocate();
if (data) {
std::memcpy(pkt->getPtr<uint8_t>(), data, size);
}
if (compressed) {
pkt->flags |= COMPRESSED;
}
///All writebacks charged to same thread @todo figure this out
writebacks[0/*pkt->req->getThreadNum()*/]++;
wb.allocateAsBuffer(pkt);
cache->setMasterRequest(Request_WB, curTick);
cache->setBlocked(Blocked_NoWBBuffers);
}
void
BlockingBuffer::doWriteback(PacketPtr &pkt)
{
writebacks[0/*pkt->req->getThreadNum()*/]++;
wb.allocateAsBuffer(pkt);
// Since allocate as buffer copies the request,
// need to copy data here.
std::memcpy(wb.pkt->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(), pkt->getSize());
cache->setBlocked(Blocked_NoWBBuffers);
cache->setMasterRequest(Request_WB, curTick);
}
MSHR *
BlockingBuffer::findMSHR(Addr addr)
{
if (miss.addr == addr && miss.pkt)
return &miss;
return NULL;
}
bool
BlockingBuffer::findWrites(Addr addr, std::vector<MSHR*>& writes)
{
if (wb.addr == addr && wb.pkt) {
writes.push_back(&wb);
return true;
}
return false;
}

View file

@ -1,209 +0,0 @@
/*
* Copyright (c) 2003-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
*/
/**
* @file
* Declaration of a simple buffer for a blocking cache.
*/
#ifndef __BLOCKING_BUFFER_HH__
#define __BLOCKING_BUFFER_HH__
#include <vector>
#include "base/misc.hh" // for fatal()
#include "mem/cache/miss/miss_buffer.hh"
#include "mem/cache/miss/mshr.hh"
/**
* Miss and writeback storage for a blocking cache.
*/
class BlockingBuffer : public MissBuffer
{
protected:
/** Miss storage. */
MSHR miss;
/** WB storage. */
MSHR wb;
public:
/**
* Builds and initializes this buffer.
* @param write_allocate If true, treat write misses the same as reads.
*/
BlockingBuffer(bool write_allocate)
: MissBuffer(write_allocate)
{
}
/**
* Register statistics for this object.
* @param name The name of the parent cache.
*/
void regStats(const std::string &name);
/**
* Handle a cache miss properly. Requests the bus and marks the cache as
* blocked.
* @param pkt The request that missed in the cache.
* @param blk_size The block size of the cache.
* @param time The time the miss is detected.
*/
void handleMiss(PacketPtr &pkt, int blk_size, Tick time);
/**
* Fetch the block for the given address and buffer the given target.
* @param addr The address to fetch.
* @param asid The address space of the address.
* @param blk_size The block size of the cache.
* @param time The time the miss is detected.
* @param target The target for the fetch.
*/
MSHR* fetchBlock(Addr addr, int blk_size, Tick time,
PacketPtr &target)
{
fatal("Unimplemented");
M5_DUMMY_RETURN
}
/**
* Selects an outstanding request to service.
* @return The request to service, NULL if none found.
*/
PacketPtr getPacket();
/**
* Set the command to the given bus command.
* @param pkt The request to update.
* @param cmd The bus command to use.
*/
void setBusCmd(PacketPtr &pkt, MemCmd cmd);
/**
* Restore the original command in case of a bus transmission error.
* @param pkt The request to reset.
*/
void restoreOrigCmd(PacketPtr &pkt);
/**
* Marks a request as in service (sent on the bus). This can have side
* effects, since storage for no-response commands is deallocated once they
* are successfully sent.
* @param pkt The request that was sent on the bus.
*/
void markInService(PacketPtr &pkt, MSHR* mshr);
/**
* Frees the resources of the request and unblock the cache.
* @param pkt The request that has been satisfied.
* @param time The time when the request is satisfied.
*/
void handleResponse(PacketPtr &pkt, Tick time);
/**
* Removes all outstanding requests for a given thread number. If a request
* has been sent to the bus, this function removes all of its targets.
* @param threadNum The thread number of the requests to squash.
*/
void squash(int threadNum);
/**
* Return the current number of outstanding misses.
* @return the number of outstanding misses.
*/
int getMisses()
{
return miss.getNumTargets();
}
/**
* Searches for the supplied address in the miss "queue".
* @param addr The address to look for.
* @param asid The address space id.
 * @return A pointer to the matching MSHR, NULL if not found.
*/
MSHR* findMSHR(Addr addr);
/**
* Searches for the supplied address in the write buffer.
* @param addr The address to look for.
* @param asid The address space id.
* @param writes List of pointers to the matching writes.
* @return True if there is a matching write.
*/
bool findWrites(Addr addr, std::vector<MSHR*>& writes);
/**
* Perform a writeback of dirty data to the given address.
* @param addr The address to write to.
* @param asid The address space id.
* @param size The number of bytes to write.
* @param data The data to write, can be NULL.
* @param compressed True if the data is compressed.
*/
void doWriteback(Addr addr,
int size, uint8_t *data, bool compressed);
/**
* Perform a writeback request.
* @param pkt The writeback request.
*/
void doWriteback(PacketPtr &pkt);
/**
* Returns true if there are outstanding requests.
* @return True if there are outstanding requests.
*/
bool havePending()
{
return !miss.inService || !wb.inService;
}
/**
* Add a target to the given MSHR. This assumes it is in the miss queue.
* @param mshr The mshr to add a target to.
* @param pkt The target to add.
*/
void addTarget(MSHR *mshr, PacketPtr &pkt)
{
fatal("Shouldn't call this on a blocking buffer.");
}
/**
 * Dummy implementation.
*/
MSHR* allocateTargetList(Addr addr)
{
fatal("Unimplemented");
M5_DUMMY_RETURN
}
};
#endif // __BLOCKING_BUFFER_HH__
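BlockingBuffer reduces the general MSHR machinery to one outstanding miss and one outstanding writeback; while either register is busy, the cache simply blocks. As an editorial aside, a minimal standalone sketch of that blocking policy (hypothetical type, not part of the m5 sources):

// Toy model of the blocking policy above: one outstanding miss at a
// time; further accesses are refused until the response arrives.
struct TinyBlockingCache
{
    bool missOutstanding;

    TinyBlockingCache() : missOutstanding(false) {}

    // Returns true if the access was accepted; false means the cache
    // is blocked (cf. Blocked_NoMSHRs) and the requester must retry.
    bool access()
    {
        if (missOutstanding)
            return false;
        missOutstanding = true; // allocate the single miss register
        return true;
    }

    // Response arrived: free the register and unblock.
    void handleResponse() { missOutstanding = false; }
};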


@@ -1,62 +0,0 @@
/*
* Copyright (c) 2003-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
*/
#include "cpu/smt.hh" //for maxThreadsPerCPU
#include "mem/cache/base_cache.hh"
#include "mem/cache/miss/miss_buffer.hh"
#include "mem/cache/prefetch/base_prefetcher.hh"
/**
* @todo Move writebacks into shared BaseBuffer class.
*/
void
MissBuffer::regStats(const std::string &name)
{
using namespace Stats;
writebacks
.init(maxThreadsPerCPU)
.name(name + ".writebacks")
.desc("number of writebacks")
.flags(total)
;
}
void
MissBuffer::setCache(BaseCache *_cache)
{
cache = _cache;
blkSize = cache->getBlockSize();
}
void
MissBuffer::setPrefetcher(BasePrefetcher *_prefetcher)
{
prefetcher = _prefetcher;
}


@@ -1,223 +0,0 @@
/*
* Copyright (c) 2003-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Steve Reinhardt
*/
/**
* @file
* MissBuffer declaration.
*/
#ifndef __MISS_BUFFER_HH__
#define __MISS_BUFFER_HH__
class BaseCache;
class BasePrefetcher;
class MSHR;
/**
* Abstract base class for cache miss buffering.
*/
class MissBuffer
{
protected:
/** True if the cache should allocate on a write miss. */
const bool writeAllocate;
/** Pointer to the parent cache. */
BaseCache *cache;
/** The Prefetcher */
BasePrefetcher *prefetcher;
/** Block size of the parent cache. */
int blkSize;
// Statistics
/**
* @addtogroup CacheStatistics
* @{
*/
/** Number of blocks written back per thread. */
Stats::Vector<> writebacks;
/**
* @}
*/
public:
MissBuffer(bool write_allocate)
: writeAllocate(write_allocate)
{
}
virtual ~MissBuffer() {}
/**
* Called by the parent cache to set the back pointer.
* @param _cache A pointer to the parent cache.
*/
void setCache(BaseCache *_cache);
void setPrefetcher(BasePrefetcher *_prefetcher);
/**
* Register statistics for this object.
* @param name The name of the parent cache.
*/
virtual void regStats(const std::string &name);
/**
* Handle a cache miss properly. Either allocate an MSHR for the request,
* or forward it through the write buffer.
* @param pkt The request that missed in the cache.
* @param blk_size The block size of the cache.
* @param time The time the miss is detected.
*/
virtual void handleMiss(PacketPtr &pkt, int blk_size, Tick time) = 0;
/**
* Fetch the block for the given address and buffer the given target.
* @param addr The address to fetch.
* @param asid The address space of the address.
* @param blk_size The block size of the cache.
* @param time The time the miss is detected.
* @param target The target for the fetch.
*/
virtual MSHR *fetchBlock(Addr addr, int blk_size, Tick time,
PacketPtr &target) = 0;
/**
 * Selects an outstanding request to service.
* @return The request to service, NULL if none found.
*/
virtual PacketPtr getPacket() = 0;
/**
* Set the command to the given bus command.
* @param pkt The request to update.
* @param cmd The bus command to use.
*/
virtual void setBusCmd(PacketPtr &pkt, MemCmd cmd) = 0;
/**
* Restore the original command in case of a bus transmission error.
* @param pkt The request to reset.
*/
virtual void restoreOrigCmd(PacketPtr &pkt) = 0;
/**
* Marks a request as in service (sent on the bus). This can have side
 * effects since storage for no-response commands is deallocated once they
* are successfully sent.
* @param pkt The request that was sent on the bus.
*/
virtual void markInService(PacketPtr &pkt, MSHR* mshr) = 0;
/**
* Collect statistics and free resources of a satisfied request.
* @param pkt The request that has been satisfied.
* @param time The time when the request is satisfied.
*/
virtual void handleResponse(PacketPtr &pkt, Tick time) = 0;
/**
* Removes all outstanding requests for a given thread number. If a request
* has been sent to the bus, this function removes all of its targets.
* @param threadNum The thread number of the requests to squash.
*/
virtual void squash(int threadNum) = 0;
/**
* Return the current number of outstanding misses.
* @return the number of outstanding misses.
*/
virtual int getMisses() = 0;
/**
* Searches for the supplied address in the miss queue.
* @param addr The address to look for.
* @param asid The address space id.
* @return The MSHR that contains the address, NULL if not found.
* @warning Currently only searches the miss queue. If non write allocate
* might need to search the write buffer for coherence.
*/
virtual MSHR* findMSHR(Addr addr) = 0;
/**
* Searches for the supplied address in the write buffer.
* @param addr The address to look for.
* @param asid The address space id.
* @param writes The list of writes that match the address.
* @return True if any writes are found
*/
virtual bool findWrites(Addr addr, std::vector<MSHR*>& writes) = 0;
/**
* Perform a writeback of dirty data to the given address.
* @param addr The address to write to.
* @param asid The address space id.
* @param xc The execution context of the address space.
* @param size The number of bytes to write.
* @param data The data to write, can be NULL.
* @param compressed True if the data is compressed.
*/
virtual void doWriteback(Addr addr, int size, uint8_t *data,
bool compressed) = 0;
/**
* Perform the given writeback request.
* @param pkt The writeback request.
*/
virtual void doWriteback(PacketPtr &pkt) = 0;
/**
* Returns true if there are outstanding requests.
* @return True if there are outstanding requests.
*/
virtual bool havePending() = 0;
/**
* Add a target to the given MSHR. This assumes it is in the miss queue.
* @param mshr The mshr to add a target to.
* @param pkt The target to add.
*/
virtual void addTarget(MSHR *mshr, PacketPtr &pkt) = 0;
/**
* Allocate a MSHR to hold a list of targets to a block involved in a copy.
* If the block is marked done then the MSHR already holds the data to
* fill the block. Otherwise the block needs to be fetched.
* @param addr The address to buffer.
* @param asid The address space ID.
* @return A pointer to the allocated MSHR.
*/
virtual MSHR* allocateTargetList(Addr addr) = 0;
};
#endif //__MISS_BUFFER_HH__
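For orientation, a concrete MissBuffer gets wired into its parent cache through the three non-virtual hooks above. A hedged sketch (the helper wireUp and its arguments are illustrative, assuming the usual cache headers are included):

// Illustrative wiring of a MissBuffer into its parent cache, based on
// the methods declared above. setCache() also picks up the cache's
// block size; regStats() prefixes all stats with the cache's name.
void
wireUp(BaseCache *cache, BasePrefetcher *pf, MissBuffer *mb)
{
    mb->setCache(cache);
    mb->setPrefetcher(pf);
    mb->regStats(cache->name());
}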


@@ -1,752 +0,0 @@
/*
* Copyright (c) 2003-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
* Ron Dreslinski
*/
/**
* @file
* Miss and writeback queue definitions.
*/
#include "cpu/smt.hh" //for maxThreadsPerCPU
#include "mem/cache/base_cache.hh"
#include "mem/cache/miss/miss_queue.hh"
#include "mem/cache/prefetch/base_prefetcher.hh"
using namespace std;
// simple constructor
/**
* @todo Remove the +16 from the write buffer constructor once we handle
 * stalling on writebacks due to compression writes.
*/
MissQueue::MissQueue(int numMSHRs, int numTargets, int write_buffers,
bool write_allocate, bool prefetch_miss)
: MissBuffer(write_allocate),
mq(numMSHRs, 4), wb(write_buffers, numMSHRs + 1000), numMSHR(numMSHRs),
numTarget(numTargets), writeBuffers(write_buffers),
order(0), prefetchMiss(prefetch_miss)
{
noTargetMSHR = NULL;
}
MissQueue::~MissQueue()
{
}
void
MissQueue::regStats(const string &name)
{
MissBuffer::regStats(name);
using namespace Stats;
// MSHR hit statistics
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
mshr_hits[access_idx]
.init(maxThreadsPerCPU)
.name(name + "." + cstr + "_mshr_hits")
.desc("number of " + cstr + " MSHR hits")
.flags(total | nozero | nonan)
;
}
demandMshrHits
.name(name + ".demand_mshr_hits")
.desc("number of demand (read+write) MSHR hits")
.flags(total)
;
demandMshrHits = mshr_hits[MemCmd::ReadReq] + mshr_hits[MemCmd::WriteReq];
overallMshrHits
.name(name + ".overall_mshr_hits")
.desc("number of overall MSHR hits")
.flags(total)
;
overallMshrHits = demandMshrHits + mshr_hits[MemCmd::SoftPFReq] +
mshr_hits[MemCmd::HardPFReq];
// MSHR miss statistics
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
mshr_misses[access_idx]
.init(maxThreadsPerCPU)
.name(name + "." + cstr + "_mshr_misses")
.desc("number of " + cstr + " MSHR misses")
.flags(total | nozero | nonan)
;
}
demandMshrMisses
.name(name + ".demand_mshr_misses")
.desc("number of demand (read+write) MSHR misses")
.flags(total)
;
demandMshrMisses = mshr_misses[MemCmd::ReadReq] + mshr_misses[MemCmd::WriteReq];
overallMshrMisses
.name(name + ".overall_mshr_misses")
.desc("number of overall MSHR misses")
.flags(total)
;
overallMshrMisses = demandMshrMisses + mshr_misses[MemCmd::SoftPFReq] +
mshr_misses[MemCmd::HardPFReq];
// MSHR miss latency statistics
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
mshr_miss_latency[access_idx]
.init(maxThreadsPerCPU)
.name(name + "." + cstr + "_mshr_miss_latency")
.desc("number of " + cstr + " MSHR miss cycles")
.flags(total | nozero | nonan)
;
}
demandMshrMissLatency
.name(name + ".demand_mshr_miss_latency")
.desc("number of demand (read+write) MSHR miss cycles")
.flags(total)
;
demandMshrMissLatency = mshr_miss_latency[MemCmd::ReadReq]
+ mshr_miss_latency[MemCmd::WriteReq];
overallMshrMissLatency
.name(name + ".overall_mshr_miss_latency")
.desc("number of overall MSHR miss cycles")
.flags(total)
;
overallMshrMissLatency = demandMshrMissLatency +
mshr_miss_latency[MemCmd::SoftPFReq] + mshr_miss_latency[MemCmd::HardPFReq];
// MSHR uncacheable statistics
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
mshr_uncacheable[access_idx]
.init(maxThreadsPerCPU)
.name(name + "." + cstr + "_mshr_uncacheable")
.desc("number of " + cstr + " MSHR uncacheable")
.flags(total | nozero | nonan)
;
}
overallMshrUncacheable
.name(name + ".overall_mshr_uncacheable_misses")
.desc("number of overall MSHR uncacheable misses")
.flags(total)
;
overallMshrUncacheable = mshr_uncacheable[MemCmd::ReadReq]
+ mshr_uncacheable[MemCmd::WriteReq] + mshr_uncacheable[MemCmd::SoftPFReq]
+ mshr_uncacheable[MemCmd::HardPFReq];
// MSHR miss latency statistics
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
mshr_uncacheable_lat[access_idx]
.init(maxThreadsPerCPU)
.name(name + "." + cstr + "_mshr_uncacheable_latency")
.desc("number of " + cstr + " MSHR uncacheable cycles")
.flags(total | nozero | nonan)
;
}
overallMshrUncacheableLatency
.name(name + ".overall_mshr_uncacheable_latency")
.desc("number of overall MSHR uncacheable cycles")
.flags(total)
;
overallMshrUncacheableLatency = mshr_uncacheable_lat[MemCmd::ReadReq]
+ mshr_uncacheable_lat[MemCmd::WriteReq]
+ mshr_uncacheable_lat[MemCmd::SoftPFReq]
+ mshr_uncacheable_lat[MemCmd::HardPFReq];
#if 0
// MSHR access formulas
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
mshrAccesses[access_idx]
.name(name + "." + cstr + "_mshr_accesses")
.desc("number of " + cstr + " mshr accesses(hits+misses)")
.flags(total | nozero | nonan)
;
mshrAccesses[access_idx] =
mshr_hits[access_idx] + mshr_misses[access_idx]
+ mshr_uncacheable[access_idx];
}
demandMshrAccesses
.name(name + ".demand_mshr_accesses")
.desc("number of demand (read+write) mshr accesses")
.flags(total | nozero | nonan)
;
demandMshrAccesses = demandMshrHits + demandMshrMisses;
overallMshrAccesses
.name(name + ".overall_mshr_accesses")
.desc("number of overall (read+write) mshr accesses")
.flags(total | nozero | nonan)
;
overallMshrAccesses = overallMshrHits + overallMshrMisses
+ overallMshrUncacheable;
#endif
// MSHR miss rate formulas
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
mshrMissRate[access_idx]
.name(name + "." + cstr + "_mshr_miss_rate")
.desc("mshr miss rate for " + cstr + " accesses")
.flags(total | nozero | nonan)
;
mshrMissRate[access_idx] =
mshr_misses[access_idx] / cache->accesses[access_idx];
}
demandMshrMissRate
.name(name + ".demand_mshr_miss_rate")
.desc("mshr miss rate for demand accesses")
.flags(total)
;
demandMshrMissRate = demandMshrMisses / cache->demandAccesses;
overallMshrMissRate
.name(name + ".overall_mshr_miss_rate")
.desc("mshr miss rate for overall accesses")
.flags(total)
;
overallMshrMissRate = overallMshrMisses / cache->overallAccesses;
// mshrMiss latency formulas
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
avgMshrMissLatency[access_idx]
.name(name + "." + cstr + "_avg_mshr_miss_latency")
.desc("average " + cstr + " mshr miss latency")
.flags(total | nozero | nonan)
;
avgMshrMissLatency[access_idx] =
mshr_miss_latency[access_idx] / mshr_misses[access_idx];
}
demandAvgMshrMissLatency
.name(name + ".demand_avg_mshr_miss_latency")
.desc("average overall mshr miss latency")
.flags(total)
;
demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
overallAvgMshrMissLatency
.name(name + ".overall_avg_mshr_miss_latency")
.desc("average overall mshr miss latency")
.flags(total)
;
overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
// mshrUncacheable latency formulas
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
MemCmd cmd(access_idx);
const string &cstr = cmd.toString();
avgMshrUncacheableLatency[access_idx]
.name(name + "." + cstr + "_avg_mshr_uncacheable_latency")
.desc("average " + cstr + " mshr uncacheable latency")
.flags(total | nozero | nonan)
;
avgMshrUncacheableLatency[access_idx] =
mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
}
overallAvgMshrUncacheableLatency
.name(name + ".overall_avg_mshr_uncacheable_latency")
.desc("average overall mshr uncacheable latency")
.flags(total)
;
overallAvgMshrUncacheableLatency = overallMshrUncacheableLatency / overallMshrUncacheable;
mshr_cap_events
.init(maxThreadsPerCPU)
.name(name + ".mshr_cap_events")
.desc("number of times MSHR cap was activated")
.flags(total)
;
//software prefetching stats
soft_prefetch_mshr_full
.init(maxThreadsPerCPU)
.name(name + ".soft_prefetch_mshr_full")
.desc("number of mshr full events for SW prefetching instrutions")
.flags(total)
;
mshr_no_allocate_misses
.name(name +".no_allocate_misses")
.desc("Number of misses that were no-allocate")
;
}
MSHR*
MissQueue::allocateMiss(PacketPtr &pkt, int size, Tick time)
{
MSHR* mshr = mq.allocate(pkt, size);
mshr->order = order++;
if (!pkt->req->isUncacheable()) { // && !pkt->isNoAllocate()
// Mark this as a cache line fill
mshr->pkt->flags |= CACHE_LINE_FILL;
}
if (mq.isFull()) {
cache->setBlocked(Blocked_NoMSHRs);
}
if (pkt->cmd != MemCmd::HardPFReq) {
//If we need to request the bus (not on HW prefetch), do so
cache->setMasterRequest(Request_MSHR, time);
}
return mshr;
}
MSHR*
MissQueue::allocateWrite(PacketPtr &pkt, int size, Tick time)
{
MSHR* mshr = wb.allocate(pkt,size);
mshr->order = order++;
//REMOVING COMPRESSION FOR NOW
#if 0
if (pkt->isCompressed()) {
mshr->pkt->deleteData();
mshr->pkt->actualSize = pkt->actualSize;
mshr->pkt->data = new uint8_t[pkt->actualSize];
memcpy(mshr->pkt->data, pkt->data, pkt->actualSize);
} else {
#endif
memcpy(mshr->pkt->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(), pkt->getSize());
//{
if (wb.isFull()) {
cache->setBlocked(Blocked_NoWBBuffers);
}
cache->setMasterRequest(Request_WB, time);
return mshr;
}
/**
* @todo Remove SW prefetches on mshr hits.
*/
void
MissQueue::handleMiss(PacketPtr &pkt, int blkSize, Tick time)
{
// if (!cache->isTopLevel())
if (prefetchMiss) prefetcher->handleMiss(pkt, time);
int size = blkSize;
Addr blkAddr = pkt->getAddr() & ~(Addr)(blkSize-1);
MSHR* mshr = NULL;
if (!pkt->req->isUncacheable()) {
mshr = mq.findMatch(blkAddr);
if (mshr) {
//@todo remove hw_pf here
mshr_hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
if (mshr->threadNum != 0/*pkt->req->getThreadNum()*/) {
mshr->threadNum = -1;
}
mq.allocateTarget(mshr, pkt);
if (mshr->pkt->isNoAllocate() && !pkt->isNoAllocate()) {
//We are adding an allocate after a no-allocate
mshr->pkt->flags &= ~NO_ALLOCATE;
}
if (mshr->getNumTargets() == numTarget) {
noTargetMSHR = mshr;
cache->setBlocked(Blocked_NoTargets);
mq.moveToFront(mshr);
}
return;
}
if (pkt->isNoAllocate()) {
//Count no-allocate requests differently
mshr_no_allocate_misses++;
}
else {
mshr_misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
}
} else {
//Count uncacheable accesses
mshr_uncacheable[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
size = pkt->getSize();
}
if (pkt->isWrite() && (pkt->req->isUncacheable() || !writeAllocate ||
!pkt->needsResponse())) {
/**
* @todo Add write merging here.
*/
mshr = allocateWrite(pkt, pkt->getSize(), time);
return;
}
mshr = allocateMiss(pkt, blkSize, time);
}
MSHR*
MissQueue::fetchBlock(Addr addr, int blk_size, Tick time,
PacketPtr &target)
{
Addr blkAddr = addr & ~(Addr)(blk_size - 1);
assert(mq.findMatch(addr) == NULL);
MSHR *mshr = mq.allocateFetch(blkAddr, blk_size, target);
mshr->order = order++;
mshr->pkt->flags |= CACHE_LINE_FILL;
if (mq.isFull()) {
cache->setBlocked(Blocked_NoMSHRs);
}
cache->setMasterRequest(Request_MSHR, time);
return mshr;
}
PacketPtr
MissQueue::getPacket()
{
PacketPtr pkt = mq.getReq();
if (((wb.isFull() && wb.inServiceMSHRs == 0) || !pkt ||
pkt->time > curTick) && wb.havePending()) {
pkt = wb.getReq();
// Need to search for earlier miss.
MSHR *mshr = mq.findPending(pkt);
if (mshr && mshr->order < ((MSHR*)(pkt->senderState))->order) {
// Service misses in order until conflict is cleared.
return mq.getReq();
}
}
if (pkt) {
MSHR* mshr = wb.findPending(pkt);
if (mshr /*&& mshr->order < pkt->senderState->order*/) {
// The only way this happens is if we are doing a write and we didn't
// have permissions, then subsequently saw a writeback (the owned
// block got evicted). We need to perform the writeback first to
// preserve the dirty data, then we can issue the write.
return wb.getReq();
}
}
else if (!mq.isFull()){
//If we have a miss queue slot, we can try a prefetch
pkt = prefetcher->getPacket();
if (pkt) {
//Update statistic on number of prefetches issued (hwpf_mshr_misses)
mshr_misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
//It will request the bus for the future, but should clear that immediately
allocateMiss(pkt, pkt->getSize(), curTick);
pkt = mq.getReq();
assert(pkt); //We should get back a req b/c we just put one in
}
}
return pkt;
}
void
MissQueue::setBusCmd(PacketPtr &pkt, MemCmd cmd)
{
assert(pkt->senderState != 0);
MSHR * mshr = (MSHR*)pkt->senderState;
mshr->originalCmd = pkt->cmd;
if (cmd == MemCmd::UpgradeReq || cmd == MemCmd::InvalidateReq) {
pkt->flags |= NO_ALLOCATE;
pkt->flags &= ~CACHE_LINE_FILL;
}
else if (!pkt->req->isUncacheable() && !pkt->isNoAllocate() &&
cmd.needsResponse()) {
pkt->flags |= CACHE_LINE_FILL;
}
if (pkt->isCacheFill() || pkt->isNoAllocate())
pkt->cmd = cmd;
}
void
MissQueue::restoreOrigCmd(PacketPtr &pkt)
{
pkt->cmd = ((MSHR*)(pkt->senderState))->originalCmd;
}
void
MissQueue::markInService(PacketPtr &pkt, MSHR* mshr)
{
bool unblock = false;
BlockedCause cause = NUM_BLOCKED_CAUSES;
/**
* @todo Should include MSHRQueue pointer in MSHR to select the correct
* one.
*/
if ((!pkt->isCacheFill() && pkt->isWrite())) {
// Forwarding a write/writeback, don't need to change
// the command
unblock = wb.isFull();
wb.markInService(mshr);
if (!wb.havePending()){
cache->clearMasterRequest(Request_WB);
}
if (unblock) {
// Do we really unblock?
unblock = !wb.isFull();
cause = Blocked_NoWBBuffers;
}
} else {
unblock = mq.isFull();
mq.markInService(mshr);
if (!mq.havePending()){
cache->clearMasterRequest(Request_MSHR);
}
if (mshr->originalCmd == MemCmd::HardPFReq) {
DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
cache->name());
//Also clear pending if need be
if (!prefetcher->havePending())
{
cache->clearMasterRequest(Request_PF);
}
}
if (unblock) {
unblock = !mq.isFull();
cause = Blocked_NoMSHRs;
}
}
if (unblock) {
cache->clearBlocked(cause);
}
}
void
MissQueue::handleResponse(PacketPtr &pkt, Tick time)
{
MSHR* mshr = (MSHR*)pkt->senderState;
if (((MSHR*)(pkt->senderState))->originalCmd == MemCmd::HardPFReq) {
DPRINTF(HWPrefetch, "%s:Handling the response to a HW_PF\n",
cache->name());
}
#ifndef NDEBUG
int num_targets = mshr->getNumTargets();
#endif
bool unblock = false;
bool unblock_target = false;
BlockedCause cause = NUM_BLOCKED_CAUSES;
if (pkt->isCacheFill() && !pkt->isNoAllocate()) {
mshr_miss_latency[mshr->originalCmd.toInt()][0/*pkt->req->getThreadNum()*/] +=
curTick - pkt->time;
// targets were handled in the cache tags
if (mshr == noTargetMSHR) {
// we always clear at least one target
unblock_target = true;
cause = Blocked_NoTargets;
noTargetMSHR = NULL;
}
if (mshr->hasTargets()) {
// Didn't satisfy all the targets, need to resend
MemCmd cmd = mshr->getTarget()->cmd;
mshr->pkt->setDest(Packet::Broadcast);
mshr->pkt->result = Packet::Unknown;
mshr->pkt->req = mshr->getTarget()->req;
mq.markPending(mshr, cmd);
mshr->order = order++;
cache->setMasterRequest(Request_MSHR, time);
}
else {
unblock = mq.isFull();
mq.deallocate(mshr);
if (unblock) {
unblock = !mq.isFull();
cause = Blocked_NoMSHRs;
}
}
} else {
if (pkt->req->isUncacheable()) {
mshr_uncacheable_lat[pkt->cmd.toInt()][0/*pkt->req->getThreadNum()*/] +=
curTick - pkt->time;
}
if (mshr->hasTargets() && pkt->req->isUncacheable()) {
// Should only have 1 target if we had any
assert(num_targets == 1);
PacketPtr target = mshr->getTarget();
mshr->popTarget();
if (pkt->isRead()) {
memcpy(target->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(),
target->getSize());
}
cache->respond(target, time);
assert(!mshr->hasTargets());
}
else if (mshr->hasTargets()) {
//Must be a no_allocate with possibly more than one target
assert(mshr->pkt->isNoAllocate());
while (mshr->hasTargets()) {
PacketPtr target = mshr->getTarget();
mshr->popTarget();
if (pkt->isRead()) {
memcpy(target->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(),
target->getSize());
}
cache->respond(target, time);
}
}
if (pkt->isWrite()) {
// If the write buffer is full, we might unblock now
unblock = wb.isFull();
wb.deallocate(mshr);
if (unblock) {
// Did we really unblock?
unblock = !wb.isFull();
cause = Blocked_NoWBBuffers;
}
} else {
unblock = mq.isFull();
mq.deallocate(mshr);
if (unblock) {
unblock = !mq.isFull();
cause = Blocked_NoMSHRs;
}
}
}
if (unblock || unblock_target) {
cache->clearBlocked(cause);
}
}
void
MissQueue::squash(int threadNum)
{
bool unblock = false;
BlockedCause cause = NUM_BLOCKED_CAUSES;
if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
noTargetMSHR = NULL;
unblock = true;
cause = Blocked_NoTargets;
}
if (mq.isFull()) {
unblock = true;
cause = Blocked_NoMSHRs;
}
mq.squash(threadNum);
if (!mq.havePending()) {
cache->clearMasterRequest(Request_MSHR);
}
if (unblock && !mq.isFull()) {
cache->clearBlocked(cause);
}
}
MSHR*
MissQueue::findMSHR(Addr addr)
{
return mq.findMatch(addr);
}
bool
MissQueue::findWrites(Addr addr, vector<MSHR*> &writes)
{
return wb.findMatches(addr,writes);
}
void
MissQueue::doWriteback(Addr addr,
int size, uint8_t *data, bool compressed)
{
// Generate request
Request * req = new Request(addr, size, 0);
PacketPtr pkt = new Packet(req, MemCmd::Writeback, -1);
pkt->allocate();
if (data) {
memcpy(pkt->getPtr<uint8_t>(), data, size);
}
if (compressed) {
pkt->flags |= COMPRESSED;
}
/// All writebacks charged to the same thread. @todo Figure this out.
writebacks[0/*pkt->req->getThreadNum()*/]++;
allocateWrite(pkt, 0, curTick);
}
void
MissQueue::doWriteback(PacketPtr &pkt)
{
writebacks[0/*pkt->req->getThreadNum()*/]++;
allocateWrite(pkt, 0, curTick);
}
MSHR*
MissQueue::allocateTargetList(Addr addr)
{
MSHR* mshr = mq.allocateTargetList(addr, blkSize);
mshr->pkt->flags |= CACHE_LINE_FILL;
if (mq.isFull()) {
cache->setBlocked(Blocked_NoMSHRs);
}
return mshr;
}
bool
MissQueue::havePending()
{
return mq.havePending() || wb.havePending() || prefetcher->havePending();
}
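In plain terms, getPacket() above applies three rules: prefer demand misses; drain the write buffer when it is full and idle, or when no miss is ready; and never send a writeback ahead of an older conflicting miss (conversely, a write that conflicts with a pending writeback waits for it, preserving the dirty data). A hedged standalone sketch of the ordering check, with toy types invented for illustration:

#include <stdint.h>

// Toy version of the order check in MissQueue::getPacket(): an older
// overlapping miss (smaller order number) is serviced before the
// write buffer head, so misses stay in order until the conflict clears.
struct ToyReq { uint64_t order; };

inline const ToyReq *
pickNext(const ToyReq *wbHead, const ToyReq *conflictingMiss)
{
    if (conflictingMiss && conflictingMiss->order < wbHead->order)
        return conflictingMiss;
    return wbHead;
}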


@@ -1,327 +0,0 @@
/*
* Copyright (c) 2003-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
*/
/**
* @file
* Miss and writeback queue declarations.
*/
#ifndef __MISS_QUEUE_HH__
#define __MISS_QUEUE_HH__
#include <vector>
#include "mem/cache/miss/miss_buffer.hh"
#include "mem/cache/miss/mshr.hh"
#include "mem/cache/miss/mshr_queue.hh"
#include "base/statistics.hh"
/**
* Manages cache misses and writebacks. Contains MSHRs to store miss data
* and the writebuffer for writes/writebacks.
* @todo need to handle data on writes better (encapsulate).
* @todo need to make replacements/writebacks happen in Cache::access
*/
class MissQueue : public MissBuffer
{
protected:
/** The MSHRs. */
MSHRQueue mq;
/** Write Buffer. */
MSHRQueue wb;
// PARAMETERS
/** The number of MSHRs in the miss queue. */
const int numMSHR;
/** The number of targets for each MSHR. */
const int numTarget;
/** The number of write buffers. */
const int writeBuffers;
/** Increasing order number assigned to each incoming request. */
uint64_t order;
bool prefetchMiss;
// Statistics
/**
* @addtogroup CacheStatistics
* @{
*/
/** Number of misses that hit in the MSHRs per command and thread. */
Stats::Vector<> mshr_hits[MemCmd::NUM_MEM_CMDS];
/** Demand misses that hit in the MSHRs. */
Stats::Formula demandMshrHits;
/** Total number of misses that hit in the MSHRs. */
Stats::Formula overallMshrHits;
/** Number of misses that miss in the MSHRs, per command and thread. */
Stats::Vector<> mshr_misses[MemCmd::NUM_MEM_CMDS];
/** Demand misses that miss in the MSHRs. */
Stats::Formula demandMshrMisses;
/** Total number of misses that miss in the MSHRs. */
Stats::Formula overallMshrMisses;
/** Number of uncacheable accesses handled by the MSHRs, per command and thread. */
Stats::Vector<> mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
/** Total number of uncacheable MSHR accesses. */
Stats::Formula overallMshrUncacheable;
/** Total cycle latency of each MSHR miss, per command and thread. */
Stats::Vector<> mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
/** Total cycle latency of demand MSHR misses. */
Stats::Formula demandMshrMissLatency;
/** Total cycle latency of overall MSHR misses. */
Stats::Formula overallMshrMissLatency;
/** Total cycle latency of uncacheable MSHR accesses, per command and thread. */
Stats::Vector<> mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
/** Total cycle latency of overall uncacheable MSHR accesses. */
Stats::Formula overallMshrUncacheableLatency;
/** The total number of MSHR accesses per command and thread. */
Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
/** The total number of demand MSHR accesses. */
Stats::Formula demandMshrAccesses;
/** The total number of MSHR accesses. */
Stats::Formula overallMshrAccesses;
/** The miss rate in the MSHRs, per command and thread. */
Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
/** The demand miss rate in the MSHRs. */
Stats::Formula demandMshrMissRate;
/** The overall miss rate in the MSHRs. */
Stats::Formula overallMshrMissRate;
/** The average latency of an MSHR miss, per command and thread. */
Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
/** The average latency of a demand MSHR miss. */
Stats::Formula demandAvgMshrMissLatency;
/** The average overall latency of an MSHR miss. */
Stats::Formula overallAvgMshrMissLatency;
/** The average latency of an uncacheable MSHR access, per command and thread. */
Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
/** The average overall latency of an uncacheable MSHR access. */
Stats::Formula overallAvgMshrUncacheableLatency;
/** The number of times a thread hit its MSHR cap. */
Stats::Vector<> mshr_cap_events;
/** The number of times software prefetches caused the MSHR to block. */
Stats::Vector<> soft_prefetch_mshr_full;
Stats::Scalar<> mshr_no_allocate_misses;
/**
* @}
*/
private:
/** Pointer to the MSHR that has no targets. */
MSHR* noTargetMSHR;
/**
* Allocate a new MSHR to handle the provided miss.
* @param pkt The miss to buffer.
* @param size The number of bytes to fetch.
* @param time The time the miss occurs.
* @return A pointer to the new MSHR.
*/
MSHR* allocateMiss(PacketPtr &pkt, int size, Tick time);
/**
* Allocate a new WriteBuffer to handle the provided write.
* @param pkt The write to handle.
* @param size The number of bytes to write.
* @param time The time the write occurs.
* @return A pointer to the new write buffer.
*/
MSHR* allocateWrite(PacketPtr &pkt, int size, Tick time);
public:
/**
* Simple Constructor. Initializes all needed internal storage and sets
* parameters.
* @param numMSHRs The number of outstanding misses to handle.
 * @param numTargets The number of outstanding targets for each miss.
* @param write_buffers The number of outstanding writes to handle.
* @param write_allocate If true, treat write misses the same as reads.
*/
MissQueue(int numMSHRs, int numTargets, int write_buffers,
bool write_allocate, bool prefetch_miss);
/**
* Deletes all allocated internal storage.
*/
~MissQueue();
/**
* Register statistics for this object.
* @param name The name of the parent cache.
*/
void regStats(const std::string &name);
/**
* Handle a cache miss properly. Either allocate an MSHR for the request,
* or forward it through the write buffer.
* @param pkt The request that missed in the cache.
* @param blk_size The block size of the cache.
* @param time The time the miss is detected.
*/
void handleMiss(PacketPtr &pkt, int blk_size, Tick time);
/**
* Fetch the block for the given address and buffer the given target.
* @param addr The address to fetch.
* @param asid The address space of the address.
* @param blk_size The block size of the cache.
* @param time The time the miss is detected.
* @param target The target for the fetch.
*/
MSHR* fetchBlock(Addr addr, int blk_size, Tick time,
PacketPtr &target);
/**
 * Selects an outstanding request to service.
* @return The request to service, NULL if none found.
*/
PacketPtr getPacket();
/**
* Set the command to the given bus command.
* @param pkt The request to update.
* @param cmd The bus command to use.
*/
void setBusCmd(PacketPtr &pkt, MemCmd cmd);
/**
* Restore the original command in case of a bus transmission error.
* @param pkt The request to reset.
*/
void restoreOrigCmd(PacketPtr &pkt);
/**
* Marks a request as in service (sent on the bus). This can have side
 * effects since storage for no-response commands is deallocated once they
* are successfully sent.
* @param pkt The request that was sent on the bus.
*/
void markInService(PacketPtr &pkt, MSHR* mshr);
/**
* Collect statistics and free resources of a satisfied request.
* @param pkt The request that has been satisfied.
* @param time The time when the request is satisfied.
*/
void handleResponse(PacketPtr &pkt, Tick time);
/**
* Removes all outstanding requests for a given thread number. If a request
* has been sent to the bus, this function removes all of its targets.
* @param threadNum The thread number of the requests to squash.
*/
void squash(int threadNum);
/**
* Return the current number of outstanding misses.
* @return the number of outstanding misses.
*/
int getMisses()
{
return mq.getAllocatedTargets();
}
/**
* Searches for the supplied address in the miss queue.
* @param addr The address to look for.
* @param asid The address space id.
* @return The MSHR that contains the address, NULL if not found.
* @warning Currently only searches the miss queue. If non write allocate
* might need to search the write buffer for coherence.
*/
MSHR* findMSHR(Addr addr);
/**
* Searches for the supplied address in the write buffer.
* @param addr The address to look for.
* @param asid The address space id.
* @param writes The list of writes that match the address.
* @return True if any writes are found
*/
bool findWrites(Addr addr, std::vector<MSHR*>& writes);
/**
* Perform a writeback of dirty data to the given address.
* @param addr The address to write to.
* @param asid The address space id.
* @param xc The execution context of the address space.
* @param size The number of bytes to write.
* @param data The data to write, can be NULL.
* @param compressed True if the data is compressed.
*/
void doWriteback(Addr addr,
int size, uint8_t *data, bool compressed);
/**
* Perform the given writeback request.
* @param pkt The writeback request.
*/
void doWriteback(PacketPtr &pkt);
/**
* Returns true if there are outstanding requests.
* @return True if there are outstanding requests.
*/
bool havePending();
/**
* Add a target to the given MSHR. This assumes it is in the miss queue.
* @param mshr The mshr to add a target to.
* @param pkt The target to add.
*/
void addTarget(MSHR *mshr, PacketPtr &pkt)
{
mq.allocateTarget(mshr, pkt);
}
/**
* Allocate a MSHR to hold a list of targets to a block involved in a copy.
* If the block is marked done then the MSHR already holds the data to
* fill the block. Otherwise the block needs to be fetched.
* @param addr The address to buffer.
* @param asid The address space ID.
* @return A pointer to the allocated MSHR.
*/
MSHR* allocateTargetList(Addr addr);
};
#endif //__MISS_QUEUE_HH__
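As a usage note, construction follows the five-argument constructor declared above. A hedged sketch (the argument values are illustrative only, and cache is assumed to exist already; the rest of the wiring is as sketched after the MissBuffer header above):

MissQueue *mb = new MissQueue(10,     // numMSHRs
                              5,      // numTargets per MSHR
                              8,      // write_buffers
                              true,   // write_allocate
                              false); // prefetch_miss
mb->setCache(cache); // then setPrefetcher() and regStats() as usual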


@@ -37,6 +37,7 @@
#include <assert.h>
#include <string>
#include <vector>
#include <algorithm>
#include "mem/cache/miss/mshr.hh"
#include "sim/core.hh" // for curTick
@@ -54,54 +55,36 @@ MSHR::MSHR()
}
void
MSHR::allocate(MemCmd cmd, Addr _addr, int size,
PacketPtr &target)
MSHR::allocate(Addr _addr, int _size, PacketPtr target,
Tick whenReady, Counter _order)
{
addr = _addr;
if (target)
{
//Have a request, just use it
pkt = new Packet(target->req, cmd, Packet::Broadcast, size);
pkt->time = curTick;
pkt->allocate();
pkt->senderState = (Packet::SenderState *)this;
allocateTarget(target);
}
else
{
//need a request first
Request * req = new Request();
req->setPhys(addr, size, 0);
//Thread context??
pkt = new Packet(req, cmd, Packet::Broadcast, size);
pkt->time = curTick;
pkt->allocate();
pkt->senderState = (Packet::SenderState *)this;
}
}
// Since we aren't sure if data is being used, don't copy here.
/**
* @todo When we have a "global" data flag, might want to copy data here.
*/
void
MSHR::allocateAsBuffer(PacketPtr &target)
{
addr = target->getAddr();
threadNum = 0/*target->req->getThreadNum()*/;
pkt = new Packet(target->req, target->cmd, -1);
pkt->allocate();
pkt->senderState = (Packet::SenderState*)this;
pkt->time = curTick;
size = _size;
readyTime = whenReady;
order = _order;
assert(target);
isCacheFill = false;
needsExclusive = target->needsExclusive();
_isUncacheable = target->req->isUncacheable();
inService = false;
threadNum = 0;
ntargets = 1;
// Don't know of a case where we would allocate a new MSHR for a
// snoop (mem-side request), so set cpuSide to true here.
targets.push_back(Target(target, whenReady, _order, true));
assert(deferredTargets.empty());
deferredNeedsExclusive = false;
pendingInvalidate = false;
pendingShared = false;
data = NULL;
}
void
MSHR::deallocate()
{
assert(targets.empty());
assert(deferredTargets.empty());
assert(ntargets == 0);
delete pkt;
pkt = NULL;
inService = false;
//allocIter = NULL;
//readyIter = NULL;
@@ -111,48 +94,105 @@ MSHR::deallocate()
* Adds a target to an MSHR
*/
void
MSHR::allocateTarget(PacketPtr &target)
MSHR::allocateTarget(PacketPtr target, Tick whenReady, Counter _order)
{
//If we append an invalidate and we issued a read to the bus,
//but now have some pending writes, we need to move
//the invalidate to before the first non-read
if (inService && pkt->isRead() && target->isInvalidate()) {
std::list<PacketPtr> temp;
while (!targets.empty()) {
if (!targets.front()->isRead()) break;
//Place on top of temp stack
temp.push_front(targets.front());
//Remove from targets
targets.pop_front();
if (inService) {
if (!deferredTargets.empty() || pendingInvalidate ||
(!needsExclusive && target->needsExclusive())) {
// need to put on deferred list
deferredTargets.push_back(Target(target, whenReady, _order, true));
if (target->needsExclusive()) {
deferredNeedsExclusive = true;
}
} else {
// still OK to append to outstanding request
targets.push_back(Target(target, whenReady, _order, true));
}
} else {
if (target->needsExclusive()) {
needsExclusive = true;
}
//Now that we have all the reads off until first non-read, we can
//place the invalidate on
targets.push_front(target);
//Now we pop off the temp_stack and put them back
while (!temp.empty()) {
targets.push_front(temp.front());
temp.pop_front();
}
}
else {
targets.push_back(target);
targets.push_back(Target(target, whenReady, _order, true));
}
++ntargets;
assert(targets.size() == ntargets);
/**
* @todo really prioritize the target commands.
*/
}
if (!inService && target->isWrite()) {
pkt->cmd = MemCmd::WriteReq;
void
MSHR::allocateSnoopTarget(PacketPtr pkt, Tick whenReady, Counter _order)
{
assert(inService); // don't bother to call otherwise
if (pendingInvalidate) {
// a prior snoop has already appended an invalidation, so
// logically we don't have the block anymore...
return;
}
DPRINTF(Cache, "deferred snoop on %x: %s %s\n", addr,
needsExclusive ? "needsExclusive" : "",
pkt->needsExclusive() ? "pkt->needsExclusive()" : "");
if (needsExclusive || pkt->needsExclusive()) {
// actual target device (typ. PhysicalMemory) will delete the
// packet on reception, so we need to save a copy here
targets.push_back(Target(new Packet(pkt), whenReady, _order, false));
++ntargets;
if (needsExclusive) {
// We're awaiting an exclusive copy, so ownership is pending.
// It's up to us to respond once the data arrives.
pkt->assertMemInhibit();
}
if (pkt->needsExclusive()) {
// This transaction will take away our pending copy
pendingInvalidate = true;
}
} else {
// Read to a read: no conflict, so no need to record as
// target, but make sure neither reader thinks he's getting an
// exclusive copy
pendingShared = true;
pkt->assertShared();
}
}
bool
MSHR::promoteDeferredTargets()
{
if (deferredTargets.empty()) {
return false;
}
assert(targets.empty());
targets = deferredTargets;
deferredTargets.clear();
assert(targets.size() == ntargets);
needsExclusive = deferredNeedsExclusive;
pendingInvalidate = false;
pendingShared = false;
deferredNeedsExclusive = false;
order = targets.front().order;
readyTime = std::max(curTick, targets.front().readyTime);
return true;
}
void
MSHR::handleFill(Packet *pkt, CacheBlk *blk)
{
if (pendingShared) {
// we snooped another read while this read was in
// service... assert shared line on its behalf
pkt->assertShared();
}
}
void
MSHR::dump()
@@ -167,8 +207,8 @@ MSHR::dump()
for (int i = 0; i < ntargets; i++) {
assert(tar_it != targets.end());
ccprintf(cerr, "\t%d: Addr: %x cmd: %d\n",
i, (*tar_it)->getAddr(), (*tar_it)->cmdToIndex());
ccprintf(cerr, "\t%d: Addr: %x cmd: %s\n",
i, tar_it->pkt->getAddr(), tar_it->pkt->cmdString());
tar_it++;
}
@@ -177,6 +217,4 @@ MSHR::dump()
MSHR::~MSHR()
{
if (pkt)
pkt = NULL;
}
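The heart of the new allocateTarget() above is its deferral test. Restated as a hedged standalone predicate (the name mustDefer is invented for illustration, not part of the source):

// A new cpu-side target is deferred if (a) earlier targets were
// already deferred, (b) a snoop has logically invalidated the pending
// copy, or (c) the in-flight request fetched a shared copy but this
// target needs an exclusive one.
inline bool
mustDefer(bool haveDeferred, bool pendingInvalidate,
          bool haveExclusive, bool targetNeedsExclusive)
{
    return haveDeferred || pendingInvalidate ||
           (!haveExclusive && targetNeedsExclusive);
}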


@@ -36,22 +36,42 @@
#ifndef __MSHR_HH__
#define __MSHR_HH__
#include "mem/packet.hh"
#include <list>
#include <deque>
class MSHR;
#include "mem/packet.hh"
class CacheBlk;
class MSHRQueue;
/**
* Miss Status and handling Register. This class keeps all the information
* needed to handle a cache miss including a list of target requests.
*/
class MSHR {
class MSHR : public Packet::SenderState
{
public:
class Target {
public:
Tick recvTime; //!< Time when request was received (for stats)
Tick readyTime; //!< Time when request is ready to be serviced
Counter order; //!< Global order (for memory consistency mgmt)
PacketPtr pkt; //!< Pending request packet.
bool cpuSide; //!< Did request come from cpu side or mem side?
bool isCpuSide() { return cpuSide; }
Target(PacketPtr _pkt, Tick _readyTime, Counter _order, bool _cpuSide)
: recvTime(curTick), readyTime(_readyTime), order(_order),
pkt(_pkt), cpuSide(_cpuSide)
{}
};
/** Defines the data structure of the MSHR target list. */
typedef std::list<PacketPtr> TargetList;
typedef std::list<Target> TargetList;
/** Target list iterator. */
typedef std::list<PacketPtr>::iterator TargetListIterator;
typedef std::list<Target>::iterator TargetListIterator;
/** A list of MSHRs. */
typedef std::list<MSHR *> List;
/** MSHR list iterator. */
@@ -59,28 +79,52 @@ class MSHR {
/** MSHR list const_iterator. */
typedef List::const_iterator ConstIterator;
/** Address of the miss. */
/** Pointer to queue containing this MSHR. */
MSHRQueue *queue;
/** Cycle when ready to issue */
Tick readyTime;
/** Order number assigned by the miss queue. */
Counter order;
/** Address of the request. */
Addr addr;
/** Address space id of the miss. */
short asid;
/** Size of the request. */
int size;
/** True if the request has been sent to the bus. */
bool inService;
/** True if we will be putting the returned block in the cache */
bool isCacheFill;
/** True if we need to get an exclusive copy of the block. */
bool needsExclusive;
/** True if the request is uncacheable */
bool _isUncacheable;
bool deferredNeedsExclusive;
bool pendingInvalidate;
bool pendingShared;
/** Thread number of the miss. */
int threadNum;
/** The request that is forwarded to the next level of the hierarchy. */
PacketPtr pkt;
short threadNum;
/** The number of currently allocated targets. */
short ntargets;
/** The original requesting command. */
MemCmd originalCmd;
/** Order number of assigned by the miss queue. */
uint64_t order;
/** Data buffer (if needed). Currently used only for pending
* upgrade handling. */
uint8_t *data;
/**
* Pointer to this MSHR on the ready list.
* @sa MissQueue, MSHRQueue::readyList
*/
Iterator readyIter;
/**
* Pointer to this MSHR on the allocated list.
* @sa MissQueue, MSHRQueue::allocatedList
@@ -91,7 +135,12 @@ private:
/** List of all requests that match the address */
TargetList targets;
TargetList deferredTargets;
public:
bool isUncacheable() { return _isUncacheable; }
/**
* Allocate a miss to this MSHR.
* @param cmd The requesting command.
@@ -100,14 +149,8 @@ public:
* @param size The number of bytes to request.
* @param pkt The original miss.
*/
void allocate(MemCmd cmd, Addr addr, int size,
PacketPtr &pkt);
/**
* Allocate this MSHR as a buffer for the given request.
* @param target The memory request to buffer.
*/
void allocateAsBuffer(PacketPtr &target);
void allocate(Addr addr, int size, PacketPtr pkt,
Tick when, Counter _order);
/**
* Mark this MSHR as free.
@@ -118,7 +161,8 @@ public:
* Add a request to the list of targets.
* @param target The target.
*/
void allocateTarget(PacketPtr &target);
void allocateTarget(PacketPtr target, Tick when, Counter order);
void allocateSnoopTarget(PacketPtr target, Tick when, Counter order);
/** A simple constructor. */
MSHR();
@@ -129,28 +173,19 @@ public:
* Returns the current number of allocated targets.
* @return The current number of allocated targets.
*/
int getNumTargets()
{
return(ntargets);
}
int getNumTargets() { return ntargets; }
/**
* Returns a pointer to the target list.
* @return a pointer to the target list.
*/
TargetList* getTargetList()
{
return &targets;
}
TargetList* getTargetList() { return &targets; }
/**
* Returns a reference to the first target.
* @return A pointer to the first target.
*/
PacketPtr getTarget()
{
return targets.front();
}
Target *getTarget() { return &targets.front(); }
/**
* Pop first target.
@@ -165,11 +200,20 @@ public:
* Returns true if there are targets left.
* @return true if there are targets
*/
bool hasTargets()
bool hasTargets() { return !targets.empty(); }
bool isSimpleForward()
{
return !targets.empty();
if (getNumTargets() != 1)
return false;
Target *tgt = getTarget();
return tgt->isCpuSide() && !tgt->pkt->needsResponse();
}
bool promoteDeferredTargets();
void handleFill(Packet *pkt, CacheBlk *blk);
/**
* Prints the contents of this MSHR to stderr.
*/


@@ -29,22 +29,22 @@
*/
/** @file
* Definition of the MSHRQueue.
* Definition of MSHRQueue class functions.
*/
#include "mem/cache/miss/mshr_queue.hh"
#include "sim/eventq.hh"
using namespace std;
MSHRQueue::MSHRQueue(int num_mshrs, int reserve)
: numMSHRs(num_mshrs + reserve - 1), numReserve(reserve)
MSHRQueue::MSHRQueue(int num_entries, int reserve, int _index)
: numEntries(num_entries + reserve - 1), numReserve(reserve),
index(_index)
{
allocated = 0;
inServiceMSHRs = 0;
allocatedTargets = 0;
registers = new MSHR[numMSHRs];
for (int i = 0; i < numMSHRs; ++i) {
inServiceEntries = 0;
registers = new MSHR[numEntries];
for (int i = 0; i < numEntries; ++i) {
registers[i].queue = this;
freeList.push_back(&registers[i]);
}
}
@@ -54,7 +54,7 @@ MSHRQueue::~MSHRQueue()
delete [] registers;
}
MSHR*
MSHR *
MSHRQueue::findMatch(Addr addr) const
{
MSHR::ConstIterator i = allocatedList.begin();
@@ -87,19 +87,19 @@ MSHRQueue::findMatches(Addr addr, vector<MSHR*>& matches) const
}
MSHR*
MSHRQueue::findPending(PacketPtr &pkt) const
MSHR *
MSHRQueue::findPending(Addr addr, int size) const
{
MSHR::ConstIterator i = pendingList.begin();
MSHR::ConstIterator end = pendingList.end();
MSHR::ConstIterator i = readyList.begin();
MSHR::ConstIterator end = readyList.end();
for (; i != end; ++i) {
MSHR *mshr = *i;
if (mshr->addr < pkt->getAddr()) {
if (mshr->addr + mshr->pkt->getSize() > pkt->getAddr()) {
if (mshr->addr < addr) {
if (mshr->addr + mshr->size > addr) {
return mshr;
}
} else {
if (pkt->getAddr() + pkt->getSize() > mshr->addr) {
if (addr + size > mshr->addr) {
return mshr;
}
}
@@ -107,76 +107,59 @@ MSHRQueue::findPending(PacketPtr &pkt) const
return NULL;
}
MSHR*
MSHRQueue::allocate(PacketPtr &pkt, int size)
MSHR::Iterator
MSHRQueue::addToReadyList(MSHR *mshr)
{
if (readyList.empty() || readyList.back()->readyTime <= mshr->readyTime) {
return readyList.insert(readyList.end(), mshr);
}
MSHR::Iterator i = readyList.begin();
MSHR::Iterator end = readyList.end();
for (; i != end; ++i) {
if ((*i)->readyTime > mshr->readyTime) {
return readyList.insert(i, mshr);
}
}
assert(false);
}
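// Editorial sketch (not part of the original source): addToReadyList()
// above does a stable linear insertion ordered by readyTime. The same
// idea, spelled out on the MSHR::List typedef:
static MSHR::Iterator
insertSortedSketch(MSHR::List &list, MSHR *mshr)
{
    if (list.empty() || list.back()->readyTime <= mshr->readyTime)
        return list.insert(list.end(), mshr);
    for (MSHR::Iterator i = list.begin(); i != list.end(); ++i) {
        if ((*i)->readyTime > mshr->readyTime)
            return list.insert(i, mshr);
    }
    return list.end(); // unreachable: a later readyTime is found above
}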
MSHR *
MSHRQueue::allocate(Addr addr, int size, PacketPtr &pkt,
Tick when, Counter order)
{
Addr aligned_addr = pkt->getAddr() & ~((Addr)size - 1);
assert(!freeList.empty());
MSHR *mshr = freeList.front();
assert(mshr->getNumTargets() == 0);
freeList.pop_front();
if (!pkt->needsResponse()) {
mshr->allocateAsBuffer(pkt);
} else {
assert(size !=0);
mshr->allocate(pkt->cmd, aligned_addr, size, pkt);
allocatedTargets += 1;
}
mshr->allocate(addr, size, pkt, when, order);
mshr->allocIter = allocatedList.insert(allocatedList.end(), mshr);
mshr->readyIter = pendingList.insert(pendingList.end(), mshr);
mshr->readyIter = addToReadyList(mshr);
allocated += 1;
return mshr;
}
MSHR*
MSHRQueue::allocateFetch(Addr addr, int size, PacketPtr &target)
{
MSHR *mshr = freeList.front();
assert(mshr->getNumTargets() == 0);
freeList.pop_front();
mshr->allocate(MemCmd::ReadReq, addr, size, target);
mshr->allocIter = allocatedList.insert(allocatedList.end(), mshr);
mshr->readyIter = pendingList.insert(pendingList.end(), mshr);
allocated += 1;
return mshr;
}
MSHR*
MSHRQueue::allocateTargetList(Addr addr, int size)
{
MSHR *mshr = freeList.front();
assert(mshr->getNumTargets() == 0);
freeList.pop_front();
PacketPtr dummy;
mshr->allocate(MemCmd::ReadReq, addr, size, dummy);
mshr->allocIter = allocatedList.insert(allocatedList.end(), mshr);
mshr->inService = true;
++inServiceMSHRs;
++allocated;
return mshr;
}
void
MSHRQueue::deallocate(MSHR* mshr)
MSHRQueue::deallocate(MSHR *mshr)
{
deallocateOne(mshr);
}
MSHR::Iterator
MSHRQueue::deallocateOne(MSHR* mshr)
MSHRQueue::deallocateOne(MSHR *mshr)
{
MSHR::Iterator retval = allocatedList.erase(mshr->allocIter);
freeList.push_front(mshr);
allocated--;
allocatedTargets -= mshr->getNumTargets();
if (mshr->inService) {
inServiceMSHRs--;
inServiceEntries--;
} else {
pendingList.erase(mshr->readyIter);
readyList.erase(mshr->readyIter);
}
mshr->deallocate();
return retval;
@@ -187,40 +170,41 @@ MSHRQueue::moveToFront(MSHR *mshr)
{
if (!mshr->inService) {
assert(mshr == *(mshr->readyIter));
pendingList.erase(mshr->readyIter);
mshr->readyIter = pendingList.insert(pendingList.begin(), mshr);
readyList.erase(mshr->readyIter);
mshr->readyIter = readyList.insert(readyList.begin(), mshr);
}
}
void
MSHRQueue::markInService(MSHR* mshr)
MSHRQueue::markInService(MSHR *mshr)
{
//assert(mshr == pendingList.front());
if (!mshr->pkt->needsResponse() && !(mshr->pkt->cmd == MemCmd::UpgradeReq)) {
assert(mshr->getNumTargets() == 0);
assert(!mshr->inService);
if (mshr->isSimpleForward()) {
// we just forwarded the request packet & don't expect a
// response, so get rid of it
assert(mshr->getNumTargets() == 1);
mshr->popTarget();
deallocate(mshr);
return;
}
mshr->inService = true;
pendingList.erase(mshr->readyIter);
readyList.erase(mshr->readyIter);
//mshr->readyIter = NULL;
inServiceMSHRs += 1;
//pendingList.pop_front();
inServiceEntries += 1;
//readyList.pop_front();
}
void
MSHRQueue::markPending(MSHR* mshr, MemCmd cmd)
MSHRQueue::markPending(MSHR *mshr)
{
//assert(mshr->readyIter == NULL);
mshr->pkt->cmd = cmd;
mshr->pkt->flags &= ~SATISFIED;
assert(mshr->inService);
mshr->inService = false;
--inServiceMSHRs;
--inServiceEntries;
/**
 * @todo Might want to add re-requests to the front of the pending list for
* performance.
*/
mshr->readyIter = pendingList.insert(pendingList.end(), mshr);
mshr->readyIter = addToReadyList(mshr);
}
void
@@ -232,11 +216,8 @@ MSHRQueue::squash(int threadNum)
MSHR *mshr = *i;
if (mshr->threadNum == threadNum) {
while (mshr->hasTargets()) {
PacketPtr target = mshr->getTarget();
mshr->popTarget();
assert(0/*target->req->getThreadNum()*/ == threadNum);
target = NULL;
}
assert(!mshr->hasTargets());
assert(mshr->ntargets == 0);

@@ -32,71 +32,77 @@
* Declaration of a structure to manage MSHRs.
*/
#ifndef __MSHR_QUEUE_HH__
#define __MSHR_QUEUE_HH__
#ifndef __MEM__CACHE__MISS__MSHR_QUEUE_HH__
#define __MEM__CACHE__MISS__MSHR_QUEUE_HH__
#include <vector>
#include "mem/packet.hh"
#include "mem/cache/miss/mshr.hh"
/**
* A class for maintaining a list of pending and allocated memory requests.
*/
class MSHRQueue {
class MSHRQueue
{
private:
/** MSHR storage. */
MSHR* registers;
/** Holds pointers to all allocated MSHRs. */
MSHR *registers;
/** Holds pointers to all allocated entries. */
MSHR::List allocatedList;
/** Holds pointers to MSHRs that haven't been sent to the bus. */
MSHR::List pendingList;
/** Holds non allocated MSHRs. */
/** Holds pointers to entries that haven't been sent to the bus. */
MSHR::List readyList;
/** Holds non-allocated entries. */
MSHR::List freeList;
// Parameters
/**
* The total number of MSHRs in this queue. This number is set as the
* number of MSHRs requested plus (numReserve - 1). This allows for
* the same number of effective MSHRs while still maintaining the reserve.
* The total number of entries in this queue. This number is set as the
* number of entries requested plus (numReserve - 1). This allows for
* the same number of effective entries while still maintaining the reserve.
*/
const int numMSHRs;
const int numEntries;
/**
* The number of MSHRs to hold in reserve. This is needed because copy
* operations can allocate upto 4 MSHRs at one time.
* The number of entries to hold in reserve. This is needed because copy
* operations can allocate up to 4 entries at one time.
*/
const int numReserve;
MSHR::Iterator addToReadyList(MSHR *mshr);
public:
/** The number of allocated MSHRs. */
/** The number of allocated entries. */
int allocated;
/** The number of MSHRs that have been forwarded to the bus. */
int inServiceMSHRs;
/** The number of targets waiting for response. */
int allocatedTargets;
/** The number of entries that have been forwarded to the bus. */
int inServiceEntries;
/** The index of this queue within the cache (MSHR queue vs. write
* buffer). */
const int index;
/**
* Create a queue with a given number of MSHRs.
* @param num_mshrs The number of MSHRs in this queue.
* @param reserve The minimum number of MSHRs needed to satisfy any access.
* Create a queue with a given number of entries.
* @param num_entries The number of entries in this queue.
* @param reserve The minimum number of entries needed to satisfy
* any access.
*/
MSHRQueue(int num_mshrs, int reserve = 1);
MSHRQueue(int num_entries, int reserve, int index);
/** Destructor */
~MSHRQueue();
/**
* Find the first MSHR that matches the provide address and asid.
* Find the first MSHR that matches the provided address.
* @param addr The address to find.
* @param asid The address space id.
* @return Pointer to the matching MSHR, null if not found.
*/
MSHR* findMatch(Addr addr) const;
MSHR *findMatch(Addr addr) const;
/**
* Find and return all the matching MSHRs in the provided vector.
* Find and return all the matching entries in the provided vector.
* @param addr The address to find.
* @param asid The address space ID.
* @param matches The vector to return pointers to the matching MSHRs.
* @param matches The vector to return pointers to the matching entries.
* @return True if any matches are found, false otherwise.
* @todo Typedef the vector??
*/
@@ -107,7 +113,7 @@ class MSHRQueue {
* @param pkt The request to find.
* @return A pointer to the earliest matching MSHR.
*/
MSHR* findPending(PacketPtr &pkt) const;
MSHR *findPending(Addr addr, int size) const;
/**
* Allocates a new MSHR for the request and size. This places the request
@@ -116,76 +122,45 @@ class MSHRQueue {
* @param size The number in bytes to fetch from memory.
* @return The a pointer to the MSHR allocated.
*
* @pre There are free MSHRs.
* @pre There are free entries.
*/
MSHR* allocate(PacketPtr &pkt, int size = 0);
/**
* Allocate a read request for the given address, and places the given
* target on the target list.
* @param addr The address to fetch.
* @param asid The address space for the fetch.
* @param size The number of bytes to request.
* @param target The first target for the request.
* @return Pointer to the new MSHR.
*/
MSHR* allocateFetch(Addr addr, int size, PacketPtr &target);
/**
* Allocate a target list for the given address.
* @param addr The address to fetch.
* @param asid The address space for the fetch.
* @param size The number of bytes to request.
* @return Pointer to the new MSHR.
*/
MSHR* allocateTargetList(Addr addr, int size);
MSHR *allocate(Addr addr, int size, PacketPtr &pkt,
Tick when, Counter order);
/**
* Removes the given MSHR from the queue. This places the MSHR on the
* free list.
* @param mshr
*/
void deallocate(MSHR* mshr);
void deallocate(MSHR *mshr);
/**
* Allocates a target to the given MSHR. Used to keep track of the number
* of outstanding targets.
* @param mshr The MSHR to allocate the target to.
* @param pkt The target request.
*/
void allocateTarget(MSHR* mshr, PacketPtr &pkt)
{
mshr->allocateTarget(pkt);
allocatedTargets += 1;
}
/**
* Remove a MSHR from the queue. Returns an iterator into the allocatedList
* for faster squash implementation.
* Remove a MSHR from the queue. Returns an iterator into the
* allocatedList for faster squash implementation.
* @param mshr The MSHR to remove.
* @return An iterator to the next entry in the allocatedList.
*/
MSHR::Iterator deallocateOne(MSHR* mshr);
MSHR::Iterator deallocateOne(MSHR *mshr);
/**
* Moves the MSHR to the front of the pending list if it is not in service.
* @param mshr The mshr to move.
* Moves the MSHR to the front of the pending list if it is not
* in service.
* @param mshr The entry to move.
*/
void moveToFront(MSHR *mshr);
/**
* Mark the given MSHR as in service. This removes the MSHR from the
* pendingList. Deallocates the MSHR if it does not expect a response.
* readyList. Deallocates the MSHR if it does not expect a response.
* @param mshr The MSHR to mark in service.
*/
void markInService(MSHR* mshr);
void markInService(MSHR *mshr);
/**
* Mark an in service mshr as pending, used to resend a request.
* Mark an in service entry as pending, used to resend a request.
* @param mshr The MSHR to resend.
* @param cmd The command to resend.
*/
void markPending(MSHR* mshr, MemCmd cmd);
void markPending(MSHR *mshr);
/**
* Squash outstanding requests with the given thread number. If a request
@@ -200,40 +175,34 @@ class MSHRQueue {
*/
bool havePending() const
{
return !pendingList.empty();
return !readyList.empty();
}
/**
* Returns true if there are no free MSHRs.
* Returns true if there are no free entries.
* @return True if this queue is full.
*/
bool isFull() const
{
return (allocated > numMSHRs - numReserve);
return (allocated > numEntries - numReserve);
}
/**
* Returns the request at the head of the pendingList.
* Returns the MSHR at the head of the readyList.
* @return The next request to service.
*/
PacketPtr getReq() const
MSHR *getNextMSHR() const
{
if (pendingList.empty()) {
if (readyList.empty() || readyList.front()->readyTime > curTick) {
return NULL;
}
MSHR* mshr = pendingList.front();
return mshr->pkt;
return readyList.front();
}
/**
* Returns the number of outstanding targets.
* @return the number of allocated targets.
*/
int getAllocatedTargets() const
Tick nextMSHRReadyTime() const
{
return allocatedTargets;
return readyList.empty() ? MaxTick : readyList.front()->readyTime;
}
};
#endif //__MSHR_QUEUE_HH__
#endif //__MEM__CACHE__MISS__MSHR_QUEUE_HH__
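
For reference, a small self-contained sketch of how a consumer could drive the getNextMSHR()/nextMSHRReadyTime() pair declared above: service whatever is ready this tick, then use the ready time of the head entry to schedule the next wakeup. All names here (MiniMSHR, MiniQueue, the curTick global) are illustrative stand-ins, not the real cache code:

    #include <climits>
    #include <cstdio>
    #include <list>

    typedef unsigned long Tick;
    const Tick MaxTick = ULONG_MAX;
    Tick curTick = 100;

    struct MiniMSHR { Tick readyTime; };

    struct MiniQueue {
        std::list<MiniMSHR *> readyList; // kept sorted by readyTime
        MiniMSHR *getNextMSHR() const {
            if (readyList.empty() || readyList.front()->readyTime > curTick)
                return 0;
            return readyList.front();
        }
        Tick nextMSHRReadyTime() const {
            return readyList.empty() ? MaxTick : readyList.front()->readyTime;
        }
    };

    int main()
    {
        MiniQueue q;
        MiniMSHR m1 = { 90 }, m2 = { 150 };
        q.readyList.push_back(&m1);
        q.readyList.push_back(&m2);
        // Service everything that is ready now...
        while (MiniMSHR *m = q.getNextMSHR()) {
            std::printf("servicing entry ready at tick %lu\n", m->readyTime);
            q.readyList.pop_front();
        }
        // ...then ask when to wake up for the rest.
        std::printf("next wakeup at tick %lu\n", q.nextMSHRReadyTime());
        return 0;
    }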

@@ -141,7 +141,7 @@ BasePrefetcher::getPacket()
keepTrying = cache->inCache(pkt->getAddr());
}
if (pf.empty()) {
cache->clearMasterRequest(Request_PF);
cache->deassertMemSideBusRequest(BaseCache::Request_PF);
if (keepTrying) return NULL; //None left, all were in cache
}
} while (keepTrying);
@@ -165,7 +165,7 @@ BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time)
pfRemovedMSHR++;
pf.erase(iter);
if (pf.empty())
cache->clearMasterRequest(Request_PF);
cache->deassertMemSideBusRequest(BaseCache::Request_PF);
}
//Remove anything in queue with delay older than time
@@ -182,7 +182,7 @@ BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time)
iter--;
}
if (pf.empty())
cache->clearMasterRequest(Request_PF);
cache->deassertMemSideBusRequest(BaseCache::Request_PF);
}
@@ -241,10 +241,9 @@ BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time)
}
pf.push_back(prefetch);
prefetch->flags |= CACHE_LINE_FILL;
//Make sure to request the bus, with proper delay
cache->setMasterRequest(Request_PF, prefetch->time);
cache->requestMemSideBus(BaseCache::Request_PF, prefetch->time);
//Increment through the list
addr++;

@@ -215,14 +215,13 @@ FALRU::findBlock(Addr addr) const
}
FALRUBlk*
FALRU::findReplacement(PacketPtr &pkt, PacketList &writebacks,
BlkList &compress_blocks)
FALRU::findReplacement(Addr addr, PacketList &writebacks)
{
FALRUBlk * blk = tail;
assert(blk->inCache == 0);
moveToHead(blk);
tagHash.erase(blk->tag);
tagHash[blkAlign(pkt->getAddr())] = blk;
tagHash[blkAlign(addr)] = blk;
if (blk->isValid()) {
replacements[0]++;
} else {

@@ -201,11 +201,9 @@ public:
* Find a replacement block for the address provided.
* @param pkt The request to a find a replacement candidate for.
* @param writebacks List for any writebacks to be performed.
* @param compress_blocks List of blocks to compress, for adaptive comp.
* @return The block to place the replacement in.
*/
FALRUBlk* findReplacement(PacketPtr &pkt, PacketList & writebacks,
BlkList &compress_blocks);
FALRUBlk* findReplacement(Addr addr, PacketList & writebacks);
/**
* Return the hit latency of this cache.
@@ -248,10 +246,9 @@ public:
* Generate the tag from the address. For fully associative this is just the
* block address.
* @param addr The address to get the tag from.
* @param blk ignored here
* @return The tag.
*/
Addr extractTag(Addr addr, FALRUBlk *blk) const
Addr extractTag(Addr addr) const
{
return blkAlign(addr);
}

@@ -303,11 +303,10 @@ IIC::findBlock(Addr addr) const
IICTag*
IIC::findReplacement(PacketPtr &pkt, PacketList &writebacks,
BlkList &compress_blocks)
IIC::findReplacement(Addr addr, PacketList &writebacks)
{
DPRINTF(IIC, "Finding Replacement for %x\n", pkt->getAddr());
unsigned set = hash(pkt->getAddr());
DPRINTF(IIC, "Finding Replacement for %x\n", addr);
unsigned set = hash(addr);
IICTag *tag_ptr;
unsigned long *tmp_data = new unsigned long[numSub];
@@ -332,12 +331,14 @@ IIC::findReplacement(PacketPtr &pkt, PacketList &writebacks,
list<unsigned long> tag_indexes;
repl->doAdvance(tag_indexes);
/*
while (!tag_indexes.empty()) {
if (!tagStore[tag_indexes.front()].isCompressed()) {
compress_blocks.push_back(&tagStore[tag_indexes.front()]);
}
tag_indexes.pop_front();
}
*/
tag_ptr->re = (void*)repl->add(tag_ptr-tagStore);
@@ -355,7 +356,7 @@ IIC::freeReplacementBlock(PacketList & writebacks)
DPRINTF(Cache, "Replacing %x in IIC: %s\n",
regenerateBlkAddr(tag_ptr->tag,0),
tag_ptr->isModified() ? "writeback" : "clean");
tag_ptr->isDirty() ? "writeback" : "clean");
/* write back replaced block data */
if (tag_ptr && (tag_ptr->isValid())) {
replacements[0]++;
@@ -363,7 +364,7 @@ IIC::freeReplacementBlock(PacketList & writebacks)
++sampledRefs;
tag_ptr->refCount = 0;
if (tag_ptr->isModified()) {
if (tag_ptr->isDirty()) {
/* PacketPtr writeback =
buildWritebackReq(regenerateBlkAddr(tag_ptr->tag, 0),
tag_ptr->req->asid, tag_ptr->xc, blkSize,
@@ -618,24 +619,6 @@ IIC::secondaryChain(Addr tag, unsigned long chain_ptr,
return NULL;
}
void
IIC::decompressBlock(unsigned long index)
{
IICTag *tag_ptr = &tagStore[index];
if (tag_ptr->isCompressed()) {
// decompress the data here.
}
}
void
IIC::compressBlock(unsigned long index)
{
IICTag *tag_ptr = &tagStore[index];
if (!tag_ptr->isCompressed()) {
// Compress the data here.
}
}
void
IIC::invalidateBlk(IIC::BlkType *tag_ptr)
{
@@ -672,7 +655,6 @@ void
IIC::writeData(IICTag *blk, uint8_t *write_data, int size,
PacketList & writebacks)
{
assert(size < blkSize || !blk->isCompressed());
DPRINTF(IIC, "Writing %d bytes to %x\n", size,
blk->tag<<tagShift);
// Find the number of subblocks needed, (round up)

@@ -345,17 +345,6 @@ class IIC : public BaseTags
return hitLatency;
}
/**
* Generate the tag from the address.
* @param addr The address to a get a tag for.
* @param blk Ignored here.
* @return the tag.
*/
Addr extractTag(Addr addr, IICTag *blk) const
{
return (addr >> tagShift);
}
/**
* Generate the tag from the address.
* @param addr The address to a get a tag for.
@@ -422,18 +411,6 @@ class IIC : public BaseTags
return tmp;
}
/**
* Decompress a block if it is compressed.
* @param index The tag store index for the block to uncompress.
*/
void decompressBlock(unsigned long index);
/**
* Try and compress a block if it is not already compressed.
* @param index The tag store index for the block to compress.
*/
void compressBlock(unsigned long index);
/**
* Invalidate a block.
* @param blk The block to invalidate.
@@ -462,11 +439,9 @@ class IIC : public BaseTags
* Find a replacement block for the address provided.
* @param pkt The request to a find a replacement candidate for.
* @param writebacks List for any writebacks to be performed.
* @param compress_blocks List of blocks to compress, for adaptive comp.
* @return The block to place the replacement in.
*/
IICTag* findReplacement(PacketPtr &pkt, PacketList &writebacks,
BlkList &compress_blocks);
IICTag* findReplacement(Addr addr, PacketList &writebacks);
/**
* Read the data from the internal storage of the given cache block.

@@ -173,6 +173,8 @@ LRU::findBlock(Addr addr, int &lat)
if (blk != NULL) {
// move this block to head of the MRU list
sets[set].moveToHead(blk);
DPRINTF(CacheRepl, "set %x: moving blk %x to MRU\n",
set, regenerateBlkAddr(tag, set));
if (blk->whenReady > curTick
&& blk->whenReady - curTick > hitLatency) {
lat = blk->whenReady - curTick;
@@ -194,10 +196,9 @@ LRU::findBlock(Addr addr) const
}
LRUBlk*
LRU::findReplacement(PacketPtr &pkt, PacketList &writebacks,
BlkList &compress_blocks)
LRU::findReplacement(Addr addr, PacketList &writebacks)
{
unsigned set = extractSet(pkt->getAddr());
unsigned set = extractSet(addr);
// grab a replacement candidate
LRUBlk *blk = sets[set].blks[assoc-1];
sets[set].moveToHead(blk);
@@ -206,6 +207,9 @@ LRU::findReplacement(PacketPtr &pkt, PacketList &writebacks,
totalRefs += blk->refCount;
++sampledRefs;
blk->refCount = 0;
DPRINTF(CacheRepl, "set %x: selecting blk %x for replacement\n",
set, regenerateBlkAddr(blk->tag, set));
} else if (!blk->isTouched) {
tagsInUse++;
blk->isTouched = true;

@@ -189,11 +189,9 @@ public:
* Find a replacement block for the address provided.
* @param pkt The request to a find a replacement candidate for.
* @param writebacks List for any writebacks to be performed.
* @param compress_blocks List of blocks to compress, for adaptive comp.
* @return The block to place the replacement in.
*/
LRUBlk* findReplacement(PacketPtr &pkt, PacketList &writebacks,
BlkList &compress_blocks);
LRUBlk* findReplacement(Addr addr, PacketList &writebacks);
/**
* Generate the tag from the given address.
@@ -205,17 +203,6 @@ public:
return (addr >> tagShift);
}
/**
* Generate the tag from the given address.
* @param addr The address to get the tag from.
* @param blk Ignored.
* @return The tag of the address.
*/
Addr extractTag(Addr addr, LRUBlk *blk) const
{
return (addr >> tagShift);
}
/**
* Calculate the set index from the address.
* @param addr The address to get the set from.

@@ -298,27 +298,25 @@ Split::findBlock(Addr addr) const
}
SplitBlk*
Split::findReplacement(PacketPtr &pkt, PacketList &writebacks,
BlkList &compress_blocks)
Split::findReplacement(Addr addr, PacketList &writebacks)
{
SplitBlk *blk;
assert(0);
#if 0
if (pkt->nic_pkt()) {
DPRINTF(Split, "finding a replacement for nic_req\n");
nic_repl++;
if (lifo && lifo_net)
blk = lifo_net->findReplacement(pkt, writebacks,
compress_blocks);
blk = lifo_net->findReplacement(addr, writebacks);
else if (lru_net)
blk = lru_net->findReplacement(pkt, writebacks,
compress_blocks);
blk = lru_net->findReplacement(addr, writebacks);
// in this case, this is an LRU only cache, it's non partitioned
else
blk = lru->findReplacement(pkt, writebacks, compress_blocks);
blk = lru->findReplacement(addr, writebacks);
} else {
DPRINTF(Split, "finding replacement for cpu_req\n");
blk = lru->findReplacement(pkt, writebacks,
compress_blocks);
blk = lru->findReplacement(addr, writebacks);
cpu_repl++;
}
@@ -346,6 +344,7 @@ Split::findReplacement(PacketPtr &pkt, PacketList &writebacks,
// blk attributes for the new blk coming IN
blk->ts = curTick;
blk->isNIC = (pkt->nic_pkt()) ? true : false;
#endif
return blk;
}
@@ -400,8 +399,13 @@ Split::regenerateBlkAddr(Addr tag, int set) const
}
Addr
Split::extractTag(Addr addr, SplitBlk *blk) const
Split::extractTag(Addr addr) const
{
// need to fix this if we want to use it... old interface of
// passing in blk was too weird
assert(0);
return 0;
/*
if (blk->part == 2) {
if (lifo_net)
return lifo_net->extractTag(addr);
@@ -411,5 +415,6 @@ Split::extractTag(Addr addr, SplitBlk *blk) const
panic("this shouldn't happen");
} else
return lru->extractTag(addr);
*/
}

@@ -212,20 +212,17 @@ class Split : public BaseTags
* Find a replacement block for the address provided.
* @param pkt The request to a find a replacement candidate for.
* @param writebacks List for any writebacks to be performed.
* @param compress_blocks List of blocks to compress, for adaptive comp.
* @return The block to place the replacement in.
*/
SplitBlk* findReplacement(PacketPtr &pkt, PacketList &writebacks,
BlkList &compress_blocks);
SplitBlk* findReplacement(Addr addr, PacketList &writebacks);
/**
* Generate the tag from the given address.
* @param addr The address to get the tag from.
* @param blk The block to find the partition it's in
* @return The tag of the address.
*/
Addr extractTag(Addr addr, SplitBlk *blk) const;
Addr extractTag(Addr addr) const;
/**
* Calculate the set index from the address.

@@ -266,10 +266,9 @@ SplitLIFO::findBlock(Addr addr) const
}
SplitBlk*
SplitLIFO::findReplacement(PacketPtr &pkt, PacketList &writebacks,
BlkList &compress_blocks)
SplitLIFO::findReplacement(Addr addr, PacketList &writebacks)
{
unsigned set = extractSet(pkt->getAddr());
unsigned set = extractSet(addr);
SplitBlk *firstIn = sets[set].firstIn;
SplitBlk *lastIn = sets[set].lastIn;
@@ -289,7 +288,7 @@ SplitLIFO::findReplacement(PacketPtr &pkt, PacketList &writebacks,
}
DPRINTF(Split, "just assigned %#x addr into LIFO, replacing %#x status %#x\n",
pkt->getAddr(), regenerateBlkAddr(blk->tag, set), blk->status);
addr, regenerateBlkAddr(blk->tag, set), blk->status);
if (blk->isValid()) {
replacements[0]++;
totalRefs += blk->refCount;

@@ -212,11 +212,9 @@ public:
* Find a replacement block for the address provided.
* @param pkt The request to a find a replacement candidate for.
* @param writebacks List for any writebacks to be performed.
* @param compress_blocks List of blocks to compress, for adaptive comp.
* @return The block to place the replacement in.
*/
SplitBlk* findReplacement(PacketPtr &pkt, PacketList &writebacks,
BlkList &compress_blocks);
SplitBlk* findReplacement(Addr addr, PacketList &writebacks);
/**
* Generate the tag from the given address.
@@ -228,17 +226,6 @@ public:
return (addr >> tagShift);
}
/**
* Generate the tag from the given address.
* @param addr The address to get the tag from.
* @param blk Ignored
* @return The tag of the address.
*/
Addr extractTag(Addr addr, SplitBlk *blk) const
{
return (addr >> tagShift);
}
/**
* Calculate the set index from the address.
* @param addr The address to get the set from.

@@ -213,10 +213,9 @@ SplitLRU::findBlock(Addr addr) const
}
SplitBlk*
SplitLRU::findReplacement(PacketPtr &pkt, PacketList &writebacks,
BlkList &compress_blocks)
SplitLRU::findReplacement(Addr addr, PacketList &writebacks)
{
unsigned set = extractSet(pkt->getAddr());
unsigned set = extractSet(addr);
// grab a replacement candidate
SplitBlk *blk = sets[set].blks[assoc-1];
sets[set].moveToHead(blk);

@@ -195,11 +195,9 @@ public:
* Find a replacement block for the address provided.
* @param pkt The request to a find a replacement candidate for.
* @param writebacks List for any writebacks to be performed.
* @param compress_blocks List of blocks to compress, for adaptive comp.
* @return The block to place the replacement in.
*/
SplitBlk* findReplacement(PacketPtr &pkt, PacketList &writebacks,
BlkList &compress_blocks);
SplitBlk* findReplacement(Addr addr, PacketList &writebacks);
/**
* Generate the tag from the given address.
@@ -211,17 +209,6 @@ public:
return (addr >> tagShift);
}
/**
* Generate the tag from the given address.
* @param addr The address to get the tag from.
* @param blk Ignored.
* @return The tag of the address.
*/
Addr extractTag(Addr addr, SplitBlk *blk) const
{
return (addr >> tagShift);
}
/**
* Calculate the set index from the address.
* @param addr The address to get the set from.

@@ -56,17 +56,16 @@ MemCmd::commandInfo[] =
{ 0, InvalidCmd, "InvalidCmd" },
/* ReadReq */
{ SET3(IsRead, IsRequest, NeedsResponse), ReadResp, "ReadReq" },
/* WriteReq */
{ SET4(IsWrite, IsRequest, NeedsResponse, HasData),
WriteResp, "WriteReq" },
/* WriteReqNoAck */
{ SET3(IsWrite, IsRequest, HasData), InvalidCmd, "WriteReqNoAck" },
/* ReadResp */
{ SET3(IsRead, IsResponse, HasData), InvalidCmd, "ReadResp" },
/* WriteReq */
{ SET5(IsWrite, NeedsExclusive, IsRequest, NeedsResponse, HasData),
WriteResp, "WriteReq" },
/* WriteResp */
{ SET2(IsWrite, IsResponse), InvalidCmd, "WriteResp" },
{ SET3(IsWrite, NeedsExclusive, IsResponse), InvalidCmd, "WriteResp" },
/* Writeback */
{ SET3(IsWrite, IsRequest, HasData), InvalidCmd, "Writeback" },
{ SET4(IsWrite, NeedsExclusive, IsRequest, HasData),
InvalidCmd, "Writeback" },
/* SoftPFReq */
{ SET4(IsRead, IsRequest, IsSWPrefetch, NeedsResponse),
SoftPFResp, "SoftPFReq" },
@@ -79,28 +78,50 @@ MemCmd::commandInfo[] =
/* HardPFResp */
{ SET4(IsRead, IsResponse, IsHWPrefetch, HasData),
InvalidCmd, "HardPFResp" },
/* InvalidateReq */
{ SET2(IsInvalidate, IsRequest), InvalidCmd, "InvalidateReq" },
/* WriteInvalidateReq */
{ SET5(IsWrite, IsInvalidate, IsRequest, HasData, NeedsResponse),
{ SET6(IsWrite, NeedsExclusive, IsInvalidate,
IsRequest, HasData, NeedsResponse),
WriteInvalidateResp, "WriteInvalidateReq" },
/* WriteInvalidateResp */
{ SET3(IsWrite, IsInvalidate, IsResponse),
{ SET4(IsWrite, NeedsExclusive, IsInvalidate, IsResponse),
InvalidCmd, "WriteInvalidateResp" },
/* UpgradeReq */
{ SET3(IsInvalidate, IsRequest, IsUpgrade), InvalidCmd, "UpgradeReq" },
{ SET4(IsInvalidate, NeedsExclusive, IsRequest, NeedsResponse),
UpgradeResp, "UpgradeReq" },
/* UpgradeResp */
{ SET3(IsInvalidate, NeedsExclusive, IsResponse),
InvalidCmd, "UpgradeResp" },
/* ReadExReq */
{ SET4(IsRead, IsInvalidate, IsRequest, NeedsResponse),
{ SET5(IsRead, NeedsExclusive, IsInvalidate, IsRequest, NeedsResponse),
ReadExResp, "ReadExReq" },
/* ReadExResp */
{ SET4(IsRead, IsInvalidate, IsResponse, HasData),
{ SET5(IsRead, NeedsExclusive, IsInvalidate, IsResponse, HasData),
InvalidCmd, "ReadExResp" },
/* LoadLockedReq */
{ SET4(IsRead, IsLocked, IsRequest, NeedsResponse),
LoadLockedResp, "LoadLockedReq" },
/* LoadLockedResp */
{ SET4(IsRead, IsLocked, IsResponse, HasData),
InvalidCmd, "LoadLockedResp" },
/* StoreCondReq */
{ SET6(IsWrite, NeedsExclusive, IsLocked,
IsRequest, NeedsResponse, HasData),
StoreCondResp, "StoreCondReq" },
/* StoreCondResp */
{ SET4(IsWrite, NeedsExclusive, IsLocked, IsResponse),
InvalidCmd, "StoreCondResp" },
/* SwapReq -- for Swap ldstub type operations */
{ SET4(IsReadWrite, IsRequest, HasData, NeedsResponse),
{ SET6(IsRead, IsWrite, NeedsExclusive, IsRequest, HasData, NeedsResponse),
SwapResp, "SwapReq" },
/* SwapResp -- for Swap ldstub type operations */
{ SET3(IsReadWrite, IsResponse, HasData),
InvalidCmd, "SwapResp" }
{ SET5(IsRead, IsWrite, NeedsExclusive, IsResponse, HasData),
InvalidCmd, "SwapResp" },
/* NetworkNackError -- nacked at network layer (not by protocol) */
{ SET2(IsRequest, IsError), InvalidCmd, "NetworkNackError" },
/* InvalidDestError -- packet dest field invalid */
{ SET2(IsRequest, IsError), InvalidCmd, "InvalidDestError" },
/* BadAddressError -- memory address invalid */
{ SET2(IsRequest, IsError), InvalidCmd, "BadAddressError" }
};
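
The attribute sets built with the SETn macros above are consulted through per-bit tests (MemCmd::testCmdAttrib in the header). A minimal sketch of that mechanism, using a reduced, illustrative attribute list rather than the real NUM_COMMAND_ATTRIBUTES set:

    #include <bitset>

    // Reduced attribute list for illustration only.
    enum Attribute { IsRead, IsWrite, NeedsResponse, HasData, NUM_ATTRS };

    struct CmdInfo {
        std::bitset<NUM_ATTRS> attributes;
    };

    // e.g. a ReadReq-like row: IsRead | NeedsResponse
    const CmdInfo readReqInfo = {
        std::bitset<NUM_ATTRS>((1 << IsRead) | (1 << NeedsResponse))
    };

    // The moral equivalent of MemCmd::testCmdAttrib().
    inline bool testCmdAttrib(const CmdInfo &c, Attribute attrib)
    {
        return c.attributes.test(attrib);
    }
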
@@ -143,54 +164,30 @@ Packet::intersect(PacketPtr p)
return !(s1 > e2 || e1 < s2);
}
bool
fixDelayedResponsePacket(PacketPtr func, PacketPtr timing)
{
bool result;
if (timing->isRead() || timing->isWrite()) {
// Ugly hack to deal with the fact that we queue the requests
// and don't convert them to responses until we issue them on
// the bus. I tried to avoid this by converting packets to
// responses right away, but this breaks during snoops where a
// responder may do the conversion before other caches have
// done the snoop. Would work if we copied the packet instead
// of just hanging on to a pointer.
MemCmd oldCmd = timing->cmd;
timing->cmd = timing->cmd.responseCommand();
result = fixPacket(func, timing);
timing->cmd = oldCmd;
}
else {
//Don't toggle if it isn't a read/write response
result = fixPacket(func, timing);
}
return result;
}
bool
fixPacket(PacketPtr func, PacketPtr timing)
Packet::checkFunctional(Addr addr, int size, uint8_t *data)
{
Addr funcStart = func->getAddr();
Addr funcEnd = func->getAddr() + func->getSize() - 1;
Addr timingStart = timing->getAddr();
Addr timingEnd = timing->getAddr() + timing->getSize() - 1;
Addr func_start = getAddr();
Addr func_end = getAddr() + getSize() - 1;
Addr val_start = addr;
Addr val_end = val_start + size - 1;
assert(!(funcStart > timingEnd || timingStart > funcEnd));
if (func_start > val_end || val_start > func_end) {
// no intersection
return false;
}
// this packet can't solve our problem, continue on
if (!timing->hasData())
return true;
// offset of functional request into supplied value (could be
// negative if partial overlap)
int offset = func_start - val_start;
if (func->isRead()) {
if (funcStart >= timingStart && funcEnd <= timingEnd) {
func->allocate();
std::memcpy(func->getPtr<uint8_t>(), timing->getPtr<uint8_t>() +
funcStart - timingStart, func->getSize());
func->result = Packet::Success;
func->flags |= SATISFIED;
return false;
if (isRead()) {
if (func_start >= val_start && func_end <= val_end) {
allocate();
std::memcpy(getPtr<uint8_t>(), data + offset, getSize());
makeResponse();
return true;
} else {
// In this case the timing packet only partially satisfies
// the request, so we would need more information to make
@@ -198,25 +195,21 @@ fixPacket(PacketPtr func, PacketPtr timing)
// something, so the request could continue and get this
// bit of possibly newer data along with the older data
// not written to yet.
panic("Timing packet only partially satisfies the functional"
"request. Now what?");
panic("Memory value only partially satisfies the functional "
"request. Now what?");
}
} else if (func->isWrite()) {
if (funcStart >= timingStart) {
std::memcpy(timing->getPtr<uint8_t>() + (funcStart - timingStart),
func->getPtr<uint8_t>(),
(std::min(funcEnd, timingEnd) - funcStart) + 1);
} else { // timingStart > funcStart
std::memcpy(timing->getPtr<uint8_t>(),
func->getPtr<uint8_t>() + (timingStart - funcStart),
(std::min(funcEnd, timingEnd) - timingStart) + 1);
} else if (isWrite()) {
if (offset >= 0) {
std::memcpy(data + offset, getPtr<uint8_t>(),
(std::min(func_end, val_end) - func_start) + 1);
} else { // val_start > func_start
std::memcpy(data, getPtr<uint8_t>() - offset,
(std::min(func_end, val_end) - val_start) + 1);
}
// we always want to keep going with a write
return true;
return false;
} else
panic("Don't know how to handle command type %#x\n",
func->cmdToIndex());
panic("Don't know how to handle command %s\n", cmdString());
}
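
The new checkFunctional() hinges on interval-overlap arithmetic between the functional request and a memory value. A self-contained sketch of the fully-contained-read case under the same start/end conventions (all names here are hypothetical, not the Packet API):

    #include <cstdint>
    #include <cstring>

    // Satisfy a functional read [func_start, func_start + func_size) from a
    // memory value covering [val_start, val_start + val_size). Returns true
    // only if the read is fully contained, mirroring the logic above.
    bool readSatisfied(uint64_t func_start, int func_size, uint8_t *func_data,
                       uint64_t val_start, int val_size, const uint8_t *val_data)
    {
        uint64_t func_end = func_start + func_size - 1;
        uint64_t val_end = val_start + val_size - 1;
        if (func_start > val_end || val_start > func_end)
            return false; // no intersection at all
        if (func_start >= val_start && func_end <= val_end) {
            // fully contained: copy from the right offset into the value
            std::memcpy(func_data, val_data + (func_start - val_start),
                        func_size);
            return true;
        }
        return false; // partial overlap: the code above panics here
    }
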
@@ -233,21 +226,10 @@ operator<<(std::ostream &o, const Packet &p)
o << p.getAddr() + p.getSize() - 1 << "] ";
o.unsetf(std::ios_base::hex | std::ios_base::showbase);
if (p.result == Packet::Success)
o << "Successful ";
if (p.result == Packet::BadAddress)
o << "BadAddress ";
if (p.result == Packet::Nacked)
o << "Nacked ";
if (p.result == Packet::Unknown)
o << "Inflight ";
if (p.isRead())
o << "Read ";
if (p.isWrite())
o << "Write ";
if (p.isReadWrite())
o << "Read/Write ";
if (p.isInvalidate())
o << "Invalidate ";
if (p.isRequest())

@@ -55,16 +55,6 @@ typedef Packet *PacketPtr;
typedef uint8_t* PacketDataPtr;
typedef std::list<PacketPtr> PacketList;
//Coherence Flags
#define NACKED_LINE (1 << 0)
#define SATISFIED (1 << 1)
#define SHARED_LINE (1 << 2)
#define CACHE_LINE_FILL (1 << 3)
#define COMPRESSED (1 << 4)
#define NO_ALLOCATE (1 << 5)
#define SNOOP_COMMIT (1 << 6)
class MemCmd
{
public:
@@ -74,23 +64,33 @@ class MemCmd
{
InvalidCmd,
ReadReq,
WriteReq,
WriteReqNoAck,
ReadResp,
WriteReq,
WriteResp,
Writeback,
SoftPFReq,
HardPFReq,
SoftPFResp,
HardPFResp,
InvalidateReq,
WriteInvalidateReq,
WriteInvalidateResp,
UpgradeReq,
UpgradeResp,
ReadExReq,
ReadExResp,
LoadLockedReq,
LoadLockedResp,
StoreCondReq,
StoreCondResp,
SwapReq,
SwapResp,
// Error responses
// @TODO these should be classified as responses rather than
// requests; coding them as requests initially for backwards
// compatibility
NetworkNackError, // nacked at network layer (not by protocol)
InvalidDestError, // packet dest field invalid
BadAddressError, // memory address invalid
NUM_MEM_CMDS
};
@@ -98,18 +98,19 @@ class MemCmd
/** List of command attributes. */
enum Attribute
{
IsRead,
IsWrite,
IsPrefetch,
IsRead, //!< Data flows from responder to requester
IsWrite, //!< Data flows from requester to responder
IsPrefetch, //!< Not a demand access
IsInvalidate,
IsRequest,
IsResponse,
NeedsResponse,
NeedsExclusive, //!< Requires exclusive copy to complete in-cache
IsRequest, //!< Issued by requester
IsResponse, //!< Issued by responder
NeedsResponse, //!< Requester needs response from target
IsSWPrefetch,
IsHWPrefetch,
IsUpgrade,
HasData,
IsReadWrite,
IsLocked, //!< Alpha/MIPS LL or SC access
HasData, //!< There is an associated payload
IsError, //!< Error response
NUM_COMMAND_ATTRIBUTES
};
@@ -142,10 +143,13 @@ class MemCmd
bool isWrite() const { return testCmdAttrib(IsWrite); }
bool isRequest() const { return testCmdAttrib(IsRequest); }
bool isResponse() const { return testCmdAttrib(IsResponse); }
bool needsExclusive() const { return testCmdAttrib(NeedsExclusive); }
bool needsResponse() const { return testCmdAttrib(NeedsResponse); }
bool isInvalidate() const { return testCmdAttrib(IsInvalidate); }
bool hasData() const { return testCmdAttrib(HasData); }
bool isReadWrite() const { return testCmdAttrib(IsReadWrite); }
bool isReadWrite() const { return isRead() && isWrite(); }
bool isLocked() const { return testCmdAttrib(IsLocked); }
bool isError() const { return testCmdAttrib(IsError); }
const Command responseCommand() const {
return commandInfo[cmd].response;
@@ -189,8 +193,11 @@ class Packet : public FastAlloc
typedef MemCmd::Command Command;
/** Temporary FLAGS field until cache gets working, this should be in coherence/sender state. */
uint64_t flags;
/** The command field of the packet. */
MemCmd cmd;
/** A pointer to the original request. */
RequestPtr req;
private:
/** A pointer to the data being transferred. It can be different
@@ -231,11 +238,30 @@ class Packet : public FastAlloc
* (unlike * addr, size, and src). */
short dest;
/** The original value of the command field. Only valid when the
* current command field is an error condition; in that case, the
* previous contents of the command field are copied here. This
* field is *not* set on non-error responses.
*/
MemCmd origCmd;
/** Are the 'addr' and 'size' fields valid? */
bool addrSizeValid;
/** Is the 'src' field valid? */
bool srcValid;
bool destValid;
enum Flag {
// Snoop response flags
MemInhibit,
Shared,
// Special control flags
ExpressSnoop,
NUM_PACKET_FLAGS
};
/** Status flags */
std::bitset<NUM_PACKET_FLAGS> flags;
public:
@@ -252,22 +278,6 @@ class Packet : public FastAlloc
* should be routed based on its address. */
static const short Broadcast = -1;
/** A pointer to the original request. */
RequestPtr req;
/** A virtual base opaque structure used to hold coherence-related
* state. A specific subclass would be derived from this to
* carry state specific to a particular coherence protocol. */
class CoherenceState : public FastAlloc {
public:
virtual ~CoherenceState() {}
};
/** This packet's coherence state. Caches should use
* dynamic_cast<> to cast to the state appropriate for the
* system's coherence protocol. */
CoherenceState *coherence;
/** A virtual base opaque structure used to hold state associated
* with the packet but specific to the sending device (e.g., an
* MSHR). A pointer to this state is returned in the packet's
@@ -284,11 +294,6 @@ class Packet : public FastAlloc
* to cast to the state appropriate to the sender. */
SenderState *senderState;
public:
/** The command field of the packet. */
MemCmd cmd;
/** Return the string name of the cmd field (for debugging and
* tracing). */
const std::string &cmdString() const { return cmd.toString(); }
@@ -296,80 +301,95 @@ class Packet : public FastAlloc
/** Return the index of this command. */
inline int cmdToIndex() const { return cmd.toInt(); }
public:
bool isRead() const { return cmd.isRead(); }
bool isWrite() const { return cmd.isWrite(); }
bool isRequest() const { return cmd.isRequest(); }
bool isResponse() const { return cmd.isResponse(); }
bool needsExclusive() const { return cmd.needsExclusive(); }
bool needsResponse() const { return cmd.needsResponse(); }
bool isInvalidate() const { return cmd.isInvalidate(); }
bool hasData() const { return cmd.hasData(); }
bool isReadWrite() const { return cmd.isReadWrite(); }
bool isLocked() const { return cmd.isLocked(); }
bool isError() const { return cmd.isError(); }
bool isCacheFill() const { return (flags & CACHE_LINE_FILL) != 0; }
bool isNoAllocate() const { return (flags & NO_ALLOCATE) != 0; }
bool isCompressed() const { return (flags & COMPRESSED) != 0; }
// Snoop flags
void assertMemInhibit() { flags[MemInhibit] = true; }
void assertShared() { flags[Shared] = true; }
bool memInhibitAsserted() { return flags[MemInhibit]; }
bool sharedAsserted() { return flags[Shared]; }
// Special control flags
void setExpressSnoop() { flags[ExpressSnoop] = true; }
bool isExpressSnoop() { return flags[ExpressSnoop]; }
// Network error conditions... encapsulate them as methods since
// their encoding keeps changing (from result field to command
// field, etc.)
void setNacked() { origCmd = cmd; cmd = MemCmd::NetworkNackError; }
void setBadAddress() { origCmd = cmd; cmd = MemCmd::BadAddressError; }
bool wasNacked() { return cmd == MemCmd::NetworkNackError; }
bool hadBadAddress() { return cmd == MemCmd::BadAddressError; }
bool nic_pkt() { panic("Unimplemented"); M5_DUMMY_RETURN }
/** Possible results of a packet's request. */
enum Result
{
Success,
BadAddress,
Nacked,
Unknown
};
/** The result of this packet's request. */
Result result;
/** Accessor function that returns the source index of the packet. */
short getSrc() const { assert(srcValid); return src; }
short getSrc() const { assert(srcValid); return src; }
void setSrc(short _src) { src = _src; srcValid = true; }
/** Reset source field, e.g. to retransmit packet on different bus. */
void clearSrc() { srcValid = false; }
/** Accessor function that returns the destination index of
the packet. */
short getDest() const { return dest; }
void setDest(short _dest) { dest = _dest; }
short getDest() const { assert(destValid); return dest; }
void setDest(short _dest) { dest = _dest; destValid = true; }
Addr getAddr() const { assert(addrSizeValid); return addr; }
int getSize() const { assert(addrSizeValid); return size; }
int getSize() const { assert(addrSizeValid); return size; }
Addr getOffset(int blkSize) const { return addr & (Addr)(blkSize - 1); }
void addrOverride(Addr newAddr) { assert(addrSizeValid); addr = newAddr; }
void cmdOverride(MemCmd newCmd) { cmd = newCmd; }
/** Constructor. Note that a Request object must be constructed
* first, but the Requests's physical address and size fields
* need not be valid. The command and destination addresses
* must be supplied. */
Packet(Request *_req, MemCmd _cmd, short _dest)
: data(NULL), staticData(false), dynamicData(false), arrayData(false),
: cmd(_cmd), req(_req),
data(NULL), staticData(false), dynamicData(false), arrayData(false),
addr(_req->paddr), size(_req->size), dest(_dest),
addrSizeValid(_req->validPaddr),
srcValid(false),
req(_req), coherence(NULL), senderState(NULL), cmd(_cmd),
result(Unknown)
addrSizeValid(_req->validPaddr), srcValid(false), destValid(true),
flags(0), time(curTick), senderState(NULL)
{
flags = 0;
time = curTick;
}
/** Alternate constructor if you are trying to create a packet with
* a request that is for a whole block, not the address from the req.
* this allows for overriding the size/addr of the req.*/
Packet(Request *_req, MemCmd _cmd, short _dest, int _blkSize)
: data(NULL), staticData(false), dynamicData(false), arrayData(false),
addr(_req->paddr & ~(_blkSize - 1)), size(_blkSize),
dest(_dest),
addrSizeValid(_req->validPaddr), srcValid(false),
req(_req), coherence(NULL), senderState(NULL), cmd(_cmd),
result(Unknown)
: cmd(_cmd), req(_req),
data(NULL), staticData(false), dynamicData(false), arrayData(false),
addr(_req->paddr & ~(_blkSize - 1)), size(_blkSize), dest(_dest),
addrSizeValid(_req->validPaddr), srcValid(false), destValid(true),
flags(0), time(curTick), senderState(NULL)
{
}
/** Alternate constructor for copying a packet. Copy all fields
* *except* if the original packet's data was dynamic, don't copy
* that, as we can't guarantee that the new packet's lifetime is
* less than that of the original packet. In this case the new
* packet should allocate its own data. */
Packet(Packet *origPkt, bool clearFlags = false)
: cmd(origPkt->cmd), req(origPkt->req),
data(origPkt->staticData ? origPkt->data : NULL),
staticData(origPkt->staticData),
dynamicData(false), arrayData(false),
addr(origPkt->addr), size(origPkt->size),
src(origPkt->src), dest(origPkt->dest),
addrSizeValid(origPkt->addrSizeValid),
srcValid(origPkt->srcValid), destValid(origPkt->destValid),
flags(clearFlags ? 0 : origPkt->flags),
time(curTick), senderState(origPkt->senderState)
{
flags = 0;
time = curTick;
}
/** Destructor. */
@@ -388,7 +408,6 @@ class Packet : public FastAlloc
size = req->size;
time = req->time;
addrSizeValid = true;
result = Unknown;
if (dynamicData) {
deleteData();
dynamicData = false;
@@ -396,29 +415,30 @@ class Packet : public FastAlloc
}
}
/** Take a request packet and modify it in place to be suitable
* for returning as a response to that request. Used for timing
* accesses only. For atomic and functional accesses, the
* request packet is always implicitly passed back *without*
* modifying the destination fields, so this function
* should not be called. */
void makeTimingResponse() {
assert(needsResponse());
assert(isRequest());
cmd = cmd.responseCommand();
dest = src;
srcValid = false;
}
/**
* Take a request packet and modify it in place to be suitable for
* returning as a response to that request.
* returning as a response to that request. The source and
* destination fields are *not* modified, as is appropriate for
* atomic accesses.
*/
void makeAtomicResponse()
void makeResponse()
{
assert(needsResponse());
assert(isRequest());
cmd = cmd.responseCommand();
dest = src;
destValid = srcValid;
srcValid = false;
}
void makeAtomicResponse()
{
makeResponse();
}
void makeTimingResponse()
{
makeResponse();
}
/**
@@ -429,9 +449,10 @@ class Packet : public FastAlloc
void
reinitNacked()
{
assert(needsResponse() && result == Nacked);
dest = Broadcast;
result = Unknown;
assert(wasNacked());
cmd = origCmd;
assert(needsResponse());
setDest(Broadcast);
}
@@ -494,6 +515,40 @@ class Packet : public FastAlloc
template <typename T>
void set(T v);
/**
* Copy data into the packet from the provided pointer.
*/
void setData(uint8_t *p)
{
std::memcpy(getPtr<uint8_t>(), p, getSize());
}
/**
* Copy data into the packet from the provided block pointer,
* which is aligned to the given block size.
*/
void setDataFromBlock(uint8_t *blk_data, int blkSize)
{
setData(blk_data + getOffset(blkSize));
}
/**
* Copy data from the packet to the memory at the provided pointer.
*/
void writeData(uint8_t *p)
{
std::memcpy(p, getPtr<uint8_t>(), getSize());
}
/**
* Copy data from the packet to the provided block pointer, which
* is aligned to the given block size.
*/
void writeDataToBlock(uint8_t *blk_data, int blkSize)
{
writeData(blk_data + getOffset(blkSize));
}
/**
* delete the data pointed to in the data pointer. Ok to call no
* matter how data was allocated.
@@ -505,23 +560,27 @@ class Packet : public FastAlloc
/** Does the packet modify the same addresses? */
bool intersect(PacketPtr p);
/**
* Check a functional request against a memory value represented
* by a base/size pair and an associated data array. If the
* functional request is a read, it may be satisfied by the memory
* value. If the functional request is a write, it may update the
* memory value.
*/
bool checkFunctional(Addr base, int size, uint8_t *data);
/**
* Check a functional request against a memory value stored in
* another packet (i.e. an in-transit request or response).
*/
bool checkFunctional(PacketPtr otherPkt) {
return (otherPkt->hasData() &&
checkFunctional(otherPkt->getAddr(), otherPkt->getSize(),
otherPkt->getPtr<uint8_t>()));
}
};
/** This function given a functional packet and a timing packet either
* satisfies the timing packet, or updates the timing packet to
* reflect the updated state in the timing packet. It returns if the
* functional packet should continue to traverse the memory hierarchy
* or not.
*/
bool fixPacket(PacketPtr func, PacketPtr timing);
/** This function is a wrapper for the fixPacket field that toggles
* the hasData bit it is used when a response is waiting in the
* caches, but hasn't been marked as a response yet (so the fixPacket
* needs to get the correct value for the hasData)
*/
bool fixDelayedResponsePacket(PacketPtr func, PacketPtr timing);
std::ostream & operator<<(std::ostream &o, const Packet &p);
#endif //__MEM_PACKET_HH
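
The setDataFromBlock()/writeDataToBlock() helpers above both reduce to the getOffset() math: a request at addr touches its aligned block at offset addr mod blkSize. A small sketch of that arithmetic with hypothetical buffers standing in for the packet payload and the cache block:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // getOffset() equivalent: position of addr within its aligned block.
    static int blockOffset(uint64_t addr, int blkSize)
    {
        return int(addr & uint64_t(blkSize - 1));
    }

    int main()
    {
        const int blkSize = 64;
        uint8_t block[64] = {};              // one block's backing store
        uint8_t payload[4] = { 1, 2, 3, 4 }; // a 4-byte packet at addr 0x104
        uint64_t addr = 0x104;

        // writeDataToBlock() direction: packet payload into the block.
        std::memcpy(block + blockOffset(addr, blkSize), payload,
                    sizeof(payload));
        assert(block[4] == 1 && block[7] == 4); // 0x104 % 64 == 4

        // setDataFromBlock() direction: block contents back into a packet.
        uint8_t readback[4];
        std::memcpy(readback, block + blockOffset(addr, blkSize),
                    sizeof(readback));
        assert(readback[3] == 4);
        return 0;
    }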

@@ -58,8 +58,9 @@ PhysicalMemory::PhysicalMemory(Params *p)
panic("Memory Size not divisible by page size\n");
int map_flags = MAP_ANON | MAP_PRIVATE;
pmemAddr = (uint8_t *)mmap(NULL, params()->addrRange.size(), PROT_READ | PROT_WRITE,
map_flags, -1, 0);
pmemAddr =
(uint8_t *)mmap(NULL, params()->addrRange.size(),
PROT_READ | PROT_WRITE, map_flags, -1, 0);
if (pmemAddr == (void *)MAP_FAILED) {
perror("mmap");
@@ -121,8 +122,9 @@ PhysicalMemory::calculateLatency(PacketPtr pkt)
// Add load-locked to tracking list. Should only be called if the
// operation is a load and the LOCKED flag is set.
void
PhysicalMemory::trackLoadLocked(Request *req)
PhysicalMemory::trackLoadLocked(PacketPtr pkt)
{
Request *req = pkt->req;
Addr paddr = LockedAddr::mask(req->getPaddr());
// first we check if we already have a locked addr for this
@@ -151,10 +153,11 @@ PhysicalMemory::trackLoadLocked(Request *req)
// conflict with locked addresses, and for success/failure of store
// conditionals.
bool
PhysicalMemory::checkLockedAddrList(Request *req)
PhysicalMemory::checkLockedAddrList(PacketPtr pkt)
{
Request *req = pkt->req;
Addr paddr = LockedAddr::mask(req->getPaddr());
bool isLocked = req->isLocked();
bool isLocked = pkt->isLocked();
// Initialize return value. Non-conditional stores always
// succeed. Assume conditional stores will fail until proven
@@ -198,74 +201,50 @@ PhysicalMemory::checkLockedAddrList(Request *req)
return success;
}
void
PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
#if TRACING_ON
#define CASE(A, T) \
case sizeof(T): \
DPRINTF(MemoryAccess, A " of size %i on address 0x%x data 0x%x\n", \
pkt->getSize(), pkt->getAddr(), pkt->get<T>()); \
break
#define TRACE_PACKET(A) \
do { \
switch (pkt->getSize()) { \
CASE(A, uint64_t); \
CASE(A, uint32_t); \
CASE(A, uint16_t); \
CASE(A, uint8_t); \
default: \
DPRINTF(MemoryAccess, A " of size %i on address 0x%x\n", \
pkt->getSize(), pkt->getAddr()); \
} \
} while (0)
#else
#define TRACE_PACKET(A)
#endif
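
TRACE_PACKET above relies on M5's DPRINTF machinery and so cannot run standalone; a plain-C++ sketch of the same size-dispatch idea, printing the payload as the integer type matching the access size and falling back to size/address for odd sizes (tracePacket is a hypothetical name):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    void tracePacket(const char *what, uint64_t addr, int size,
                     const void *data)
    {
        if (size == (int)sizeof(uint64_t)) {
            uint64_t v;
            std::memcpy(&v, data, sizeof(v));
            std::printf("%s of size %d on address 0x%llx data 0x%llx\n",
                        what, size, (unsigned long long)addr,
                        (unsigned long long)v);
        } else if (size == (int)sizeof(uint32_t)) {
            uint32_t v;
            std::memcpy(&v, data, sizeof(v));
            std::printf("%s of size %d on address 0x%llx data 0x%x\n",
                        what, size, (unsigned long long)addr, v);
        } else {
            // odd sizes: report size and address only
            std::printf("%s of size %d on address 0x%llx\n",
                        what, size, (unsigned long long)addr);
        }
    }
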
Tick
PhysicalMemory::doAtomicAccess(PacketPtr pkt)
{
assert(pkt->getAddr() >= start() &&
pkt->getAddr() + pkt->getSize() <= start() + size());
if (pkt->isRead()) {
if (pkt->req->isLocked()) {
trackLoadLocked(pkt->req);
}
memcpy(pkt->getPtr<uint8_t>(), pmemAddr + pkt->getAddr() - start(),
pkt->getSize());
#if TRACING_ON
switch (pkt->getSize()) {
case sizeof(uint64_t):
DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint64_t>());
break;
case sizeof(uint32_t):
DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint32_t>());
break;
case sizeof(uint16_t):
DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint16_t>());
break;
case sizeof(uint8_t):
DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint8_t>());
break;
default:
DPRINTF(MemoryAccess, "Read of size %i on address 0x%x\n",
pkt->getSize(), pkt->getAddr());
}
#endif
if (pkt->memInhibitAsserted()) {
DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
pkt->getAddr());
return 0;
}
else if (pkt->isWrite()) {
if (writeOK(pkt->req)) {
memcpy(pmemAddr + pkt->getAddr() - start(), pkt->getPtr<uint8_t>(),
pkt->getSize());
#if TRACING_ON
switch (pkt->getSize()) {
case sizeof(uint64_t):
DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint64_t>());
break;
case sizeof(uint32_t):
DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint32_t>());
break;
case sizeof(uint16_t):
DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint16_t>());
break;
case sizeof(uint8_t):
DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint8_t>());
break;
default:
DPRINTF(MemoryAccess, "Write of size %i on address 0x%x\n",
pkt->getSize(), pkt->getAddr());
}
#endif
}
} else if (pkt->isInvalidate()) {
//upgrade or invalidate
pkt->flags |= SATISFIED;
} else if (pkt->isReadWrite()) {
uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();
if (pkt->cmd == MemCmd::SwapReq) {
IntReg overwrite_val;
bool overwrite_mem;
uint64_t condition_val64;
@@ -277,66 +256,76 @@ PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
// keep a copy of our possible write value, and copy what is at the
// memory address into the packet
std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize());
std::memcpy(pkt->getPtr<uint8_t>(), pmemAddr + pkt->getAddr() - start(),
pkt->getSize());
std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
if (pkt->req->isCondSwap()) {
if (pkt->getSize() == sizeof(uint64_t)) {
condition_val64 = pkt->req->getExtraData();
overwrite_mem = !std::memcmp(&condition_val64, pmemAddr +
pkt->getAddr() - start(), sizeof(uint64_t));
overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
sizeof(uint64_t));
} else if (pkt->getSize() == sizeof(uint32_t)) {
condition_val32 = (uint32_t)pkt->req->getExtraData();
overwrite_mem = !std::memcmp(&condition_val32, pmemAddr +
pkt->getAddr() - start(), sizeof(uint32_t));
overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
sizeof(uint32_t));
} else
panic("Invalid size for conditional read/write\n");
}
if (overwrite_mem)
std::memcpy(pmemAddr + pkt->getAddr() - start(),
&overwrite_val, pkt->getSize());
std::memcpy(hostAddr, &overwrite_val, pkt->getSize());
#if TRACING_ON
switch (pkt->getSize()) {
case sizeof(uint64_t):
DPRINTF(MemoryAccess, "Read/Write of size %i on address 0x%x old data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint64_t>());
DPRINTF(MemoryAccess, "New Data 0x%x %s conditional (0x%x) and %s \n",
overwrite_mem, pkt->req->isCondSwap() ? "was" : "wasn't",
condition_val64, overwrite_mem ? "happened" : "didn't happen");
break;
case sizeof(uint32_t):
DPRINTF(MemoryAccess, "Read/Write of size %i on address 0x%x old data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint32_t>());
DPRINTF(MemoryAccess, "New Data 0x%x %s conditional (0x%x) and %s \n",
overwrite_mem, pkt->req->isCondSwap() ? "was" : "wasn't",
condition_val32, overwrite_mem ? "happened" : "didn't happen");
break;
case sizeof(uint16_t):
DPRINTF(MemoryAccess, "Read/Write of size %i on address 0x%x old data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint16_t>());
DPRINTF(MemoryAccess, "New Data 0x%x wasn't conditional and happned\n",
overwrite_mem);
break;
case sizeof(uint8_t):
DPRINTF(MemoryAccess, "Read/Write of size %i on address 0x%x old data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint8_t>());
DPRINTF(MemoryAccess, "New Data 0x%x wasn't conditional and happned\n",
overwrite_mem);
break;
default:
DPRINTF(MemoryAccess, "Read/Write of size %i on address 0x%x\n",
pkt->getSize(), pkt->getAddr());
TRACE_PACKET("Read/Write");
} else if (pkt->isRead()) {
assert(!pkt->isWrite());
if (pkt->isLocked()) {
trackLoadLocked(pkt);
}
memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
TRACE_PACKET("Read");
} else if (pkt->isWrite()) {
if (writeOK(pkt)) {
memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
TRACE_PACKET("Write");
}
} else if (pkt->isInvalidate()) {
//upgrade or invalidate
if (pkt->needsResponse()) {
pkt->makeAtomicResponse();
}
#endif
} else {
panic("unimplemented");
}
pkt->result = Packet::Success;
if (pkt->needsResponse()) {
pkt->makeAtomicResponse();
}
return calculateLatency(pkt);
}
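
The SwapReq path above implements an optionally conditional atomic swap against host memory: the packet always receives the old memory value, and memory is overwritten only if the swap is unconditional or the stored condition matches. A standalone sketch of the 64-bit case, with hypothetical names:

    #include <cstdint>
    #include <cstring>

    // Returns whether the write to memory actually happened.
    bool condSwap64(uint64_t *mem, uint64_t *pkt_val,
                    bool is_cond_swap, uint64_t condition_val)
    {
        uint64_t overwrite_val = *pkt_val;    // value we may write
        *pkt_val = *mem;                      // packet receives old data
        bool overwrite_mem = !is_cond_swap ||
            std::memcmp(&condition_val, mem, sizeof(uint64_t)) == 0;
        if (overwrite_mem)
            *mem = overwrite_val;
        return overwrite_mem;
    }
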
void
PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
{
assert(pkt->getAddr() >= start() &&
pkt->getAddr() + pkt->getSize() <= start() + size());
uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();
if (pkt->cmd == MemCmd::ReadReq) {
memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
TRACE_PACKET("Read");
} else if (pkt->cmd == MemCmd::WriteReq) {
memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
TRACE_PACKET("Write");
} else {
panic("PhysicalMemory: unimplemented functional command %s",
pkt->cmdString());
}
pkt->makeAtomicResponse();
}
Port *
PhysicalMemory::getPort(const std::string &if_name, int idx)
{
@@ -407,8 +396,7 @@ PhysicalMemory::MemoryPort::deviceBlockSize()
Tick
PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt)
{
memory->doFunctionalAccess(pkt);
return memory->calculateLatency(pkt);
return memory->doAtomicAccess(pkt);
}
void

@@ -112,12 +112,12 @@ class PhysicalMemory : public MemObject
// inline a quick check for an empty locked addr list (hopefully
// the common case), and do the full list search (if necessary) in
// this out-of-line function
bool checkLockedAddrList(Request *req);
bool checkLockedAddrList(PacketPtr pkt);
// Record the address of a load-locked operation so that we can
// clear the execution context's lock flag if a matching store is
// performed
void trackLoadLocked(Request *req);
void trackLoadLocked(PacketPtr pkt);
// Compare a store address with any locked addresses so we can
// clear the lock flag appropriately. Return value set to 'false'
@@ -126,17 +126,18 @@ class PhysicalMemory : public MemObject
// requesting execution context), 'true' otherwise. Note that
// this method must be called on *all* stores since even
// non-conditional stores must clear any matching lock addresses.
bool writeOK(Request *req) {
bool writeOK(PacketPtr pkt) {
Request *req = pkt->req;
if (lockedAddrList.empty()) {
// no locked addrs: nothing to check, store_conditional fails
bool isLocked = req->isLocked();
bool isLocked = pkt->isLocked();
if (isLocked) {
req->setExtraData(0);
}
return !isLocked; // only do write if not an sc
} else {
// iterate over list...
return checkLockedAddrList(req);
return checkLockedAddrList(pkt);
}
}
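
writeOK() and checkLockedAddrList() together implement classic LL/SC bookkeeping: a load-locked records a reservation, any store to that address clears it, and a store-conditional succeeds only if its own reservation survived. A minimal sketch under those assumptions (hypothetical types; the real list also masks addresses and matches on the requesting context's ID):

    #include <cstdint>
    #include <list>

    struct Reservation { int contextId; uint64_t addr; };
    std::list<Reservation> lockedAddrList;

    // Load-locked: record a reservation for this context.
    void loadLocked(int cid, uint64_t addr)
    {
        Reservation r = { cid, addr };
        lockedAddrList.push_back(r);
    }

    // Any store clears matching reservations; a store-conditional
    // succeeds only if this context's own reservation was still there.
    bool storeOK(int cid, uint64_t addr, bool is_conditional)
    {
        bool success = !is_conditional; // plain stores always proceed
        std::list<Reservation>::iterator i = lockedAddrList.begin();
        while (i != lockedAddrList.end()) {
            if (i->addr == addr) {
                if (is_conditional && i->contextId == cid)
                    success = true; // our reservation survived
                i = lockedAddrList.erase(i);
            } else {
                ++i;
            }
        }
        return success;
    }
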
@@ -175,6 +176,7 @@ class PhysicalMemory : public MemObject
unsigned int drain(Event *de);
protected:
Tick doAtomicAccess(PacketPtr pkt);
void doFunctionalAccess(PacketPtr pkt);
virtual Tick calculateLatency(PacketPtr pkt);
void recvStatusChange(Port::Status status);

@@ -58,12 +58,11 @@ void
Port::blobHelper(Addr addr, uint8_t *p, int size, MemCmd cmd)
{
Request req;
Packet pkt(&req, cmd, Packet::Broadcast);
for (ChunkGenerator gen(addr, size, peerBlockSize());
!gen.done(); gen.next()) {
req.setPhys(gen.addr(), gen.size(), 0);
pkt.reinitFromRequest();
Packet pkt(&req, cmd, Packet::Broadcast);
pkt.dataStatic(p);
sendFunctional(&pkt);
p += gen.size();
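
blobHelper() above relies on ChunkGenerator to split an arbitrary [addr, addr + size) range on peer-block boundaries so no single access crosses a block. A standalone sketch of that chunking arithmetic (the constants are illustrative):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint64_t addr = 0x3f8; // starts 8 bytes before a boundary
        const int size = 32, blk = 64;
        for (uint64_t cur = addr; cur < addr + size; ) {
            // next block boundary strictly after cur
            uint64_t next = (cur & ~uint64_t(blk - 1)) + blk;
            int chunk = int(std::min<uint64_t>(next, addr + size) - cur);
            std::printf("chunk at 0x%llx, size %d\n",
                        (unsigned long long)cur, chunk); // 8, then 24
            cur += chunk;
        }
        return 0;
    }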

@@ -40,11 +40,8 @@ SimpleTimingPort::checkFunctional(PacketPtr pkt)
PacketPtr target = i->pkt;
// If the target contains data, and it overlaps the
// probed request, need to update data
if (target->intersect(pkt)) {
if (!fixPacket(pkt, target)) {
// fixPacket returns true for continue, false for done
return;
}
if (pkt->checkFunctional(target)) {
return;
}
}
}
@@ -55,7 +52,7 @@ SimpleTimingPort::recvFunctional(PacketPtr pkt)
checkFunctional(pkt);
// Just do an atomic access and throw away the returned latency
if (pkt->result != Packet::Success)
if (!pkt->isResponse())
recvAtomic(pkt);
}
@@ -67,17 +64,29 @@ SimpleTimingPort::recvTiming(PacketPtr pkt)
// code to handle nacks here, but I'm pretty sure it didn't work
// correctly with the drain code, so that would need to be fixed
// if we ever added it back.
assert(pkt->result != Packet::Nacked);
assert(pkt->isRequest());
if (pkt->memInhibitAsserted()) {
// snooper will supply based on copy of packet
// still target's responsibility to delete packet
delete pkt->req;
delete pkt;
return true;
}
bool needsResponse = pkt->needsResponse();
Tick latency = recvAtomic(pkt);
// turn packet around to go back to requester if response expected
if (pkt->needsResponse()) {
pkt->makeTimingResponse();
if (needsResponse) {
// recvAtomic() should already have turned packet into
// atomic response
assert(pkt->isResponse());
schedSendTiming(pkt, curTick + latency);
}
else if (pkt->cmd != MemCmd::UpgradeReq) {
} else {
delete pkt->req;
delete pkt;
}
return true;
}
@@ -88,28 +97,30 @@ SimpleTimingPort::schedSendTiming(PacketPtr pkt, Tick when)
assert(when > curTick);
// Nothing is on the list: add it and schedule an event
if (transmitList.empty()) {
assert(!sendEvent->scheduled());
sendEvent->schedule(when);
transmitList.push_back(DeferredPacket(when, pkt));
if (transmitList.empty() || when < transmitList.front().tick) {
transmitList.push_front(DeferredPacket(when, pkt));
schedSendEvent(when);
return;
}
// something is on the list and this belongs at the end
// list is non-empty and this is not the head, so event should
// already be scheduled
assert(waitingOnRetry ||
(sendEvent->scheduled() && sendEvent->when() <= when));
// list is non-empty & this belongs at the end
if (when >= transmitList.back().tick) {
transmitList.push_back(DeferredPacket(when, pkt));
return;
}
// Something is on the list and this belongs somewhere else
// this belongs in the middle somewhere
DeferredPacketIterator i = transmitList.begin();
i++; // already checked for insertion at front
DeferredPacketIterator end = transmitList.end();
for (; i != end; ++i) {
if (when < i->tick) {
if (i == transmitList.begin()) {
//Inserting at begining, reschedule
sendEvent->reschedule(when);
}
transmitList.insert(i, DeferredPacket(when, pkt));
return;
}
@@ -122,12 +133,15 @@ void
SimpleTimingPort::sendDeferredPacket()
{
assert(deferredPacketReady());
bool success = sendTiming(transmitList.front().pkt);
// take packet off list here; if recvTiming() on the other side
// calls sendTiming() back on us (like SimpleTimingCpu does), then
// we get confused by having a non-active packet on transmitList
DeferredPacket dp = transmitList.front();
transmitList.pop_front();
bool success = sendTiming(dp.pkt);
if (success) {
//send successful, remove packet
transmitList.pop_front();
if (!transmitList.empty()) {
if (!transmitList.empty() && !sendEvent->scheduled()) {
Tick time = transmitList.front().tick;
sendEvent->schedule(time <= curTick ? curTick+1 : time);
}
@@ -136,6 +150,12 @@ SimpleTimingPort::sendDeferredPacket()
drainEvent->process();
drainEvent = NULL;
}
} else {
// Unsuccessful, need to put back on transmitList. Callee
// should not have messed with it (since it didn't accept that
// packet), so we can just push it back on the front.
assert(!sendEvent->scheduled());
transmitList.push_front(dp);
}
waitingOnRetry = !success;
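
The pop-before-send discipline above matters because sendTiming() can reenter this port (as the comment notes for SimpleTimingCpu). A reduced sketch of the same retry-safe pattern, with a hypothetical Pkt type and sendTiming() stubbed to fail:

    #include <list>

    struct Pkt { };
    std::list<Pkt *> transmitList;
    bool waitingOnRetry = false;

    // Stub peer: assume it is busy and refuses the packet.
    bool sendTiming(Pkt *) { return false; }

    void sendDeferredPacket()
    {
        // Pop before sending: a reentrant send from the peer must not
        // see the in-flight packet still sitting at the head.
        Pkt *p = transmitList.front();
        transmitList.pop_front();
        bool success = sendTiming(p);
        if (!success)
            transmitList.push_front(p); // untouched, retry later in order
        waitingOnRetry = !success;
    }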
