Still missing the prefetch and tags directories, as well as the cache builder.
Some implementation details are still blank and need to be filled in.

src/SConscript:
    Reorder the build so that all files compile first.
src/mem/cache/cache.hh:
src/mem/cache/cache_builder.cc:
src/mem/cache/cache_impl.hh:
src/mem/cache/coherence/coherence_protocol.cc:
src/mem/cache/coherence/uni_coherence.cc:
src/mem/cache/coherence/uni_coherence.hh:
src/mem/cache/miss/blocking_buffer.cc:
src/mem/cache/miss/miss_queue.cc:
src/mem/cache/miss/mshr.cc:
src/mem/cache/miss/mshr.hh:
src/mem/cache/miss/mshr_queue.cc:
    More changesets pulled in; everything in the miss/ directory and the cache root directory now compiles.
src/mem/packet.hh:
    Add some more support; some of it needs to be cleaned out once everything is working.

--HG--
extra : convert_revision : ba73676165810edf2c2effaf5fbad8397d6bd800
parent 0d323c753d
commit eafb5c4936
13 changed files with 491 additions and 432 deletions
src/SConscript

@@ -103,7 +103,6 @@ base_sources = Split('''
    mem/cache/base_cache.cc
    mem/cache/cache.cc
    mem/cache/cache_builder.cc
    mem/cache/coherence/coherence_protocol.cc
    mem/cache/coherence/uni_coherence.cc
    mem/cache/miss/blocking_buffer.cc

@@ -126,6 +125,8 @@ base_sources = Split('''
    mem/cache/tags/split_lifo.cc
    mem/cache/tags/split_lru.cc

    mem/cache/cache_builder.cc

    sim/builder.cc
    sim/debug.cc
    sim/eventq.cc
src/mem/cache/cache.hh (35 changes, vendored)

@@ -44,8 +44,9 @@
#include "mem/cache/base_cache.hh"
#include "mem/cache/prefetch/prefetcher.hh"

// forward declarations
class Bus;
//Forward decleration
class MSHR;

/**
 * A template-policy based cache. The behavior of the cache can be altered by

@@ -92,6 +93,11 @@ class Cache : public BaseCache
 */
int busWidth;

/**
 * The latency of a hit in this device.
 */
int hitLatency;

/**
 * A permanent mem req to always be used to cause invalidations.
 * Used to append to target list, to cause an invalidation.

@@ -121,18 +127,18 @@ class Cache : public BaseCache
bool doCopy;
bool blockOnCopy;
BaseCache::Params baseParams;
Bus *in;
Bus *out;
Prefetcher<TagStore, Buffering> *prefetcher;
bool prefetchAccess;
int hitLatency;

Params(TagStore *_tags, Buffering *mq, Coherence *coh,
       bool do_copy, BaseCache::Params params, Bus * in_bus,
       Bus * out_bus, Prefetcher<TagStore, Buffering> *_prefetcher,
       bool prefetch_access)
       bool do_copy, BaseCache::Params params,
       Prefetcher<TagStore, Buffering> *_prefetcher,
       bool prefetch_access, int hit_latency)
    : tags(_tags), missQueue(mq), coherence(coh), doCopy(do_copy),
      blockOnCopy(false), baseParams(params), in(in_bus), out(out_bus),
      prefetcher(_prefetcher), prefetchAccess(prefetch_access)
      blockOnCopy(false), baseParams(params),
      prefetcher(_prefetcher), prefetchAccess(prefetch_access),
      hitLatency(hit_latency)
{
}
};

@@ -140,6 +146,17 @@ class Cache : public BaseCache
/** Instantiates a basic cache object. */
Cache(const std::string &_name, Params &params);

bool doTimingAccess(Packet *pkt, CachePort *cachePort,
                    bool isCpuSide);

Tick doAtomicAccess(Packet *pkt, CachePort *cachePort,
                    bool isCpuSide);

void doFunctionalAccess(Packet *pkt, CachePort *cachePort,
                        bool isCpuSide);

void recvStatusChange(Port::Status status, bool isCpuSide);

void regStats();

/**
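For illustration only: a minimal, self-contained sketch of the pattern this hunk moves to, where the hit latency reaches the cache through the Params constructor instead of being derived from the removed in/out Bus pointers. The ToyCacheParams and ToyCache types below are hypothetical stand-ins, not the gem5 classes.

    // Hypothetical simplification of the Params pattern above.
    #include <iostream>
    #include <string>

    struct ToyCacheParams
    {
        bool doCopy;
        bool prefetchAccess;
        int hitLatency;

        ToyCacheParams(bool do_copy, bool prefetch_access, int hit_latency)
            : doCopy(do_copy), prefetchAccess(prefetch_access),
              hitLatency(hit_latency)
        {
        }
    };

    struct ToyCache
    {
        std::string name;
        int hitLatency;

        ToyCache(const std::string &_name, const ToyCacheParams &params)
            : name(_name), hitLatency(params.hitLatency)
        {
        }
    };

    int main()
    {
        ToyCacheParams params(false, true, 3);   // hit_latency = 3 cycles
        ToyCache l1("l1_dcache", params);
        std::cout << l1.name << " hit latency: " << l1.hitLatency << " cycles\n";
        return 0;
    }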
src/mem/cache/cache_builder.cc (38 changes, vendored)

@@ -42,7 +42,7 @@
#include "mem/cache/base_cache.hh"
#include "mem/cache/cache.hh"
#include "mem/bus/bus.hh"
#include "mem/bus.hh"
#include "mem/cache/coherence/coherence_protocol.hh"
#include "sim/builder.hh"

@@ -84,13 +84,6 @@
#include "mem/cache/coherence/uni_coherence.hh"
#include "mem/cache/coherence/simple_coherence.hh"

// Bus Interfaces
#include "mem/bus/slave_interface.hh"
#include "mem/bus/master_interface.hh"
#include "mem/memory_interface.hh"

#include "mem/trace/mem_trace_writer.hh"

//Prefetcher Headers
#if defined(USE_GHB)
#include "mem/cache/prefetch/ghb_prefetcher.hh"

@@ -118,8 +111,8 @@ BEGIN_DECLARE_SIM_OBJECT_PARAMS(BaseCache)
Param<int> tgts_per_mshr;
Param<int> write_buffers;
Param<bool> prioritizeRequests;
SimObjectParam<Bus *> in_bus;
SimObjectParam<Bus *> out_bus;
// SimObjectParam<Bus *> in_bus;
// SimObjectParam<Bus *> out_bus;
Param<bool> do_copy;
SimObjectParam<CoherenceProtocol *> protocol;
Param<Addr> trace_addr;

@@ -133,9 +126,9 @@ BEGIN_DECLARE_SIM_OBJECT_PARAMS(BaseCache)
Param<int> compression_latency;
Param<int> subblock_size;
Param<Counter> max_miss_count;
SimObjectParam<HierParams *> hier;
// SimObjectParam<HierParams *> hier;
VectorParam<Range<Addr> > addr_range;
SimObjectParam<MemTraceWriter *> mem_trace;
// SimObjectParam<MemTraceWriter *> mem_trace;
Param<bool> split;
Param<int> split_size;
Param<bool> lifo;

@@ -151,6 +144,7 @@ BEGIN_DECLARE_SIM_OBJECT_PARAMS(BaseCache)
Param<bool> prefetch_cache_check_push;
Param<bool> prefetch_use_cpu_id;
Param<bool> prefetch_data_accesses_only;
Param<int> hit_latency;

END_DECLARE_SIM_OBJECT_PARAMS(BaseCache)

@@ -166,8 +160,9 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache)
INIT_PARAM_DFLT(write_buffers, "number of write buffers", 8),
INIT_PARAM_DFLT(prioritizeRequests, "always service demand misses first",
                false),
INIT_PARAM_DFLT(in_bus, "incoming bus object", NULL),
/* INIT_PARAM_DFLT(in_bus, "incoming bus object", NULL),
INIT_PARAM(out_bus, "outgoing bus object"),
*/
INIT_PARAM_DFLT(do_copy, "perform fast copies in the cache", false),
INIT_PARAM_DFLT(protocol, "coherence protocol to use in the cache", NULL),
INIT_PARAM_DFLT(trace_addr, "address to trace", 0),

@@ -192,12 +187,13 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache)
INIT_PARAM_DFLT(max_miss_count,
                "The number of misses to handle before calling exit",
                0),
INIT_PARAM_DFLT(hier,
/* INIT_PARAM_DFLT(hier,
                "Hierarchy global variables",
                &defaultHierParams),
*/
INIT_PARAM_DFLT(addr_range, "The address range in bytes",
                vector<Range<Addr> >(1,RangeIn((Addr)0, MaxAddr))),
INIT_PARAM_DFLT(mem_trace, "Memory trace to write accesses to", NULL),
// INIT_PARAM_DFLT(mem_trace, "Memory trace to write accesses to", NULL),
INIT_PARAM_DFLT(split, "Whether this is a partitioned cache", false),
INIT_PARAM_DFLT(split_size, "the number of \"ways\" belonging to the LRU partition", 0),
INIT_PARAM_DFLT(lifo, "whether you are using a LIFO repl. policy", false),

@@ -212,7 +208,8 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache)
INIT_PARAM_DFLT(prefetch_policy, "Type of prefetcher to use", "none"),
INIT_PARAM_DFLT(prefetch_cache_check_push, "Check if in cash on push or pop of prefetch queue", true),
INIT_PARAM_DFLT(prefetch_use_cpu_id, "Use the CPU ID to seperate calculations of prefetches", true),
INIT_PARAM_DFLT(prefetch_data_accesses_only, "Only prefetch on data not on instruction accesses", false)
INIT_PARAM_DFLT(prefetch_data_accesses_only, "Only prefetch on data not on instruction accesses", false),
INIT_PARAM_DFLT(hit_latency, "Hit Latecny for a succesful access", 1)
END_INIT_SIM_OBJECT_PARAMS(BaseCache)

@@ -232,12 +229,12 @@ END_INIT_SIM_OBJECT_PARAMS(BaseCache)
} \
Cache<CacheTags<t, comp>, b, c>::Params params(tagStore, mq, coh, \
                                               do_copy, base_params, \
                                               in_bus, out_bus, pf, \
                                               /*in_bus, out_bus,*/ pf, \
                                               prefetch_access); \
Cache<CacheTags<t, comp>, b, c> *retval = \
    new Cache<CacheTags<t, comp>, b, c>(getInstanceName(), hier, \
    new Cache<CacheTags<t, comp>, b, c>(getInstanceName(), /*hier,*/ \
                                        params); \
if (in_bus == NULL) { \
/* if (in_bus == NULL) { \
retval->setSlaveInterface(new MemoryInterface<Cache<CacheTags<t, comp>, b, c> >(getInstanceName(), hier, retval, mem_trace)); \
} else { \
retval->setSlaveInterface(new SlaveInterface<Cache<CacheTags<t, comp>, b, c>, Bus>(getInstanceName(), hier, retval, in_bus, mem_trace)); \

@@ -245,6 +242,7 @@ END_INIT_SIM_OBJECT_PARAMS(BaseCache)
retval->setMasterInterface(new MasterInterface<Cache<CacheTags<t, comp>, b, c>, Bus>(getInstanceName(), hier, retval, out_bus)); \
out_bus->rangeChange(); \
return retval; \
*/return true; \
} while (0)

#define BUILD_CACHE_PANIC(x) do { \

@@ -465,7 +463,7 @@ CREATE_SIM_OBJECT(BaseCache)
const void *repl = NULL;
#endif

if (mshrs == 1 || out_bus->doEvents() == false) {
if (mshrs == 1 /*|| out_bus->doEvents() == false*/) {
    BlockingBuffer *mq = new BlockingBuffer(true);
    BUILD_COHERENCE(BlockingBuffer);
} else {
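For illustration only: a tiny standalone sketch of the "parameter with a default" behaviour that INIT_PARAM_DFLT(hit_latency, ..., 1) relies on above. ParamTable is a hypothetical stand-in, not the m5 SimObject parameter machinery.

    #include <iostream>
    #include <map>
    #include <string>

    struct ParamTable
    {
        std::map<std::string, int> values;

        // Return the configured value, or the supplied default if the
        // parameter was never set (mirrors INIT_PARAM_DFLT semantics).
        int getOrDefault(const std::string &key, int dflt) const
        {
            std::map<std::string, int>::const_iterator it = values.find(key);
            return it == values.end() ? dflt : it->second;
        }
    };

    int main()
    {
        ParamTable params;
        params.values["mshrs"] = 1;                                // explicitly configured
        int hit_latency = params.getOrDefault("hit_latency", 1);   // falls back to 1
        std::cout << "hit_latency = " << hit_latency << "\n";
        return 0;
    }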
src/mem/cache/cache_impl.hh (185 changes, vendored)

@@ -90,6 +90,8 @@ doAtomicAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
    else
        snoopProbe(pkt, true);
}
//Fix this timing info
return hitLatency;
}

template<class TagStore, class Buffering, class Coherence>

@@ -129,10 +131,12 @@ Cache(const std::string &_name,
      coherence(params.coherence), prefetcher(params.prefetcher),
      doCopy(params.doCopy), blockOnCopy(params.blockOnCopy)
{
    if (params.in == NULL) {
    //FIX BUS POINTERS
    // if (params.in == NULL) {
        topLevelCache = true;
    }
    tags->setCache(this, params.out->width, params.out->clockRate);
    // }
    //PLEASE FIX THIS, BUS SIZES NOT BEING USED
    tags->setCache(this, blkSize, 1/*params.out->width, params.out->clockRate*/);
    tags->setPrefetcher(prefetcher);
    missQueue->setCache(this);
    missQueue->setPrefetcher(prefetcher);

@@ -140,8 +144,10 @@ Cache(const std::string &_name,
    prefetcher->setCache(this);
    prefetcher->setTags(tags);
    prefetcher->setBuffer(missQueue);
#if 0
    invalidatePkt = new Packet;
    invalidatePkt->cmd = Packet::InvalidateReq;
#endif
}

template<class TagStore, class Buffering, class Coherence>

@@ -175,27 +181,27 @@ Cache<TagStore,Buffering,Coherence>::access(PacketPtr &pkt)
        //Upgrade or Invalidate
        //Look into what happens if two slave caches on bus
        DPRINTF(Cache, "%s %d %x ? blk_addr: %x\n", pkt->cmdString(),
                pkt->req->asid, pkt->addr & (((ULL(1))<<48)-1),
                pkt->addr & ~((Addr)blkSize - 1));
                pkt->req->getAsid(), pkt->getAddr() & (((ULL(1))<<48)-1),
                pkt->getAddr() & ~((Addr)blkSize - 1));

        //@todo Should this return latency have the hit latency in it?
        // respond(pkt,curTick+lat);
        (int)pkt->coherence |= SATISFIED;
        pkt->flags |= SATISFIED;
        // return MA_HIT; //@todo, return values
        return true;
    }
    blk = tags->handleAccess(pkt, lat, writebacks);
} else {
    size = pkt->size;
    size = pkt->getSize();
}
// If this is a block size write/hint (WH64) allocate the block here
// if the coherence protocol allows it.
/** @todo make the fast write alloc (wh64) work with coherence. */
/** @todo Do we want to do fast writes for writebacks as well? */
if (!blk && pkt->size >= blkSize && coherence->allowFastWrites() &&
if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
    (pkt->cmd == Packet::WriteReq || pkt->cmd == Packet::WriteInvalidateReq) ) {
    // not outstanding misses, can do this
    MSHR* outstanding_miss = missQueue->findMSHR(pkt->addr, pkt->req->asid);
    MSHR* outstanding_miss = missQueue->findMSHR(pkt->getAddr(), pkt->req->getAsid());
    if (pkt->cmd == Packet::WriteInvalidateReq || !outstanding_miss) {
        if (outstanding_miss) {
            warn("WriteInv doing a fastallocate"

@@ -211,8 +217,8 @@ Cache<TagStore,Buffering,Coherence>::access(PacketPtr &pkt)
    writebacks.pop_front();
}
DPRINTF(Cache, "%s %d %x %s blk_addr: %x pc %x\n", pkt->cmdString(),
        pkt->req->asid, pkt->addr & (((ULL(1))<<48)-1), (blk) ? "hit" : "miss",
        pkt->addr & ~((Addr)blkSize - 1), pkt->req->pc);
        pkt->req->getAsid(), pkt->getAddr() & (((ULL(1))<<48)-1), (blk) ? "hit" : "miss",
        pkt->getAddr() & ~((Addr)blkSize - 1), pkt->req->getPC());
if (blk) {
    // Hit
    hits[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;

@@ -230,7 +236,7 @@ Cache<TagStore,Buffering,Coherence>::access(PacketPtr &pkt)
    if (missCount) {
        --missCount;
        if (missCount == 0)
            new SimLoopExitEvent("A cache reached the maximum miss count");
            new SimLoopExitEvent(curTick, "A cache reached the maximum miss count");
    }
}
missQueue->handleMiss(pkt, size, curTick + hitLatency);

@@ -281,14 +287,14 @@ Cache<TagStore,Buffering,Coherence>::handleResponse(Packet * &pkt)
BlkType *blk = NULL;
if (pkt->senderState) {
    // MemDebug::cacheResponse(pkt);
    DPRINTF(Cache, "Handling reponse to %x, blk addr: %x\n",pkt->addr,
            pkt->addr & (((ULL(1))<<48)-1));
    DPRINTF(Cache, "Handling reponse to %x, blk addr: %x\n",pkt->getAddr(),
            pkt->getAddr() & (((ULL(1))<<48)-1));

    if (pkt->isCacheFill() && !pkt->isNoAllocate()) {
        blk = tags->findBlock(pkt);
        CacheBlk::State old_state = (blk) ? blk->status : 0;
        PacketList writebacks;
        blk = tags->handleFill(blk, pkt->senderState,
        blk = tags->handleFill(blk, (MSHR*)pkt->senderState,
                               coherence->getNewState(pkt,old_state),
                               writebacks);
        while (!writebacks.empty()) {

@@ -310,7 +316,7 @@ Cache<TagStore,Buffering,Coherence>::pseudoFill(Addr addr, int asid)
// Read the data into the mshr
BlkType *blk = tags->handleAccess(mshr->pkt, lat, dummy, false);
assert(dummy.empty());
assert((int)mshr->pkt->coherence & SATISFIED);
assert(mshr->pkt->flags & SATISFIED);
// can overload order since it isn't used on non pending blocks
mshr->order = blk->status;
// temporarily remove the block from the cache.

@@ -328,11 +334,11 @@ Cache<TagStore,Buffering,Coherence>::pseudoFill(MSHR *mshr)
// Read the data into the mshr
BlkType *blk = tags->handleAccess(mshr->pkt, lat, dummy, false);
assert(dummy.empty());
assert((int)mshr->pkt->coherence & SATISFIED);
assert(mshr->pkt->flags & SATISFIED);
// can overload order since it isn't used on non pending blocks
mshr->order = blk->status;
// temporarily remove the block from the cache.
tags->invalidateBlk(mshr->pkt->addr, mshr->pkt->req->asid);
tags->invalidateBlk(mshr->pkt->getAddr(), mshr->pkt->req->getAsid());
}

@@ -348,9 +354,10 @@ template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
{
    Addr blk_addr = pkt->addr & ~(Addr(blkSize-1));

    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    BlkType *blk = tags->findBlock(pkt);
    MSHR *mshr = missQueue->findMSHR(blk_addr, pkt->req->asid);
    MSHR *mshr = missQueue->findMSHR(blk_addr, pkt->req->getAsid());
    if (isTopLevel() && coherence->hasProtocol()) { //@todo Move this into handle bus req
        //If we find an mshr, and it is in service, we need to NACK or invalidate
        if (mshr) {

@@ -360,7 +367,7 @@ Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
                //If the outstanding request was an invalidate (upgrade,readex,..)
                //Then we need to ACK the request until we get the data
                //Also NACK if the outstanding request is not a cachefill (writeback)
                (int)pkt->coherence |= NACKED_LINE;
                pkt->flags |= NACKED_LINE;
                return;
            }
            else {

@@ -373,19 +380,19 @@ Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
                //@todo Make it so that a read to a pending read can't be exclusive now.

                //Set the address so find match works
                invalidatePkt->addr = pkt->addr;
                invalidatePkt->addrOverride(pkt->getAddr());

                //Append the invalidate on
                missQueue->addTarget(mshr,invalidatePkt);
                DPRINTF(Cache, "Appending Invalidate to blk_addr: %x\n", pkt->addr & (((ULL(1))<<48)-1));
                DPRINTF(Cache, "Appending Invalidate to blk_addr: %x\n", pkt->getAddr() & (((ULL(1))<<48)-1));
                return;
            }
        }
    }
    //We also need to check the writeback buffers and handle those
    std::vector<MSHR *> writebacks;
    if (missQueue->findWrites(blk_addr, pkt->req->asid, writebacks)) {
        DPRINTF(Cache, "Snoop hit in writeback to blk_addr: %x\n", pkt->addr & (((ULL(1))<<48)-1));
    if (missQueue->findWrites(blk_addr, pkt->req->getAsid(), writebacks)) {
        DPRINTF(Cache, "Snoop hit in writeback to blk_addr: %x\n", pkt->getAddr() & (((ULL(1))<<48)-1));

        //Look through writebacks for any non-uncachable writes, use that
        for (int i=0; i<writebacks.size(); i++) {

@@ -395,17 +402,18 @@ Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
                if (pkt->isRead()) {
                    //Only Upgrades don't get here
                    //Supply the data
                    (int)pkt->coherence |= SATISFIED;
                    pkt->flags |= SATISFIED;

                    //If we are in an exclusive protocol, make it ask again
                    //to get write permissions (upgrade), signal shared
                    (int)pkt->coherence |= SHARED_LINE;
                    pkt->flags |= SHARED_LINE;

                    assert(pkt->isRead());
                    assert(pkt->offset < blkSize);
                    assert(pkt->size <= blkSize);
                    assert(pkt->offset + pkt->size <=blkSize);
                    memcpy(pkt->data, mshr->pkt->data + pkt->offset, pkt->size);
                    Addr offset = pkt->getAddr() & ~(blkSize - 1);
                    assert(offset < blkSize);
                    assert(pkt->getSize() <= blkSize);
                    assert(offset + pkt->getSize() <=blkSize);
                    memcpy(pkt->getPtr<uint8_t>(), mshr->pkt->getPtr<uint8_t>() + offset, pkt->getSize());

                    respondToSnoop(pkt);
                }

@@ -434,7 +442,7 @@ void
Cache<TagStore,Buffering,Coherence>::snoopResponse(Packet * &pkt)
{
    //Need to handle the response, if NACKED
    if ((int)pkt->coherence & NACKED_LINE) {
    if (pkt->flags & NACKED_LINE) {
        //Need to mark it as not in service, and retry for bus
        assert(0); //Yeah, we saw a NACK come through

@@ -463,41 +471,35 @@ Tick
Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update)
{
    // MemDebug::cacheProbe(pkt);

    if (!pkt->req->isUncacheable()) {
        if (pkt->isInvalidate() && !pkt->isRead()
            && !pkt->isWrite()) {
            //Upgrade or Invalidate, satisfy it, don't forward
            DPRINTF(Cache, "%s %d %x ? blk_addr: %x\n", pkt->cmdString(),
                    pkt->req->asid, pkt->addr & (((ULL(1))<<48)-1),
                    pkt->addr & ~((Addr)blkSize - 1));
            (int)pkt->coherence |= SATISFIED;
                    pkt->req->getAsid(), pkt->getAddr() & (((ULL(1))<<48)-1),
                    pkt->getAddr() & ~((Addr)blkSize - 1));
            pkt->flags |= SATISFIED;
            return 0;
        }
    }

    if (!update && !doData()) {
        // Nothing to do here
        return mi->sendProbe(pkt,update);
    }

    PacketList writebacks;
    int lat;
    BlkType *blk = tags->handleAccess(pkt, lat, writebacks, update);

    if (!blk) {
        // Need to check for outstanding misses and writes
        Addr blk_addr = pkt->addr & ~(blkSize - 1);
        Addr blk_addr = pkt->getAddr() & ~(blkSize - 1);

        // There can only be one matching outstanding miss.
        MSHR* mshr = missQueue->findMSHR(blk_addr, pkt->req->asid);
        MSHR* mshr = missQueue->findMSHR(blk_addr, pkt->req->getAsid());

        // There can be many matching outstanding writes.
        vector<MSHR*> writes;
        missQueue->findWrites(blk_addr, pkt->req->asid, writes);
        missQueue->findWrites(blk_addr, pkt->req->getAsid(), writes);

        if (!update) {
            mi->sendProbe(pkt, update);
            memSidePort->sendFunctional(pkt);
            // Check for data in MSHR and writebuffer.
            if (mshr) {
                warn("Found outstanding miss on an non-update probe");

@@ -508,26 +510,26 @@ Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update)
                    Packet * target = *i;
                    // If the target contains data, and it overlaps the
                    // probed request, need to update data
                    if (target->isWrite() && target->overlaps(pkt)) {
                    if (target->isWrite() && target->intersect(pkt)) {
                        uint8_t* pkt_data;
                        uint8_t* write_data;
                        int data_size;
                        if (target->addr < pkt->addr) {
                            int offset = pkt->addr - target->paddr;
                            pkt_data = pkt->data;
                            write_data = target->data + offset;
                            data_size = target->size - offset;
                        if (target->getAddr() < pkt->getAddr()) {
                            int offset = pkt->getAddr() - target->getAddr();
                            pkt_data = pkt->getPtr<uint8_t>();
                            write_data = target->getPtr<uint8_t>() + offset;
                            data_size = target->getSize() - offset;
                            assert(data_size > 0);
                            if (data_size > pkt->size)
                                data_size = pkt->size;
                            if (data_size > pkt->getSize())
                                data_size = pkt->getSize();
                        } else {
                            int offset = target->addr - pkt->addr;
                            pkt_data = pkt->data + offset;
                            write_data = target->data;
                            data_size = pkt->size - offset;
                            assert(data_size > pkt->size);
                            if (data_size > target->size)
                                data_size = target->size;
                            int offset = target->getAddr() - pkt->getAddr();
                            pkt_data = pkt->getPtr<uint8_t>() + offset;
                            write_data = target->getPtr<uint8_t>();
                            data_size = pkt->getSize() - offset;
                            assert(data_size > pkt->getSize());
                            if (data_size > target->getSize())
                                data_size = target->getSize();
                        }

                        if (pkt->isWrite()) {

@@ -540,27 +542,27 @@ Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update)
            }
            for (int i = 0; i < writes.size(); ++i) {
                Packet * write = writes[i]->pkt;
                if (write->overlaps(pkt)) {
                if (write->intersect(pkt)) {
                    warn("Found outstanding write on an non-update probe");
                    uint8_t* pkt_data;
                    uint8_t* write_data;
                    int data_size;
                    if (write->addr < pkt->addr) {
                        int offset = pkt->addr - write->addr;
                        pkt_data = pkt->data;
                        write_data = write->data + offset;
                        data_size = write->size - offset;
                    if (write->getAddr() < pkt->getAddr()) {
                        int offset = pkt->getAddr() - write->getAddr();
                        pkt_data = pkt->getPtr<uint8_t>();
                        write_data = write->getPtr<uint8_t>() + offset;
                        data_size = write->getSize() - offset;
                        assert(data_size > 0);
                        if (data_size > pkt->size)
                            data_size = pkt->size;
                        if (data_size > pkt->getSize())
                            data_size = pkt->getSize();
                    } else {
                        int offset = write->addr - pkt->addr;
                        pkt_data = pkt->data + offset;
                        write_data = write->data;
                        data_size = pkt->size - offset;
                        assert(data_size > pkt->size);
                        if (data_size > write->size)
                            data_size = write->size;
                        int offset = write->getAddr() - pkt->getAddr();
                        pkt_data = pkt->getPtr<uint8_t>() + offset;
                        write_data = write->getPtr<uint8_t>();
                        data_size = pkt->getSize() - offset;
                        assert(data_size > pkt->getSize());
                        if (data_size > write->getSize())
                            data_size = write->getSize();
                    }

                    if (pkt->isWrite()) {

@@ -580,23 +582,20 @@ Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update)
        }
        if (!pkt->req->isUncacheable()) {
            // Fetch the cache block to fill
            Packet * busPkt = new Packet();
            busPkt->addr = blk_addr;
            busPkt->size = blkSize;
            busPkt->data = new uint8_t[blkSize];

            BlkType *blk = tags->findBlock(pkt);
            busPkt->cmd = coherence->getBusCmd(pkt->cmd,
            Packet::Command temp_cmd = coherence->getBusCmd(pkt->cmd,
                                               (blk)? blk->status : 0);

            busPkt->req->asid = pkt->req->asid;
            busPkt->xc = pkt->xc;
            busPkt->req->setThreadNum() = pkt->req->getThreadNum();
            Packet * busPkt = new Packet(pkt->req,temp_cmd, -1, blkSize);

            uint8_t* temp_data = new uint8_t[blkSize];
            busPkt->dataDynamicArray<uint8_t>(temp_data);

            busPkt->time = curTick;

            lat = mi->sendProbe(busPkt, update);
            lat = memSidePort->sendAtomic(busPkt);

            if (!busPkt->isSatisfied()) {
            if (!(busPkt->flags & SATISFIED)) {
                // blocked at a higher level, just return
                return 0;
            }

@@ -609,19 +608,19 @@ Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update)
                                 writebacks, pkt);
            // Handle writebacks if needed
            while (!writebacks.empty()){
                mi->sendProbe(writebacks.front(), update);
                memSidePort->sendAtomic(writebacks.front());
                writebacks.pop_front();
            }
            return lat + hitLatency;
        } else {
            return mi->sendProbe(pkt,update);
            return memSidePort->sendAtomic(pkt);
        }
    }
} else {
    // There was a cache hit.
    // Handle writebacks if needed
    while (!writebacks.empty()){
        mi->sendProbe(writebacks.front(), update);
        memSidePort->sendAtomic(writebacks.front());
        writebacks.pop_front();
    }

@@ -629,7 +628,7 @@ Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update)
        hits[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
    } else if (pkt->isWrite()) {
        // Still need to change data in all locations.
        return mi->sendProbe(pkt, update);
        return memSidePort->sendAtomic(pkt);
    }
    return curTick + lat;
}

@@ -641,11 +640,11 @@ template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::snoopProbe(PacketPtr &pkt, bool update)
{
    Addr blk_addr = pkt->addr & ~(Addr(blkSize-1));
    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    BlkType *blk = tags->findBlock(pkt);
    MSHR *mshr = missQueue->findMSHR(blk_addr, pkt->req->asid);
    MSHR *mshr = missQueue->findMSHR(blk_addr, pkt->req->getAsid());
    CacheBlk::State new_state = 0;
    bool satisfy = coherence->handleBusPktuest(pkt,blk,mshr, new_state);
    bool satisfy = coherence->handleBusRequest(pkt,blk,mshr, new_state);
    if (satisfy) {
        tags->handleSnoop(blk, new_state, pkt);
        return hitLatency;
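For illustration only: a standalone sketch of two calculations that recur in the hunks above, the block-address mask (addr & ~(blkSize - 1)) and the offset arithmetic used when merging an outstanding write into a probed request. Plain vectors stand in for Packet buffers; this is not the gem5 API.

    #include <algorithm>
    #include <cstdint>
    #include <cstring>
    #include <iostream>
    #include <vector>

    typedef uint64_t Addr;

    int main()
    {
        const Addr blkSize = 64;
        Addr addr = 0x12345;
        Addr blk_addr = addr & ~(Addr)(blkSize - 1);   // 0x12340
        std::cout << std::hex << "blk_addr = 0x" << blk_addr << "\n";

        // Probe covers [0x100, 0x110); outstanding write covers [0x108, 0x118).
        Addr pkt_addr = 0x100, write_addr = 0x108;
        std::vector<uint8_t> pkt_data(16, 0), write_buf(16, 0xff);

        // Same arithmetic as the probe() merge loop for the case where the
        // write starts inside the probed range: copy at the right offset.
        if (write_addr >= pkt_addr) {
            int offset = write_addr - pkt_addr;                        // 8
            int data_size = std::min<int>(pkt_data.size() - offset,
                                          write_buf.size());           // 8
            std::memcpy(&pkt_data[offset], &write_buf[0], data_size);
        }
        std::cout << std::dec << "merged bytes: "
                  << (int)pkt_data[7] << " " << (int)pkt_data[8] << "\n"; // 0 255
        return 0;
    }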
src/mem/cache/coherence/coherence_protocol.cc (283 changes, vendored)

@@ -47,7 +47,7 @@ using namespace std;
CoherenceProtocol::StateTransition::StateTransition()
    : busCmd(InvalidCmd), newState(-1), snoopFunc(invalidTransition)
    : busCmd(Packet::InvalidCmd), newState(-1), snoopFunc(invalidTransition)
{
}

@@ -59,132 +59,132 @@ CoherenceProtocol::regStats()
// requestCount and snoopCount arrays, most of these are invalid,
// so we just select the interesting ones to print here.

requestCount[Invalid][Read]
requestCount[Invalid][Packet::ReadReq]
    .name(name() + ".read_invalid")
    .desc("read misses to invalid blocks")
    ;

requestCount[Invalid][Write]
requestCount[Invalid][Packet::WriteReq]
    .name(name() +".write_invalid")
    .desc("write misses to invalid blocks")
    ;

requestCount[Invalid][Soft_Prefetch]
requestCount[Invalid][Packet::SoftPFReq]
    .name(name() +".swpf_invalid")
    .desc("soft prefetch misses to invalid blocks")
    ;

requestCount[Invalid][Hard_Prefetch]
requestCount[Invalid][Packet::HardPFReq]
    .name(name() +".hwpf_invalid")
    .desc("hard prefetch misses to invalid blocks")
    ;

requestCount[Shared][Write]
requestCount[Shared][Packet::WriteReq]
    .name(name() + ".write_shared")
    .desc("write misses to shared blocks")
    ;

requestCount[Owned][Write]
requestCount[Owned][Packet::WriteReq]
    .name(name() + ".write_owned")
    .desc("write misses to owned blocks")
    ;

snoopCount[Shared][Read]
snoopCount[Shared][Packet::ReadReq]
    .name(name() + ".snoop_read_shared")
    .desc("read snoops on shared blocks")
    ;

snoopCount[Shared][ReadEx]
snoopCount[Shared][Packet::ReadExReq]
    .name(name() + ".snoop_readex_shared")
    .desc("readEx snoops on shared blocks")
    ;

snoopCount[Shared][Upgrade]
snoopCount[Shared][Packet::UpgradeReq]
    .name(name() + ".snoop_upgrade_shared")
    .desc("upgradee snoops on shared blocks")
    ;

snoopCount[Modified][Read]
snoopCount[Modified][Packet::ReadReq]
    .name(name() + ".snoop_read_modified")
    .desc("read snoops on modified blocks")
    ;

snoopCount[Modified][ReadEx]
snoopCount[Modified][Packet::ReadExReq]
    .name(name() + ".snoop_readex_modified")
    .desc("readEx snoops on modified blocks")
    ;

snoopCount[Owned][Read]
snoopCount[Owned][Packet::ReadReq]
    .name(name() + ".snoop_read_owned")
    .desc("read snoops on owned blocks")
    ;

snoopCount[Owned][ReadEx]
snoopCount[Owned][Packet::ReadExReq]
    .name(name() + ".snoop_readex_owned")
    .desc("readEx snoops on owned blocks")
    ;

snoopCount[Owned][Upgrade]
snoopCount[Owned][Packet::UpgradeReq]
    .name(name() + ".snoop_upgrade_owned")
    .desc("upgrade snoops on owned blocks")
    ;

snoopCount[Exclusive][Read]
snoopCount[Exclusive][Packet::ReadReq]
    .name(name() + ".snoop_read_exclusive")
    .desc("read snoops on exclusive blocks")
    ;

snoopCount[Exclusive][ReadEx]
snoopCount[Exclusive][Packet::ReadExReq]
    .name(name() + ".snoop_readex_exclusive")
    .desc("readEx snoops on exclusive blocks")
    ;

snoopCount[Shared][Invalidate]
snoopCount[Shared][Packet::InvalidateReq]
    .name(name() + ".snoop_inv_shared")
    .desc("Invalidate snoops on shared blocks")
    ;

snoopCount[Owned][Invalidate]
snoopCount[Owned][Packet::InvalidateReq]
    .name(name() + ".snoop_inv_owned")
    .desc("Invalidate snoops on owned blocks")
    ;

snoopCount[Exclusive][Invalidate]
snoopCount[Exclusive][Packet::InvalidateReq]
    .name(name() + ".snoop_inv_exclusive")
    .desc("Invalidate snoops on exclusive blocks")
    ;

snoopCount[Modified][Invalidate]
snoopCount[Modified][Packet::InvalidateReq]
    .name(name() + ".snoop_inv_modified")
    .desc("Invalidate snoops on modified blocks")
    ;

snoopCount[Invalid][Invalidate]
snoopCount[Invalid][Packet::InvalidateReq]
    .name(name() + ".snoop_inv_invalid")
    .desc("Invalidate snoops on invalid blocks")
    ;

snoopCount[Shared][WriteInvalidate]
snoopCount[Shared][Packet::WriteInvalidateReq]
    .name(name() + ".snoop_writeinv_shared")
    .desc("WriteInvalidate snoops on shared blocks")
    ;

snoopCount[Owned][WriteInvalidate]
snoopCount[Owned][Packet::WriteInvalidateReq]
    .name(name() + ".snoop_writeinv_owned")
    .desc("WriteInvalidate snoops on owned blocks")
    ;

snoopCount[Exclusive][WriteInvalidate]
snoopCount[Exclusive][Packet::WriteInvalidateReq]
    .name(name() + ".snoop_writeinv_exclusive")
    .desc("WriteInvalidate snoops on exclusive blocks")
    ;

snoopCount[Modified][WriteInvalidate]
snoopCount[Modified][Packet::WriteInvalidateReq]
    .name(name() + ".snoop_writeinv_modified")
    .desc("WriteInvalidate snoops on modified blocks")
    ;

snoopCount[Invalid][WriteInvalidate]
snoopCount[Invalid][Packet::WriteInvalidateReq]
    .name(name() + ".snoop_writeinv_invalid")
    .desc("WriteInvalidate snoops on invalid blocks")
    ;

@@ -270,167 +270,168 @@ CoherenceProtocol::CoherenceProtocol(const string &name,
    fatal("");
}

Packet::CommandEnum writeToSharedCmd = doUpgrades ? Upgrade : ReadEx;
Packet::Command writeToSharedCmd = doUpgrades ? Packet::UpgradeReq : Packet::ReadExReq;
Packet::Command writeToSharedResp = doUpgrades ? Packet::UpgradeResp : Packet::ReadExResp;

//@todo add in hardware prefetch to this list
if (protocol == "msi") {
    // incoming requests: specify outgoing bus request
    transitionTable[Invalid][Read].onRequest(Read);
    transitionTable[Invalid][Write].onRequest(ReadEx);
    transitionTable[Shared][Write].onRequest(writeToSharedCmd);
    transitionTable[Invalid][Packet::ReadReq].onRequest(Packet::ReadReq);
    transitionTable[Invalid][Packet::WriteReq].onRequest(Packet::ReadExReq);
    transitionTable[Shared][Packet::WriteReq].onRequest(writeToSharedCmd);
    //Prefetching causes a read
    transitionTable[Invalid][Soft_Prefetch].onRequest(Read);
    transitionTable[Invalid][Hard_Prefetch].onRequest(Read);
    transitionTable[Invalid][Packet::SoftPFReq].onRequest(Packet::ReadReq);
    transitionTable[Invalid][Packet::HardPFReq].onRequest(Packet::ReadReq);

    // on response to given request: specify new state
    transitionTable[Invalid][Read].onResponse(Shared);
    transitionTable[Invalid][ReadEx].onResponse(Modified);
    transitionTable[Shared][writeToSharedCmd].onResponse(Modified);
    transitionTable[Invalid][Packet::ReadResp].onResponse(Shared);
    transitionTable[Invalid][Packet::ReadExResp].onResponse(Modified);
    transitionTable[Shared][writeToSharedResp].onResponse(Modified);

    // bus snoop transition functions
    transitionTable[Invalid][Read].onSnoop(nullTransition);
    transitionTable[Invalid][ReadEx].onSnoop(nullTransition);
    transitionTable[Shared][Read].onSnoop(nullTransition);
    transitionTable[Shared][ReadEx].onSnoop(invalidateTrans);
    transitionTable[Modified][ReadEx].onSnoop(supplyAndInvalidateTrans);
    transitionTable[Modified][Read].onSnoop(supplyAndGotoSharedTrans);
    transitionTable[Invalid][Packet::ReadReq].onSnoop(nullTransition);
    transitionTable[Invalid][Packet::ReadExReq].onSnoop(nullTransition);
    transitionTable[Shared][Packet::ReadReq].onSnoop(nullTransition);
    transitionTable[Shared][Packet::ReadExReq].onSnoop(invalidateTrans);
    transitionTable[Modified][Packet::ReadExReq].onSnoop(supplyAndInvalidateTrans);
    transitionTable[Modified][Packet::ReadReq].onSnoop(supplyAndGotoSharedTrans);
    //Tansitions on seeing a DMA (writeInv(samelevel) or DMAInv)
    transitionTable[Invalid][Invalidate].onSnoop(invalidateTrans);
    transitionTable[Shared][Invalidate].onSnoop(invalidateTrans);
    transitionTable[Modified][Invalidate].onSnoop(invalidateTrans);
    transitionTable[Invalid][WriteInvalidate].onSnoop(invalidateTrans);
    transitionTable[Shared][WriteInvalidate].onSnoop(invalidateTrans);
    transitionTable[Modified][WriteInvalidate].onSnoop(invalidateTrans);
    transitionTable[Invalid][Packet::InvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Shared][Packet::InvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Modified][Packet::InvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Invalid][Packet::WriteInvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Shared][Packet::WriteInvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Modified][Packet::WriteInvalidateReq].onSnoop(invalidateTrans);

    if (doUpgrades) {
        transitionTable[Invalid][Upgrade].onSnoop(nullTransition);
        transitionTable[Shared][Upgrade].onSnoop(invalidateTrans);
        transitionTable[Invalid][Packet::UpgradeReq].onSnoop(nullTransition);
        transitionTable[Shared][Packet::UpgradeReq].onSnoop(invalidateTrans);
    }
}

else if(protocol == "mesi") {
    // incoming requests: specify outgoing bus request
    transitionTable[Invalid][Read].onRequest(Read);
    transitionTable[Invalid][Write].onRequest(ReadEx);
    transitionTable[Shared][Write].onRequest(writeToSharedCmd);
    transitionTable[Invalid][Packet::ReadReq].onRequest(Packet::ReadReq);
    transitionTable[Invalid][Packet::WriteReq].onRequest(Packet::ReadExReq);
    transitionTable[Shared][Packet::WriteReq].onRequest(writeToSharedCmd);
    //Prefetching causes a read
    transitionTable[Invalid][Soft_Prefetch].onRequest(Read);
    transitionTable[Invalid][Hard_Prefetch].onRequest(Read);
    transitionTable[Invalid][Packet::SoftPFReq].onRequest(Packet::ReadReq);
    transitionTable[Invalid][Packet::HardPFReq].onRequest(Packet::ReadReq);

    // on response to given request: specify new state
    transitionTable[Invalid][Read].onResponse(Exclusive);
    transitionTable[Invalid][Packet::ReadResp].onResponse(Exclusive);
    //It will move into shared if the shared line is asserted in the
    //getNewState function
    transitionTable[Invalid][ReadEx].onResponse(Modified);
    transitionTable[Shared][writeToSharedCmd].onResponse(Modified);
    transitionTable[Invalid][Packet::ReadExResp].onResponse(Modified);
    transitionTable[Shared][writeToSharedResp].onResponse(Modified);

    // bus snoop transition functions
    transitionTable[Invalid][Read].onSnoop(nullTransition);
    transitionTable[Invalid][ReadEx].onSnoop(nullTransition);
    transitionTable[Shared][Read].onSnoop(assertShared);
    transitionTable[Shared][ReadEx].onSnoop(invalidateTrans);
    transitionTable[Exclusive][Read].onSnoop(assertShared);
    transitionTable[Exclusive][ReadEx].onSnoop(invalidateTrans);
    transitionTable[Modified][ReadEx].onSnoop(supplyAndInvalidateTrans);
    transitionTable[Modified][Read].onSnoop(supplyAndGotoSharedTrans);
    transitionTable[Invalid][Packet::ReadReq].onSnoop(nullTransition);
    transitionTable[Invalid][Packet::ReadExReq].onSnoop(nullTransition);
    transitionTable[Shared][Packet::ReadReq].onSnoop(assertShared);
    transitionTable[Shared][Packet::ReadExReq].onSnoop(invalidateTrans);
    transitionTable[Exclusive][Packet::ReadReq].onSnoop(assertShared);
    transitionTable[Exclusive][Packet::ReadExReq].onSnoop(invalidateTrans);
    transitionTable[Modified][Packet::ReadExReq].onSnoop(supplyAndInvalidateTrans);
    transitionTable[Modified][Packet::ReadReq].onSnoop(supplyAndGotoSharedTrans);
    //Tansitions on seeing a DMA (writeInv(samelevel) or DMAInv)
    transitionTable[Invalid][Invalidate].onSnoop(invalidateTrans);
    transitionTable[Shared][Invalidate].onSnoop(invalidateTrans);
    transitionTable[Modified][Invalidate].onSnoop(invalidateTrans);
    transitionTable[Exclusive][Invalidate].onSnoop(invalidateTrans);
    transitionTable[Invalid][WriteInvalidate].onSnoop(invalidateTrans);
    transitionTable[Shared][WriteInvalidate].onSnoop(invalidateTrans);
    transitionTable[Modified][WriteInvalidate].onSnoop(invalidateTrans);
    transitionTable[Exclusive][WriteInvalidate].onSnoop(invalidateTrans);
    transitionTable[Invalid][Packet::InvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Shared][Packet::InvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Modified][Packet::InvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Exclusive][Packet::InvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Invalid][Packet::WriteInvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Shared][Packet::WriteInvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Modified][Packet::WriteInvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Exclusive][Packet::WriteInvalidateReq].onSnoop(invalidateTrans);

    if (doUpgrades) {
        transitionTable[Invalid][Upgrade].onSnoop(nullTransition);
        transitionTable[Shared][Upgrade].onSnoop(invalidateTrans);
        transitionTable[Invalid][Packet::UpgradeReq].onSnoop(nullTransition);
        transitionTable[Shared][Packet::UpgradeReq].onSnoop(invalidateTrans);
    }
}

else if(protocol == "mosi") {
    // incoming requests: specify outgoing bus request
    transitionTable[Invalid][Read].onRequest(Read);
    transitionTable[Invalid][Write].onRequest(ReadEx);
    transitionTable[Shared][Write].onRequest(writeToSharedCmd);
    transitionTable[Owned][Write].onRequest(writeToSharedCmd);
    transitionTable[Invalid][Packet::ReadReq].onRequest(Packet::ReadReq);
    transitionTable[Invalid][Packet::WriteReq].onRequest(Packet::ReadExReq);
    transitionTable[Shared][Packet::WriteReq].onRequest(writeToSharedCmd);
    transitionTable[Owned][Packet::WriteReq].onRequest(writeToSharedCmd);
    //Prefetching causes a read
    transitionTable[Invalid][Soft_Prefetch].onRequest(Read);
    transitionTable[Invalid][Hard_Prefetch].onRequest(Read);
    transitionTable[Invalid][Packet::SoftPFReq].onRequest(Packet::ReadReq);
    transitionTable[Invalid][Packet::HardPFReq].onRequest(Packet::ReadReq);

    // on response to given request: specify new state
    transitionTable[Invalid][Read].onResponse(Shared);
    transitionTable[Invalid][ReadEx].onResponse(Modified);
    transitionTable[Shared][writeToSharedCmd].onResponse(Modified);
    transitionTable[Owned][writeToSharedCmd].onResponse(Modified);
    transitionTable[Invalid][Packet::ReadResp].onResponse(Shared);
    transitionTable[Invalid][Packet::ReadExResp].onResponse(Modified);
    transitionTable[Shared][writeToSharedResp].onResponse(Modified);
    transitionTable[Owned][writeToSharedResp].onResponse(Modified);

    // bus snoop transition functions
    transitionTable[Invalid][Read].onSnoop(nullTransition);
    transitionTable[Invalid][ReadEx].onSnoop(nullTransition);
    transitionTable[Invalid][Upgrade].onSnoop(nullTransition);
    transitionTable[Shared][Read].onSnoop(assertShared);
    transitionTable[Shared][ReadEx].onSnoop(invalidateTrans);
    transitionTable[Shared][Upgrade].onSnoop(invalidateTrans);
    transitionTable[Modified][ReadEx].onSnoop(supplyAndInvalidateTrans);
    transitionTable[Modified][Read].onSnoop(supplyAndGotoOwnedTrans);
    transitionTable[Owned][Read].onSnoop(supplyAndGotoOwnedTrans);
    transitionTable[Owned][ReadEx].onSnoop(supplyAndInvalidateTrans);
    transitionTable[Owned][Upgrade].onSnoop(invalidateTrans);
    transitionTable[Invalid][Packet::ReadReq].onSnoop(nullTransition);
    transitionTable[Invalid][Packet::ReadExReq].onSnoop(nullTransition);
    transitionTable[Invalid][Packet::UpgradeReq].onSnoop(nullTransition);
    transitionTable[Shared][Packet::ReadReq].onSnoop(assertShared);
    transitionTable[Shared][Packet::ReadExReq].onSnoop(invalidateTrans);
    transitionTable[Shared][Packet::UpgradeReq].onSnoop(invalidateTrans);
    transitionTable[Modified][Packet::ReadExReq].onSnoop(supplyAndInvalidateTrans);
    transitionTable[Modified][Packet::ReadReq].onSnoop(supplyAndGotoOwnedTrans);
    transitionTable[Owned][Packet::ReadReq].onSnoop(supplyAndGotoOwnedTrans);
    transitionTable[Owned][Packet::ReadExReq].onSnoop(supplyAndInvalidateTrans);
    transitionTable[Owned][Packet::UpgradeReq].onSnoop(invalidateTrans);
    //Tansitions on seeing a DMA (writeInv(samelevel) or DMAInv)
    transitionTable[Invalid][Invalidate].onSnoop(invalidateTrans);
    transitionTable[Shared][Invalidate].onSnoop(invalidateTrans);
    transitionTable[Modified][Invalidate].onSnoop(invalidateTrans);
    transitionTable[Owned][Invalidate].onSnoop(invalidateTrans);
    transitionTable[Invalid][WriteInvalidate].onSnoop(invalidateTrans);
    transitionTable[Shared][WriteInvalidate].onSnoop(invalidateTrans);
    transitionTable[Modified][WriteInvalidate].onSnoop(invalidateTrans);
    transitionTable[Owned][WriteInvalidate].onSnoop(invalidateTrans);
    transitionTable[Invalid][Packet::InvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Shared][Packet::InvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Modified][Packet::InvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Owned][Packet::InvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Invalid][Packet::WriteInvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Shared][Packet::WriteInvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Modified][Packet::WriteInvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Owned][Packet::WriteInvalidateReq].onSnoop(invalidateTrans);
}

else if(protocol == "moesi") {
    // incoming requests: specify outgoing bus request
    transitionTable[Invalid][Read].onRequest(Read);
    transitionTable[Invalid][Write].onRequest(ReadEx);
    transitionTable[Shared][Write].onRequest(writeToSharedCmd);
    transitionTable[Owned][Write].onRequest(writeToSharedCmd);
    transitionTable[Invalid][Packet::ReadReq].onRequest(Packet::ReadReq);
    transitionTable[Invalid][Packet::WriteReq].onRequest(Packet::ReadExReq);
    transitionTable[Shared][Packet::WriteReq].onRequest(writeToSharedCmd);
    transitionTable[Owned][Packet::WriteReq].onRequest(writeToSharedCmd);
    //Prefetching causes a read
    transitionTable[Invalid][Soft_Prefetch].onRequest(Read);
    transitionTable[Invalid][Hard_Prefetch].onRequest(Read);
    transitionTable[Invalid][Packet::SoftPFReq].onRequest(Packet::ReadReq);
    transitionTable[Invalid][Packet::HardPFReq].onRequest(Packet::ReadReq);

    // on response to given request: specify new state
    transitionTable[Invalid][Read].onResponse(Exclusive);
    transitionTable[Invalid][Packet::ReadResp].onResponse(Exclusive);
    //It will move into shared if the shared line is asserted in the
    //getNewState function
    transitionTable[Invalid][ReadEx].onResponse(Modified);
    transitionTable[Shared][writeToSharedCmd].onResponse(Modified);
    transitionTable[Owned][writeToSharedCmd].onResponse(Modified);
    transitionTable[Invalid][Packet::ReadExResp].onResponse(Modified);
    transitionTable[Shared][writeToSharedResp].onResponse(Modified);
    transitionTable[Owned][writeToSharedResp].onResponse(Modified);

    // bus snoop transition functions
    transitionTable[Invalid][Read].onSnoop(nullTransition);
    transitionTable[Invalid][ReadEx].onSnoop(nullTransition);
    transitionTable[Invalid][Upgrade].onSnoop(nullTransition);
    transitionTable[Shared][Read].onSnoop(assertShared);
    transitionTable[Shared][ReadEx].onSnoop(invalidateTrans);
    transitionTable[Shared][Upgrade].onSnoop(invalidateTrans);
    transitionTable[Exclusive][Read].onSnoop(assertShared);
    transitionTable[Exclusive][ReadEx].onSnoop(invalidateTrans);
    transitionTable[Modified][Read].onSnoop(supplyAndGotoOwnedTrans);
    transitionTable[Modified][ReadEx].onSnoop(supplyAndInvalidateTrans);
    transitionTable[Owned][Read].onSnoop(supplyAndGotoOwnedTrans);
    transitionTable[Owned][ReadEx].onSnoop(supplyAndInvalidateTrans);
    transitionTable[Owned][Upgrade].onSnoop(invalidateTrans);
    transitionTable[Invalid][Packet::ReadReq].onSnoop(nullTransition);
    transitionTable[Invalid][Packet::ReadExReq].onSnoop(nullTransition);
    transitionTable[Invalid][Packet::UpgradeReq].onSnoop(nullTransition);
    transitionTable[Shared][Packet::ReadReq].onSnoop(assertShared);
    transitionTable[Shared][Packet::ReadExReq].onSnoop(invalidateTrans);
    transitionTable[Shared][Packet::UpgradeReq].onSnoop(invalidateTrans);
    transitionTable[Exclusive][Packet::ReadReq].onSnoop(assertShared);
    transitionTable[Exclusive][Packet::ReadExReq].onSnoop(invalidateTrans);
    transitionTable[Modified][Packet::ReadReq].onSnoop(supplyAndGotoOwnedTrans);
    transitionTable[Modified][Packet::ReadExReq].onSnoop(supplyAndInvalidateTrans);
    transitionTable[Owned][Packet::ReadReq].onSnoop(supplyAndGotoOwnedTrans);
    transitionTable[Owned][Packet::ReadExReq].onSnoop(supplyAndInvalidateTrans);
    transitionTable[Owned][Packet::UpgradeReq].onSnoop(invalidateTrans);
    //Transitions on seeing a DMA (writeInv(samelevel) or DMAInv)
    transitionTable[Invalid][Invalidate].onSnoop(invalidateTrans);
    transitionTable[Shared][Invalidate].onSnoop(invalidateTrans);
    transitionTable[Exclusive][Invalidate].onSnoop(invalidateTrans);
    transitionTable[Modified][Invalidate].onSnoop(invalidateTrans);
    transitionTable[Owned][Invalidate].onSnoop(invalidateTrans);
    transitionTable[Invalid][WriteInvalidate].onSnoop(invalidateTrans);
    transitionTable[Shared][WriteInvalidate].onSnoop(invalidateTrans);
    transitionTable[Exclusive][WriteInvalidate].onSnoop(invalidateTrans);
    transitionTable[Modified][WriteInvalidate].onSnoop(invalidateTrans);
    transitionTable[Owned][WriteInvalidate].onSnoop(invalidateTrans);
    transitionTable[Invalid][Packet::InvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Shared][Packet::InvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Exclusive][Packet::InvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Modified][Packet::InvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Owned][Packet::InvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Invalid][Packet::WriteInvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Shared][Packet::WriteInvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Exclusive][Packet::WriteInvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Modified][Packet::WriteInvalidateReq].onSnoop(invalidateTrans);
    transitionTable[Owned][Packet::WriteInvalidateReq].onSnoop(invalidateTrans);
}

else {

@@ -446,14 +447,14 @@ CoherenceProtocol::getBusCmd(Packet::Command cmdIn, CacheBlk::State state,
                             MSHR *mshr)
{
    state &= stateMask;
    int cmd_idx = cmdIn.toIndex();
    int cmd_idx = (int) cmdIn;

    assert(0 <= state && state <= stateMax);
    assert(0 <= cmd_idx && cmd_idx < NUM_MEM_CMDS);

    Packet::Command cmdOut = transitionTable[state][cmd_idx].busCmd;

    assert(cmdOut != InvalidCmd);
    assert(cmdOut != Packet::InvalidCmd);

    ++requestCount[state][cmd_idx];

@@ -462,7 +463,7 @@ CoherenceProtocol::getBusCmd(Packet::Command cmdIn, CacheBlk::State state,
CacheBlk::State
CoherenceProtocol::getNewState(const Packet * &pkt, CacheBlk::State oldState)
CoherenceProtocol::getNewState(Packet * &pkt, CacheBlk::State oldState)
{
    CacheBlk::State state = oldState & stateMask;
    int cmd_idx = pkt->cmdToIndex();
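For illustration only: a standalone sketch of the [state][command] transition-table lookup that the constructor above populates. The enums below are hypothetical simplifications of CacheBlk::State and Packet::Command, and only a few MSI snoop entries are filled in.

    #include <iostream>

    enum State { Invalid, Shared, Modified, NumStates };
    enum Cmd   { ReadReq, ReadExReq, InvalidateReq, NumCmds };
    enum Action { NullTransition, AssertShared, InvalidateBlk, SupplyAndInvalidate };

    int main()
    {
        // Default every entry to "do nothing", then fill in the interesting
        // MSI snoop cases, much like the onSnoop() calls above.
        Action snoop[NumStates][NumCmds];
        for (int s = 0; s < NumStates; ++s)
            for (int c = 0; c < NumCmds; ++c)
                snoop[s][c] = NullTransition;

        snoop[Shared][ReadExReq]     = InvalidateBlk;
        snoop[Modified][ReadReq]     = AssertShared;   // supply + go shared in the real table
        snoop[Modified][ReadExReq]   = SupplyAndInvalidate;
        snoop[Shared][InvalidateReq] = InvalidateBlk;

        // A snoop lookup is then a single indexed read:
        Action a = snoop[Shared][ReadExReq];
        std::cout << "action for ReadEx snoop on a Shared block: " << a << "\n"; // prints 2
        return 0;
    }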
src/mem/cache/coherence/uni_coherence.cc (13 changes, vendored)

@@ -44,8 +44,8 @@ Packet *
UniCoherence::getPacket()
{
    bool unblock = cshrs.isFull();
    Packet * pkt = cshrs.getPkt();
    cshrs.markInService(pkt->senderState);
    Packet* pkt = cshrs.getReq();
    cshrs.markInService((MSHR*)pkt->senderState);
    if (!cshrs.havePending()) {
        cache->clearSlaveRequest(Request_Coherence);
    }

@@ -65,15 +65,12 @@ UniCoherence::handleBusRequest(Packet * &pkt, CacheBlk *blk, MSHR *mshr,
                               CacheBlk::State &new_state)
{
    new_state = 0;
    if (pkt->cmd.isInvalidate()) {
    if (pkt->isInvalidate()) {
        DPRINTF(Cache, "snoop inval on blk %x (blk ptr %x)\n",
                pkt->paddr, blk);
                pkt->getAddr(), blk);
        if (!cache->isTopLevel()) {
            // Forward to other caches
            Packet * tmp = new MemPkt();
            tmp->cmd = Invalidate;
            tmp->paddr = pkt->paddr;
            tmp->size = pkt->size;
            Packet * tmp = new Packet(pkt->req, Packet::InvalidateReq, -1);
            cshrs.allocate(tmp);
            cache->setSlaveRequest(Request_Coherence, curTick);
            if (cshrs.isFull()) {
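For illustration only: a standalone sketch of the forwarding path rewritten above, where a fresh invalidate packet is built around the original request and queued for the caches below. ToyRequest and ToyPacket are hypothetical stand-ins, not gem5's Request/Packet.

    #include <cstdint>
    #include <deque>
    #include <iostream>
    #include <memory>

    struct ToyRequest { uint64_t addr; int size; };

    struct ToyPacket
    {
        std::shared_ptr<ToyRequest> req;
        const char *cmd;
        ToyPacket(std::shared_ptr<ToyRequest> r, const char *c) : req(r), cmd(c) {}
    };

    int main()
    {
        auto req = std::make_shared<ToyRequest>(ToyRequest{0x4000, 64});
        ToyPacket snoop(req, "WriteInvalidateReq");

        // Forward to other caches: new packet, same request, invalidate command.
        std::deque<ToyPacket> cshrs;              // pending coherence requests
        cshrs.emplace_back(snoop.req, "InvalidateReq");

        std::cout << "queued " << cshrs.back().cmd << " for addr 0x"
                  << std::hex << cshrs.back().req->addr << "\n";
        return 0;
    }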
src/mem/cache/coherence/uni_coherence.hh (1 change, vendored)

@@ -32,6 +32,7 @@
#define __UNI_COHERENCE_HH__

#include "base/trace.hh"
#include "base/misc.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/mshr_queue.hh"
#include "mem/packet.hh"
src/mem/cache/miss/blocking_buffer.cc (91 changed lines)

@@ -33,12 +33,12 @@
  * Definitions of a simple buffer for a blocking cache.
  */

 #include "cpu/exec_context.hh"
 #include "cpu/smt.hh" //for maxThreadsPerCPU
 #include "mem/cache/base_cache.hh"
 #include "mem/cache/miss/blocking_buffer.hh"
 #include "mem/cache/prefetch/base_prefetcher.hh"
 #include "sim/eventq.hh" // for Event declaration.
 #include "mem/request.hh"

 using namespace TheISA;

@@ -72,26 +72,26 @@ BlockingBuffer::setPrefetcher(BasePrefetcher *_prefetcher)
 void
 BlockingBuffer::handleMiss(Packet * &pkt, int blk_size, Tick time)
 {
-    Addr blk_addr = pkt->paddr & ~(Addr)(blk_size - 1);
-    if (pkt->cmd.isWrite() && (pkt->req->isUncacheable() || !writeAllocate ||
-                               pkt->cmd.isNoResponse())) {
-        if (pkt->cmd.isNoResponse()) {
+    Addr blk_addr = pkt->getAddr() & ~(Addr)(blk_size - 1);
+    if (pkt->isWrite() && (pkt->req->isUncacheable() || !writeAllocate ||
+                           !pkt->needsResponse())) {
+        if (!pkt->needsResponse()) {
             wb.allocateAsBuffer(pkt);
         } else {
-            wb.allocate(pkt->cmd, blk_addr, pkt->req->asid, blk_size, pkt);
+            wb.allocate(pkt->cmd, blk_addr, pkt->req->getAsid(), blk_size, pkt);
         }
-        if (cache->doData()) {
-            memcpy(wb.pkt->data, pkt->data, blk_size);
-        }
+
+        memcpy(wb.pkt->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(), blk_size);
+
         cache->setBlocked(Blocked_NoWBBuffers);
         cache->setMasterRequest(Request_WB, time);
         return;
     }

-    if (pkt->cmd.isNoResponse()) {
+    if (!pkt->needsResponse()) {
         miss.allocateAsBuffer(pkt);
     } else {
-        miss.allocate(pkt->cmd, blk_addr, pkt->req->asid, blk_size, pkt);
+        miss.allocate(pkt->cmd, blk_addr, pkt->req->getAsid(), blk_size, pkt);
     }
     if (!pkt->req->isUncacheable()) {
         miss.pkt->flags |= CACHE_LINE_FILL;

@@ -112,27 +112,27 @@ BlockingBuffer::getPacket()
 void
 BlockingBuffer::setBusCmd(Packet * &pkt, Packet::Command cmd)
 {
-    MSHR *mshr = pkt->senderState;
+    MSHR *mshr = (MSHR*) pkt->senderState;
     mshr->originalCmd = pkt->cmd;
     if (pkt->isCacheFill())
-        pkt->cmd = cmd;
+        pkt->cmdOverride(cmd);
 }

 void
 BlockingBuffer::restoreOrigCmd(Packet * &pkt)
 {
-    pkt->cmd = pkt->senderState->originalCmd;
+    pkt->cmdOverride(((MSHR*)(pkt->senderState))->originalCmd);
 }

 void
 BlockingBuffer::markInService(Packet * &pkt)
 {
-    if (!pkt->isCacheFill() && pkt->cmd.isWrite()) {
+    if (!pkt->isCacheFill() && pkt->isWrite()) {
         // Forwarding a write/ writeback, don't need to change
         // the command
-        assert(pkt->senderState == &wb);
+        assert((MSHR*)pkt->senderState == &wb);
         cache->clearMasterRequest(Request_WB);
-        if (pkt->cmd.isNoResponse()) {
+        if (!pkt->needsResponse()) {
             assert(wb.getNumTargets() == 0);
             wb.deallocate();
             cache->clearBlocked(Blocked_NoWBBuffers);

@@ -140,9 +140,9 @@ BlockingBuffer::markInService(Packet * &pkt)
             wb.inService = true;
         }
     } else {
-        assert(pkt->senderState == &miss);
+        assert((MSHR*)pkt->senderState == &miss);
         cache->clearMasterRequest(Request_MSHR);
-        if (pkt->cmd.isNoResponse()) {
+        if (!pkt->needsResponse()) {
             assert(miss.getNumTargets() == 0);
             miss.deallocate();
             cache->clearBlocked(Blocked_NoMSHRs);

@@ -158,24 +158,24 @@ BlockingBuffer::handleResponse(Packet * &pkt, Tick time)
 {
     if (pkt->isCacheFill()) {
         // targets were handled in the cache tags
-        assert(pkt->senderState == &miss);
+        assert((MSHR*)pkt->senderState == &miss);
         miss.deallocate();
         cache->clearBlocked(Blocked_NoMSHRs);
     } else {
-        if (pkt->senderState->hasTargets()) {
+        if (((MSHR*)(pkt->senderState))->hasTargets()) {
             // Should only have 1 target if we had any
-            assert(pkt->senderState->getNumTargets() == 1);
-            Packet * target = pkt->senderState->getTarget();
-            pkt->senderState->popTarget();
-            if (cache->doData() && pkt->cmd.isRead()) {
-                memcpy(target->data, pkt->data, target->size);
+            assert(((MSHR*)(pkt->senderState))->getNumTargets() == 1);
+            Packet * target = ((MSHR*)(pkt->senderState))->getTarget();
+            ((MSHR*)(pkt->senderState))->popTarget();
+            if (pkt->isRead()) {
+                memcpy(target->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(), target->getSize());
             }
             cache->respond(target, time);
-            assert(!pkt->senderState->hasTargets());
+            assert(!((MSHR*)(pkt->senderState))->hasTargets());
         }

-        if (pkt->cmd.isWrite()) {
-            assert(pkt->senderState == &wb);
+        if (pkt->isWrite()) {
+            assert(((MSHR*)(pkt->senderState)) == &wb);
             wb.deallocate();
             cache->clearBlocked(Blocked_NoWBBuffers);
         } else {

@@ -186,15 +186,12 @@ BlockingBuffer::handleResponse(Packet * &pkt, Tick time)
 }

 void
-BlockingBuffer::squash(int req->getThreadNum()ber)
+BlockingBuffer::squash(int threadNum)
 {
-    if (miss.setThreadNum() == req->getThreadNum()ber) {
+    if (miss.threadNum == threadNum) {
         Packet * target = miss.getTarget();
         miss.popTarget();
-        assert(target->req->setThreadNum() == req->getThreadNum()ber);
-        if (target->completionEvent != NULL) {
-            delete target->completionEvent;
-        }
+        assert(target->req->getThreadNum() == threadNum);
         target = NULL;
         assert(!miss.hasTargets());
         miss.ntargets=0;

@@ -210,27 +207,20 @@ void
 BlockingBuffer::doWriteback(Addr addr, int asid,
                             int size, uint8_t *data, bool compressed)
 {

     // Generate request
-    Packet * pkt = new Packet();
-    pkt->paddr = addr;
-    pkt->req->asid = asid;
-    pkt->size = size;
-    pkt->data = new uint8_t[size];
+    Request * req = new Request(addr, size, 0);
+    Packet * pkt = new Packet(req, Packet::Writeback, -1);
+    uint8_t *new_data = new uint8_t[size];
+    pkt->dataDynamicArray<uint8_t>(new_data);
     if (data) {
-        memcpy(pkt->data, data, size);
+        memcpy(pkt->getPtr<uint8_t>(), data, size);
     }
-    /**
-     * @todo Need to find a way to charge the writeback to the "correct"
-     * thread.
-     */
-    pkt->req->setThreadNum() = 0;
-    pkt->cmd = Writeback;
     if (compressed) {
         pkt->flags |= COMPRESSED;
     }

     ///All writebacks charged to same thread @todo figure this out
     writebacks[pkt->req->getThreadNum()]++;

     wb.allocateAsBuffer(pkt);

@@ -249,9 +239,8 @@ BlockingBuffer::doWriteback(Packet * &pkt)

     // Since allocate as buffer copies the request,
     // need to copy data here.
-    if (cache->doData()) {
-        memcpy(wb.pkt->data, pkt->data, pkt->size);
-    }
+    memcpy(wb.pkt->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(), pkt->getSize());

     cache->setBlocked(Blocked_NoWBBuffers);
     cache->setMasterRequest(Request_WB, curTick);
 }
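Both doWriteback() variants now follow the same recipe: allocate a Request, wrap it in a Packet carrying the Writeback command, hand the packet an owned data buffer, and copy through getPtr<uint8_t>(). A small self-contained sketch of that recipe follows; Request, Packet, and buildWriteback are simplified stand-ins with hypothetical signatures, kept only close enough to the diff to show the ownership handoff.

    #include <cstdint>
    #include <cstring>

    // Simplified stand-ins for the m5 Request/Packet classes (illustration only).
    struct Request {
        uint64_t paddr;
        int size;
        int flags;
        Request(uint64_t a, int s, int f) : paddr(a), size(s), flags(f) {}
    };

    struct Packet {
        enum Command { Writeback };
        Request *req;
        Command cmd;
        short dest;
        uint8_t *data;
        bool dynamic;

        Packet(Request *_req, Command _cmd, short _dest)
            : req(_req), cmd(_cmd), dest(_dest), data(0), dynamic(false) {}
        ~Packet() { if (dynamic) delete [] data; }

        // Take ownership of a heap array, like dataDynamicArray<T>() in the diff.
        template <typename T>
        void dataDynamicArray(T *p) { data = reinterpret_cast<uint8_t *>(p); dynamic = true; }

        template <typename T>
        T *getPtr() { return reinterpret_cast<T *>(data); }
        int getSize() const { return req->size; }
    };

    // Build a writeback packet the way the reworked doWriteback() does:
    // request first, then the packet, then an owned data buffer to copy into.
    Packet *buildWriteback(uint64_t addr, int size, const uint8_t *data)
    {
        Request *req = new Request(addr, size, 0);
        Packet *pkt = new Packet(req, Packet::Writeback, -1);
        uint8_t *new_data = new uint8_t[size];
        pkt->dataDynamicArray<uint8_t>(new_data);
        if (data)
            std::memcpy(pkt->getPtr<uint8_t>(), data, size);
        return pkt;
    }

    int main()
    {
        uint8_t line[64] = {0};
        Packet *wb = buildWriteback(0x2000, sizeof(line), line);
        Request *req = wb->req;
        delete wb;      // frees the owned data buffer
        delete req;
        return 0;
    }

Keeping ownership of the buffer inside the packet is what lets a writeback be handed down the hierarchy without separately tracking who frees the data.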
src/mem/cache/miss/miss_queue.cc (163 changed lines)

@@ -34,7 +34,6 @@
  * Miss and writeback queue definitions.
  */

-#include "cpu/exec_context.hh"
 #include "cpu/smt.hh" //for maxThreadsPerCPU
 #include "mem/cache/base_cache.hh"
 #include "mem/cache/miss/miss_queue.hh"

@@ -59,6 +58,10 @@ MissQueue::MissQueue(int numMSHRs, int numTargets, int write_buffers,
 void
 MissQueue::regStats(const string &name)
 {
+    Request temp_req;
+    Packet::Command temp_cmd = Packet::ReadReq;
+    Packet temp_pkt(&temp_req, temp_cmd, 0); //@todo FIx command strings so this isn't neccessary
+
     using namespace Stats;

     writebacks

@@ -71,7 +74,7 @@ MissQueue::regStats(const string &name)
     // MSHR hit statistics
     for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
         Packet::Command cmd = (Packet::Command)access_idx;
-        const string &cstr = cmd.toString();
+        const string &cstr = temp_pkt.cmdIdxToString(cmd);

         mshr_hits[access_idx]
             .init(maxThreadsPerCPU)

@@ -86,20 +89,20 @@ MissQueue::regStats(const string &name)
         .desc("number of demand (read+write) MSHR hits")
         .flags(total)
         ;
-    demandMshrHits = mshr_hits[Read] + mshr_hits[Write];
+    demandMshrHits = mshr_hits[Packet::ReadReq] + mshr_hits[Packet::WriteReq];

     overallMshrHits
         .name(name + ".overall_mshr_hits")
         .desc("number of overall MSHR hits")
         .flags(total)
         ;
-    overallMshrHits = demandMshrHits + mshr_hits[Soft_Prefetch] +
-        mshr_hits[Hard_Prefetch];
+    overallMshrHits = demandMshrHits + mshr_hits[Packet::SoftPFReq] +
+        mshr_hits[Packet::HardPFReq];

     // MSHR miss statistics
     for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
-        Packet::Command cmd = (Packet::CommandEnum)access_idx;
-        const string &cstr = cmd.toString();
+        Packet::Command cmd = (Packet::Command)access_idx;
+        const string &cstr = temp_pkt.cmdIdxToString(cmd);

         mshr_misses[access_idx]
             .init(maxThreadsPerCPU)

@@ -114,20 +117,20 @@ MissQueue::regStats(const string &name)
         .desc("number of demand (read+write) MSHR misses")
         .flags(total)
         ;
-    demandMshrMisses = mshr_misses[Read] + mshr_misses[Write];
+    demandMshrMisses = mshr_misses[Packet::ReadReq] + mshr_misses[Packet::WriteReq];

     overallMshrMisses
         .name(name + ".overall_mshr_misses")
         .desc("number of overall MSHR misses")
         .flags(total)
         ;
-    overallMshrMisses = demandMshrMisses + mshr_misses[Soft_Prefetch] +
-        mshr_misses[Hard_Prefetch];
+    overallMshrMisses = demandMshrMisses + mshr_misses[Packet::SoftPFReq] +
+        mshr_misses[Packet::HardPFReq];

     // MSHR miss latency statistics
     for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
-        Packet::Command cmd = (Packet::CommandEnum)access_idx;
-        const string &cstr = cmd.toString();
+        Packet::Command cmd = (Packet::Command)access_idx;
+        const string &cstr = temp_pkt.cmdIdxToString(cmd);

         mshr_miss_latency[access_idx]
             .init(maxThreadsPerCPU)

@@ -142,7 +145,8 @@ MissQueue::regStats(const string &name)
         .desc("number of demand (read+write) MSHR miss cycles")
         .flags(total)
         ;
-    demandMshrMissLatency = mshr_miss_latency[Read] + mshr_miss_latency[Write];
+    demandMshrMissLatency = mshr_miss_latency[Packet::ReadReq]
+        + mshr_miss_latency[Packet::WriteReq];

     overallMshrMissLatency
         .name(name + ".overall_mshr_miss_latency")

@@ -150,12 +154,12 @@ MissQueue::regStats(const string &name)
         .flags(total)
         ;
     overallMshrMissLatency = demandMshrMissLatency +
-        mshr_miss_latency[Soft_Prefetch] + mshr_miss_latency[Hard_Prefetch];
+        mshr_miss_latency[Packet::SoftPFReq] + mshr_miss_latency[Packet::HardPFReq];

     // MSHR uncacheable statistics
     for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
-        Packet::Command cmd = (Packet::CommandEnum)access_idx;
-        const string &cstr = cmd.toString();
+        Packet::Command cmd = (Packet::Command)access_idx;
+        const string &cstr = temp_pkt.cmdIdxToString(cmd);

         mshr_uncacheable[access_idx]
             .init(maxThreadsPerCPU)

@@ -170,13 +174,14 @@ MissQueue::regStats(const string &name)
         .desc("number of overall MSHR uncacheable misses")
         .flags(total)
         ;
-    overallMshrUncacheable = mshr_uncacheable[Read] + mshr_uncacheable[Write]
-        + mshr_uncacheable[Soft_Prefetch] + mshr_uncacheable[Hard_Prefetch];
+    overallMshrUncacheable = mshr_uncacheable[Packet::ReadReq]
+        + mshr_uncacheable[Packet::WriteReq] + mshr_uncacheable[Packet::SoftPFReq]
+        + mshr_uncacheable[Packet::HardPFReq];

     // MSHR miss latency statistics
     for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
-        Packet::Command cmd = (Packet::CommandEnum)access_idx;
-        const string &cstr = cmd.toString();
+        Packet::Command cmd = (Packet::Command)access_idx;
+        const string &cstr = temp_pkt.cmdIdxToString(cmd);

         mshr_uncacheable_lat[access_idx]
             .init(maxThreadsPerCPU)

@@ -191,15 +196,16 @@ MissQueue::regStats(const string &name)
         .desc("number of overall MSHR uncacheable cycles")
         .flags(total)
         ;
-    overallMshrUncacheableLatency = mshr_uncacheable_lat[Read]
-        + mshr_uncacheable_lat[Write] + mshr_uncacheable_lat[Soft_Prefetch]
-        + mshr_uncacheable_lat[Hard_Prefetch];
+    overallMshrUncacheableLatency = mshr_uncacheable_lat[Packet::ReadReq]
+        + mshr_uncacheable_lat[Packet::WriteReq]
+        + mshr_uncacheable_lat[Packet::SoftPFReq]
+        + mshr_uncacheable_lat[Packet::HardPFReq];

 #if 0
     // MSHR access formulas
     for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
-        Packet::Command cmd = (Packet::CommandEnum)access_idx;
-        const string &cstr = cmd.toString();
+        Packet::Command cmd = (Packet::Command)access_idx;
+        const string &cstr = temp_pkt.cmdIdxToString(cmd);

         mshrAccesses[access_idx]
             .name(name + "." + cstr + "_mshr_accesses")

@@ -229,8 +235,8 @@ MissQueue::regStats(const string &name)

     // MSHR miss rate formulas
     for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
-        Packet::Command cmd = (Packet::CommandEnum)access_idx;
-        const string &cstr = cmd.toString();
+        Packet::Command cmd = (Packet::Command)access_idx;
+        const string &cstr = temp_pkt.cmdIdxToString(cmd);

         mshrMissRate[access_idx]
             .name(name + "." + cstr + "_mshr_miss_rate")

@@ -258,8 +264,8 @@ MissQueue::regStats(const string &name)

     // mshrMiss latency formulas
     for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
-        Packet::Command cmd = (Packet::CommandEnum)access_idx;
-        const string &cstr = cmd.toString();
+        Packet::Command cmd = (Packet::Command)access_idx;
+        const string &cstr = temp_pkt.cmdIdxToString(cmd);

         avgMshrMissLatency[access_idx]
             .name(name + "." + cstr + "_avg_mshr_miss_latency")

@@ -287,8 +293,8 @@ MissQueue::regStats(const string &name)

     // mshrUncacheable latency formulas
     for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
-        Packet::Command cmd = (Packet::CommandEnum)access_idx;
-        const string &cstr = cmd.toString();
+        Packet::Command cmd = (Packet::Command)access_idx;
+        const string &cstr = temp_pkt.cmdIdxToString(cmd);

         avgMshrUncacheableLatency[access_idx]
             .name(name + "." + cstr + "_avg_mshr_uncacheable_latency")

@@ -354,7 +360,7 @@ MissQueue::allocateMiss(Packet * &pkt, int size, Tick time)
     if (mq.isFull()) {
         cache->setBlocked(Blocked_NoMSHRs);
     }
-    if (pkt->cmd != Hard_Prefetch) {
+    if (pkt->cmd != Packet::HardPFReq) {
         //If we need to request the bus (not on HW prefetch), do so
         cache->setMasterRequest(Request_MSHR, time);
     }

@@ -365,18 +371,21 @@ MissQueue::allocateMiss(Packet * &pkt, int size, Tick time)
 MSHR*
 MissQueue::allocateWrite(Packet * &pkt, int size, Tick time)
 {
-    MSHR* mshr = wb.allocate(pkt,pkt->size);
+    MSHR* mshr = wb.allocate(pkt,pkt->getSize());
     mshr->order = order++;
-    if (cache->doData()){
-        if (pkt->isCompressed()) {
-            delete [] mshr->pkt->data;
-            mshr->pkt->actualSize = pkt->actualSize;
-            mshr->pkt->data = new uint8_t[pkt->actualSize];
-            memcpy(mshr->pkt->data, pkt->data, pkt->actualSize);
-        } else {
-            memcpy(mshr->pkt->data, pkt->data, pkt->size);
-        }
-    }
+
+    //REMOVING COMPRESSION FOR NOW
+#if 0
+    if (pkt->isCompressed()) {
+        mshr->pkt->deleteData();
+        mshr->pkt->actualSize = pkt->actualSize;
+        mshr->pkt->data = new uint8_t[pkt->actualSize];
+        memcpy(mshr->pkt->data, pkt->data, pkt->actualSize);
+    } else {
+#endif
+        memcpy(mshr->pkt->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(), pkt->getSize());
+    //{

     if (wb.isFull()) {
         cache->setBlocked(Blocked_NoWBBuffers);
     }

@@ -397,15 +406,15 @@ MissQueue::handleMiss(Packet * &pkt, int blkSize, Tick time)
     if (prefetchMiss) prefetcher->handleMiss(pkt, time);

     int size = blkSize;
-    Addr blkAddr = pkt->paddr & ~(Addr)(blkSize-1);
+    Addr blkAddr = pkt->getAddr() & ~(Addr)(blkSize-1);
     MSHR* mshr = NULL;
     if (!pkt->req->isUncacheable()) {
-        mshr = mq.findMatch(blkAddr, pkt->req->asid);
+        mshr = mq.findMatch(blkAddr, pkt->req->getAsid());
         if (mshr) {
             //@todo remove hw_pf here
             mshr_hits[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
-            if (mshr->getThreadNum() != pkt->req->getThreadNum()) {
-                mshr->setThreadNum() = -1;
+            if (mshr->threadNum != pkt->req->getThreadNum()) {
+                mshr->threadNum = -1;
             }
             mq.allocateTarget(mshr, pkt);
             if (mshr->pkt->isNoAllocate() && !pkt->isNoAllocate()) {

@@ -429,14 +438,14 @@ MissQueue::handleMiss(Packet * &pkt, int blkSize, Tick time)
     } else {
         //Count uncacheable accesses
         mshr_uncacheable[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
-        size = pkt->size;
+        size = pkt->getSize();
     }
-    if (pkt->cmd.isWrite() && (pkt->req->isUncacheable() || !writeAllocate ||
-                               pkt->cmd.isNoResponse())) {
+    if (pkt->isWrite() && (pkt->req->isUncacheable() || !writeAllocate ||
+                           !pkt->needsResponse())) {
         /**
          * @todo Add write merging here.
         */
-        mshr = allocateWrite(pkt, pkt->size, time);
+        mshr = allocateWrite(pkt, pkt->getSize(), time);
         return;
     }

@@ -468,7 +477,7 @@ MissQueue::getPacket()
         pkt = wb.getReq();
         // Need to search for earlier miss.
         MSHR *mshr = mq.findPending(pkt);
-        if (mshr && mshr->order < pkt->senderState->order) {
+        if (mshr && mshr->order < ((MSHR*)(pkt->senderState))->order) {
             // Service misses in order until conflict is cleared.
             return mq.getReq();
         }

@@ -491,7 +500,7 @@ MissQueue::getPacket()
             //Update statistic on number of prefetches issued (hwpf_mshr_misses)
             mshr_misses[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
             //It will request the bus for the future, but should clear that immedieatley
-            allocateMiss(pkt, pkt->size, curTick);
+            allocateMiss(pkt, pkt->getSize(), curTick);
             pkt = mq.getReq();
             assert(pkt); //We should get back a req b/c we just put one in
         }

@@ -503,7 +512,7 @@ void
 MissQueue::setBusCmd(Packet * &pkt, Packet::Command cmd)
 {
     assert(pkt->senderState != 0);
-    MSHR * mshr = pkt->senderState;
+    MSHR * mshr = (MSHR*)pkt->senderState;
     mshr->originalCmd = pkt->cmd;
     if (pkt->isCacheFill() || pkt->isNoAllocate())
         pkt->cmd = cmd;

@@ -512,7 +521,7 @@ MissQueue::setBusCmd(Packet * &pkt, Packet::Command cmd)
 void
 MissQueue::restoreOrigCmd(Packet * &pkt)
 {
-    pkt->cmd = pkt->senderState->originalCmd;
+    pkt->cmd = ((MSHR*)(pkt->senderState))->originalCmd;
 }

 void

@@ -526,11 +535,11 @@ MissQueue::markInService(Packet * &pkt)
      * @todo Should include MSHRQueue pointer in MSHR to select the correct
      * one.
      */
-    if ((!pkt->isCacheFill() && pkt->cmd.isWrite()) || pkt->cmd == Copy) {
+    if ((!pkt->isCacheFill() && pkt->isWrite())) {
         // Forwarding a write/ writeback, don't need to change
         // the command
         unblock = wb.isFull();
-        wb.markInService(pkt->senderState);
+        wb.markInService((MSHR*)pkt->senderState);
         if (!wb.havePending()){
             cache->clearMasterRequest(Request_WB);
         }

@@ -541,11 +550,11 @@ MissQueue::markInService(Packet * &pkt)
         }
     } else {
         unblock = mq.isFull();
-        mq.markInService(pkt->senderState);
+        mq.markInService((MSHR*)pkt->senderState);
         if (!mq.havePending()){
             cache->clearMasterRequest(Request_MSHR);
         }
-        if (pkt->senderState->originalCmd == Hard_Prefetch) {
+        if (((MSHR*)(pkt->senderState))->originalCmd == Packet::HardPFReq) {
             DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
                     cache->name());
             //Also clear pending if need be

@@ -568,8 +577,8 @@ MissQueue::markInService(Packet * &pkt)
 void
 MissQueue::handleResponse(Packet * &pkt, Tick time)
 {
-    MSHR* mshr = pkt->senderState;
-    if (pkt->senderState->originalCmd == Hard_Prefetch) {
+    MSHR* mshr = (MSHR*)pkt->senderState;
+    if (((MSHR*)(pkt->senderState))->originalCmd == Packet::HardPFReq) {
         DPRINTF(HWPrefetch, "%s:Handling the response to a HW_PF\n",
                 cache->name());
     }

@@ -617,8 +626,9 @@ MissQueue::handleResponse(Packet * &pkt, Tick time)
             assert(num_targets == 1);
             Packet * target = mshr->getTarget();
             mshr->popTarget();
-            if (cache->doData() && pkt->cmd.isRead()) {
-                memcpy(target->data, pkt->data, target->size);
+            if (pkt->isRead()) {
+                memcpy(target->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(),
+                       target->getSize());
             }
             cache->respond(target, time);
             assert(!mshr->hasTargets());

@@ -629,14 +639,15 @@ MissQueue::handleResponse(Packet * &pkt, Tick time)
         while (mshr->hasTargets()) {
             Packet * target = mshr->getTarget();
             mshr->popTarget();
-            if (cache->doData() && pkt->cmd.isRead()) {
-                memcpy(target->data, pkt->data, target->size);
+            if (pkt->isRead()) {
+                memcpy(target->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(),
+                       target->getSize());
             }
             cache->respond(target, time);
         }
     }

-    if (pkt->cmd.isWrite()) {
+    if (pkt->isWrite()) {
         // If the wrtie buffer is full, we might unblock now
         unblock = wb.isFull();
         wb.deallocate(mshr);

@@ -660,12 +671,12 @@ MissQueue::handleResponse(Packet * &pkt, Tick time)
 }

 void
-MissQueue::squash(int req->getThreadNum()ber)
+MissQueue::squash(int threadNum)
 {
     bool unblock = false;
     BlockedCause cause = NUM_BLOCKED_CAUSES;

-    if (noTargetMSHR && noTargetMSHR->setThreadNum() == req->getThreadNum()ber) {
+    if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
         noTargetMSHR = NULL;
         unblock = true;
         cause = Blocked_NoTargets;

@@ -674,7 +685,7 @@ MissQueue::squash(int req->getThreadNum()ber)
         unblock = true;
         cause = Blocked_NoMSHRs;
     }
-    mq.squash(req->getThreadNum()ber);
+    mq.squash(threadNum);
     if (!mq.havePending()) {
         cache->clearMasterRequest(Request_MSHR);
     }

@@ -701,9 +712,19 @@ MissQueue::doWriteback(Addr addr, int asid,
                        int size, uint8_t *data, bool compressed)
 {
     // Generate request
-    Packet * pkt = buildWritebackReq(addr, asid, size, data,
-                                     compressed);
+    Request * req = new Request(addr, size, 0);
+    Packet * pkt = new Packet(req, Packet::Writeback, -1);
+    uint8_t *new_data = new uint8_t[size];
+    pkt->dataDynamicArray<uint8_t>(new_data);
+    if (data) {
+        memcpy(pkt->getPtr<uint8_t>(), data, size);
+    }
+
+    if (compressed) {
+        pkt->flags |= COMPRESSED;
+    }
+
     ///All writebacks charged to same thread @todo figure this out
     writebacks[pkt->req->getThreadNum()]++;

     allocateWrite(pkt, 0, curTick);
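regStats() can no longer ask a Command value for its own name, so the code builds a throwaway packet (temp_pkt) purely to reach cmdIdxToString() when naming the per-command statistics. The sketch below shows the shape of that loop with a free-standing name table; Command, cmdIdxToString, and the statistic-name format are illustrative stand-ins, not the real mem/packet.hh definitions.

    #include <iostream>
    #include <string>

    // Illustrative stand-in for Packet::Command and cmdIdxToString(); the real
    // enum and name table live in mem/packet.hh / packet.cc.
    enum Command { InvalidCmd, ReadReq, WriteReq, SoftPFReq, HardPFReq, NUM_MEM_CMDS };

    std::string cmdIdxToString(int idx)
    {
        static const char *names[NUM_MEM_CMDS] = {
            "InvalidCmd", "ReadReq", "WriteReq", "SoftPFReq", "HardPFReq"
        };
        return names[idx];
    }

    int main()
    {
        // Mirrors the regStats() loops: one statistic name per command index.
        for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
            const std::string cstr = cmdIdxToString(access_idx);
            std::cout << "cache." << cstr << "_mshr_misses" << std::endl;
        }
        return 0;
    }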
src/mem/cache/miss/mshr.cc (35 changed lines)

@@ -50,13 +50,15 @@ MSHR::MSHR()
 {
     inService = false;
     ntargets = 0;
-    setThreadNum() = -1;
+    threadNum = -1;
 }

 void
 MSHR::allocate(Packet::Command cmd, Addr _addr, int _asid, int size,
                Packet * &target)
 {
+    assert("NEED TO FIX YET\n" && 0);
+#if 0
     assert(targets.empty());
     addr = _addr;
     asid = _asid;

@@ -74,6 +76,7 @@ MSHR::allocate(Packet::Command cmd, Addr _addr, int _asid, int size,
         pkt->req = target->req;
         allocateTarget(target);
     }
+#endif
 }

 // Since we aren't sure if data is being used, don't copy here.

@@ -83,17 +86,13 @@ MSHR::allocate(Packet::Command cmd, Addr _addr, int _asid, int size,
 void
 MSHR::allocateAsBuffer(Packet * &target)
 {
-    addr = target->paddr;
-    asid = target->req->asid;
-    setThreadNum() = target->req->getThreadNum();
-    pkt = new Packet();
-    pkt->addr = target->addr;
-    pkt->dest = target->dest;
-    pkt->cmd = target->cmd;
-    pkt->size = target->size;
-    pkt->req = target->req;
-    pkt->data = new uint8_t[target->size];
-    pkt->senderState = this;
+    addr = target->getAddr();
+    asid = target->req->getAsid();
+    threadNum = target->req->getThreadNum();
+    pkt = new Packet(target->req, target->cmd, -1);
+    uint8_t *new_data = new uint8_t[target->getSize()];
+    pkt->dataDynamicArray<uint8_t>(new_data);
+    pkt->senderState = (Packet::SenderState*)this;
     pkt->time = curTick;
 }

@@ -117,11 +116,11 @@ MSHR::allocateTarget(Packet * &target)
     //If we append an invalidate and we issued a read to the bus,
     //but now have some pending writes, we need to move
     //the invalidate to before the first non-read
-    if (inService && pkt->cmd.isRead() && target->cmd.isInvalidate()) {
+    if (inService && pkt->isRead() && target->isInvalidate()) {
         std::list<Packet *> temp;

         while (!targets.empty()) {
-            if (!targets.front()->cmd.isRead()) break;
+            if (!targets.front()->isRead()) break;
             //Place on top of temp stack
             temp.push_front(targets.front());
             //Remove from targets

@@ -148,8 +147,8 @@ MSHR::allocateTarget(Packet * &target)
      * @todo really prioritize the target commands.
      */

-    if (!inService && target->cmd.isWrite()) {
-        pkt->cmd = WriteReq;
+    if (!inService && target->isWrite()) {
+        pkt->cmd = Packet::WriteReq;
     }
 }

@@ -162,14 +161,14 @@ MSHR::dump()
             "inService: %d thread: %d\n"
             "Addr: %x asid: %d ntargets %d\n"
             "Targets:\n",
-            inService, getThreadNum(), addr, asid, ntargets);
+            inService, threadNum, addr, asid, ntargets);

     TargetListIterator tar_it = targets.begin();
     for (int i = 0; i < ntargets; i++) {
         assert(tar_it != targets.end());

         ccprintf(cerr, "\t%d: Addr: %x cmd: %d\n",
-                 i, (*tar_it)->paddr, (*tar_it)->cmdToIndex());
+                 i, (*tar_it)->getAddr(), (*tar_it)->cmdToIndex());

         tar_it++;
     }
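allocateAsBuffer() now parks the MSHR itself in pkt->senderState, and every consumer casts it back with (MSHR*). Below is a minimal sketch of that round trip, assuming a simplified Packet with an opaque SenderState base (the real classes live in mem/packet.hh and mem/cache/miss/mshr.hh); the sketch derives MSHR from SenderState so the compiler checks the casts, whereas the diff relies on C-style casts.

    #include <cassert>

    // Minimal sketch of the senderState pattern used throughout this change:
    // the packet carries an opaque SenderState pointer and the miss-handling
    // code casts it back to its own MSHR type. Types here are simplified
    // stand-ins, not the real m5 classes.
    struct Packet {
        struct SenderState {};          // opaque base, as in mem/packet.hh
        SenderState *senderState = nullptr;
    };

    struct MSHR : public Packet::SenderState {
        int threadNum = -1;
        Packet *pkt = nullptr;

        void allocateAsBuffer(Packet *target, int tid)
        {
            threadNum = tid;
            pkt = target;
            // Stash ourselves so the response path can find this MSHR again.
            pkt->senderState = static_cast<Packet::SenderState *>(this);
        }
    };

    int main()
    {
        Packet pkt;
        MSHR mshr;
        mshr.allocateAsBuffer(&pkt, 2);

        // Response side: recover the MSHR from the packet, as the
        // (MSHR*)pkt->senderState casts in the diff do.
        MSHR *owner = static_cast<MSHR *>(pkt.senderState);
        assert(owner == &mshr && owner->threadNum == 2);
        return 0;
    }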
src/mem/cache/miss/mshr.hh (2 changed lines)

@@ -66,7 +66,7 @@ class MSHR {
     /** True if the request has been sent to the bus. */
     bool inService;
     /** Thread number of the miss. */
-    int getThreadNum();
+    int threadNum;
     /** The request that is forwarded to the next level of the hierarchy. */
     Packet * pkt;
     /** The number of currently allocated targets. */
src/mem/cache/miss/mshr_queue.cc (30 changed lines)

@@ -94,17 +94,19 @@ MSHRQueue::findPending(Packet * &pkt) const
     MSHR::ConstIterator end = pendingList.end();
     for (; i != end; ++i) {
         MSHR *mshr = *i;
-        if (mshr->addr < pkt->addr) {
-            if (mshr->addr + mshr->pkt->size > pkt->addr) {
+        if (mshr->addr < pkt->getAddr()) {
+            if (mshr->addr + mshr->pkt->getSize() > pkt->getAddr()) {
                 return mshr;
             }
         } else {
-            if (pkt->addr + pkt->size > mshr->addr) {
+            if (pkt->getAddr() + pkt->getSize() > mshr->addr) {
                 return mshr;
             }
         }

         //need to check destination address for copies.
+        //TEMP NOT DOING COPIES
+#if 0
         if (mshr->pkt->cmd == Copy) {
             Addr dest = mshr->pkt->dest;
             if (dest < pkt->addr) {

@@ -117,6 +119,7 @@ MSHRQueue::findPending(Packet * &pkt) const
                 }
             }
         }
+#endif
     }
     return NULL;
 }

@@ -124,16 +127,16 @@ MSHRQueue::findPending(Packet * &pkt) const
 MSHR*
 MSHRQueue::allocate(Packet * &pkt, int size)
 {
-    Addr aligned_addr = pkt->addr & ~((Addr)size - 1);
+    Addr aligned_addr = pkt->getAddr() & ~((Addr)size - 1);
     MSHR *mshr = freeList.front();
     assert(mshr->getNumTargets() == 0);
     freeList.pop_front();

-    if (pkt->cmd.isNoResponse()) {
+    if (!pkt->needsResponse()) {
         mshr->allocateAsBuffer(pkt);
     } else {
         assert(size !=0);
-        mshr->allocate(pkt->cmd, aligned_addr, pkt->req->req->asid, size, pkt);
+        mshr->allocate(pkt->cmd, aligned_addr, pkt->req->getAsid(), size, pkt);
         allocatedTargets += 1;
     }
     mshr->allocIter = allocatedList.insert(allocatedList.end(), mshr);

@@ -149,7 +152,7 @@ MSHRQueue::allocateFetch(Addr addr, int asid, int size, Packet * &target)
     MSHR *mshr = freeList.front();
     assert(mshr->getNumTargets() == 0);
     freeList.pop_front();
-    mshr->allocate(Read, addr, asid, size, target);
+    mshr->allocate(Packet::ReadReq, addr, asid, size, target);
     mshr->allocIter = allocatedList.insert(allocatedList.end(), mshr);
     mshr->readyIter = pendingList.insert(pendingList.end(), mshr);

@@ -164,7 +167,7 @@ MSHRQueue::allocateTargetList(Addr addr, int asid, int size)
     assert(mshr->getNumTargets() == 0);
     freeList.pop_front();
     Packet * dummy;
-    mshr->allocate(Read, addr, asid, size, dummy);
+    mshr->allocate(Packet::ReadReq, addr, asid, size, dummy);
     mshr->allocIter = allocatedList.insert(allocatedList.end(), mshr);
     mshr->inService = true;
     ++inServiceMSHRs;

@@ -209,7 +212,7 @@ void
 MSHRQueue::markInService(MSHR* mshr)
 {
     //assert(mshr == pendingList.front());
-    if (mshr->pkt->cmd.isNoResponse()) {
+    if (!mshr->pkt->needsResponse()) {
         assert(mshr->getNumTargets() == 0);
         deallocate(mshr);
         return;

@@ -237,21 +240,18 @@ MSHRQueue::markPending(MSHR* mshr, Packet::Command cmd)
 }

 void
-MSHRQueue::squash(int req->getThreadNum()ber)
+MSHRQueue::squash(int threadNum)
 {
     MSHR::Iterator i = allocatedList.begin();
     MSHR::Iterator end = allocatedList.end();
     for (; i != end;) {
         MSHR *mshr = *i;
-        if (mshr->setThreadNum() == req->getThreadNum()ber) {
+        if (mshr->threadNum == threadNum) {
             while (mshr->hasTargets()) {
                 Packet * target = mshr->getTarget();
                 mshr->popTarget();

-                assert(target->req->setThreadNum() == req->getThreadNum()ber);
-                if (target->completionEvent != NULL) {
-                    delete target->completionEvent;
-                }
+                assert(target->req->getThreadNum() == threadNum);
                 target = NULL;
             }
             assert(!mshr->hasTargets());
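findPending() decides whether a packet conflicts with an outstanding entry by comparing address ranges through the new getAddr()/getSize() accessors. The standalone sketch below reproduces just that two-sided overlap test; MSHREntry, PacketView, and overlaps are hypothetical simplified types, not the m5 ones.

    #include <cassert>
    #include <cstdint>

    typedef uint64_t Addr;

    // Simplified view of what MSHRQueue::findPending() checks after the change:
    // ranges are compared through getAddr()/getSize() accessors instead of raw
    // paddr/size fields.
    struct MSHREntry {
        Addr addr;
        int size;
    };

    struct PacketView {
        Addr addr;
        int size;
        Addr getAddr() const { return addr; }
        int getSize() const { return size; }
    };

    // True when the outstanding entry and the packet touch overlapping bytes,
    // mirroring the two-sided comparison in findPending().
    bool overlaps(const MSHREntry &mshr, const PacketView &pkt)
    {
        if (mshr.addr < pkt.getAddr())
            return mshr.addr + mshr.size > pkt.getAddr();
        else
            return pkt.getAddr() + pkt.getSize() > mshr.addr;
    }

    int main()
    {
        MSHREntry m{0x100, 64};
        assert(overlaps(m, PacketView{0x120, 8}));    // inside the entry's block
        assert(!overlaps(m, PacketView{0x140, 8}));   // just past the entry
        assert(overlaps(m, PacketView{0x0f8, 16}));   // straddles the start
        return 0;
    }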
src/mem/packet.hh

@@ -52,6 +52,9 @@ typedef std::list<PacketPtr> PacketList;
 #define NACKED_LINE 1 << 0
 #define SATISFIED 1 << 1
 #define SHARED_LINE 1 << 2
+#define CACHE_LINE_FILL 1 << 3
+#define COMPRESSED 1 << 4
+#define NO_ALLOCATE 1 << 5

 //For statistics we need max number of commands, hard code it at
 //20 for now.  @todo fix later

@@ -66,6 +69,10 @@ typedef std::list<PacketPtr> PacketList;
  */
 class Packet
 {
+  public:
+    /** Temporary FLAGS field until cache gets working, this should be in coherence/sender state. */
+    uint64_t flags;
+
   private:
    /** A pointer to the data being transfered.  It can be differnt
     *  sizes at each level of the heirarchy so it belongs in the

@@ -93,6 +100,9 @@ class Packet
     /** The size of the request or transfer. */
     int size;

+    /** The offset within the block that represents the data. */
+    int offset;
+
     /** Device address (e.g., bus ID) of the source of the
      *  transaction. The source is not responsible for setting this
      *  field; it is set implicitly by the interconnect when the

@@ -110,6 +120,9 @@ class Packet
     bool addrSizeValid;
     /** Is the 'src' field valid? */
     bool srcValid;
+    /** Is the offset valid. */
+    bool offsetValid;
+

   public:

@@ -171,6 +184,7 @@ class Packet
     /** List of all commands associated with a packet. */
     enum Command
     {
+        InvalidCmd      = 0,
         ReadReq         = IsRead | IsRequest | NeedsResponse,
         WriteReq        = IsWrite | IsRequest | NeedsResponse,
         WriteReqNoAck   = IsWrite | IsRequest,

@@ -183,7 +197,10 @@ class Packet
         HardPFResp      = IsRead | IsRequest | IsHWPrefetch | IsResponse,
         InvalidateReq   = IsInvalidate | IsRequest,
         WriteInvalidateReq = IsWrite | IsInvalidate | IsRequest,
-        UpgradeReq      = IsInvalidate | NeedsResponse
+        UpgradeReq      = IsInvalidate | NeedsResponse,
+        UpgradeResp     = IsInvalidate | IsResponse,
+        ReadExReq       = IsRead | IsInvalidate | NeedsResponse,
+        ReadExResp      = IsRead | IsInvalidate | IsResponse
     };

     /** Return the string name of the cmd field (for debugging and

@@ -206,8 +223,8 @@ class Packet
     bool needsResponse() { return (cmd & NeedsResponse) != 0; }
     bool isInvalidate() { return (cmd * IsInvalidate) != 0; }

-    bool isCacheFill() { assert("Unimplemented yet\n" && 0); }
-    bool isNoAllocate() { assert("Unimplemented yet\n" && 0); }
+    bool isCacheFill() { return (flags & CACHE_LINE_FILL) != 0; }
+    bool isNoAllocate() { return (flags & NO_ALLOCATE) != 0; }

     /** Possible results of a packet's request. */
     enum Result

@@ -232,6 +249,10 @@ class Packet

     Addr getAddr() const { assert(addrSizeValid); return addr; }
     int getSize() const { assert(addrSizeValid); return size; }
+    int getOffset() const { assert(offsetValid); return offset; }
+
+    void addrOverride(Addr newAddr) { assert(addrSizeValid); addr = newAddr; }
+    void cmdOverride(Command newCmd) { cmd = newCmd; }

     /** Constructor.  Note that a Request object must be constructed
      *  first, but the Requests's physical address and size fields

@@ -241,10 +262,25 @@ class Packet
         : data(NULL), staticData(false), dynamicData(false), arrayData(false),
           addr(_req->paddr), size(_req->size), dest(_dest),
           addrSizeValid(_req->validPaddr),
-          srcValid(false),
+          srcValid(false), offsetValid(false),
           req(_req), coherence(NULL), senderState(NULL), cmd(_cmd),
           result(Unknown)
     {
+        flags = 0;
     }

+    /** Alternate constructor if you are trying to create a packet with
+     *  a request that is for a whole block, not the address from the req.
+     *  this allows for overriding the size/addr of the req.*/
+    Packet(Request *_req, Command _cmd, short _dest, int _blkSize)
+        : data(NULL), staticData(false), dynamicData(false), arrayData(false),
+          addr(_req->paddr & ~(_blkSize - 1)), size(_blkSize),
+          offset(_req->paddr & (_blkSize - 1)), dest(_dest),
+          addrSizeValid(_req->validPaddr), srcValid(false), offsetValid(true),
+          req(_req), coherence(NULL), senderState(NULL), cmd(_cmd),
+          result(Unknown)
+    {
+        flags = 0;
+    }
+
     /** Destructor. */
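The Command enum is built by OR-ing attribute bits, so predicates such as isRead(), needsResponse(), isCacheFill(), and isNoAllocate() reduce to single mask tests against either cmd or the temporary flags word. One line in the hunk above reads cmd * IsInvalidate, which looks like a typo for cmd & IsInvalidate; the sketch below uses the mask form throughout. Everything here (bit values, command subset, flag constants) is an illustrative stand-in trimmed from the real mem/packet.hh to show the encoding idea.

    #include <cassert>
    #include <cstdint>

    // Sketch of the command encoding: each command is a bitwise OR of
    // attribute bits, so the query methods are single mask tests.
    class Packet
    {
      public:
        enum CommandAttribute {
            IsRead        = 1 << 0,
            IsWrite       = 1 << 1,
            IsInvalidate  = 1 << 2,
            IsRequest     = 1 << 3,
            IsResponse    = 1 << 4,
            NeedsResponse = 1 << 5,
        };

        enum Command {
            InvalidCmd    = 0,
            ReadReq       = IsRead | IsRequest | NeedsResponse,
            WriteReq      = IsWrite | IsRequest | NeedsResponse,
            InvalidateReq = IsInvalidate | IsRequest,
            ReadExReq     = IsRead | IsInvalidate | NeedsResponse,
        };

        // Packet-level flags, standing in for the CACHE_LINE_FILL/NO_ALLOCATE defines.
        static constexpr uint64_t CACHE_LINE_FILL = 1 << 3;
        static constexpr uint64_t NO_ALLOCATE     = 1 << 5;

        Command cmd;
        uint64_t flags;

        Packet(Command _cmd) : cmd(_cmd), flags(0) {}

        bool isRead() const        { return (cmd & IsRead) != 0; }
        bool isWrite() const       { return (cmd & IsWrite) != 0; }
        bool isInvalidate() const  { return (cmd & IsInvalidate) != 0; }
        bool needsResponse() const { return (cmd & NeedsResponse) != 0; }
        bool isCacheFill() const   { return (flags & CACHE_LINE_FILL) != 0; }
        bool isNoAllocate() const  { return (flags & NO_ALLOCATE) != 0; }
    };

    int main()
    {
        Packet fill(Packet::ReadReq);
        fill.flags |= Packet::CACHE_LINE_FILL;
        assert(fill.isRead() && fill.needsResponse() && fill.isCacheFill());

        Packet inv(Packet::InvalidateReq);
        assert(inv.isInvalidate() && !inv.needsResponse() && !inv.isNoAllocate());
        return 0;
    }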