mem: Remove templates in cache model

This patch changes the cache implementation to rely on virtual methods
rather than using the replacement policy as a template argument.

There is no impact on simulation performance, and overall the changes
make it easier to modify (and subclass) the cache and/or the
replacement policy.
David Guillen 2015-05-05 03:22:21 -04:00
parent d0d933facc
commit 5287945a8b
14 changed files with 240 additions and 279 deletions
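
Before the per-file diffs, here is a minimal, self-contained C++ sketch of the general pattern this patch applies: a policy selected through a class template parameter becomes a policy selected at run time through an abstract base class and virtual calls. The names below (TemplatedCache, AbstractTags, LruTags, VirtualCache) are illustrative stand-ins, not the real gem5 types.

#include <iostream>
#include <memory>

// Before the change: the cache is a class template, so a separate
// instantiation of the whole cache exists for every policy type.
template <typename Tags>
class TemplatedCache
{
  public:
    void touch(int addr) { tags.access(addr); }
  private:
    Tags tags;
};

// After the change: a single concrete cache class talks to an abstract
// tags interface, and the policy is chosen at run time via virtual dispatch.
struct AbstractTags
{
    virtual ~AbstractTags() {}
    virtual void access(int addr) = 0;
};

struct LruTags : public AbstractTags
{
    void access(int addr) override { std::cout << "LRU access " << addr << "\n"; }
};

class VirtualCache
{
  public:
    explicit VirtualCache(AbstractTags *t) : tags(t) {}
    void touch(int addr) { tags->access(addr); }
  private:
    std::unique_ptr<AbstractTags> tags;
};

int main()
{
    VirtualCache cache(new LruTags);
    cache.touch(42);
    return 0;
}

The trade-off is an extra virtual call per tags operation, which the commit message reports has no impact on simulation performance.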

src/mem/cache/base.cc

@ -783,21 +783,7 @@ BaseCache::drain(DrainManager *dm)
BaseCache * BaseCache *
BaseCacheParams::create() BaseCacheParams::create()
{ {
unsigned numSets = size / (assoc * system->cacheLineSize());
assert(tags); assert(tags);
if (dynamic_cast<FALRU*>(tags)) { return new Cache(this);
if (numSets != 1)
fatal("Got FALRU tags with more than one set\n");
return new Cache<FALRU>(this);
} else if (dynamic_cast<LRU*>(tags)) {
if (numSets == 1)
warn("Consider using FALRU tags for a fully associative cache\n");
return new Cache<LRU>(this);
} else if (dynamic_cast<RandomRepl*>(tags)) {
return new Cache<RandomRepl>(this);
} else {
fatal("No suitable tags selected\n");
}
} }

src/mem/cache/blk.hh

@ -397,63 +397,19 @@ class CacheBlkPrintWrapper : public Printable
}; };
/** /**
* Wrap a method and present it as a cache block visitor. * Base class for cache block visitor, operating on the cache block
* * base class (later subclassed for the various tag classes). This
* For example the forEachBlk method in the tag arrays expects a * visitor class is used as part of the forEachBlk interface in the
* callable object/function as their parameter. This class wraps a * tag classes.
* method in an object and presents callable object that adheres to
* the cache block visitor protocol.
*/ */
template <typename T, typename BlkType> class CacheBlkVisitor
class CacheBlkVisitorWrapper
{ {
public: public:
typedef bool (T::*visitorPtr)(BlkType &blk);
CacheBlkVisitorWrapper(T &_obj, visitorPtr _visitor) CacheBlkVisitor() {}
: obj(_obj), visitor(_visitor) {} virtual ~CacheBlkVisitor() {}
bool operator()(BlkType &blk) { virtual bool operator()(CacheBlk &blk) = 0;
return (obj.*visitor)(blk);
}
private:
T &obj;
visitorPtr visitor;
};
/**
* Cache block visitor that determines if there are dirty blocks in a
* cache.
*
* Use with the forEachBlk method in the tag array to determine if the
* array contains dirty blocks.
*/
template <typename BlkType>
class CacheBlkIsDirtyVisitor
{
public:
CacheBlkIsDirtyVisitor()
: _isDirty(false) {}
bool operator()(BlkType &blk) {
if (blk.isDirty()) {
_isDirty = true;
return false;
} else {
return true;
}
}
/**
* Does the array contain a dirty line?
*
* \return true if yes, false otherwise.
*/
bool isDirty() const { return _isDirty; };
private:
bool _isDirty;
}; };
#endif //__CACHE_BLK_HH__ #endif //__CACHE_BLK_HH__
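
To make the new interface concrete: a visitor now subclasses CacheBlkVisitor and implements operator(); returning false stops the walk early, which is how CacheBlkIsDirtyVisitor bails out once it finds a dirty block. The class below is a hypothetical example written against the classes introduced in this patch, not code from the patch itself.

// Hypothetical example: count the valid blocks in a tag store through the
// CacheBlkVisitor interface.
class ValidBlockCounter : public CacheBlkVisitor
{
  public:
    ValidBlockCounter() : count(0) {}

    bool operator()(CacheBlk &blk) M5_ATTR_OVERRIDE
    {
        if (blk.isValid())
            ++count;
        return true; // keep visiting the remaining blocks
    }

    unsigned validBlocks() const { return count; }

  private:
    unsigned count;
};

// Typical use from cache code:
//     ValidBlockCounter counter;
//     tags->forEachBlk(counter);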

src/mem/cache/cache.cc

@ -41,11 +41,3 @@
#include "mem/cache/tags/random_repl.hh" #include "mem/cache/tags/random_repl.hh"
#include "mem/cache/cache_impl.hh" #include "mem/cache/cache_impl.hh"
// Template Instantiations
#ifndef DOXYGEN_SHOULD_SKIP_THIS
template class Cache<FALRU>;
template class Cache<LRU>;
template class Cache<RandomRepl>;
#endif //DOXYGEN_SHOULD_SKIP_THIS

src/mem/cache/cache.hh

@ -56,6 +56,7 @@
#include "mem/cache/base.hh" #include "mem/cache/base.hh"
#include "mem/cache/blk.hh" #include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh" #include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "sim/eventq.hh" #include "sim/eventq.hh"
//Forward decleration //Forward decleration
@ -66,17 +67,14 @@ class BasePrefetcher;
* supplying different template policies. TagStore handles all tag and data * supplying different template policies. TagStore handles all tag and data
* storage @sa TagStore, \ref gem5MemorySystem "gem5 Memory System" * storage @sa TagStore, \ref gem5MemorySystem "gem5 Memory System"
*/ */
template <class TagStore>
class Cache : public BaseCache class Cache : public BaseCache
{ {
public: public:
/** Define the type of cache block to use. */
typedef typename TagStore::BlkType BlkType; /** A typedef for a list of CacheBlk pointers. */
/** A typedef for a list of BlkType pointers. */ typedef std::list<CacheBlk*> BlkList;
typedef typename TagStore::BlkList BlkList;
protected: protected:
typedef CacheBlkVisitorWrapper<Cache<TagStore>, BlkType> WrappedBlkVisitor;
/** /**
* The CPU-side port extends the base cache slave port with access * The CPU-side port extends the base cache slave port with access
@ -87,7 +85,7 @@ class Cache : public BaseCache
private: private:
// a pointer to our specific cache implementation // a pointer to our specific cache implementation
Cache<TagStore> *cache; Cache *cache;
protected: protected:
@ -103,7 +101,7 @@ class Cache : public BaseCache
public: public:
CpuSidePort(const std::string &_name, Cache<TagStore> *_cache, CpuSidePort(const std::string &_name, Cache *_cache,
const std::string &_label); const std::string &_label);
}; };
@ -119,12 +117,12 @@ class Cache : public BaseCache
protected: protected:
Cache<TagStore> &cache; Cache &cache;
SnoopRespPacketQueue &snoopRespQueue; SnoopRespPacketQueue &snoopRespQueue;
public: public:
CacheReqPacketQueue(Cache<TagStore> &cache, MasterPort &port, CacheReqPacketQueue(Cache &cache, MasterPort &port,
SnoopRespPacketQueue &snoop_resp_queue, SnoopRespPacketQueue &snoop_resp_queue,
const std::string &label) : const std::string &label) :
ReqPacketQueue(cache, port, label), cache(cache), ReqPacketQueue(cache, port, label), cache(cache),
@ -153,7 +151,7 @@ class Cache : public BaseCache
SnoopRespPacketQueue _snoopRespQueue; SnoopRespPacketQueue _snoopRespQueue;
// a pointer to our specific cache implementation // a pointer to our specific cache implementation
Cache<TagStore> *cache; Cache *cache;
protected: protected:
@ -167,18 +165,18 @@ class Cache : public BaseCache
public: public:
MemSidePort(const std::string &_name, Cache<TagStore> *_cache, MemSidePort(const std::string &_name, Cache *_cache,
const std::string &_label); const std::string &_label);
}; };
/** Tag and data Storage */ /** Tag and data Storage */
TagStore *tags; BaseTags *tags;
/** Prefetcher */ /** Prefetcher */
BasePrefetcher *prefetcher; BasePrefetcher *prefetcher;
/** Temporary cache block for occasional transitory use */ /** Temporary cache block for occasional transitory use */
BlkType *tempBlock; CacheBlk *tempBlock;
/** /**
* This cache should allocate a block on a line-sized write miss. * This cache should allocate a block on a line-sized write miss.
@ -210,13 +208,13 @@ class Cache : public BaseCache
* @param writebacks List for any writebacks that need to be performed. * @param writebacks List for any writebacks that need to be performed.
* @return Boolean indicating whether the request was satisfied. * @return Boolean indicating whether the request was satisfied.
*/ */
bool access(PacketPtr pkt, BlkType *&blk, bool access(PacketPtr pkt, CacheBlk *&blk,
Cycles &lat, PacketList &writebacks); Cycles &lat, PacketList &writebacks);
/** /**
*Handle doing the Compare and Swap function for SPARC. *Handle doing the Compare and Swap function for SPARC.
*/ */
void cmpAndSwap(BlkType *blk, PacketPtr pkt); void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);
/** /**
* Find a block frame for new block at address addr targeting the * Find a block frame for new block at address addr targeting the
@ -225,7 +223,7 @@ class Cache : public BaseCache
* list. Return free block frame. May return NULL if there are * list. Return free block frame. May return NULL if there are
* no replaceable blocks at the moment. * no replaceable blocks at the moment.
*/ */
BlkType *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks); CacheBlk *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks);
/** /**
* Populates a cache block and handles all outstanding requests for the * Populates a cache block and handles all outstanding requests for the
@ -236,7 +234,7 @@ class Cache : public BaseCache
* @param writebacks List for any writebacks that need to be performed. * @param writebacks List for any writebacks that need to be performed.
* @return Pointer to the new cache block. * @return Pointer to the new cache block.
*/ */
BlkType *handleFill(PacketPtr pkt, BlkType *blk, CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
PacketList &writebacks); PacketList &writebacks);
@ -287,10 +285,10 @@ class Cache : public BaseCache
*/ */
void functionalAccess(PacketPtr pkt, bool fromCpuSide); void functionalAccess(PacketPtr pkt, bool fromCpuSide);
void satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk, void satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
bool deferred_response = false, bool deferred_response = false,
bool pending_downgrade = false); bool pending_downgrade = false);
bool satisfyMSHR(MSHR *mshr, PacketPtr pkt, BlkType *blk); bool satisfyMSHR(MSHR *mshr, PacketPtr pkt, CacheBlk *blk);
void doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data, void doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
bool already_copied, bool pending_inval); bool already_copied, bool pending_inval);
@ -300,7 +298,7 @@ class Cache : public BaseCache
* @param blk The cache block being snooped. * @param blk The cache block being snooped.
* @param new_state The new coherence state for the block. * @param new_state The new coherence state for the block.
*/ */
void handleSnoop(PacketPtr ptk, BlkType *blk, void handleSnoop(PacketPtr ptk, CacheBlk *blk,
bool is_timing, bool is_deferred, bool pending_inval); bool is_timing, bool is_deferred, bool pending_inval);
/** /**
@ -308,7 +306,7 @@ class Cache : public BaseCache
* @param blk The block to writeback. * @param blk The block to writeback.
* @return The writeback request for the block. * @return The writeback request for the block.
*/ */
PacketPtr writebackBlk(BlkType *blk); PacketPtr writebackBlk(CacheBlk *blk);
void memWriteback(); void memWriteback();
@ -321,7 +319,7 @@ class Cache : public BaseCache
* *
* \return Always returns true. * \return Always returns true.
*/ */
bool writebackVisitor(BlkType &blk); bool writebackVisitor(CacheBlk &blk);
/** /**
* Cache block visitor that invalidates all blocks in the cache. * Cache block visitor that invalidates all blocks in the cache.
* *
@ -329,7 +327,7 @@ class Cache : public BaseCache
* *
* \return Always returns true. * \return Always returns true.
*/ */
bool invalidateVisitor(BlkType &blk); bool invalidateVisitor(CacheBlk &blk);
/** /**
* Squash all requests associated with specified thread. * Squash all requests associated with specified thread.
@ -349,7 +347,7 @@ class Cache : public BaseCache
* @return A new Packet containing the request, or NULL if the * @return A new Packet containing the request, or NULL if the
* current request in cpu_pkt should just be forwarded on. * current request in cpu_pkt should just be forwarded on.
*/ */
PacketPtr getBusPacket(PacketPtr cpu_pkt, BlkType *blk, PacketPtr getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
bool needsExclusive) const; bool needsExclusive) const;
/** /**
@ -417,4 +415,62 @@ class Cache : public BaseCache
void unserialize(Checkpoint *cp, const std::string &section); void unserialize(Checkpoint *cp, const std::string &section);
}; };
/**
* Wrap a method and present it as a cache block visitor.
*
* For example the forEachBlk method in the tag arrays expects a
* callable object/function as their parameter. This class wraps a
* method in an object and presents callable object that adheres to
* the cache block visitor protocol.
*/
class CacheBlkVisitorWrapper : public CacheBlkVisitor
{
public:
typedef bool (Cache::*VisitorPtr)(CacheBlk &blk);
CacheBlkVisitorWrapper(Cache &_cache, VisitorPtr _visitor)
: cache(_cache), visitor(_visitor) {}
bool operator()(CacheBlk &blk) M5_ATTR_OVERRIDE {
return (cache.*visitor)(blk);
}
private:
Cache &cache;
VisitorPtr visitor;
};
/**
* Cache block visitor that determines if there are dirty blocks in a
* cache.
*
* Use with the forEachBlk method in the tag array to determine if the
* array contains dirty blocks.
*/
class CacheBlkIsDirtyVisitor : public CacheBlkVisitor
{
public:
CacheBlkIsDirtyVisitor()
: _isDirty(false) {}
bool operator()(CacheBlk &blk) M5_ATTR_OVERRIDE {
if (blk.isDirty()) {
_isDirty = true;
return false;
} else {
return true;
}
}
/**
* Does the array contain a dirty line?
*
* \return true if yes, false otherwise.
*/
bool isDirty() const { return _isDirty; };
private:
bool _isDirty;
};
#endif // __CACHE_HH__ #endif // __CACHE_HH__

src/mem/cache/cache_impl.hh

@ -65,15 +65,14 @@
#include "mem/cache/mshr.hh" #include "mem/cache/mshr.hh"
#include "sim/sim_exit.hh" #include "sim/sim_exit.hh"
template<class TagStore> Cache::Cache(const Params *p)
Cache<TagStore>::Cache(const Params *p)
: BaseCache(p), : BaseCache(p),
tags(dynamic_cast<TagStore*>(p->tags)), tags(p->tags),
prefetcher(p->prefetcher), prefetcher(p->prefetcher),
doFastWrites(true), doFastWrites(true),
prefetchOnAccess(p->prefetch_on_access) prefetchOnAccess(p->prefetch_on_access)
{ {
tempBlock = new BlkType(); tempBlock = new CacheBlk();
tempBlock->data = new uint8_t[blkSize]; tempBlock->data = new uint8_t[blkSize];
cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this, cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
@ -86,8 +85,7 @@ Cache<TagStore>::Cache(const Params *p)
prefetcher->setCache(this); prefetcher->setCache(this);
} }
template<class TagStore> Cache::~Cache()
Cache<TagStore>::~Cache()
{ {
delete [] tempBlock->data; delete [] tempBlock->data;
delete tempBlock; delete tempBlock;
@ -96,16 +94,14 @@ Cache<TagStore>::~Cache()
delete memSidePort; delete memSidePort;
} }
template<class TagStore>
void void
Cache<TagStore>::regStats() Cache::regStats()
{ {
BaseCache::regStats(); BaseCache::regStats();
} }
template<class TagStore>
void void
Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt) Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{ {
assert(pkt->isRequest()); assert(pkt->isRequest());
@ -145,11 +141,9 @@ Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
} }
template<class TagStore>
void void
Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk, Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
bool deferred_response, bool deferred_response, bool pending_downgrade)
bool pending_downgrade)
{ {
assert(pkt->isRequest()); assert(pkt->isRequest());
@ -254,9 +248,8 @@ Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
///////////////////////////////////////////////////// /////////////////////////////////////////////////////
template<class TagStore>
void void
Cache<TagStore>::markInService(MSHR *mshr, bool pending_dirty_resp) Cache::markInService(MSHR *mshr, bool pending_dirty_resp)
{ {
markInServiceInternal(mshr, pending_dirty_resp); markInServiceInternal(mshr, pending_dirty_resp);
#if 0 #if 0
@ -272,9 +265,8 @@ Cache<TagStore>::markInService(MSHR *mshr, bool pending_dirty_resp)
} }
template<class TagStore>
void void
Cache<TagStore>::squash(int threadNum) Cache::squash(int threadNum)
{ {
bool unblock = false; bool unblock = false;
BlockedCause cause = NUM_BLOCKED_CAUSES; BlockedCause cause = NUM_BLOCKED_CAUSES;
@ -300,10 +292,9 @@ Cache<TagStore>::squash(int threadNum)
// //
///////////////////////////////////////////////////// /////////////////////////////////////////////////////
template<class TagStore>
bool bool
Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk, Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
Cycles &lat, PacketList &writebacks) PacketList &writebacks)
{ {
// sanity check // sanity check
assert(pkt->isRequest()); assert(pkt->isRequest());
@ -319,7 +310,7 @@ Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
tags->clearLocks(); tags->clearLocks();
// flush and invalidate any existing block // flush and invalidate any existing block
BlkType *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure())); CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
if (old_blk && old_blk->isValid()) { if (old_blk && old_blk->isValid()) {
if (old_blk->isDirty()) if (old_blk->isDirty())
writebacks.push_back(writebackBlk(old_blk)); writebacks.push_back(writebackBlk(old_blk));
@ -403,9 +394,8 @@ class ForwardResponseRecord : public Packet::SenderState
ForwardResponseRecord() {} ForwardResponseRecord() {}
}; };
template<class TagStore>
void void
Cache<TagStore>::recvTimingSnoopResp(PacketPtr pkt) Cache::recvTimingSnoopResp(PacketPtr pkt)
{ {
DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__, DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
pkt->cmdString(), pkt->getAddr(), pkt->getSize()); pkt->cmdString(), pkt->getAddr(), pkt->getSize());
@ -442,9 +432,8 @@ Cache<TagStore>::recvTimingSnoopResp(PacketPtr pkt)
memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time); memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time);
} }
template<class TagStore>
void void
Cache<TagStore>::promoteWholeLineWrites(PacketPtr pkt) Cache::promoteWholeLineWrites(PacketPtr pkt)
{ {
// Cache line clearing instructions // Cache line clearing instructions
if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) && if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
@ -455,9 +444,8 @@ Cache<TagStore>::promoteWholeLineWrites(PacketPtr pkt)
} }
} }
template<class TagStore>
bool bool
Cache<TagStore>::recvTimingReq(PacketPtr pkt) Cache::recvTimingReq(PacketPtr pkt)
{ {
DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print()); DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print());
//@todo Add back in MemDebug Calls //@todo Add back in MemDebug Calls
@ -540,7 +528,7 @@ Cache<TagStore>::recvTimingReq(PacketPtr pkt)
// We use lookupLatency here because it is used to specify the latency // We use lookupLatency here because it is used to specify the latency
// to access. // to access.
Cycles lat = lookupLatency; Cycles lat = lookupLatency;
BlkType *blk = NULL; CacheBlk *blk = NULL;
bool satisfied = false; bool satisfied = false;
{ {
PacketList writebacks; PacketList writebacks;
@ -777,10 +765,9 @@ Cache<TagStore>::recvTimingReq(PacketPtr pkt)
// See comment in cache.hh. // See comment in cache.hh.
template<class TagStore>
PacketPtr PacketPtr
Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk, Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
bool needsExclusive) const bool needsExclusive) const
{ {
bool blkValid = blk && blk->isValid(); bool blkValid = blk && blk->isValid();
@ -840,9 +827,8 @@ Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
} }
template<class TagStore>
Tick Tick
Cache<TagStore>::recvAtomic(PacketPtr pkt) Cache::recvAtomic(PacketPtr pkt)
{ {
// We are in atomic mode so we pay just for lookupLatency here. // We are in atomic mode so we pay just for lookupLatency here.
Cycles lat = lookupLatency; Cycles lat = lookupLatency;
@ -860,7 +846,7 @@ Cache<TagStore>::recvAtomic(PacketPtr pkt)
// have to invalidate ourselves and any lower caches even if // have to invalidate ourselves and any lower caches even if
// upper cache will be responding // upper cache will be responding
if (pkt->isInvalidate()) { if (pkt->isInvalidate()) {
BlkType *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
if (blk && blk->isValid()) { if (blk && blk->isValid()) {
tags->invalidate(blk); tags->invalidate(blk);
blk->invalidate(); blk->invalidate();
@ -887,7 +873,7 @@ Cache<TagStore>::recvAtomic(PacketPtr pkt)
// writebacks... that would mean that someone used an atomic // writebacks... that would mean that someone used an atomic
// access in timing mode // access in timing mode
BlkType *blk = NULL; CacheBlk *blk = NULL;
PacketList writebacks; PacketList writebacks;
bool satisfied = access(pkt, blk, lat, writebacks); bool satisfied = access(pkt, blk, lat, writebacks);
@ -990,9 +976,8 @@ Cache<TagStore>::recvAtomic(PacketPtr pkt)
} }
template<class TagStore>
void void
Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide) Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide)
{ {
if (system->bypassCaches()) { if (system->bypassCaches()) {
// Packets from the memory side are snoop request and // Packets from the memory side are snoop request and
@ -1007,7 +992,7 @@ Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
Addr blk_addr = blockAlign(pkt->getAddr()); Addr blk_addr = blockAlign(pkt->getAddr());
bool is_secure = pkt->isSecure(); bool is_secure = pkt->isSecure();
BlkType *blk = tags->findBlock(pkt->getAddr(), is_secure); CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure); MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
pkt->pushLabel(name()); pkt->pushLabel(name());
@ -1067,9 +1052,8 @@ Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
///////////////////////////////////////////////////// /////////////////////////////////////////////////////
template<class TagStore>
void void
Cache<TagStore>::recvTimingResp(PacketPtr pkt) Cache::recvTimingResp(PacketPtr pkt)
{ {
assert(pkt->isResponse()); assert(pkt->isResponse());
@ -1099,7 +1083,7 @@ Cache<TagStore>::recvTimingResp(PacketPtr pkt)
// Initial target is used just for stats // Initial target is used just for stats
MSHR::Target *initial_tgt = mshr->getTarget(); MSHR::Target *initial_tgt = mshr->getTarget();
BlkType *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
int stats_cmd_idx = initial_tgt->pkt->cmdToIndex(); int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
Tick miss_latency = curTick() - initial_tgt->recvTime; Tick miss_latency = curTick() - initial_tgt->recvTime;
PacketList writebacks; PacketList writebacks;
@ -1328,12 +1312,8 @@ Cache<TagStore>::recvTimingResp(PacketPtr pkt)
delete pkt; delete pkt;
} }
template<class TagStore>
PacketPtr PacketPtr
Cache<TagStore>::writebackBlk(BlkType *blk) Cache::writebackBlk(CacheBlk *blk)
{ {
assert(blk && blk->isValid() && blk->isDirty()); assert(blk && blk->isValid() && blk->isDirty());
@ -1360,35 +1340,31 @@ Cache<TagStore>::writebackBlk(BlkType *blk)
return writeback; return writeback;
} }
template<class TagStore>
void void
Cache<TagStore>::memWriteback() Cache::memWriteback()
{ {
WrappedBlkVisitor visitor(*this, &Cache<TagStore>::writebackVisitor); CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);
tags->forEachBlk(visitor); tags->forEachBlk(visitor);
} }
template<class TagStore>
void void
Cache<TagStore>::memInvalidate() Cache::memInvalidate()
{ {
WrappedBlkVisitor visitor(*this, &Cache<TagStore>::invalidateVisitor); CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor);
tags->forEachBlk(visitor); tags->forEachBlk(visitor);
} }
template<class TagStore>
bool bool
Cache<TagStore>::isDirty() const Cache::isDirty() const
{ {
CacheBlkIsDirtyVisitor<BlkType> visitor; CacheBlkIsDirtyVisitor visitor;
tags->forEachBlk(visitor); tags->forEachBlk(visitor);
return visitor.isDirty(); return visitor.isDirty();
} }
template<class TagStore>
bool bool
Cache<TagStore>::writebackVisitor(BlkType &blk) Cache::writebackVisitor(CacheBlk &blk)
{ {
if (blk.isDirty()) { if (blk.isDirty()) {
assert(blk.isValid()); assert(blk.isValid());
@ -1408,9 +1384,8 @@ Cache<TagStore>::writebackVisitor(BlkType &blk)
return true; return true;
} }
template<class TagStore>
bool bool
Cache<TagStore>::invalidateVisitor(BlkType &blk) Cache::invalidateVisitor(CacheBlk &blk)
{ {
if (blk.isDirty()) if (blk.isDirty())
@ -1418,19 +1393,17 @@ Cache<TagStore>::invalidateVisitor(BlkType &blk)
if (blk.isValid()) { if (blk.isValid()) {
assert(!blk.isDirty()); assert(!blk.isDirty());
tags->invalidate(dynamic_cast< BlkType *>(&blk)); tags->invalidate(&blk);
blk.invalidate(); blk.invalidate();
} }
return true; return true;
} }
template<class TagStore> CacheBlk*
typename Cache<TagStore>::BlkType* Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
Cache<TagStore>::allocateBlock(Addr addr, bool is_secure,
PacketList &writebacks)
{ {
BlkType *blk = tags->findVictim(addr); CacheBlk *blk = tags->findVictim(addr);
if (blk->isValid()) { if (blk->isValid()) {
Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set); Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
@ -1465,10 +1438,8 @@ Cache<TagStore>::allocateBlock(Addr addr, bool is_secure,
// is called by both atomic and timing-mode accesses, and in atomic // is called by both atomic and timing-mode accesses, and in atomic
// mode we don't mess with the write buffer (we just perform the // mode we don't mess with the write buffer (we just perform the
// writebacks atomically once the original request is complete). // writebacks atomically once the original request is complete).
template<class TagStore> CacheBlk*
typename Cache<TagStore>::BlkType* Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks)
Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
PacketList &writebacks)
{ {
assert(pkt->isResponse() || pkt->isWriteInvalidate()); assert(pkt->isResponse() || pkt->isWriteInvalidate());
Addr addr = pkt->getAddr(); Addr addr = pkt->getAddr();
@ -1556,11 +1527,9 @@ Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
// //
///////////////////////////////////////////////////// /////////////////////////////////////////////////////
template<class TagStore>
void void
Cache<TagStore>:: Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data, bool already_copied, bool pending_inval)
bool already_copied, bool pending_inval)
{ {
// sanity check // sanity check
assert(req_pkt->isRequest()); assert(req_pkt->isRequest());
@ -1604,11 +1573,9 @@ doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
memSidePort->schedTimingSnoopResp(pkt, forward_time, true); memSidePort->schedTimingSnoopResp(pkt, forward_time, true);
} }
template<class TagStore>
void void
Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk, Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
bool is_timing, bool is_deferred, bool is_deferred, bool pending_inval)
bool pending_inval)
{ {
DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__, DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
pkt->cmdString(), pkt->getAddr(), pkt->getSize()); pkt->cmdString(), pkt->getAddr(), pkt->getSize());
@ -1745,9 +1712,8 @@ Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
} }
template<class TagStore>
void void
Cache<TagStore>::recvTimingSnoopReq(PacketPtr pkt) Cache::recvTimingSnoopReq(PacketPtr pkt)
{ {
DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__, DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
pkt->cmdString(), pkt->getAddr(), pkt->getSize()); pkt->cmdString(), pkt->getAddr(), pkt->getSize());
@ -1778,7 +1744,7 @@ Cache<TagStore>::recvTimingSnoopReq(PacketPtr pkt)
} }
bool is_secure = pkt->isSecure(); bool is_secure = pkt->isSecure();
BlkType *blk = tags->findBlock(pkt->getAddr(), is_secure); CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
Addr blk_addr = blockAlign(pkt->getAddr()); Addr blk_addr = blockAlign(pkt->getAddr());
MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure); MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
@ -1849,18 +1815,16 @@ Cache<TagStore>::recvTimingSnoopReq(PacketPtr pkt)
handleSnoop(pkt, blk, true, false, false); handleSnoop(pkt, blk, true, false, false);
} }
template<class TagStore>
bool bool
Cache<TagStore>::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
{ {
// Express snoop responses from master to slave, e.g., from L1 to L2 // Express snoop responses from master to slave, e.g., from L1 to L2
cache->recvTimingSnoopResp(pkt); cache->recvTimingSnoopResp(pkt);
return true; return true;
} }
template<class TagStore>
Tick Tick
Cache<TagStore>::recvAtomicSnoop(PacketPtr pkt) Cache::recvAtomicSnoop(PacketPtr pkt)
{ {
// Snoops shouldn't happen when bypassing caches // Snoops shouldn't happen when bypassing caches
assert(!system->bypassCaches()); assert(!system->bypassCaches());
@ -1871,16 +1835,15 @@ Cache<TagStore>::recvAtomicSnoop(PacketPtr pkt)
return 0; return 0;
} }
BlkType *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure()); CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
handleSnoop(pkt, blk, false, false, false); handleSnoop(pkt, blk, false, false, false);
// We consider forwardLatency here because a snoop occurs in atomic mode // We consider forwardLatency here because a snoop occurs in atomic mode
return forwardLatency * clockPeriod(); return forwardLatency * clockPeriod();
} }
template<class TagStore>
MSHR * MSHR *
Cache<TagStore>::getNextMSHR() Cache::getNextMSHR()
{ {
// Check both MSHR queue and write buffer for potential requests, // Check both MSHR queue and write buffer for potential requests,
// note that null does not mean there is no request, it could // note that null does not mean there is no request, it could
@ -1962,9 +1925,8 @@ Cache<TagStore>::getNextMSHR()
} }
template<class TagStore>
PacketPtr PacketPtr
Cache<TagStore>::getTimingPacket() Cache::getTimingPacket()
{ {
MSHR *mshr = getNextMSHR(); MSHR *mshr = getNextMSHR();
@ -1984,7 +1946,7 @@ Cache<TagStore>::getTimingPacket()
assert(tags->findBlock(mshr->blkAddr, mshr->isSecure) == NULL); assert(tags->findBlock(mshr->blkAddr, mshr->isSecure) == NULL);
pkt = tgt_pkt; pkt = tgt_pkt;
} else { } else {
BlkType *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure); CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
if (tgt_pkt->cmd == MemCmd::HardPFReq) { if (tgt_pkt->cmd == MemCmd::HardPFReq) {
// We need to check the caches above us to verify that // We need to check the caches above us to verify that
@ -2056,9 +2018,8 @@ Cache<TagStore>::getTimingPacket()
} }
template<class TagStore>
Tick Tick
Cache<TagStore>::nextMSHRReadyTime() const Cache::nextMSHRReadyTime() const
{ {
Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(), Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
writeBuffer.nextMSHRReadyTime()); writeBuffer.nextMSHRReadyTime());
@ -2073,9 +2034,8 @@ Cache<TagStore>::nextMSHRReadyTime() const
return nextReady; return nextReady;
} }
template<class TagStore>
void void
Cache<TagStore>::serialize(std::ostream &os) Cache::serialize(std::ostream &os)
{ {
bool dirty(isDirty()); bool dirty(isDirty());
@ -2094,9 +2054,8 @@ Cache<TagStore>::serialize(std::ostream &os)
SERIALIZE_SCALAR(bad_checkpoint); SERIALIZE_SCALAR(bad_checkpoint);
} }
template<class TagStore>
void void
Cache<TagStore>::unserialize(Checkpoint *cp, const std::string &section) Cache::unserialize(Checkpoint *cp, const std::string &section)
{ {
bool bad_checkpoint; bool bad_checkpoint;
UNSERIALIZE_SCALAR(bad_checkpoint); UNSERIALIZE_SCALAR(bad_checkpoint);
@ -2113,16 +2072,14 @@ Cache<TagStore>::unserialize(Checkpoint *cp, const std::string &section)
// //
/////////////// ///////////////
template<class TagStore>
AddrRangeList AddrRangeList
Cache<TagStore>::CpuSidePort::getAddrRanges() const Cache::CpuSidePort::getAddrRanges() const
{ {
return cache->getAddrRanges(); return cache->getAddrRanges();
} }
template<class TagStore>
bool bool
Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt) Cache::CpuSidePort::recvTimingReq(PacketPtr pkt)
{ {
assert(!cache->system->bypassCaches()); assert(!cache->system->bypassCaches());
@ -2150,24 +2107,21 @@ Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt)
return success; return success;
} }
template<class TagStore>
Tick Tick
Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt) Cache::CpuSidePort::recvAtomic(PacketPtr pkt)
{ {
return cache->recvAtomic(pkt); return cache->recvAtomic(pkt);
} }
template<class TagStore>
void void
Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt) Cache::CpuSidePort::recvFunctional(PacketPtr pkt)
{ {
// functional request // functional request
cache->functionalAccess(pkt, true); cache->functionalAccess(pkt, true);
} }
template<class TagStore> Cache::
Cache<TagStore>:: CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache,
CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
const std::string &_label) const std::string &_label)
: BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache) : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
{ {
@ -2179,33 +2133,29 @@ CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
// //
/////////////// ///////////////
template<class TagStore>
bool bool
Cache<TagStore>::MemSidePort::recvTimingResp(PacketPtr pkt) Cache::MemSidePort::recvTimingResp(PacketPtr pkt)
{ {
cache->recvTimingResp(pkt); cache->recvTimingResp(pkt);
return true; return true;
} }
// Express snooping requests to memside port // Express snooping requests to memside port
template<class TagStore>
void void
Cache<TagStore>::MemSidePort::recvTimingSnoopReq(PacketPtr pkt) Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
{ {
// handle snooping requests // handle snooping requests
cache->recvTimingSnoopReq(pkt); cache->recvTimingSnoopReq(pkt);
} }
template<class TagStore>
Tick Tick
Cache<TagStore>::MemSidePort::recvAtomicSnoop(PacketPtr pkt) Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
{ {
return cache->recvAtomicSnoop(pkt); return cache->recvAtomicSnoop(pkt);
} }
template<class TagStore>
void void
Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt) Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
{ {
// functional snoop (note that in contrast to atomic we don't have // functional snoop (note that in contrast to atomic we don't have
// a specific functionalSnoop method, as they have the same // a specific functionalSnoop method, as they have the same
@ -2213,9 +2163,8 @@ Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
cache->functionalAccess(pkt, false); cache->functionalAccess(pkt, false);
} }
template<class TagStore>
void void
Cache<TagStore>::CacheReqPacketQueue::sendDeferredPacket() Cache::CacheReqPacketQueue::sendDeferredPacket()
{ {
// sanity check // sanity check
assert(!waitingOnRetry); assert(!waitingOnRetry);
@ -2296,9 +2245,8 @@ Cache<TagStore>::CacheReqPacketQueue::sendDeferredPacket()
} }
} }
template<class TagStore> Cache::
Cache<TagStore>:: MemSidePort::MemSidePort(const std::string &_name, Cache *_cache,
MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
const std::string &_label) const std::string &_label)
: BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
_reqQueue(*_cache, *this, _snoopRespQueue, _label), _reqQueue(*_cache, *this, _snoopRespQueue, _label),

src/mem/cache/tags/base.hh

@ -53,6 +53,7 @@
#include "base/callback.hh" #include "base/callback.hh"
#include "base/statistics.hh" #include "base/statistics.hh"
#include "mem/cache/blk.hh"
#include "params/BaseTags.hh" #include "params/BaseTags.hh"
#include "sim/clocked_object.hh" #include "sim/clocked_object.hh"
@ -179,6 +180,38 @@ class BaseTags : public ClockedObject
* Print all tags used * Print all tags used
*/ */
virtual std::string print() const = 0; virtual std::string print() const = 0;
/**
* Find a block using the memory address
*/
virtual CacheBlk * findBlock(Addr addr, bool is_secure) const = 0;
/**
* Calculate the block offset of an address.
* @param addr the address to get the offset of.
* @return the block offset.
*/
int extractBlkOffset(Addr addr) const
{
return (addr & (Addr)(blkSize-1));
}
virtual void invalidate(CacheBlk *blk) = 0;
virtual CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
int context_src) = 0;
virtual Addr extractTag(Addr addr) const = 0;
virtual void insertBlock(PacketPtr pkt, CacheBlk *blk) = 0;
virtual Addr regenerateBlkAddr(Addr tag, unsigned set) const = 0;
virtual CacheBlk* findVictim(Addr addr) = 0;
virtual int extractSet(Addr addr) const = 0;
virtual void forEachBlk(CacheBlkVisitor &visitor) = 0;
}; };
class BaseTagsCallback : public Callback class BaseTagsCallback : public Callback
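
Because findBlock, accessBlock, findVictim, insertBlock, invalidate, extractTag, extractSet, regenerateBlkAddr and forEachBlk are now pure virtual on BaseTags, every concrete tag store (BaseSetAssoc, FALRU, and any future policy) has to override them. A hypothetical skeleton, using only the names declared above and with the definitions omitted, would look roughly like this:

// Hypothetical skeleton, not part of this patch: the minimum a new tag
// store must declare now that BaseTags defines a pure-virtual interface.
class MyTags : public BaseTags
{
  public:
    CacheBlk *findBlock(Addr addr, bool is_secure) const M5_ATTR_OVERRIDE;
    CacheBlk *accessBlock(Addr addr, bool is_secure, Cycles &lat,
                          int context_src) M5_ATTR_OVERRIDE;
    CacheBlk *findVictim(Addr addr) M5_ATTR_OVERRIDE;
    void insertBlock(PacketPtr pkt, CacheBlk *blk) M5_ATTR_OVERRIDE;
    void invalidate(CacheBlk *blk) M5_ATTR_OVERRIDE;
    Addr extractTag(Addr addr) const M5_ATTR_OVERRIDE;
    int extractSet(Addr addr) const M5_ATTR_OVERRIDE;
    Addr regenerateBlkAddr(Addr tag, unsigned set) const M5_ATTR_OVERRIDE;
    void forEachBlk(CacheBlkVisitor &visitor) M5_ATTR_OVERRIDE;
    // ...plus the virtuals BaseTags already required, such as print().
};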

src/mem/cache/tags/base_set_assoc.cc

@ -119,7 +119,7 @@ BaseSetAssoc::~BaseSetAssoc()
delete [] sets; delete [] sets;
} }
BaseSetAssoc::BlkType* CacheBlk*
BaseSetAssoc::findBlock(Addr addr, bool is_secure) const BaseSetAssoc::findBlock(Addr addr, bool is_secure) const
{ {
Addr tag = extractTag(addr); Addr tag = extractTag(addr);

src/mem/cache/tags/base_set_assoc.hh

@ -149,7 +149,7 @@ public:
* Invalidate the given block. * Invalidate the given block.
* @param blk The block to invalidate. * @param blk The block to invalidate.
*/ */
void invalidate(BlkType *blk) void invalidate(CacheBlk *blk)
{ {
assert(blk); assert(blk);
assert(blk->isValid()); assert(blk->isValid());
@ -172,7 +172,7 @@ public:
* @param lat The access latency. * @param lat The access latency.
* @return Pointer to the cache block if found. * @return Pointer to the cache block if found.
*/ */
BlkType* accessBlock(Addr addr, bool is_secure, Cycles &lat, CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
int context_src) int context_src)
{ {
Addr tag = extractTag(addr); Addr tag = extractTag(addr);
@ -212,7 +212,7 @@ public:
* @param asid The address space ID. * @param asid The address space ID.
* @return Pointer to the cache block if found. * @return Pointer to the cache block if found.
*/ */
BlkType* findBlock(Addr addr, bool is_secure) const; CacheBlk* findBlock(Addr addr, bool is_secure) const;
/** /**
* Find an invalid block to evict for the address provided. * Find an invalid block to evict for the address provided.
@ -221,7 +221,7 @@ public:
* @param addr The addr to a find a replacement candidate for. * @param addr The addr to a find a replacement candidate for.
* @return The candidate block. * @return The candidate block.
*/ */
BlkType* findVictim(Addr addr) const CacheBlk* findVictim(Addr addr)
{ {
BlkType *blk = NULL; BlkType *blk = NULL;
int set = extractSet(addr); int set = extractSet(addr);
@ -242,7 +242,7 @@ public:
* @param pkt Packet holding the address to update * @param pkt Packet holding the address to update
* @param blk The block to update. * @param blk The block to update.
*/ */
void insertBlock(PacketPtr pkt, BlkType *blk) void insertBlock(PacketPtr pkt, CacheBlk *blk)
{ {
Addr addr = pkt->getAddr(); Addr addr = pkt->getAddr();
MasterID master_id = pkt->req->masterId(); MasterID master_id = pkt->req->masterId();
@ -311,16 +311,6 @@ public:
return ((addr >> setShift) & setMask); return ((addr >> setShift) & setMask);
} }
/**
* Get the block offset from an address.
* @param addr The address to get the offset of.
* @return The block offset.
*/
int extractBlkOffset(Addr addr) const
{
return (addr & blkMask);
}
/** /**
* Align an address to the block size. * Align an address to the block size.
* @param addr the address to align. * @param addr the address to align.
@ -375,8 +365,7 @@ public:
* *
* \param visitor Visitor to call on each block. * \param visitor Visitor to call on each block.
*/ */
template <typename V> void forEachBlk(CacheBlkVisitor &visitor) M5_ATTR_OVERRIDE {
void forEachBlk(V &visitor) {
for (unsigned i = 0; i < numSets * assoc; ++i) { for (unsigned i = 0; i < numSets * assoc; ++i) {
if (!visitor(blks[i])) if (!visitor(blks[i]))
return; return;

src/mem/cache/tags/fa_lru.cc

@ -161,13 +161,19 @@ FALRU::hashLookup(Addr addr) const
} }
void void
FALRU::invalidate(FALRU::BlkType *blk) FALRU::invalidate(CacheBlk *blk)
{ {
assert(blk); assert(blk);
tagsInUse--; tagsInUse--;
} }
FALRUBlk* CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int context_src)
{
return accessBlock(addr, is_secure, lat, context_src, 0);
}
CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int context_src, FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int context_src,
int *inCache) int *inCache)
{ {
@ -206,7 +212,7 @@ FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int context_src,
} }
FALRUBlk* CacheBlk*
FALRU::findBlock(Addr addr, bool is_secure) const FALRU::findBlock(Addr addr, bool is_secure) const
{ {
Addr blkAddr = blkAlign(addr); Addr blkAddr = blkAlign(addr);
@ -220,7 +226,7 @@ FALRU::findBlock(Addr addr, bool is_secure) const
return blk; return blk;
} }
FALRUBlk* CacheBlk*
FALRU::findVictim(Addr addr) FALRU::findVictim(Addr addr)
{ {
FALRUBlk * blk = tail; FALRUBlk * blk = tail;
@ -243,7 +249,7 @@ FALRU::findVictim(Addr addr)
} }
void void
FALRU::insertBlock(PacketPtr pkt, FALRU::BlkType *blk) FALRU::insertBlock(PacketPtr pkt, CacheBlk *blk)
{ {
} }

src/mem/cache/tags/fa_lru.hh

@ -174,7 +174,7 @@ public:
* Invalidate a cache block. * Invalidate a cache block.
* @param blk The block to invalidate. * @param blk The block to invalidate.
*/ */
void invalidate(BlkType *blk); void invalidate(CacheBlk *blk);
/** /**
* Access block and update replacement data. May not succeed, in which case * Access block and update replacement data. May not succeed, in which case
@ -188,8 +188,14 @@ public:
* @param inCache The FALRUBlk::inCache flags. * @param inCache The FALRUBlk::inCache flags.
* @return Pointer to the cache block. * @return Pointer to the cache block.
*/ */
FALRUBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat, CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
int context_src, int *inCache = 0); int context_src, int *inCache);
/**
* Just a wrapper of above function to conform with the base interface.
*/
CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
int context_src);
/** /**
* Find the block in the cache, do not update the replacement data. * Find the block in the cache, do not update the replacement data.
@ -198,16 +204,16 @@ public:
* @param asid The address space ID. * @param asid The address space ID.
* @return Pointer to the cache block. * @return Pointer to the cache block.
*/ */
FALRUBlk* findBlock(Addr addr, bool is_secure) const; CacheBlk* findBlock(Addr addr, bool is_secure) const;
/** /**
* Find a replacement block for the address provided. * Find a replacement block for the address provided.
* @param pkt The request to a find a replacement candidate for. * @param pkt The request to a find a replacement candidate for.
* @return The block to place the replacement in. * @return The block to place the replacement in.
*/ */
FALRUBlk* findVictim(Addr addr); CacheBlk* findVictim(Addr addr);
void insertBlock(PacketPtr pkt, BlkType *blk); void insertBlock(PacketPtr pkt, CacheBlk *blk);
/** /**
* Return the block size of this cache. * Return the block size of this cache.
@ -260,23 +266,13 @@ public:
return 0; return 0;
} }
/**
* Calculate the block offset of an address.
* @param addr the address to get the offset of.
* @return the block offset.
*/
int extractBlkOffset(Addr addr) const
{
return (addr & (Addr)(blkSize-1));
}
/** /**
* Regenerate the block address from the tag and the set. * Regenerate the block address from the tag and the set.
* @param tag The tag of the block. * @param tag The tag of the block.
* @param set The set the block belongs to. * @param set The set the block belongs to.
* @return the block address. * @return the block address.
*/ */
Addr regenerateBlkAddr(Addr tag, int set) const Addr regenerateBlkAddr(Addr tag, unsigned set) const
{ {
return (tag); return (tag);
} }
@ -304,8 +300,7 @@ public:
* *
* \param visitor Visitor to call on each block. * \param visitor Visitor to call on each block.
*/ */
template <typename V> void forEachBlk(CacheBlkVisitor &visitor) M5_ATTR_OVERRIDE {
void forEachBlk(V &visitor) {
for (int i = 0; i < numBlocks; i++) { for (int i = 0; i < numBlocks; i++) {
if (!visitor(blks[i])) if (!visitor(blks[i]))
return; return;

src/mem/cache/tags/lru.cc

@ -54,10 +54,10 @@ LRU::LRU(const Params *p)
{ {
} }
BaseSetAssoc::BlkType* CacheBlk*
LRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int master_id) LRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int master_id)
{ {
BlkType *blk = BaseSetAssoc::accessBlock(addr, is_secure, lat, master_id); CacheBlk *blk = BaseSetAssoc::accessBlock(addr, is_secure, lat, master_id);
if (blk != NULL) { if (blk != NULL) {
// move this block to head of the MRU list // move this block to head of the MRU list
@ -70,8 +70,8 @@ LRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int master_id)
return blk; return blk;
} }
BaseSetAssoc::BlkType* CacheBlk*
LRU::findVictim(Addr addr) const LRU::findVictim(Addr addr)
{ {
int set = extractSet(addr); int set = extractSet(addr);
// grab a replacement candidate // grab a replacement candidate
@ -95,7 +95,7 @@ LRU::insertBlock(PacketPtr pkt, BlkType *blk)
} }
void void
LRU::invalidate(BlkType *blk) LRU::invalidate(CacheBlk *blk)
{ {
BaseSetAssoc::invalidate(blk); BaseSetAssoc::invalidate(blk);

src/mem/cache/tags/lru.hh

@ -69,11 +69,11 @@ class LRU : public BaseSetAssoc
*/ */
~LRU() {} ~LRU() {}
BlkType* accessBlock(Addr addr, bool is_secure, Cycles &lat, CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
int context_src); int context_src);
BlkType* findVictim(Addr addr) const; CacheBlk* findVictim(Addr addr);
void insertBlock(PacketPtr pkt, BlkType *blk); void insertBlock(PacketPtr pkt, BlkType *blk);
void invalidate(BlkType *blk); void invalidate(CacheBlk *blk);
}; };
#endif // __MEM_CACHE_TAGS_LRU_HH__ #endif // __MEM_CACHE_TAGS_LRU_HH__

src/mem/cache/tags/random_repl.cc

@ -44,16 +44,16 @@ RandomRepl::RandomRepl(const Params *p)
{ {
} }
BaseSetAssoc::BlkType* CacheBlk*
RandomRepl::accessBlock(Addr addr, bool is_secure, Cycles &lat, int master_id) RandomRepl::accessBlock(Addr addr, bool is_secure, Cycles &lat, int master_id)
{ {
return BaseSetAssoc::accessBlock(addr, is_secure, lat, master_id); return BaseSetAssoc::accessBlock(addr, is_secure, lat, master_id);
} }
BaseSetAssoc::BlkType* CacheBlk*
RandomRepl::findVictim(Addr addr) const RandomRepl::findVictim(Addr addr)
{ {
BlkType *blk = BaseSetAssoc::findVictim(addr); CacheBlk *blk = BaseSetAssoc::findVictim(addr);
// if all blocks are valid, pick a replacement at random // if all blocks are valid, pick a replacement at random
if (blk->isValid()) { if (blk->isValid()) {
@ -77,7 +77,7 @@ RandomRepl::insertBlock(PacketPtr pkt, BlkType *blk)
} }
void void
RandomRepl::invalidate(BlkType *blk) RandomRepl::invalidate(CacheBlk *blk)
{ {
BaseSetAssoc::invalidate(blk); BaseSetAssoc::invalidate(blk);
} }

src/mem/cache/tags/random_repl.hh

@ -58,11 +58,11 @@ class RandomRepl : public BaseSetAssoc
*/ */
~RandomRepl() {} ~RandomRepl() {}
BlkType* accessBlock(Addr addr, bool is_secure, Cycles &lat, CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
int context_src); int context_src);
BlkType* findVictim(Addr addr) const; CacheBlk* findVictim(Addr addr);
void insertBlock(PacketPtr pkt, BlkType *blk); void insertBlock(PacketPtr pkt, BlkType *blk);
void invalidate(BlkType *blk); void invalidate(CacheBlk *blk);
}; };
#endif // __MEM_CACHE_TAGS_RANDOM_REPL_HH__ #endif // __MEM_CACHE_TAGS_RANDOM_REPL_HH__