Turn the cache MissQueue/BlockingBuffer into a virtual object (the new MissBuffer base class) instead of a template parameter.

--HG--
extra : convert_revision : fce0fbd041149b9c781eb23f480ba84fddbfd4a0
Steve Reinhardt 2006-12-04 09:10:53 -08:00
parent 51e3688701
commit 5fbf3aa471
17 changed files with 449 additions and 261 deletions
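
In outline, the change replaces the Buffering template parameter on Cache (and on the prefetchers) with an abstract MissBuffer base class; MissQueue and BlockingBuffer now derive from it, and the shared writeback statistic plus the setCache/setPrefetcher plumbing move into the base. The following is a hypothetical, condensed sketch of that pattern using simplified stand-in types, not the literal m5 source:

// Hypothetical sketch of the refactoring: compile-time policy -> virtual object.
// Stand-in types; the real declarations live in mem/cache/miss/miss_buffer.hh.
struct Packet {};
typedef Packet *PacketPtr;
typedef long long Tick;

// New abstract interface the cache talks to at run time.
class MissBuffer
{
  public:
    explicit MissBuffer(bool write_allocate) : writeAllocate(write_allocate) {}
    virtual ~MissBuffer() {}
    virtual void handleMiss(PacketPtr &pkt, int blk_size, Tick time) = 0;
    virtual PacketPtr getPacket() = 0;
  protected:
    const bool writeAllocate;   // shared state hoisted out of the old classes
};

// Concrete policies derive from MissBuffer instead of being substituted
// as a template argument.
class BlockingBuffer : public MissBuffer
{
  public:
    explicit BlockingBuffer(bool write_allocate) : MissBuffer(write_allocate) {}
    virtual void handleMiss(PacketPtr &, int, Tick) {}
    virtual PacketPtr getPacket() { return 0; }
};

// The cache drops the Buffering parameter and holds a plain pointer.
template <class TagStore, class Coherence>
class Cache
{
  public:
    explicit Cache(MissBuffer *mb) : missQueue(mb) {}
  protected:
    MissBuffer *missQueue;      // was: Buffering *missQueue
};

The explicit template instantiation lists in cache.cc shrink accordingly, since the miss-handling policy no longer multiplies the number of Cache specializations per tag store.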

@ -108,6 +108,7 @@ base_sources = Split('''
mem/cache/coherence/coherence_protocol.cc
mem/cache/coherence/uni_coherence.cc
mem/cache/miss/blocking_buffer.cc
mem/cache/miss/miss_buffer.cc
mem/cache/miss/miss_queue.cc
mem/cache/miss/mshr.cc
mem/cache/miss/mshr_queue.cc

@ -73,38 +73,28 @@
#if defined(USE_CACHE_FALRU)
template class Cache<CacheTags<FALRU>, BlockingBuffer, SimpleCoherence>;
template class Cache<CacheTags<FALRU>, BlockingBuffer, UniCoherence>;
template class Cache<CacheTags<FALRU>, MissQueue, SimpleCoherence>;
template class Cache<CacheTags<FALRU>, MissQueue, UniCoherence>;
template class Cache<CacheTags<FALRU>, SimpleCoherence>;
template class Cache<CacheTags<FALRU>, UniCoherence>;
#endif
#if defined(USE_CACHE_IIC)
template class Cache<CacheTags<IIC>, BlockingBuffer, SimpleCoherence>;
template class Cache<CacheTags<IIC>, BlockingBuffer, UniCoherence>;
template class Cache<CacheTags<IIC>, MissQueue, SimpleCoherence>;
template class Cache<CacheTags<IIC>, MissQueue, UniCoherence>;
template class Cache<CacheTags<IIC>, SimpleCoherence>;
template class Cache<CacheTags<IIC>, UniCoherence>;
#endif
#if defined(USE_CACHE_LRU)
template class Cache<CacheTags<LRU>, BlockingBuffer, SimpleCoherence>;
template class Cache<CacheTags<LRU>, BlockingBuffer, UniCoherence>;
template class Cache<CacheTags<LRU>, MissQueue, SimpleCoherence>;
template class Cache<CacheTags<LRU>, MissQueue, UniCoherence>;
template class Cache<CacheTags<LRU>, SimpleCoherence>;
template class Cache<CacheTags<LRU>, UniCoherence>;
#endif
#if defined(USE_CACHE_SPLIT)
template class Cache<CacheTags<Split>, BlockingBuffer, SimpleCoherence>;
template class Cache<CacheTags<Split>, BlockingBuffer, UniCoherence>;
template class Cache<CacheTags<Split>, MissQueue, SimpleCoherence>;
template class Cache<CacheTags<Split>, MissQueue, UniCoherence>;
template class Cache<CacheTags<Split>, SimpleCoherence>;
template class Cache<CacheTags<Split>, UniCoherence>;
#endif
#if defined(USE_CACHE_SPLIT_LIFO)
template class Cache<CacheTags<SplitLIFO>, BlockingBuffer, SimpleCoherence>;
template class Cache<CacheTags<SplitLIFO>, BlockingBuffer, UniCoherence>;
template class Cache<CacheTags<SplitLIFO>, MissQueue, SimpleCoherence>;
template class Cache<CacheTags<SplitLIFO>, MissQueue, UniCoherence>;
template class Cache<CacheTags<SplitLIFO>, SimpleCoherence>;
template class Cache<CacheTags<SplitLIFO>, UniCoherence>;
#endif
#endif //DOXYGEN_SHOULD_SKIP_THIS

@ -42,6 +42,7 @@
#include "cpu/smt.hh" // SMT_MAX_THREADS
#include "mem/cache/base_cache.hh"
#include "mem/cache/miss/miss_buffer.hh"
#include "mem/cache/prefetch/prefetcher.hh"
//Forward declaration
@ -55,7 +56,7 @@ class MSHR;
* @sa MissQueue. Coherence handles all coherence policy details @sa
* UniCoherence, SimpleMultiCoherence.
*/
template <class TagStore, class Buffering, class Coherence>
template <class TagStore, class Coherence>
class Cache : public BaseCache
{
public:
@ -68,12 +69,12 @@ class Cache : public BaseCache
/** Tag and data Storage */
TagStore *tags;
/** Miss and Writeback handler */
Buffering *missQueue;
MissBuffer *missQueue;
/** Coherence protocol. */
Coherence *coherence;
/** Prefetcher */
Prefetcher<TagStore, Buffering> *prefetcher;
Prefetcher<TagStore> *prefetcher;
/**
* The clock ratio of the outgoing bus.
@ -105,16 +106,16 @@ class Cache : public BaseCache
{
public:
TagStore *tags;
Buffering *missQueue;
MissBuffer *missQueue;
Coherence *coherence;
BaseCache::Params baseParams;
Prefetcher<TagStore, Buffering> *prefetcher;
Prefetcher<TagStore> *prefetcher;
bool prefetchAccess;
int hitLatency;
Params(TagStore *_tags, Buffering *mq, Coherence *coh,
Params(TagStore *_tags, MissBuffer *mq, Coherence *coh,
BaseCache::Params params,
Prefetcher<TagStore, Buffering> *_prefetcher,
Prefetcher<TagStore> *_prefetcher,
bool prefetch_access, int hit_latency)
: tags(_tags), missQueue(mq), coherence(coh),
baseParams(params),

@ -208,26 +208,26 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache)
END_INIT_SIM_OBJECT_PARAMS(BaseCache)
#define BUILD_CACHE(t, b, c) do { \
Prefetcher<CacheTags<t>, b> *pf; \
#define BUILD_CACHE(t, c) do { \
Prefetcher<CacheTags<t> > *pf; \
if (pf_policy == "tagged") { \
BUILD_TAGGED_PREFETCHER(t, b); \
BUILD_TAGGED_PREFETCHER(t); \
} \
else if (pf_policy == "stride") { \
BUILD_STRIDED_PREFETCHER(t, b); \
BUILD_STRIDED_PREFETCHER(t); \
} \
else if (pf_policy == "ghb") { \
BUILD_GHB_PREFETCHER(t, b); \
BUILD_GHB_PREFETCHER(t); \
} \
else { \
BUILD_NULL_PREFETCHER(t, b); \
BUILD_NULL_PREFETCHER(t); \
} \
Cache<CacheTags<t>, b, c>::Params params(tagStore, mq, coh, \
Cache<CacheTags<t>, c>::Params params(tagStore, mq, coh, \
base_params, \
pf, \
prefetch_access, hit_latency); \
Cache<CacheTags<t>, b, c> *retval = \
new Cache<CacheTags<t>, b, c>(getInstanceName(), params); \
Cache<CacheTags<t>, c> *retval = \
new Cache<CacheTags<t>, c>(getInstanceName(), params); \
return retval; \
} while (0)
@ -235,7 +235,7 @@ return retval; \
panic("%s not compiled into M5", x); \
} while (0)
#define BUILD_COMPRESSED_CACHE(TAGS, tags, b, c) \
#define BUILD_COMPRESSED_CACHE(TAGS, tags, c) \
do { \
CompressionAlgorithm *compAlg; \
if (compressed_bus || store_compressed) { \
@ -247,87 +247,87 @@ return retval; \
new CacheTags<TAGS>(tags, compression_latency, true, \
store_compressed, adaptive_compression, \
compAlg, prefetch_miss); \
BUILD_CACHE(TAGS, b, c); \
BUILD_CACHE(TAGS, c); \
} while (0)
#if defined(USE_CACHE_FALRU)
#define BUILD_FALRU_CACHE(b,c) do { \
#define BUILD_FALRU_CACHE(c) do { \
FALRU *tags = new FALRU(block_size, size, latency); \
BUILD_COMPRESSED_CACHE(FALRU, tags, b, c); \
BUILD_COMPRESSED_CACHE(FALRU, tags, c); \
} while (0)
#else
#define BUILD_FALRU_CACHE(b, c) BUILD_CACHE_PANIC("falru cache")
#define BUILD_FALRU_CACHE(c) BUILD_CACHE_PANIC("falru cache")
#endif
#if defined(USE_CACHE_LRU)
#define BUILD_LRU_CACHE(b, c) do { \
#define BUILD_LRU_CACHE(c) do { \
LRU *tags = new LRU(numSets, block_size, assoc, latency); \
BUILD_COMPRESSED_CACHE(LRU, tags, b, c); \
BUILD_COMPRESSED_CACHE(LRU, tags, c); \
} while (0)
#else
#define BUILD_LRU_CACHE(b, c) BUILD_CACHE_PANIC("lru cache")
#define BUILD_LRU_CACHE(c) BUILD_CACHE_PANIC("lru cache")
#endif
#if defined(USE_CACHE_SPLIT)
#define BUILD_SPLIT_CACHE(b, c) do { \
#define BUILD_SPLIT_CACHE(c) do { \
Split *tags = new Split(numSets, block_size, assoc, split_size, lifo, \
two_queue, latency); \
BUILD_COMPRESSED_CACHE(Split, tags, b, c); \
BUILD_COMPRESSED_CACHE(Split, tags, c); \
} while (0)
#else
#define BUILD_SPLIT_CACHE(b, c) BUILD_CACHE_PANIC("split cache")
#define BUILD_SPLIT_CACHE(c) BUILD_CACHE_PANIC("split cache")
#endif
#if defined(USE_CACHE_SPLIT_LIFO)
#define BUILD_SPLIT_LIFO_CACHE(b, c) do { \
#define BUILD_SPLIT_LIFO_CACHE(c) do { \
SplitLIFO *tags = new SplitLIFO(block_size, size, assoc, \
latency, two_queue, -1); \
BUILD_COMPRESSED_CACHE(SplitLIFO, tags, b, c); \
BUILD_COMPRESSED_CACHE(SplitLIFO, tags, c); \
} while (0)
#else
#define BUILD_SPLIT_LIFO_CACHE(b, c) BUILD_CACHE_PANIC("lifo cache")
#define BUILD_SPLIT_LIFO_CACHE(c) BUILD_CACHE_PANIC("lifo cache")
#endif
#if defined(USE_CACHE_IIC)
#define BUILD_IIC_CACHE(b ,c) do { \
#define BUILD_IIC_CACHE(c) do { \
IIC *tags = new IIC(iic_params); \
BUILD_COMPRESSED_CACHE(IIC, tags, b, c); \
BUILD_COMPRESSED_CACHE(IIC, tags, c); \
} while (0)
#else
#define BUILD_IIC_CACHE(b, c) BUILD_CACHE_PANIC("iic")
#define BUILD_IIC_CACHE(c) BUILD_CACHE_PANIC("iic")
#endif
#define BUILD_CACHES(b, c) do { \
#define BUILD_CACHES(c) do { \
if (repl == NULL) { \
if (numSets == 1) { \
BUILD_FALRU_CACHE(b, c); \
BUILD_FALRU_CACHE(c); \
} else { \
if (split == true) { \
BUILD_SPLIT_CACHE(b, c); \
BUILD_SPLIT_CACHE(c); \
} else if (lifo == true) { \
BUILD_SPLIT_LIFO_CACHE(b, c); \
BUILD_SPLIT_LIFO_CACHE(c); \
} else { \
BUILD_LRU_CACHE(b, c); \
BUILD_LRU_CACHE(c); \
} \
} \
} else { \
BUILD_IIC_CACHE(b, c); \
BUILD_IIC_CACHE(c); \
} \
} while (0)
#define BUILD_COHERENCE(b) do { \
if (protocol == NULL) { \
UniCoherence *coh = new UniCoherence(); \
BUILD_CACHES(b, UniCoherence); \
BUILD_CACHES(UniCoherence); \
} else { \
SimpleCoherence *coh = new SimpleCoherence(protocol); \
BUILD_CACHES(b, SimpleCoherence); \
BUILD_CACHES(SimpleCoherence); \
} \
} while (0)
#if defined(USE_TAGGED)
#define BUILD_TAGGED_PREFETCHER(t, b) pf = new \
TaggedPrefetcher<CacheTags<t>, b>(prefetcher_size, \
#define BUILD_TAGGED_PREFETCHER(t) pf = new \
TaggedPrefetcher<CacheTags<t> >(prefetcher_size, \
!prefetch_past_page, \
prefetch_serial_squash, \
prefetch_cache_check_push, \
@ -335,12 +335,12 @@ return retval; \
prefetch_latency, \
prefetch_degree)
#else
#define BUILD_TAGGED_PREFETCHER(t, b) BUILD_CACHE_PANIC("Tagged Prefetcher")
#define BUILD_TAGGED_PREFETCHER(t) BUILD_CACHE_PANIC("Tagged Prefetcher")
#endif
#if defined(USE_STRIDED)
#define BUILD_STRIDED_PREFETCHER(t, b) pf = new \
StridePrefetcher<CacheTags<t>, b>(prefetcher_size, \
#define BUILD_STRIDED_PREFETCHER(t) pf = new \
StridePrefetcher<CacheTags<t> >(prefetcher_size, \
!prefetch_past_page, \
prefetch_serial_squash, \
prefetch_cache_check_push, \
@ -349,12 +349,12 @@ return retval; \
prefetch_degree, \
prefetch_use_cpu_id)
#else
#define BUILD_STRIDED_PREFETCHER(t, b) BUILD_CACHE_PANIC("Stride Prefetcher")
#define BUILD_STRIDED_PREFETCHER(t) BUILD_CACHE_PANIC("Stride Prefetcher")
#endif
#if defined(USE_GHB)
#define BUILD_GHB_PREFETCHER(t, b) pf = new \
GHBPrefetcher<CacheTags<t>, b>(prefetcher_size, \
#define BUILD_GHB_PREFETCHER(t) pf = new \
GHBPrefetcher<CacheTags<t> >(prefetcher_size, \
!prefetch_past_page, \
prefetch_serial_squash, \
prefetch_cache_check_push, \
@ -363,12 +363,12 @@ return retval; \
prefetch_degree, \
prefetch_use_cpu_id)
#else
#define BUILD_GHB_PREFETCHER(t, b) BUILD_CACHE_PANIC("GHB Prefetcher")
#define BUILD_GHB_PREFETCHER(t) BUILD_CACHE_PANIC("GHB Prefetcher")
#endif
#if defined(USE_TAGGED)
#define BUILD_NULL_PREFETCHER(t, b) pf = new \
TaggedPrefetcher<CacheTags<t>, b>(prefetcher_size, \
#define BUILD_NULL_PREFETCHER(t) pf = new \
TaggedPrefetcher<CacheTags<t> >(prefetcher_size, \
!prefetch_past_page, \
prefetch_serial_squash, \
prefetch_cache_check_push, \
@ -376,7 +376,7 @@ return retval; \
prefetch_latency, \
prefetch_degree)
#else
#define BUILD_NULL_PREFETCHER(t, b) BUILD_CACHE_PANIC("NULL Prefetcher (uses Tagged)")
#define BUILD_NULL_PREFETCHER(t) BUILD_CACHE_PANIC("NULL Prefetcher (uses Tagged)")
#endif
CREATE_SIM_OBJECT(BaseCache)

@ -55,9 +55,9 @@
bool SIGNAL_NACK_HACK;
template<class TagStore, class Buffering, class Coherence>
template<class TagStore, class Coherence>
bool
Cache<TagStore,Buffering,Coherence>::
Cache<TagStore,Coherence>::
doTimingAccess(PacketPtr pkt, CachePort *cachePort, bool isCpuSide)
{
if (isCpuSide)
@ -81,9 +81,9 @@ doTimingAccess(PacketPtr pkt, CachePort *cachePort, bool isCpuSide)
return true;
}
template<class TagStore, class Buffering, class Coherence>
template<class TagStore, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::
Cache<TagStore,Coherence>::
doAtomicAccess(PacketPtr pkt, bool isCpuSide)
{
if (isCpuSide)
@ -103,9 +103,9 @@ doAtomicAccess(PacketPtr pkt, bool isCpuSide)
return hitLatency;
}
template<class TagStore, class Buffering, class Coherence>
template<class TagStore, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::
Cache<TagStore,Coherence>::
doFunctionalAccess(PacketPtr pkt, bool isCpuSide)
{
if (isCpuSide)
@ -123,19 +123,19 @@ doFunctionalAccess(PacketPtr pkt, bool isCpuSide)
}
}
template<class TagStore, class Buffering, class Coherence>
template<class TagStore, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::
Cache<TagStore,Coherence>::
recvStatusChange(Port::Status status, bool isCpuSide)
{
}
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::
template<class TagStore, class Coherence>
Cache<TagStore,Coherence>::
Cache(const std::string &_name,
Cache<TagStore,Buffering,Coherence>::Params &params)
Cache<TagStore,Coherence>::Params &params)
: BaseCache(_name, params.baseParams),
prefetchAccess(params.prefetchAccess),
tags(params.tags), missQueue(params.missQueue),
@ -154,9 +154,9 @@ Cache(const std::string &_name,
invalidatePkt = new Packet(invalidateReq, Packet::InvalidateReq, 0);
}
template<class TagStore, class Buffering, class Coherence>
template<class TagStore, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::regStats()
Cache<TagStore,Coherence>::regStats()
{
BaseCache::regStats();
tags->regStats(name());
@ -165,9 +165,9 @@ Cache<TagStore,Buffering,Coherence>::regStats()
prefetcher->regStats(name());
}
template<class TagStore, class Buffering, class Coherence>
template<class TagStore, class Coherence>
bool
Cache<TagStore,Buffering,Coherence>::access(PacketPtr &pkt)
Cache<TagStore,Coherence>::access(PacketPtr &pkt)
{
//@todo Add back in MemDebug Calls
// MemDebug::cacheAccess(pkt);
@ -253,9 +253,9 @@ Cache<TagStore,Buffering,Coherence>::access(PacketPtr &pkt)
}
template<class TagStore, class Buffering, class Coherence>
template<class TagStore, class Coherence>
PacketPtr
Cache<TagStore,Buffering,Coherence>::getPacket()
Cache<TagStore,Coherence>::getPacket()
{
assert(missQueue->havePending());
PacketPtr pkt = missQueue->getPacket();
@ -276,9 +276,9 @@ Cache<TagStore,Buffering,Coherence>::getPacket()
return pkt;
}
template<class TagStore, class Buffering, class Coherence>
template<class TagStore, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::sendResult(PacketPtr &pkt, MSHR* mshr,
Cache<TagStore,Coherence>::sendResult(PacketPtr &pkt, MSHR* mshr,
bool success)
{
if (success && !(SIGNAL_NACK_HACK)) {
@ -319,9 +319,9 @@ Cache<TagStore,Buffering,Coherence>::sendResult(PacketPtr &pkt, MSHR* mshr,
}
}
template<class TagStore, class Buffering, class Coherence>
template<class TagStore, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::handleResponse(PacketPtr &pkt)
Cache<TagStore,Coherence>::handleResponse(PacketPtr &pkt)
{
BlkType *blk = NULL;
if (pkt->senderState) {
@ -363,16 +363,16 @@ Cache<TagStore,Buffering,Coherence>::handleResponse(PacketPtr &pkt)
}
}
template<class TagStore, class Buffering, class Coherence>
template<class TagStore, class Coherence>
PacketPtr
Cache<TagStore,Buffering,Coherence>::getCoherencePacket()
Cache<TagStore,Coherence>::getCoherencePacket()
{
return coherence->getPacket();
}
template<class TagStore, class Buffering, class Coherence>
template<class TagStore, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::sendCoherenceResult(PacketPtr &pkt,
Cache<TagStore,Coherence>::sendCoherenceResult(PacketPtr &pkt,
MSHR *cshr,
bool success)
{
@ -380,9 +380,9 @@ Cache<TagStore,Buffering,Coherence>::sendCoherenceResult(PacketPtr &pkt,
}
template<class TagStore, class Buffering, class Coherence>
template<class TagStore, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::snoop(PacketPtr &pkt)
Cache<TagStore,Coherence>::snoop(PacketPtr &pkt)
{
if (pkt->req->isUncacheable()) {
//Can't get a hit on an uncacheable address
@ -514,9 +514,9 @@ Cache<TagStore,Buffering,Coherence>::snoop(PacketPtr &pkt)
tags->handleSnoop(blk, new_state);
}
template<class TagStore, class Buffering, class Coherence>
template<class TagStore, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::snoopResponse(PacketPtr &pkt)
Cache<TagStore,Coherence>::snoopResponse(PacketPtr &pkt)
{
//Need to handle the response, if NACKED
if (pkt->flags & NACKED_LINE) {
@ -533,9 +533,9 @@ Cache<TagStore,Buffering,Coherence>::snoopResponse(PacketPtr &pkt)
}
}
template<class TagStore, class Buffering, class Coherence>
template<class TagStore, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::invalidateBlk(Addr addr)
Cache<TagStore,Coherence>::invalidateBlk(Addr addr)
{
tags->invalidateBlk(addr);
}
@ -544,9 +544,9 @@ Cache<TagStore,Buffering,Coherence>::invalidateBlk(Addr addr)
/**
* @todo Fix to not assume write allocate
*/
template<class TagStore, class Buffering, class Coherence>
template<class TagStore, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::probe(PacketPtr &pkt, bool update,
Cache<TagStore,Coherence>::probe(PacketPtr &pkt, bool update,
CachePort* otherSidePort)
{
// MemDebug::cacheProbe(pkt);
@ -694,9 +694,9 @@ return 0;
return 0;
}
template<class TagStore, class Buffering, class Coherence>
template<class TagStore, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::snoopProbe(PacketPtr &pkt)
Cache<TagStore,Coherence>::snoopProbe(PacketPtr &pkt)
{
//Send a atomic (false) invalidate up if the protocol calls for it
if (coherence->propogateInvalidate(pkt, false)) {

@ -33,11 +33,9 @@
* Definitions of a simple buffer for a blocking cache.
*/
#include "cpu/smt.hh" //for maxThreadsPerCPU
#include "mem/cache/base_cache.hh"
#include "mem/cache/miss/blocking_buffer.hh"
#include "mem/cache/prefetch/base_prefetcher.hh"
#include "sim/eventq.hh" // for Event declaration.
#include "mem/request.hh"
/**
@ -46,27 +44,10 @@
void
BlockingBuffer::regStats(const std::string &name)
{
using namespace Stats;
writebacks
.init(maxThreadsPerCPU)
.name(name + ".writebacks")
.desc("number of writebacks")
.flags(total)
;
MissBuffer::regStats(name);
}
void
BlockingBuffer::setCache(BaseCache *_cache)
{
cache = _cache;
blkSize = cache->getBlockSize();
}
void
BlockingBuffer::setPrefetcher(BasePrefetcher *_prefetcher)
{
prefetcher = _prefetcher;
}
void
BlockingBuffer::handleMiss(PacketPtr &pkt, int blk_size, Tick time)
{
@ -241,3 +222,23 @@ BlockingBuffer::doWriteback(PacketPtr &pkt)
cache->setBlocked(Blocked_NoWBBuffers);
cache->setMasterRequest(Request_WB, curTick);
}
MSHR *
BlockingBuffer::findMSHR(Addr addr)
{
if (miss.addr == addr && miss.pkt)
return &miss;
return NULL;
}
bool
BlockingBuffer::findWrites(Addr addr, std::vector<MSHR*>& writes)
{
if (wb.addr == addr && wb.pkt) {
writes.push_back(&wb);
return true;
}
return false;
}

@ -39,16 +39,13 @@
#include <vector>
#include "base/misc.hh" // for fatal()
#include "mem/cache/miss/miss_buffer.hh"
#include "mem/cache/miss/mshr.hh"
#include "base/statistics.hh"
class BaseCache;
class BasePrefetcher;
/**
* Miss and writeback storage for a blocking cache.
*/
class BlockingBuffer
class BlockingBuffer : public MissBuffer
{
protected:
/** Miss storage. */
@ -56,38 +53,13 @@ protected:
/** WB storage. */
MSHR wb;
//Params
/** Allocate on write misses. */
const bool writeAllocate;
/** Pointer to the parent cache. */
BaseCache* cache;
BasePrefetcher* prefetcher;
/** Block size of the parent cache. */
int blkSize;
// Statistics
/**
* @addtogroup CacheStatistics
* @{
*/
/** Number of blocks written back per thread. */
Stats::Vector<> writebacks;
/**
* @}
*/
public:
/**
* Builds and initializes this buffer.
* @param write_allocate If true, treat write misses the same as reads.
*/
BlockingBuffer(bool write_allocate)
: writeAllocate(write_allocate)
: MissBuffer(write_allocate)
{
}
@ -97,14 +69,6 @@ public:
*/
void regStats(const std::string &name);
/**
* Called by the parent cache to set the back pointer.
* @param _cache A pointer to the parent cache.
*/
void setCache(BaseCache *_cache);
void setPrefetcher(BasePrefetcher *_prefetcher);
/**
* Handle a cache miss properly. Requests the bus and marks the cache as
* blocked.
@ -184,12 +148,7 @@ public:
* @param asid The address space id.
* @return A pointer to miss if it matches.
*/
MSHR* findMSHR(Addr addr)
{
if (miss.addr == addr && miss.pkt)
return &miss;
return NULL;
}
MSHR* findMSHR(Addr addr);
/**
* Searches for the supplied address in the write buffer.
@ -198,16 +157,7 @@ public:
* @param writes List of pointers to the matching writes.
* @return True if there is a matching write.
*/
bool findWrites(Addr addr, std::vector<MSHR*>& writes)
{
if (wb.addr == addr && wb.pkt) {
writes.push_back(&wb);
return true;
}
return false;
}
bool findWrites(Addr addr, std::vector<MSHR*>& writes);
/**
* Perform a writeback of dirty data to the given address.

src/mem/cache/miss/miss_buffer.cc (new file, 62 lines)

@ -0,0 +1,62 @@
/*
* Copyright (c) 2003-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
*/
#include "cpu/smt.hh" //for maxThreadsPerCPU
#include "mem/cache/base_cache.hh"
#include "mem/cache/miss/miss_buffer.hh"
#include "mem/cache/prefetch/base_prefetcher.hh"
/**
* @todo Move writebacks into shared BaseBuffer class.
*/
void
MissBuffer::regStats(const std::string &name)
{
using namespace Stats;
writebacks
.init(maxThreadsPerCPU)
.name(name + ".writebacks")
.desc("number of writebacks")
.flags(total)
;
}
void
MissBuffer::setCache(BaseCache *_cache)
{
cache = _cache;
blkSize = cache->getBlockSize();
}
void
MissBuffer::setPrefetcher(BasePrefetcher *_prefetcher)
{
prefetcher = _prefetcher;
}

src/mem/cache/miss/miss_buffer.hh (new file, 223 lines)

@ -0,0 +1,223 @@
/*
* Copyright (c) 2003-2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Steve Reinhardt
*/
/**
* @file
* MissBuffer declaration.
*/
#ifndef __MISS_BUFFER_HH__
#define __MISS_BUFFER_HH__
class BaseCache;
class BasePrefetcher;
class MSHR;
/**
* Abstract base class for cache miss buffering.
*/
class MissBuffer
{
protected:
/** True if the cache should allocate on a write miss. */
const bool writeAllocate;
/** Pointer to the parent cache. */
BaseCache *cache;
/** The Prefetcher */
BasePrefetcher *prefetcher;
/** Block size of the parent cache. */
int blkSize;
// Statistics
/**
* @addtogroup CacheStatistics
* @{
*/
/** Number of blocks written back per thread. */
Stats::Vector<> writebacks;
/**
* @}
*/
public:
MissBuffer(bool write_allocate)
: writeAllocate(write_allocate)
{
}
virtual ~MissBuffer() {}
/**
* Called by the parent cache to set the back pointer.
* @param _cache A pointer to the parent cache.
*/
void setCache(BaseCache *_cache);
void setPrefetcher(BasePrefetcher *_prefetcher);
/**
* Register statistics for this object.
* @param name The name of the parent cache.
*/
virtual void regStats(const std::string &name);
/**
* Handle a cache miss properly. Either allocate an MSHR for the request,
* or forward it through the write buffer.
* @param pkt The request that missed in the cache.
* @param blk_size The block size of the cache.
* @param time The time the miss is detected.
*/
virtual void handleMiss(PacketPtr &pkt, int blk_size, Tick time) = 0;
/**
* Fetch the block for the given address and buffer the given target.
* @param addr The address to fetch.
* @param asid The address space of the address.
* @param blk_size The block size of the cache.
* @param time The time the miss is detected.
* @param target The target for the fetch.
*/
virtual MSHR *fetchBlock(Addr addr, int blk_size, Tick time,
PacketPtr &target) = 0;
/**
* Selects an outstanding request to service.
* @return The request to service, NULL if none found.
*/
virtual PacketPtr getPacket() = 0;
/**
* Set the command to the given bus command.
* @param pkt The request to update.
* @param cmd The bus command to use.
*/
virtual void setBusCmd(PacketPtr &pkt, Packet::Command cmd) = 0;
/**
* Restore the original command in case of a bus transmission error.
* @param pkt The request to reset.
*/
virtual void restoreOrigCmd(PacketPtr &pkt) = 0;
/**
* Marks a request as in service (sent on the bus). This can have side
* effects, since storage for no-response commands is deallocated once they
* are successfully sent.
* @param pkt The request that was sent on the bus.
*/
virtual void markInService(PacketPtr &pkt, MSHR* mshr) = 0;
/**
* Collect statistics and free resources of a satisfied request.
* @param pkt The request that has been satisfied.
* @param time The time when the request is satisfied.
*/
virtual void handleResponse(PacketPtr &pkt, Tick time) = 0;
/**
* Removes all outstanding requests for a given thread number. If a request
* has been sent to the bus, this function removes all of its targets.
* @param threadNum The thread number of the requests to squash.
*/
virtual void squash(int threadNum) = 0;
/**
* Return the current number of outstanding misses.
* @return the number of outstanding misses.
*/
virtual int getMisses() = 0;
/**
* Searches for the supplied address in the miss queue.
* @param addr The address to look for.
* @param asid The address space id.
* @return The MSHR that contains the address, NULL if not found.
* @warning Currently only searches the miss queue. If non write allocate
* might need to search the write buffer for coherence.
*/
virtual MSHR* findMSHR(Addr addr) = 0;
/**
* Searches for the supplied address in the write buffer.
* @param addr The address to look for.
* @param asid The address space id.
* @param writes The list of writes that match the address.
* @return True if any writes are found
*/
virtual bool findWrites(Addr addr, std::vector<MSHR*>& writes) = 0;
/**
* Perform a writeback of dirty data to the given address.
* @param addr The address to write to.
* @param asid The address space id.
* @param xc The execution context of the address space.
* @param size The number of bytes to write.
* @param data The data to write, can be NULL.
* @param compressed True if the data is compressed.
*/
virtual void doWriteback(Addr addr, int size, uint8_t *data,
bool compressed) = 0;
/**
* Perform the given writeback request.
* @param pkt The writeback request.
*/
virtual void doWriteback(PacketPtr &pkt) = 0;
/**
* Returns true if there are outstanding requests.
* @return True if there are outstanding requests.
*/
virtual bool havePending() = 0;
/**
* Add a target to the given MSHR. This assumes it is in the miss queue.
* @param mshr The mshr to add a target to.
* @param pkt The target to add.
*/
virtual void addTarget(MSHR *mshr, PacketPtr &pkt) = 0;
/**
* Allocate a MSHR to hold a list of targets to a block involved in a copy.
* If the block is marked done then the MSHR already holds the data to
* fill the block. Otherwise the block needs to be fetched.
* @param addr The address to buffer.
* @param asid The address space ID.
* @return A pointer to the allocated MSHR.
*/
virtual MSHR* allocateTargetList(Addr addr) = 0;
};
#endif //__MISS_BUFFER_HH__
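
For context, the virtual interface above is driven by Cache<> in roughly the following order (a hypothetical driver assuming the MissBuffer declaration above and the usual m5 Packet/MSHR types; serviceMiss is an illustrative name, not a function in the commit):

// Illustrative only: the call sequence cache_impl.hh makes against MissBuffer.
void serviceMiss(MissBuffer &mb, PacketPtr &pkt, int blk_size, Tick now)
{
    mb.handleMiss(pkt, blk_size, now);     // allocate an MSHR or buffer the write
    if (mb.havePending()) {
        PacketPtr req = mb.getPacket();    // pick the next request for the bus
        MSHR *mshr = mb.findMSHR(req->getAddr());
        mb.markInService(req, mshr);       // may free no-response entries
    }
    // ... later, when the response returns from memory:
    // mb.handleResponse(resp, curTick);
}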

@ -48,16 +48,25 @@ using namespace std;
*/
MissQueue::MissQueue(int numMSHRs, int numTargets, int write_buffers,
bool write_allocate, bool prefetch_miss)
: mq(numMSHRs, 4), wb(write_buffers,numMSHRs+1000), numMSHR(numMSHRs),
: MissBuffer(write_allocate),
mq(numMSHRs, 4), wb(write_buffers,numMSHRs+1000), numMSHR(numMSHRs),
numTarget(numTargets), writeBuffers(write_buffers),
writeAllocate(write_allocate), order(0), prefetchMiss(prefetch_miss)
order(0), prefetchMiss(prefetch_miss)
{
noTargetMSHR = NULL;
}
MissQueue::~MissQueue()
{
}
void
MissQueue::regStats(const string &name)
{
MissBuffer::regStats(name);
Request temp_req((Addr) NULL, 4, 0);
Packet::Command temp_cmd = Packet::ReadReq;
Packet temp_pkt(&temp_req, temp_cmd, 0); //@todo Fix command strings so this isn't necessary
@ -65,13 +74,6 @@ MissQueue::regStats(const string &name)
using namespace Stats;
writebacks
.init(maxThreadsPerCPU)
.name(name + ".writebacks")
.desc("number of writebacks")
.flags(total)
;
// MSHR hit statistics
for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
Packet::Command cmd = (Packet::Command)access_idx;
@ -336,18 +338,6 @@ MissQueue::regStats(const string &name)
}
void
MissQueue::setCache(BaseCache *_cache)
{
cache = _cache;
blkSize = cache->getBlockSize();
}
void
MissQueue::setPrefetcher(BasePrefetcher *_prefetcher)
{
prefetcher = _prefetcher;
}
MSHR*
MissQueue::allocateMiss(PacketPtr &pkt, int size, Tick time)
@ -706,13 +696,13 @@ MissQueue::squash(int threadNum)
}
MSHR*
MissQueue::findMSHR(Addr addr) const
MissQueue::findMSHR(Addr addr)
{
return mq.findMatch(addr);
}
bool
MissQueue::findWrites(Addr addr, vector<MSHR*> &writes) const
MissQueue::findWrites(Addr addr, vector<MSHR*> &writes)
{
return wb.findMatches(addr,writes);
}

@ -38,19 +38,18 @@
#include <vector>
#include "mem/cache/miss/miss_buffer.hh"
#include "mem/cache/miss/mshr.hh"
#include "mem/cache/miss/mshr_queue.hh"
#include "base/statistics.hh"
class BaseCache;
class BasePrefetcher;
/**
* Manages cache misses and writebacks. Contains MSHRs to store miss data
* and the writebuffer for writes/writebacks.
* @todo need to handle data on writes better (encapsulate).
* @todo need to make replacements/writebacks happen in Cache::access
*/
class MissQueue
class MissQueue : public MissBuffer
{
protected:
/** The MSHRs. */
@ -66,16 +65,6 @@ class MissQueue
const int numTarget;
/** The number of write buffers. */
const int writeBuffers;
/** True if the cache should allocate on a write miss. */
const bool writeAllocate;
/** Pointer to the parent cache. */
BaseCache* cache;
/** The Prefetcher */
BasePrefetcher *prefetcher;
/** The block size of the parent cache. */
int blkSize;
/** Increasing order number assigned to each incoming request. */
uint64_t order;
@ -87,9 +76,6 @@ class MissQueue
* @addtogroup CacheStatistics
* @{
*/
/** Number of blocks written back per thread. */
Stats::Vector<> writebacks;
/** Number of misses that hit in the MSHRs per command and thread. */
Stats::Vector<> mshr_hits[NUM_MEM_CMDS];
/** Demand misses that hit in the MSHRs. */
@ -203,14 +189,6 @@ class MissQueue
*/
void regStats(const std::string &name);
/**
* Called by the parent cache to set the back pointer.
* @param _cache A pointer to the parent cache.
*/
void setCache(BaseCache *_cache);
void setPrefetcher(BasePrefetcher *_prefetcher);
/**
* Handle a cache miss properly. Either allocate an MSHR for the request,
* or forward it through the write buffer.
@ -289,7 +267,7 @@ class MissQueue
* @warning Currently only searches the miss queue. If non write allocate
* might need to search the write buffer for coherence.
*/
MSHR* findMSHR(Addr addr) const;
MSHR* findMSHR(Addr addr);
/**
* Searches for the supplied address in the write buffer.
@ -298,7 +276,7 @@ class MissQueue
* @param writes The list of writes that match the address.
* @return True if any writes are found
*/
bool findWrites(Addr addr, std::vector<MSHR*>& writes) const;
bool findWrites(Addr addr, std::vector<MSHR*>& writes);
/**
* Perform a writeback of dirty data to the given address.

@ -38,15 +38,11 @@
#include "mem/cache/tags/lru.hh"
#include "mem/cache/miss/miss_queue.hh"
#include "mem/cache/miss/blocking_buffer.hh"
#include "mem/cache/prefetch/ghb_prefetcher.hh"
// Template Instantiations
#ifndef DOXYGEN_SHOULD_SKIP_THIS
template class GHBPrefetcher<CacheTags<LRU>, MissQueue>;
template class GHBPrefetcher<CacheTags<LRU>, BlockingBuffer>;
template class GHBPrefetcher<CacheTags<LRU> >;
#endif //DOXYGEN_SHOULD_SKIP_THIS

@ -43,16 +43,16 @@
/**
* A template-policy based cache. The behavior of the cache can be altered by
* supplying different template policies. TagStore handles all tag and data
* storage @sa TagStore. Buffering handles all misses and writes/writebacks
* storage @sa TagStore. MissBuffer handles all misses and writes/writebacks
* @sa MissQueue. Coherence handles all coherence policy details @sa
* UniCoherence, SimpleMultiCoherence.
*/
template <class TagStore, class Buffering>
class GHBPrefetcher : public Prefetcher<TagStore, Buffering>
template <class TagStore>
class GHBPrefetcher : public Prefetcher<TagStore>
{
protected:
Buffering* mq;
MissBuffer* mq;
TagStore* tags;
Addr second_last_miss_addr[64/*MAX_CPUS*/];
@ -67,7 +67,7 @@ class GHBPrefetcher : public Prefetcher<TagStore, Buffering>
GHBPrefetcher(int size, bool pageStop, bool serialSquash,
bool cacheCheckPush, bool onlyData,
Tick latency, int degree, bool useCPUId)
:Prefetcher<TagStore, Buffering>(size, pageStop, serialSquash,
:Prefetcher<TagStore>(size, pageStop, serialSquash,
cacheCheckPush, onlyData),
latency(latency), degree(degree), useCPUId(useCPUId)
{

@ -38,15 +38,11 @@
#include "mem/cache/tags/lru.hh"
#include "mem/cache/miss/miss_queue.hh"
#include "mem/cache/miss/blocking_buffer.hh"
#include "mem/cache/prefetch/stride_prefetcher.hh"
// Template Instantiations
#ifndef DOXYGEN_SHOULD_SKIP_THIS
template class StridePrefetcher<CacheTags<LRU>, MissQueue>;
template class StridePrefetcher<CacheTags<LRU>, BlockingBuffer>;
template class StridePrefetcher<CacheTags<LRU> >;
#endif //DOXYGEN_SHOULD_SKIP_THIS

@ -43,16 +43,16 @@
/**
* A template-policy based cache. The behavior of the cache can be altered by
* supplying different template policies. TagStore handles all tag and data
* storage @sa TagStore. Buffering handles all misses and writes/writebacks
* storage @sa TagStore. MissBuffer handles all misses and writes/writebacks
* @sa MissQueue. Coherence handles all coherence policy details @sa
* UniCoherence, SimpleMultiCoherence.
*/
template <class TagStore, class Buffering>
class StridePrefetcher : public Prefetcher<TagStore, Buffering>
template <class TagStore>
class StridePrefetcher : public Prefetcher<TagStore>
{
protected:
Buffering* mq;
MissBuffer* mq;
TagStore* tags;
class strideEntry
@ -84,7 +84,7 @@ class StridePrefetcher : public Prefetcher<TagStore, Buffering>
StridePrefetcher(int size, bool pageStop, bool serialSquash,
bool cacheCheckPush, bool onlyData,
Tick latency, int degree, bool useCPUId)
:Prefetcher<TagStore, Buffering>(size, pageStop, serialSquash,
:Prefetcher<TagStore>(size, pageStop, serialSquash,
cacheCheckPush, onlyData),
latency(latency), degree(degree), useCPUId(useCPUId)
{

@ -41,16 +41,16 @@
/**
* A template-policy based cache. The behavior of the cache can be altered by
* supplying different template policies. TagStore handles all tag and data
* storage @sa TagStore. Buffering handles all misses and writes/writebacks
* storage @sa TagStore. MissBuffer handles all misses and writes/writebacks
* @sa MissQueue. Coherence handles all coherence policy details @sa
* UniCoherence, SimpleMultiCoherence.
*/
template <class TagStore, class Buffering>
class TaggedPrefetcher : public Prefetcher<TagStore, Buffering>
template <class TagStore>
class TaggedPrefetcher : public Prefetcher<TagStore>
{
protected:
Buffering* mq;
MissBuffer* mq;
TagStore* tags;
Tick latency;

@ -36,20 +36,20 @@
#include "arch/isa_traits.hh"
#include "mem/cache/prefetch/tagged_prefetcher.hh"
template <class TagStore, class Buffering>
TaggedPrefetcher<TagStore, Buffering>::
template <class TagStore>
TaggedPrefetcher<TagStore>::
TaggedPrefetcher(int size, bool pageStop, bool serialSquash,
bool cacheCheckPush, bool onlyData,
Tick latency, int degree)
:Prefetcher<TagStore, Buffering>(size, pageStop, serialSquash,
:Prefetcher<TagStore>(size, pageStop, serialSquash,
cacheCheckPush, onlyData),
latency(latency), degree(degree)
{
}
template <class TagStore, class Buffering>
template <class TagStore>
void
TaggedPrefetcher<TagStore, Buffering>::
TaggedPrefetcher<TagStore>::
calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
std::list<Tick> &delays)
{