Get rid of coherence protocol object.

--HG--
extra : convert_revision : 4ff144342dca23af9a12a2169ca318a002654b42
Steve Reinhardt 2007-06-27 20:54:13 -07:00
parent c4903e0882
commit 9117c94f9c
10 changed files with 140 additions and 1122 deletions
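In short, the Coherence template parameter disappears from the cache (Cache<TagStore, Coherence> becomes Cache<TagStore>), the CoherenceProtocol SimObject and the SimpleCoherence policy are deleted, and the state decisions they made are folded into the cache itself. A toy sketch of the type-level change (illustrative only, not gem5 source):

struct LRU {};                          // stands in for a TagStore policy

// before: template <class TagStore, class Coherence> class Cache { ... };
// after this commit, only the tag store remains a template parameter:
template <class TagStore>
struct Cache
{
    TagStore *tags = nullptr;           // tag/data storage stays
    // Coherence *coherence;            // gone: no coherence policy object
};

int main()
{
    Cache<LRU> dcache;                  // was Cache<LRU, SimpleCoherence>
    (void)dcache;
    return 0;
}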

View file

@ -48,8 +48,6 @@ parser.add_option("-m", "--maxtick", type="int", default=m5.MaxTick,
parser.add_option("-n", "--numtesters", type="int", default=8,
metavar="N",
help="Number of tester pseudo-CPUs [default: %default]")
parser.add_option("-p", "--protocol", default="moesi",
help="Coherence protocol [default: %default]")
parser.add_option("-f", "--functional", type="int", default=0,
metavar="PCT",
@ -95,7 +93,6 @@ class L1(BaseCache):
block_size = block_size
mshrs = num_l1_mshrs
tgts_per_mshr = 8
protocol = CoherenceProtocol(protocol=options.protocol)
# ----------------------
# Base L2 Cache
@ -107,7 +104,6 @@ class L2(BaseCache):
mshrs = num_l2_mshrs
tgts_per_mshr = 16
write_buffers = 8
protocol = CoherenceProtocol(protocol=options.protocol)
if options.numtesters > block_size:
print "Error: Number of testers limited to %s because of false sharing" \

View file

@ -58,8 +58,6 @@
#include "mem/cache/tags/split_lifo.hh"
#endif
#include "mem/cache/coherence/simple_coherence.hh"
#include "mem/cache/cache_impl.hh"
// Template Instantiations
@ -67,23 +65,23 @@
#if defined(USE_CACHE_FALRU)
template class Cache<FALRU, SimpleCoherence>;
template class Cache<FALRU>;
#endif
#if defined(USE_CACHE_IIC)
template class Cache<IIC, SimpleCoherence>;
template class Cache<IIC>;
#endif
#if defined(USE_CACHE_LRU)
template class Cache<LRU, SimpleCoherence>;
template class Cache<LRU>;
#endif
#if defined(USE_CACHE_SPLIT)
template class Cache<Split, SimpleCoherence>;
template class Cache<Split>;
#endif
#if defined(USE_CACHE_SPLIT_LIFO)
template class Cache<SplitLIFO, SimpleCoherence>;
template class Cache<SplitLIFO>;
#endif
#endif //DOXYGEN_SHOULD_SKIP_THIS

View file

@ -39,9 +39,7 @@
#ifndef __CACHE_HH__
#define __CACHE_HH__
#include "base/compression/base.hh"
#include "base/misc.hh" // fatal, panic, and warn
#include "cpu/smt.hh" // SMT_MAX_THREADS
#include "mem/cache/base_cache.hh"
#include "mem/cache/cache_blk.hh"
@ -55,11 +53,9 @@ class BasePrefetcher;
/**
* A template-policy based cache. The behavior of the cache can be altered by
* supplying different template policies. TagStore handles all tag and data
* storage @sa TagStore. Buffering handles all misses and writes/writebacks
* @sa MissQueue. Coherence handles all coherence policy details @sa
* UniCoherence, SimpleMultiCoherence.
* storage @sa TagStore.
*/
template <class TagStore, class Coherence>
template <class TagStore>
class Cache : public BaseCache
{
public:
@ -76,13 +72,13 @@ class Cache : public BaseCache
{
public:
CpuSidePort(const std::string &_name,
Cache<TagStore,Coherence> *_cache);
Cache<TagStore> *_cache);
// BaseCache::CachePort just has a BaseCache *; this function
// lets us get back the type info we lost when we stored the
// cache pointer there.
Cache<TagStore,Coherence> *myCache() {
return static_cast<Cache<TagStore,Coherence> *>(cache);
Cache<TagStore> *myCache() {
return static_cast<Cache<TagStore> *>(cache);
}
virtual void getDeviceAddressRanges(AddrRangeList &resp,
@ -99,13 +95,13 @@ class Cache : public BaseCache
{
public:
MemSidePort(const std::string &_name,
Cache<TagStore,Coherence> *_cache);
Cache<TagStore> *_cache);
// BaseCache::CachePort just has a BaseCache *; this function
// lets us get back the type info we lost when we stored the
// cache pointer there.
Cache<TagStore,Coherence> *myCache() {
return static_cast<Cache<TagStore,Coherence> *>(cache);
Cache<TagStore> *myCache() {
return static_cast<Cache<TagStore> *>(cache);
}
void sendPacket();
@ -130,9 +126,6 @@ class Cache : public BaseCache
/** Tag and data Storage */
TagStore *tags;
/** Coherence protocol. */
Coherence *coherence;
/** Prefetcher */
BasePrefetcher *prefetcher;
@ -212,20 +205,19 @@ class Cache : public BaseCache
{
public:
TagStore *tags;
Coherence *coherence;
BaseCache::Params baseParams;
BasePrefetcher*prefetcher;
bool prefetchAccess;
const bool doFastWrites;
const bool prefetchMiss;
Params(TagStore *_tags, Coherence *coh,
Params(TagStore *_tags,
BaseCache::Params params,
BasePrefetcher *_prefetcher,
bool prefetch_access, int hit_latency,
bool do_fast_writes,
bool prefetch_miss)
: tags(_tags), coherence(coh),
: tags(_tags),
baseParams(params),
prefetcher(_prefetcher), prefetchAccess(prefetch_access),
doFastWrites(do_fast_writes),

View file

@ -42,7 +42,6 @@
#include "mem/cache/base_cache.hh"
#include "mem/cache/cache.hh"
#include "mem/bus.hh"
#include "mem/cache/coherence/coherence_protocol.hh"
#include "sim/builder.hh"
// Tag Templates
@ -66,13 +65,6 @@
#include "mem/cache/tags/split_lifo.hh"
#endif
// Compression Templates
#include "base/compression/null_compression.hh"
#include "base/compression/lzss_compression.hh"
// Coherence Templates
#include "mem/cache/coherence/simple_coherence.hh"
//Prefetcher Headers
#if defined(USE_GHB)
#include "mem/cache/prefetch/ghb_prefetcher.hh"
@ -100,16 +92,11 @@ BEGIN_DECLARE_SIM_OBJECT_PARAMS(BaseCache)
Param<int> tgts_per_mshr;
Param<int> write_buffers;
Param<bool> prioritizeRequests;
SimObjectParam<CoherenceProtocol *> protocol;
Param<Addr> trace_addr;
Param<int> hash_delay;
#if defined(USE_CACHE_IIC)
SimObjectParam<Repl *> repl;
#endif
Param<bool> compressed_bus;
Param<bool> store_compressed;
Param<bool> adaptive_compression;
Param<int> compression_latency;
Param<int> subblock_size;
Param<Counter> max_miss_count;
VectorParam<Range<Addr> > addr_range;
@ -144,23 +131,12 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache)
INIT_PARAM_DFLT(write_buffers, "number of write buffers", 8),
INIT_PARAM_DFLT(prioritizeRequests, "always service demand misses first",
false),
INIT_PARAM_DFLT(protocol, "coherence protocol to use in the cache", NULL),
INIT_PARAM_DFLT(trace_addr, "address to trace", 0),
INIT_PARAM_DFLT(hash_delay, "time in cycles of hash access",1),
#if defined(USE_CACHE_IIC)
INIT_PARAM_DFLT(repl, "replacement policy",NULL),
#endif
INIT_PARAM_DFLT(compressed_bus,
"This cache connects to a compressed memory",
false),
INIT_PARAM_DFLT(store_compressed, "Store compressed data in the cache",
false),
INIT_PARAM_DFLT(adaptive_compression, "Use an adaptive compression scheme",
false),
INIT_PARAM_DFLT(compression_latency,
"Latency in cycles of compression algorithm",
0),
INIT_PARAM_DFLT(subblock_size,
"Size of subblock in IIC used for compression",
0),
@ -188,7 +164,7 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache)
END_INIT_SIM_OBJECT_PARAMS(BaseCache)
#define BUILD_CACHE(TAGS, tags, c) \
#define BUILD_CACHE(TAGS, tags) \
do { \
BasePrefetcher *pf; \
if (pf_policy == "tagged") { \
@ -203,12 +179,12 @@ END_INIT_SIM_OBJECT_PARAMS(BaseCache)
else { \
BUILD_NULL_PREFETCHER(TAGS); \
} \
Cache<TAGS, c>::Params params(tags, coh, base_params, \
pf, prefetch_access, latency, \
true, \
prefetch_miss); \
Cache<TAGS, c> *retval = \
new Cache<TAGS, c>(getInstanceName(), params); \
Cache<TAGS>::Params params(tags, base_params, \
pf, prefetch_access, latency, \
true, \
prefetch_miss); \
Cache<TAGS> *retval = \
new Cache<TAGS>(getInstanceName(), params); \
return retval; \
} while (0)
@ -216,79 +192,68 @@ END_INIT_SIM_OBJECT_PARAMS(BaseCache)
panic("%s not compiled into M5", x); \
} while (0)
#define BUILD_COMPRESSED_CACHE(TAGS, tags, c) \
do { \
CompressionAlgorithm *compAlg; \
if (compressed_bus || store_compressed) { \
compAlg = new LZSSCompression(); \
} else { \
compAlg = new NullCompression(); \
} \
BUILD_CACHE(TAGS, tags, c); \
} while (0)
#if defined(USE_CACHE_FALRU)
#define BUILD_FALRU_CACHE(c) do { \
#define BUILD_FALRU_CACHE do { \
FALRU *tags = new FALRU(block_size, size, latency); \
BUILD_COMPRESSED_CACHE(FALRU, tags, c); \
BUILD_CACHE(FALRU, tags); \
} while (0)
#else
#define BUILD_FALRU_CACHE(c) BUILD_CACHE_PANIC("falru cache")
#define BUILD_FALRU_CACHE BUILD_CACHE_PANIC("falru cache")
#endif
#if defined(USE_CACHE_LRU)
#define BUILD_LRU_CACHE(c) do { \
#define BUILD_LRU_CACHE do { \
LRU *tags = new LRU(numSets, block_size, assoc, latency); \
BUILD_COMPRESSED_CACHE(LRU, tags, c); \
BUILD_CACHE(LRU, tags); \
} while (0)
#else
#define BUILD_LRU_CACHE(c) BUILD_CACHE_PANIC("lru cache")
#define BUILD_LRU_CACHE BUILD_CACHE_PANIC("lru cache")
#endif
#if defined(USE_CACHE_SPLIT)
#define BUILD_SPLIT_CACHE(c) do { \
#define BUILD_SPLIT_CACHE do { \
Split *tags = new Split(numSets, block_size, assoc, split_size, lifo, \
two_queue, latency); \
BUILD_COMPRESSED_CACHE(Split, tags, c); \
BUILD_CACHE(Split, tags); \
} while (0)
#else
#define BUILD_SPLIT_CACHE(c) BUILD_CACHE_PANIC("split cache")
#define BUILD_SPLIT_CACHE BUILD_CACHE_PANIC("split cache")
#endif
#if defined(USE_CACHE_SPLIT_LIFO)
#define BUILD_SPLIT_LIFO_CACHE(c) do { \
#define BUILD_SPLIT_LIFO_CACHE do { \
SplitLIFO *tags = new SplitLIFO(block_size, size, assoc, \
latency, two_queue, -1); \
BUILD_COMPRESSED_CACHE(SplitLIFO, tags, c); \
BUILD_CACHE(SplitLIFO, tags); \
} while (0)
#else
#define BUILD_SPLIT_LIFO_CACHE(c) BUILD_CACHE_PANIC("lifo cache")
#define BUILD_SPLIT_LIFO_CACHE BUILD_CACHE_PANIC("lifo cache")
#endif
#if defined(USE_CACHE_IIC)
#define BUILD_IIC_CACHE(c) do { \
#define BUILD_IIC_CACHE do { \
IIC *tags = new IIC(iic_params); \
BUILD_COMPRESSED_CACHE(IIC, tags, c); \
BUILD_CACHE(IIC, tags); \
} while (0)
#else
#define BUILD_IIC_CACHE(c) BUILD_CACHE_PANIC("iic")
#define BUILD_IIC_CACHE BUILD_CACHE_PANIC("iic")
#endif
#define BUILD_CACHES(c) do { \
#define BUILD_CACHES do { \
if (repl == NULL) { \
if (numSets == 1) { \
BUILD_FALRU_CACHE(c); \
BUILD_FALRU_CACHE; \
} else { \
if (split == true) { \
BUILD_SPLIT_CACHE(c); \
BUILD_SPLIT_CACHE; \
} else if (lifo == true) { \
BUILD_SPLIT_LIFO_CACHE(c); \
BUILD_SPLIT_LIFO_CACHE; \
} else { \
BUILD_LRU_CACHE(c); \
BUILD_LRU_CACHE; \
} \
} \
} else { \
BUILD_IIC_CACHE(c); \
BUILD_IIC_CACHE; \
} \
} while (0)
@ -399,8 +364,7 @@ CREATE_SIM_OBJECT(BaseCache)
const void *repl = NULL;
#endif
SimpleCoherence *coh = new SimpleCoherence(protocol);
BUILD_CACHES(SimpleCoherence);
BUILD_CACHES;
return NULL;
}

View file

@ -48,13 +48,13 @@
#include "sim/sim_exit.hh" // for SimExitEvent
template<class TagStore, class Coherence>
Cache<TagStore,Coherence>::Cache(const std::string &_name,
Cache<TagStore,Coherence>::Params &params)
template<class TagStore>
Cache<TagStore>::Cache(const std::string &_name,
Cache<TagStore>::Params &params)
: BaseCache(_name, params.baseParams),
prefetchAccess(params.prefetchAccess),
tags(params.tags),
coherence(params.coherence), prefetcher(params.prefetcher),
prefetcher(params.prefetcher),
doFastWrites(params.doFastWrites),
prefetchMiss(params.prefetchMiss)
{
@ -67,23 +67,21 @@ Cache<TagStore,Coherence>::Cache(const std::string &_name,
memSidePort->setOtherPort(cpuSidePort);
tags->setCache(this);
coherence->setCache(this);
prefetcher->setCache(this);
}
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::regStats()
Cache<TagStore>::regStats()
{
BaseCache::regStats();
tags->regStats(name());
coherence->regStats(name());
prefetcher->regStats(name());
}
template<class TagStore, class Coherence>
template<class TagStore>
Port *
Cache<TagStore,Coherence>::getPort(const std::string &if_name, int idx)
Cache<TagStore>::getPort(const std::string &if_name, int idx)
{
if (if_name == "" || if_name == "cpu_side") {
return cpuSidePort;
@ -96,9 +94,9 @@ Cache<TagStore,Coherence>::getPort(const std::string &if_name, int idx)
}
}
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::deletePortRefs(Port *p)
Cache<TagStore>::deletePortRefs(Port *p)
{
if (cpuSidePort == p || memSidePort == p)
panic("Can only delete functional ports\n");
@ -107,9 +105,9 @@ Cache<TagStore,Coherence>::deletePortRefs(Port *p)
}
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
{
uint64_t overwrite_val;
bool overwrite_mem;
@ -152,9 +150,9 @@ Cache<TagStore,Coherence>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
/////////////////////////////////////////////////////
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::markInService(MSHR *mshr)
Cache<TagStore>::markInService(MSHR *mshr)
{
markInServiceInternal(mshr);
#if 0
@ -171,9 +169,9 @@ Cache<TagStore,Coherence>::markInService(MSHR *mshr)
}
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::squash(int threadNum)
Cache<TagStore>::squash(int threadNum)
{
bool unblock = false;
BlockedCause cause = NUM_BLOCKED_CAUSES;
@ -199,9 +197,9 @@ Cache<TagStore,Coherence>::squash(int threadNum)
//
/////////////////////////////////////////////////////
template<class TagStore, class Coherence>
template<class TagStore>
bool
Cache<TagStore,Coherence>::access(PacketPtr pkt, BlkType *&blk, int &lat)
Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk, int &lat)
{
if (pkt->req->isUncacheable()) {
blk = NULL;
@ -280,9 +278,9 @@ Cache<TagStore,Coherence>::access(PacketPtr pkt, BlkType *&blk, int &lat)
}
template<class TagStore, class Coherence>
template<class TagStore>
bool
Cache<TagStore,Coherence>::timingAccess(PacketPtr pkt)
Cache<TagStore>::timingAccess(PacketPtr pkt)
{
//@todo Add back in MemDebug Calls
// MemDebug::cacheAccess(pkt);
@ -398,10 +396,10 @@ Cache<TagStore,Coherence>::timingAccess(PacketPtr pkt)
}
template<class TagStore, class Coherence>
template<class TagStore>
PacketPtr
Cache<TagStore,Coherence>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
bool needsExclusive)
Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
bool needsExclusive)
{
bool blkValid = blk && blk->isValid();
@ -441,9 +439,9 @@ Cache<TagStore,Coherence>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
}
template<class TagStore, class Coherence>
template<class TagStore>
Tick
Cache<TagStore,Coherence>::atomicAccess(PacketPtr pkt)
Cache<TagStore>::atomicAccess(PacketPtr pkt)
{
int lat = hitLatency;
@ -511,10 +509,10 @@ Cache<TagStore,Coherence>::atomicAccess(PacketPtr pkt)
}
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::functionalAccess(PacketPtr pkt,
CachePort *otherSidePort)
Cache<TagStore>::functionalAccess(PacketPtr pkt,
CachePort *otherSidePort)
{
Addr blk_addr = pkt->getAddr() & ~(blkSize - 1);
BlkType *blk = tags->findBlock(pkt->getAddr());
@ -561,9 +559,9 @@ Cache<TagStore,Coherence>::functionalAccess(PacketPtr pkt,
/////////////////////////////////////////////////////
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
{
assert(blk);
assert(pkt->needsExclusive() ? blk->isWritable() : blk->isValid());
@ -586,10 +584,10 @@ Cache<TagStore,Coherence>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
}
template<class TagStore, class Coherence>
template<class TagStore>
bool
Cache<TagStore,Coherence>::satisfyMSHR(MSHR *mshr, PacketPtr pkt,
BlkType *blk)
Cache<TagStore>::satisfyMSHR(MSHR *mshr, PacketPtr pkt,
BlkType *blk)
{
// respond to MSHR targets, if any
@ -642,9 +640,9 @@ Cache<TagStore,Coherence>::satisfyMSHR(MSHR *mshr, PacketPtr pkt,
}
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::handleResponse(PacketPtr pkt)
Cache<TagStore>::handleResponse(PacketPtr pkt)
{
Tick time = curTick + hitLatency;
MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
@ -730,9 +728,9 @@ Cache<TagStore,Coherence>::handleResponse(PacketPtr pkt)
template<class TagStore, class Coherence>
template<class TagStore>
PacketPtr
Cache<TagStore,Coherence>::writebackBlk(BlkType *blk)
Cache<TagStore>::writebackBlk(BlkType *blk)
{
assert(blk && blk->isValid() && blk->isDirty());
@ -754,12 +752,13 @@ Cache<TagStore,Coherence>::writebackBlk(BlkType *blk)
// is called by both atomic and timing-mode accesses, and in atomic
// mode we don't mess with the write buffer (we just perform the
// writebacks atomically once the original request is complete).
template<class TagStore, class Coherence>
typename Cache<TagStore,Coherence>::BlkType*
Cache<TagStore,Coherence>::handleFill(PacketPtr pkt, BlkType *blk,
PacketList &writebacks)
template<class TagStore>
typename Cache<TagStore>::BlkType*
Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
PacketList &writebacks)
{
Addr addr = pkt->getAddr();
CacheBlk::State old_state = blk ? blk->status : 0;
if (blk == NULL) {
// better have read new data...
@ -795,21 +794,24 @@ Cache<TagStore,Coherence>::handleFill(PacketPtr pkt, BlkType *blk,
}
blk->tag = tags->extractTag(addr);
blk->status = coherence->getNewState(pkt);
} else {
// existing block... probably an upgrade
assert(blk->tag == tags->extractTag(addr));
// either we're getting new data or the block should already be valid
assert(pkt->isRead() || blk->isValid());
CacheBlk::State old_state = blk->status;
blk->status = coherence->getNewState(pkt, old_state);
if (blk->status != old_state)
DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
addr, old_state, blk->status);
else
warn("Changing state to same value\n");
}
if (pkt->needsExclusive()) {
blk->status = BlkValid | BlkWritable | BlkDirty;
} else if (!pkt->sharedAsserted()) {
blk->status = BlkValid | BlkWritable;
} else {
blk->status = BlkValid;
}
DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
addr, old_state, blk->status);
// if we got new data, copy it in
if (pkt->isRead()) {
std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
@ -827,11 +829,11 @@ Cache<TagStore,Coherence>::handleFill(PacketPtr pkt, BlkType *blk,
//
/////////////////////////////////////////////////////
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::doTimingSupplyResponse(PacketPtr req_pkt,
uint8_t *blk_data,
bool already_copied)
Cache<TagStore>::doTimingSupplyResponse(PacketPtr req_pkt,
uint8_t *blk_data,
bool already_copied)
{
// timing-mode snoop responses require a new packet, unless we
// already made a copy...
@ -842,10 +844,10 @@ Cache<TagStore,Coherence>::doTimingSupplyResponse(PacketPtr req_pkt,
memSidePort->respond(pkt, curTick + hitLatency);
}
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::handleSnoop(PacketPtr pkt, BlkType *blk,
bool is_timing, bool is_deferred)
Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
bool is_timing, bool is_deferred)
{
if (!blk || !blk->isValid()) {
return;
@ -894,9 +896,9 @@ Cache<TagStore,Coherence>::handleSnoop(PacketPtr pkt, BlkType *blk,
}
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::snoopTiming(PacketPtr pkt)
Cache<TagStore>::snoopTiming(PacketPtr pkt)
{
if (pkt->req->isUncacheable()) {
//Can't get a hit on an uncacheable address
@ -959,9 +961,9 @@ Cache<TagStore,Coherence>::snoopTiming(PacketPtr pkt)
}
template<class TagStore, class Coherence>
template<class TagStore>
Tick
Cache<TagStore,Coherence>::snoopAtomic(PacketPtr pkt)
Cache<TagStore>::snoopAtomic(PacketPtr pkt)
{
if (pkt->req->isUncacheable()) {
// Can't get a hit on an uncacheable address
@ -975,9 +977,9 @@ Cache<TagStore,Coherence>::snoopAtomic(PacketPtr pkt)
}
template<class TagStore, class Coherence>
template<class TagStore>
MSHR *
Cache<TagStore,Coherence>::getNextMSHR()
Cache<TagStore>::getNextMSHR()
{
// Check both MSHR queue and write buffer for potential requests
MSHR *miss_mshr = mshrQueue.getNextMSHR();
@ -1051,9 +1053,9 @@ Cache<TagStore,Coherence>::getNextMSHR()
}
template<class TagStore, class Coherence>
template<class TagStore>
PacketPtr
Cache<TagStore,Coherence>::getTimingPacket()
Cache<TagStore>::getTimingPacket()
{
MSHR *mshr = getNextMSHR();
@ -1100,9 +1102,9 @@ Cache<TagStore,Coherence>::getTimingPacket()
//
///////////////
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::CpuSidePort::
Cache<TagStore>::CpuSidePort::
getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
{
// CPU side port doesn't snoop; it's a target only.
@ -1112,9 +1114,9 @@ getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
}
template<class TagStore, class Coherence>
template<class TagStore>
bool
Cache<TagStore,Coherence>::CpuSidePort::recvTiming(PacketPtr pkt)
Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
{
if (pkt->isRequest() && blocked) {
DPRINTF(Cache,"Scheduling a retry while blocked\n");
@ -1127,17 +1129,17 @@ Cache<TagStore,Coherence>::CpuSidePort::recvTiming(PacketPtr pkt)
}
template<class TagStore, class Coherence>
template<class TagStore>
Tick
Cache<TagStore,Coherence>::CpuSidePort::recvAtomic(PacketPtr pkt)
Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
{
return myCache()->atomicAccess(pkt);
}
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::CpuSidePort::recvFunctional(PacketPtr pkt)
Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
{
checkFunctional(pkt);
if (pkt->result != Packet::Success)
@ -1145,10 +1147,10 @@ Cache<TagStore,Coherence>::CpuSidePort::recvFunctional(PacketPtr pkt)
}
template<class TagStore, class Coherence>
Cache<TagStore,Coherence>::
template<class TagStore>
Cache<TagStore>::
CpuSidePort::CpuSidePort(const std::string &_name,
Cache<TagStore,Coherence> *_cache)
Cache<TagStore> *_cache)
: BaseCache::CachePort(_name, _cache)
{
}
@ -1159,9 +1161,9 @@ CpuSidePort::CpuSidePort(const std::string &_name,
//
///////////////
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::MemSidePort::
Cache<TagStore>::MemSidePort::
getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
{
otherPort->getPeerAddressRanges(resp, snoop);
@ -1171,9 +1173,9 @@ getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
}
template<class TagStore, class Coherence>
template<class TagStore>
bool
Cache<TagStore,Coherence>::MemSidePort::recvTiming(PacketPtr pkt)
Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
{
// this needs to be fixed so that the cache updates the mshr and sends the
// packet back out on the link, but it probably won't happen so until this
@ -1196,9 +1198,9 @@ Cache<TagStore,Coherence>::MemSidePort::recvTiming(PacketPtr pkt)
}
template<class TagStore, class Coherence>
template<class TagStore>
Tick
Cache<TagStore,Coherence>::MemSidePort::recvAtomic(PacketPtr pkt)
Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
{
// in atomic mode, responses go back to the sender via the
// function return from sendAtomic(), not via a separate
@ -1209,9 +1211,9 @@ Cache<TagStore,Coherence>::MemSidePort::recvAtomic(PacketPtr pkt)
}
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::MemSidePort::recvFunctional(PacketPtr pkt)
Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
{
checkFunctional(pkt);
if (pkt->result != Packet::Success)
@ -1220,9 +1222,9 @@ Cache<TagStore,Coherence>::MemSidePort::recvFunctional(PacketPtr pkt)
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::MemSidePort::sendPacket()
Cache<TagStore>::MemSidePort::sendPacket()
{
// if we have responses that are ready, they take precedence
if (deferredPacketReady()) {
@ -1278,28 +1280,27 @@ Cache<TagStore,Coherence>::MemSidePort::sendPacket()
}
}
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::MemSidePort::recvRetry()
Cache<TagStore>::MemSidePort::recvRetry()
{
assert(waitingOnRetry);
sendPacket();
}
template<class TagStore, class Coherence>
template<class TagStore>
void
Cache<TagStore,Coherence>::MemSidePort::processSendEvent()
Cache<TagStore>::MemSidePort::processSendEvent()
{
assert(!waitingOnRetry);
sendPacket();
}
template<class TagStore, class Coherence>
Cache<TagStore,Coherence>::
MemSidePort::MemSidePort(const std::string &_name,
Cache<TagStore,Coherence> *_cache)
template<class TagStore>
Cache<TagStore>::
MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache)
: BaseCache::CachePort(_name, _cache)
{
// override default send event from SimpleTimingPort
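With the coherence object gone, handleFill() in the hunk above derives the new block status straight from the packet (needsExclusive / sharedAsserted) instead of calling coherence->getNewState(). A self-contained model of that decision follows; fillStatus() and the bit values are illustrative stand-ins, not gem5 code:

#include <cstdio>

// Placeholder bit values; gem5 defines the real BlkValid/BlkWritable/BlkDirty
// flags in cache_blk.hh.
enum BlkStatus { BlkValid = 0x1, BlkWritable = 0x2, BlkDirty = 0x4 };

// fillStatus() is a hypothetical name; it mirrors the inline decision the new
// Cache<TagStore>::handleFill() makes in the hunk above.
int fillStatus(bool needsExclusive, bool sharedAsserted)
{
    if (needsExclusive)
        return BlkValid | BlkWritable | BlkDirty;   // Modified
    else if (!sharedAsserted)
        return BlkValid | BlkWritable;              // Exclusive
    else
        return BlkValid;                            // Shared
}

int main()
{
    std::printf("write miss:         0x%x (M)\n", fillStatus(true,  false));
    std::printf("read, no sharers:   0x%x (E)\n", fillStatus(false, false));
    std::printf("read, shared line:  0x%x (S)\n", fillStatus(false, true));
    return 0;
}

This reproduces the outcomes the old transitionTable gave for ReadExResp and ReadResp (Modified, or Exclusive/Shared depending on the shared line), without the table or the protocol object.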

View file

@ -1,8 +0,0 @@
from m5.SimObject import SimObject
from m5.params import *
class Coherence(Enum): vals = ['uni', 'msi', 'mesi', 'mosi', 'moesi']
class CoherenceProtocol(SimObject):
type = 'CoherenceProtocol'
do_upgrades = Param.Bool(True, "use upgrade transactions?")
protocol = Param.Coherence("name of coherence protocol")

View file

@ -1,36 +0,0 @@
# -*- mode:python -*-
# Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
Import('*')
SimObject('CoherenceProtocol.py')
Source('coherence_protocol.cc')

View file

@ -1,469 +0,0 @@
/*
* Copyright (c) 2002-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
* Steve Reinhardt
* Ron Dreslinski
*/
/**
* @file
* Definitions of CoherenceProtocol.
*/
#include <string>
#include "base/misc.hh"
#include "mem/cache/miss/mshr.hh"
#include "mem/cache/cache.hh"
#include "mem/cache/coherence/coherence_protocol.hh"
#include "sim/builder.hh"
using namespace std;
CoherenceProtocol::StateTransition::StateTransition()
: busCmd(MemCmd::InvalidCmd), newState(-1), snoopFunc(invalidTransition)
{
}
void
CoherenceProtocol::regStats()
{
// Even though we count all the possible transitions in the
// requestCount and snoopCount arrays, most of these are invalid,
// so we just select the interesting ones to print here.
requestCount[Invalid][MemCmd::ReadReq]
.name(name() + ".read_invalid")
.desc("read misses to invalid blocks")
;
requestCount[Invalid][MemCmd::WriteReq]
.name(name() +".write_invalid")
.desc("write misses to invalid blocks")
;
requestCount[Invalid][MemCmd::SoftPFReq]
.name(name() +".swpf_invalid")
.desc("soft prefetch misses to invalid blocks")
;
requestCount[Invalid][MemCmd::HardPFReq]
.name(name() +".hwpf_invalid")
.desc("hard prefetch misses to invalid blocks")
;
requestCount[Shared][MemCmd::WriteReq]
.name(name() + ".write_shared")
.desc("write misses to shared blocks")
;
requestCount[Owned][MemCmd::WriteReq]
.name(name() + ".write_owned")
.desc("write misses to owned blocks")
;
snoopCount[Shared][MemCmd::ReadReq]
.name(name() + ".snoop_read_shared")
.desc("read snoops on shared blocks")
;
snoopCount[Shared][MemCmd::ReadExReq]
.name(name() + ".snoop_readex_shared")
.desc("readEx snoops on shared blocks")
;
snoopCount[Shared][MemCmd::UpgradeReq]
.name(name() + ".snoop_upgrade_shared")
.desc("upgradee snoops on shared blocks")
;
snoopCount[Modified][MemCmd::ReadReq]
.name(name() + ".snoop_read_modified")
.desc("read snoops on modified blocks")
;
snoopCount[Modified][MemCmd::ReadExReq]
.name(name() + ".snoop_readex_modified")
.desc("readEx snoops on modified blocks")
;
snoopCount[Owned][MemCmd::ReadReq]
.name(name() + ".snoop_read_owned")
.desc("read snoops on owned blocks")
;
snoopCount[Owned][MemCmd::ReadExReq]
.name(name() + ".snoop_readex_owned")
.desc("readEx snoops on owned blocks")
;
snoopCount[Owned][MemCmd::UpgradeReq]
.name(name() + ".snoop_upgrade_owned")
.desc("upgrade snoops on owned blocks")
;
snoopCount[Exclusive][MemCmd::ReadReq]
.name(name() + ".snoop_read_exclusive")
.desc("read snoops on exclusive blocks")
;
snoopCount[Exclusive][MemCmd::ReadExReq]
.name(name() + ".snoop_readex_exclusive")
.desc("readEx snoops on exclusive blocks")
;
snoopCount[Shared][MemCmd::WriteInvalidateReq]
.name(name() + ".snoop_writeinv_shared")
.desc("WriteInvalidate snoops on shared blocks")
;
snoopCount[Owned][MemCmd::WriteInvalidateReq]
.name(name() + ".snoop_writeinv_owned")
.desc("WriteInvalidate snoops on owned blocks")
;
snoopCount[Exclusive][MemCmd::WriteInvalidateReq]
.name(name() + ".snoop_writeinv_exclusive")
.desc("WriteInvalidate snoops on exclusive blocks")
;
snoopCount[Modified][MemCmd::WriteInvalidateReq]
.name(name() + ".snoop_writeinv_modified")
.desc("WriteInvalidate snoops on modified blocks")
;
snoopCount[Invalid][MemCmd::WriteInvalidateReq]
.name(name() + ".snoop_writeinv_invalid")
.desc("WriteInvalidate snoops on invalid blocks")
;
}
bool
CoherenceProtocol::invalidateTrans(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk, MSHR *mshr,
CacheBlk::State & new_state)
{
// invalidate the block
new_state = (blk->status & ~stateMask) | Invalid;
return false;
}
bool
CoherenceProtocol::supplyTrans(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk,
MSHR *mshr,
CacheBlk::State & new_state)
{
return true;
}
bool
CoherenceProtocol::supplyAndGotoSharedTrans(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk,
MSHR *mshr,
CacheBlk::State & new_state)
{
new_state = (blk->status & ~stateMask) | Shared;
pkt->assertShared();
return supplyTrans(cache, pkt, blk, mshr, new_state);
}
bool
CoherenceProtocol::supplyAndGotoOwnedTrans(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk,
MSHR *mshr,
CacheBlk::State & new_state)
{
new_state = (blk->status & ~stateMask) | Owned;
pkt->assertShared();
return supplyTrans(cache, pkt, blk, mshr, new_state);
}
bool
CoherenceProtocol::supplyAndInvalidateTrans(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk,
MSHR *mshr,
CacheBlk::State & new_state)
{
new_state = (blk->status & ~stateMask) | Invalid;
return supplyTrans(cache, pkt, blk, mshr, new_state);
}
bool
CoherenceProtocol::assertShared(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk,
MSHR *mshr,
CacheBlk::State & new_state)
{
new_state = (blk->status & ~stateMask) | Shared;
pkt->assertShared();
return false;
}
CoherenceProtocol::CoherenceProtocol(const string &name,
const string &protocol,
const bool doUpgrades)
: SimObject(name)
{
// Python should catch this, but in case it doesn't...
if (!(protocol == "msi" || protocol == "mesi" ||
protocol == "mosi" || protocol == "moesi")) {
fatal("CoherenceProtocol: unrecognized protocol %s\n", protocol);
}
bool hasOwned = (protocol == "mosi" || protocol == "moesi");
bool hasExclusive = (protocol == "mesi" || protocol == "moesi");
if (hasOwned && !doUpgrades) {
fatal("CoherenceProtocol: ownership protocols require upgrade "
"transactions\n(write miss on owned block generates ReadExcl, "
"which will clobber dirty block)\n");
}
// set up a few shortcuts to save typing & visual clutter
typedef MemCmd MC;
StateTransition (&tt)[stateMax+1][MC::NUM_MEM_CMDS] = transitionTable;
MC::Command writeToSharedCmd =
doUpgrades ? MC::UpgradeReq : MC::ReadExReq;
MC::Command writeToSharedResp =
doUpgrades ? MC::UpgradeResp : MC::ReadExResp;
// Note that all transitions by default cause a panic.
// Override the valid transitions with the appropriate actions here.
//
// ----- incoming requests: specify outgoing bus request -----
//
tt[Invalid][MC::ReadReq].onRequest(MC::ReadReq);
// we only support write allocate right now
tt[Invalid][MC::WriteReq].onRequest(MC::ReadExReq);
tt[Invalid][MC::ReadExReq].onRequest(MC::ReadExReq);
tt[Invalid][MC::SwapReq].onRequest(MC::ReadExReq);
tt[Invalid][MC::UpgradeReq].onRequest(MC::UpgradeReq);
tt[Shared][MC::WriteReq].onRequest(writeToSharedCmd);
tt[Shared][MC::ReadExReq].onRequest(MC::ReadExReq);
tt[Shared][MC::SwapReq].onRequest(writeToSharedCmd);
if (hasOwned) {
tt[Owned][MC::WriteReq].onRequest(writeToSharedCmd);
tt[Owned][MC::ReadExReq].onRequest(MC::ReadExReq);
tt[Owned][MC::SwapReq].onRequest(writeToSharedCmd);
}
// Prefetching causes a read
tt[Invalid][MC::SoftPFReq].onRequest(MC::ReadReq);
tt[Invalid][MC::HardPFReq].onRequest(MC::ReadReq);
//
// ----- on response to given request: specify new state -----
//
tt[Invalid][MC::ReadExResp].onResponse(Modified);
tt[Shared][writeToSharedResp].onResponse(Modified);
// Go to Exclusive state on read response if we have one (will
// move into shared if the shared line is asserted in the
// getNewState function)
//
// originally had this as:
// tt[Invalid][MC::ReadResp].onResponse(hasExclusive ? Exclusive: Shared);
// ...but for some reason that caused a link error...
if (hasExclusive) {
tt[Invalid][MC::ReadResp].onResponse(Exclusive);
} else {
tt[Invalid][MC::ReadResp].onResponse(Shared);
}
if (hasOwned) {
tt[Owned][writeToSharedResp].onResponse(Modified);
}
//
// ----- bus snoop transition functions -----
//
tt[Invalid][MC::ReadReq].onSnoop(nullTransition);
tt[Invalid][MC::ReadExReq].onSnoop(nullTransition);
tt[Invalid][MC::WriteInvalidateReq].onSnoop(invalidateTrans);
tt[Shared][MC::ReadReq].onSnoop(hasExclusive
? assertShared : nullTransition);
tt[Shared][MC::ReadExReq].onSnoop(invalidateTrans);
tt[Shared][MC::WriteInvalidateReq].onSnoop(invalidateTrans);
if (doUpgrades) {
tt[Invalid][MC::UpgradeReq].onSnoop(nullTransition);
tt[Shared][MC::UpgradeReq].onSnoop(invalidateTrans);
}
tt[Modified][MC::ReadExReq].onSnoop(supplyAndInvalidateTrans);
tt[Modified][MC::ReadReq].onSnoop(hasOwned
? supplyAndGotoOwnedTrans
: supplyAndGotoSharedTrans);
tt[Modified][MC::WriteInvalidateReq].onSnoop(invalidateTrans);
if (hasExclusive) {
tt[Exclusive][MC::ReadReq].onSnoop(assertShared);
tt[Exclusive][MC::ReadExReq].onSnoop(invalidateTrans);
tt[Exclusive][MC::WriteInvalidateReq].onSnoop(invalidateTrans);
}
if (hasOwned) {
tt[Owned][MC::ReadReq].onSnoop(supplyAndGotoOwnedTrans);
tt[Owned][MC::ReadExReq].onSnoop(supplyAndInvalidateTrans);
tt[Owned][MC::UpgradeReq].onSnoop(invalidateTrans);
tt[Owned][MC::WriteInvalidateReq].onSnoop(invalidateTrans);
}
// @todo add in hardware prefetch to this list
}
MemCmd
CoherenceProtocol::getBusCmd(MemCmd cmdIn, CacheBlk::State state,
MSHR *mshr)
{
state &= stateMask;
int cmd_idx = cmdIn.toInt();
assert(0 <= state && state <= stateMax);
assert(0 <= cmd_idx && cmd_idx < MemCmd::NUM_MEM_CMDS);
MemCmd::Command cmdOut = transitionTable[state][cmd_idx].busCmd;
assert(cmdOut != MemCmd::InvalidCmd);
++requestCount[state][cmd_idx];
return cmdOut;
}
CacheBlk::State
CoherenceProtocol::getNewState(PacketPtr pkt, CacheBlk::State oldState)
{
CacheBlk::State state = oldState & stateMask;
int cmd_idx = pkt->cmdToIndex();
assert(0 <= state && state <= stateMax);
assert(0 <= cmd_idx && cmd_idx < MemCmd::NUM_MEM_CMDS);
CacheBlk::State newState = transitionTable[state][cmd_idx].newState;
//Check if it's exclusive and the shared line was asserted,
//then goto shared instead
if (newState == Exclusive && pkt->sharedAsserted()) {
newState = Shared;
}
assert(newState != -1);
//Make sure not to lose any other state information
newState = (oldState & ~stateMask) | newState;
return newState;
}
bool
CoherenceProtocol::handleBusRequest(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk,
MSHR *mshr,
CacheBlk::State & new_state)
{
if (blk == NULL) {
// nothing to do if we don't have a block
return false;
}
CacheBlk::State state = blk->status & stateMask;
int cmd_idx = pkt->cmdToIndex();
assert(0 <= state && state <= stateMax);
assert(0 <= cmd_idx && cmd_idx < MemCmd::NUM_MEM_CMDS);
// assert(mshr == NULL); // can't currently handle outstanding requests
//Check first for an MSHR, and also ensure, if there is one, that it is not in service
assert(!mshr || mshr->inService == 0);
++snoopCount[state][cmd_idx];
bool ret = transitionTable[state][cmd_idx].snoopFunc(cache, pkt, blk, mshr,
new_state);
return ret;
}
bool
CoherenceProtocol::nullTransition(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk, MSHR *mshr,
CacheBlk::State & new_state)
{
// do nothing
if (blk)
new_state = blk->status;
return false;
}
bool
CoherenceProtocol::invalidTransition(BaseCache *cache, PacketPtr &pkt,
CacheBlk *blk, MSHR *mshr,
CacheBlk::State & new_state)
{
panic("Invalid transition");
return false;
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
BEGIN_DECLARE_SIM_OBJECT_PARAMS(CoherenceProtocol)
Param<string> protocol;
Param<bool> do_upgrades;
END_DECLARE_SIM_OBJECT_PARAMS(CoherenceProtocol)
BEGIN_INIT_SIM_OBJECT_PARAMS(CoherenceProtocol)
INIT_PARAM(protocol, "name of coherence protocol"),
INIT_PARAM_DFLT(do_upgrades, "use upgrade transactions?", true)
END_INIT_SIM_OBJECT_PARAMS(CoherenceProtocol)
CREATE_SIM_OBJECT(CoherenceProtocol)
{
return new CoherenceProtocol(getInstanceName(), protocol,
do_upgrades);
}
REGISTER_SIM_OBJECT("CoherenceProtocol", CoherenceProtocol)
#endif // DOXYGEN_SHOULD_SKIP_THIS

View file

@ -1,257 +0,0 @@
/*
* Copyright (c) 2002-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
* Ron Dreslinski
* Steve Reinhardt
*/
/**
* @file
* Declaration of CoherenceProtocol, a basic coherence policy.
*/
#ifndef __COHERENCE_PROTOCOL_HH__
#define __COHERENCE_PROTOCOL_HH__
#include <string>
#include "sim/sim_object.hh"
#include "mem/packet.hh"
#include "mem/cache/cache_blk.hh"
#include "base/statistics.hh"
class BaseCache;
class MSHR;
/**
* A simple coherence policy for the memory hierarchy. Currently implements
* MSI, MESI, and MOESI protocols.
*/
class CoherenceProtocol : public SimObject
{
public:
/**
* Construct and initialize this policy.
* @param name The name of this policy.
* @param protocol The string representation of the protocol to use.
* @param doUpgrades True if bus upgrades should be used.
*/
CoherenceProtocol(const std::string &name, const std::string &protocol,
const bool doUpgrades);
/**
* Destructor.
*/
virtual ~CoherenceProtocol() {};
/**
* Register statistics
*/
virtual void regStats();
/**
* Get the proper bus command for the given command and status.
* @param cmd The request's command.
* @param status The current state of the cache block.
* @param mshr The MSHR matching the request.
* @return The proper bus command, as determined by the protocol.
*/
MemCmd getBusCmd(MemCmd cmd, CacheBlk::State status,
MSHR *mshr = NULL);
/**
* Return the proper state given the current state and the bus response.
* @param pkt The bus response.
* @param oldState The current block state.
* @return The new state.
*/
CacheBlk::State getNewState(PacketPtr pkt,
CacheBlk::State oldState = 0);
/**
* Handle snooped bus requests.
* @param cache The cache that snooped the request.
* @param pkt The snooped bus request.
* @param blk The cache block corresponding to the request, if any.
* @param mshr The MSHR corresponding to the request, if any.
* @param new_state The new coherence state of the block.
* @return True if the request should be satisfied locally.
*/
bool handleBusRequest(BaseCache *cache, PacketPtr &pkt, CacheBlk *blk,
MSHR *mshr, CacheBlk::State &new_state);
protected:
/** Snoop function type. */
typedef bool (*SnoopFuncType)(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
//
// Standard snoop transition functions
//
/**
* Do nothing transition.
*/
static bool nullTransition(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
/**
* Invalid transition, basically panic.
*/
static bool invalidTransition(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
/**
* Invalidate block, move to Invalid state.
*/
static bool invalidateTrans(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
/**
* Supply data, no state transition.
*/
static bool supplyTrans(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
/**
* Supply data and go to Shared state.
*/
static bool supplyAndGotoSharedTrans(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
/**
* Supply data and go to Owned state.
*/
static bool supplyAndGotoOwnedTrans(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
/**
* Invalidate block, supply data, and go to Invalid state.
*/
static bool supplyAndInvalidateTrans(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
/**
* Assert the shared line for a block that is shared/exclusive.
*/
static bool assertShared(BaseCache *, PacketPtr &, CacheBlk *,
MSHR *, CacheBlk::State&);
/**
* Definition of protocol state transitions.
*/
class StateTransition
{
friend class CoherenceProtocol;
/** The bus command of this transition. */
Packet::Command busCmd;
/** The state to transition to. */
int newState;
/** The snoop function for this transition. */
SnoopFuncType snoopFunc;
/**
* Constructor, defaults to invalid transition.
*/
StateTransition();
/**
* Initialize bus command.
* @param cmd The bus command to use.
*/
void onRequest(Packet::Command cmd)
{
busCmd = cmd;
}
/**
* Set the transition state.
* @param s The new state.
*/
void onResponse(CacheBlk::State s)
{
newState = s;
}
/**
* Initialize the snoop function.
* @param f The new snoop function.
*/
void onSnoop(SnoopFuncType f)
{
snoopFunc = f;
}
};
friend class CoherenceProtocol::StateTransition;
/** Mask to select status bits relevant to coherence protocol. */
static const int stateMask = BlkValid | BlkWritable | BlkDirty;
/** The Modified (M) state. */
static const int Modified = BlkValid | BlkWritable | BlkDirty;
/** The Owned (O) state. */
static const int Owned = BlkValid | BlkDirty;
/** The Exclusive (E) state. */
static const int Exclusive = BlkValid | BlkWritable;
/** The Shared (S) state. */
static const int Shared = BlkValid;
/** The Invalid (I) state. */
static const int Invalid = 0;
/**
* Maximum state encoding value (used to size transition lookup
* table). Could be more than number of states, depends on
* encoding of status bits.
*/
static const int stateMax = stateMask;
/**
* The table of all possible transitions, organized by starting state and
* request command.
*/
StateTransition transitionTable[stateMax+1][MemCmd::NUM_MEM_CMDS];
/**
* @addtogroup CoherenceStatistics
* @{
*/
/**
* State accesses from parent cache.
*/
Stats::Scalar<> requestCount[stateMax+1][MemCmd::NUM_MEM_CMDS];
/**
* State accesses from snooped requests.
*/
Stats::Scalar<> snoopCount[stateMax+1][MemCmd::NUM_MEM_CMDS];
/**
* @}
*/
};
#endif // __COHERENCE_PROTOCOL_HH__
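For reference, the deleted header above encodes the MOESI states as combinations of the block status bits. A minimal restatement of that mapping (the bit values here are placeholders; gem5 defines the real flags in cache_blk.hh):

#include <cstdio>

// Placeholder bit values; the real flags come from cache_blk.hh.
enum { BlkValid = 0x1, BlkWritable = 0x2, BlkDirty = 0x4 };

const int Modified  = BlkValid | BlkWritable | BlkDirty;  // M
const int Owned     = BlkValid | BlkDirty;                // O
const int Exclusive = BlkValid | BlkWritable;             // E
const int Shared    = BlkValid;                           // S
const int Invalid   = 0;                                  // I

int main()
{
    std::printf("M=0x%x O=0x%x E=0x%x S=0x%x I=0x%x\n",
                Modified, Owned, Exclusive, Shared, Invalid);
    return 0;
}

Because the states are just status-bit combinations, the inlined fill logic can assign BlkValid | BlkWritable | BlkDirty and friends directly; that inline path only ever produces the M, E, and S encodings.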

View file

@ -1,163 +0,0 @@
/*
* Copyright (c) 2003-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
* Ron Dreslinski
*/
/**
* @file
* Declaration of a simple coherence policy.
*/
#ifndef __SIMPLE_COHERENCE_HH__
#define __SIMPLE_COHERENCE_HH__
#include <string>
#include "mem/packet.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/mshr_queue.hh"
#include "mem/cache/coherence/coherence_protocol.hh"
class BaseCache;
/**
* A simple MP coherence policy. This policy assumes an atomic bus and only one
* level of cache.
*/
class SimpleCoherence
{
protected:
/** Pointer to the parent cache. */
BaseCache *cache;
/** Pointer to the coherence protocol. */
CoherenceProtocol *protocol;
public:
/**
* Construct and initialize this coherence policy.
* @param _protocol The coherence protocol to use.
*/
SimpleCoherence(CoherenceProtocol *_protocol)
: protocol(_protocol)
{
}
/**
* Set the pointer to the parent cache.
* @param _cache The parent cache.
*/
void setCache(BaseCache *_cache)
{
cache = _cache;
}
/**
* Register statistics.
* @param name The name to prepend to stat descriptions.
*/
void regStats(const std::string &name)
{
}
/**
* This policy does not forward invalidates, return NULL.
* @return NULL.
*/
PacketPtr getPacket()
{
return NULL;
}
/**
* Return the proper state given the current state and the bus response.
* @param pkt The bus response.
* @param current The current block state.
* @return The new state.
*/
CacheBlk::State getNewState(PacketPtr pkt,
CacheBlk::State current = 0)
{
return protocol->getNewState(pkt, current);
}
/**
* Handle snooped bus requests.
* @param pkt The snooped bus request.
* @param blk The cache block corresponding to the request, if any.
* @param mshr The MSHR corresponding to the request, if any.
* @param new_state Return the new state for the block.
*/
bool handleBusRequest(PacketPtr &pkt, CacheBlk *blk, MSHR *mshr,
CacheBlk::State &new_state)
{
// assert(mshr == NULL);
//Got rid of the assert: there could be an MSHR, but it can't be in service
if (blk != NULL)
{
if (pkt->cmd != MemCmd::Writeback) {
return protocol->handleBusRequest(cache, pkt, blk, mshr,
new_state);
}
else { //It is a writeback, must be ownership protocol, just keep state
new_state = blk->status;
}
}
return false;
}
/**
* Get the proper bus command for the given command and status.
* @param cmd The request's command.
* @param state The current state of the cache block.
* @return The proper bus command, as determined by the protocol.
*/
MemCmd getBusCmd(MemCmd cmd,
CacheBlk::State state)
{
if (cmd == MemCmd::Writeback) return MemCmd::Writeback;
return protocol->getBusCmd(cmd, state);
}
/**
* Return true if this coherence policy can handle fast cache writes.
*/
bool allowFastWrites() { return false; }
bool hasProtocol() { return true; }
};
#endif //__SIMPLE_COHERENCE_HH__