mem: Remove redundant is_top_level cache parameter

This patch takes the final step in removing the is_top_level parameter
from the cache. With the recent changes to read requests and write
invalidations, the parameter is no longer needed, and it is
consequently removed.

This also means that asymmetric cache hierarchies are now fully
supported; in fact, we already use one, with L1 caches, but no
table-walker caches, connected to a shared L2.
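
As a concrete illustration (not part of this patch), a hierarchy along
the following lines is now fully supported. This is a minimal sketch
assuming the stock L1_ICache, L1_DCache and L2Cache classes from
configs/common/Caches.py, an L2XBar, and the usual config boilerplate;
the object and port names are illustrative, not taken from this patch:

    # Sketch only: L1 caches and bare table-walker ports sharing an L2,
    # i.e. one branch of the cache tree is shallower than the other.
    system.cpu.icache = L1_ICache(size='32kB')
    system.cpu.dcache = L1_DCache(size='64kB')
    system.cpu.icache.cpu_side = system.cpu.icache_port
    system.cpu.dcache.cpu_side = system.cpu.dcache_port

    system.tol2bus = L2XBar()
    system.l2 = L2Cache(size='1MB')
    system.cpu.icache.mem_side = system.tol2bus.slave
    system.cpu.dcache.mem_side = system.tol2bus.slave
    # The TLB walkers connect straight to the L2 crossbar, with no
    # walker caches in between; no is_top_level bookkeeping required.
    system.cpu.itb.walker.port = system.tol2bus.slave
    system.cpu.dtb.walker.port = system.tol2bus.slave
    system.l2.cpu_side = system.tol2bus.master
    system.l2.mem_side = system.membus.slave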
Andreas Hansson 2015-07-03 10:14:43 -04:00
parent 71856cfbbc
commit b93c912013
8 changed files with 15 additions and 29 deletions

--- a/configs/common/Caches.py
+++ b/configs/common/Caches.py

@@ -52,7 +52,6 @@ class L1Cache(BaseCache):
     response_latency = 2
     mshrs = 4
     tgts_per_mshr = 20
-    is_top_level = True
 
 class L1_ICache(L1Cache):
     is_read_only = True
@@ -76,7 +75,6 @@ class IOCache(BaseCache):
     size = '1kB'
     tgts_per_mshr = 12
     forward_snoops = False
-    is_top_level = True
 
 class PageTableWalkerCache(BaseCache):
     assoc = 2
@@ -86,7 +84,6 @@ class PageTableWalkerCache(BaseCache):
     size = '1kB'
     tgts_per_mshr = 12
     forward_snoops = False
-    is_top_level = True
     # the x86 table walker actually writes to the table-walker cache
     if buildEnv['TARGET_ISA'] == 'x86':
         is_read_only = False

--- a/configs/common/O3_ARM_v7a.py
+++ b/configs/common/O3_ARM_v7a.py

@@ -149,7 +149,6 @@ class O3_ARM_v7a_ICache(BaseCache):
     tgts_per_mshr = 8
     size = '32kB'
     assoc = 2
-    is_top_level = True
     forward_snoops = False
     is_read_only = True
 
@@ -162,7 +161,6 @@ class O3_ARM_v7a_DCache(BaseCache):
     size = '32kB'
     assoc = 2
     write_buffers = 16
-    is_top_level = True
 
 # TLB Cache
 # Use a cache as a L2 TLB
@@ -174,7 +172,6 @@ class O3_ARM_v7aWalkCache(BaseCache):
     size = '1kB'
     assoc = 8
     write_buffers = 16
-    is_top_level = True
     forward_snoops = False
     is_read_only = True

--- a/configs/example/memcheck.py
+++ b/configs/example/memcheck.py

@@ -154,7 +154,7 @@ for t, m in zip(testerspec, multiplier):
 # Define a prototype L1 cache that we scale for all successive levels
 proto_l1 = BaseCache(size = '32kB', assoc = 4,
                      hit_latency = 1, response_latency = 1,
-                     tgts_per_mshr = 8, is_top_level = True)
+                     tgts_per_mshr = 8)
 
 if options.blocking:
      proto_l1.mshrs = 1
@@ -179,7 +179,6 @@ for scale in cachespec[:-1]:
      next.response_latency = prev.response_latency * 10
      next.assoc = prev.assoc * scale
      next.mshrs = prev.mshrs * scale
-     next.is_top_level = False
      cache_proto.insert(0, next)
 
 # Create a config to be used by all the traffic generators

--- a/configs/example/memtest.py
+++ b/configs/example/memtest.py

@@ -177,7 +177,7 @@ else:
 # Define a prototype L1 cache that we scale for all successive levels
 proto_l1 = BaseCache(size = '32kB', assoc = 4,
                      hit_latency = 1, response_latency = 1,
-                     tgts_per_mshr = 8, is_top_level = True)
+                     tgts_per_mshr = 8)
 
 if options.blocking:
      proto_l1.mshrs = 1
@@ -197,7 +197,6 @@ for scale in cachespec[:-1]:
      next.response_latency = prev.response_latency * 10
      next.assoc = prev.assoc * scale
      next.mshrs = prev.mshrs * scale
-     next.is_top_level = False
      cache_proto.insert(0, next)
 
 # Make a prototype for the tester to be used throughout

--- a/src/mem/cache/BaseCache.py
+++ b/src/mem/cache/BaseCache.py

@@ -64,7 +64,6 @@ class BaseCache(MemObject):
     forward_snoops = Param.Bool(True,
         "Forward snoops from mem side to cpu side")
-    is_top_level = Param.Bool(False, "Is this cache at the top level (e.g. L1)")
     is_read_only = Param.Bool(False, "Is this cache read only (e.g. inst)")
 
     prefetcher = Param.BasePrefetcher(NULL,"Prefetcher attached to cache")
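
A side effect worth noting: since the Param is gone entirely, any
downstream config that still assigns is_top_level now fails at
configuration time rather than silently setting a dead parameter. A
sketch of the failure mode (gem5 rejects assignments to unknown
parameters; the exact message wording here is an assumption):

    # Sketch: this raises at config time after this patch, with an
    # error along the lines of
    #   AttributeError: Class L1Cache has no parameter is_top_level
    l1 = L1Cache(is_top_level=True)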

--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc

@@ -78,7 +78,6 @@ BaseCache::BaseCache(const Params *p)
       responseLatency(p->response_latency),
       numTarget(p->tgts_per_mshr),
       forwardSnoops(p->forward_snoops),
-      isTopLevel(p->is_top_level),
       isReadOnly(p->is_read_only),
       blocked(0),
       order(0),

--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh

@@ -304,11 +304,6 @@ class BaseCache : public MemObject
     /** Do we forward snoops from mem side port through to cpu side port? */
     const bool forwardSnoops;
 
-    /** Is this cache a toplevel cache (e.g. L1, I/O cache). If so we should
-     * never try to forward ownership and similar optimizations to the cpu
-     * side */
-    const bool isTopLevel;
-
     /**
      * Is this cache read only, for example the instruction cache, or
     * table-walker cache. A cache that is read only should never see

--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc

@@ -179,7 +179,15 @@ Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
             blk->trackLoadLocked(pkt);
         }
         pkt->setDataFromBlock(blk->data, blkSize);
-        if (pkt->getSize() == blkSize) {
+        // determine if this read is from a (coherent) cache, or not
+        // by looking at the command type; we could potentially add a
+        // packet attribute such as 'FromCache' to make this check a
+        // bit cleaner
+        if (pkt->cmd == MemCmd::ReadExReq ||
+            pkt->cmd == MemCmd::ReadSharedReq ||
+            pkt->cmd == MemCmd::ReadCleanReq ||
+            pkt->cmd == MemCmd::SCUpgradeFailReq) {
+            assert(pkt->getSize() == blkSize);
             // special handling for coherent block requests from
             // upper-level caches
             if (pkt->needsExclusive()) {
@@ -211,7 +219,7 @@ Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
 
             if (blk->isDirty()) {
                 // special considerations if we're owner:
-                if (!deferred_response && !isTopLevel) {
+                if (!deferred_response) {
                     // if we are responding immediately and can
                     // signal that we're transferring ownership
                     // along with exclusivity, do so
@@ -526,7 +534,6 @@ Cache::promoteWholeLineWrites(PacketPtr pkt)
         (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
         pkt->cmd = MemCmd::WriteLineReq;
         DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
-        assert(isTopLevel); // should only happen at L1 or I/O cache
     }
 }
@@ -696,7 +703,7 @@ Cache::recvTimingReq(PacketPtr pkt)
     // processing happens before any MSHR munging on the behalf of
     // this request because this new Request will be the one stored
     // into the MSHRs, not the original.
-    if (pkt->cmd.isSWPrefetch() && isTopLevel) {
+    if (pkt->cmd.isSWPrefetch()) {
         assert(needsResponse);
         assert(pkt->req->hasPaddr());
         assert(!pkt->req->isUncacheable());
@@ -905,7 +912,6 @@ Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
         // the line in exclusive state, and invalidates all other
         // copies
         cmd = MemCmd::InvalidateReq;
-        assert(isTopLevel);
     } else {
         // block is invalid
         cmd = needsExclusive ? MemCmd::ReadExReq :
@@ -1034,17 +1040,12 @@ Cache::recvAtomic(PacketPtr pkt)
             pkt->makeAtomicResponse();
             pkt->copyError(bus_pkt);
         } else if (pkt->cmd == MemCmd::InvalidateReq) {
-            assert(!isTopLevel);
             if (blk) {
                 // invalidate response to a cache that received
                 // an invalidate request
                 satisfyCpuSideRequest(pkt, blk);
             }
         } else if (pkt->cmd == MemCmd::WriteLineReq) {
-            // invalidate response to the cache that
-            // received the original write-line request
-            assert(isTopLevel);
-
             // note the use of pkt, not bus_pkt here.
 
             // write-line request to the cache that promoted
@@ -1256,7 +1257,7 @@ Cache::recvTimingResp(PacketPtr pkt)
                 completion_time = pkt->headerDelay;
 
                 // Software prefetch handling for cache closest to core
-                if (tgt_pkt->cmd.isSWPrefetch() && isTopLevel) {
+                if (tgt_pkt->cmd.isSWPrefetch()) {
                     // a software prefetch would have already been ack'd immediately
                     // with dummy data so the core would be able to retire it.
                     // this request completes right here, so we deallocate it.
@@ -2148,7 +2149,7 @@ Cache::getNextMSHR()
 bool
 Cache::isCachedAbove(const PacketPtr pkt) const
 {
-    if (isTopLevel)
+    if (!forwardSnoops)
         return false;
     // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
     // Writeback snoops into upper level caches to check for copies of the
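
The final hunk is the one place where the isTopLevel test is replaced
rather than simply dropped: isCachedAbove() now keys on forwardSnoops,
on the grounds that a cache which never forwards snoops to its cpu
side cannot have a cache above it holding copies, so the parameter
that already governs snoop forwarding also answers this question.
Caches such as the walker caches in this patch opt out explicitly, as
in this sketch (class body abridged, names illustrative):

    # Sketch: opting out of upward snooping is all that is needed now;
    # isCachedAbove() returns false when forward_snoops is false.
    class WalkerCache(BaseCache):
        size = '1kB'
        assoc = 2
        forward_snoops = False   # nothing above this cache caches its lines
        is_read_only = True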