mem: Add an option to perform clean writebacks from caches
This patch adds the necessary commands and cache functionality to allow clean writebacks. This functionality is crucial, especially when using exclusive (victim) caches. For example, if read-only L1 instruction caches do not send clean writebacks, there will never be any spills from the L1 to the L2. At the moment the cache model defaults to not sending clean writebacks, and this should possibly be re-evaluated. The implementation of clean writebacks relies on a new packet command, WritebackClean, which acts much like a Writeback (renamed WritebackDirty) and also much like a CleanEvict. On eviction of a clean block the cache sends either a clean evict or a clean writeback, and if any copies are still cached upstream the clean evict/writeback is dropped. Similarly, if a clean evict/writeback reaches a cache that has outstanding MSHRs for the block, the packet is dropped. In the typical case, though, the clean writeback allocates a block in the downstream cache and marks it writable if the evicted block was writable. The patch changes the O3_ARM_v7a L1 cache configuration and the default L1 caches in configs/common/Caches.py.
This commit is contained in:
parent
afa252b0b9
commit
7433d77fcf
11 changed files with 180 additions and 84 deletions
|
@ -55,6 +55,8 @@ class L1Cache(Cache):
|
||||||
|
|
||||||
class L1_ICache(L1Cache):
|
class L1_ICache(L1Cache):
|
||||||
is_read_only = True
|
is_read_only = True
|
||||||
|
# Writeback clean lines as well
|
||||||
|
writeback_clean = True
|
||||||
|
|
||||||
class L1_DCache(L1Cache):
|
class L1_DCache(L1Cache):
|
||||||
pass
|
pass
|
||||||
|
@ -89,3 +91,5 @@ class PageTableWalkerCache(Cache):
|
||||||
is_read_only = False
|
is_read_only = False
|
||||||
else:
|
else:
|
||||||
is_read_only = True
|
is_read_only = True
|
||||||
|
# Writeback clean lines as well
|
||||||
|
writeback_clean = True
|
||||||
|
|
|
@ -151,6 +151,8 @@ class O3_ARM_v7a_ICache(Cache):
|
||||||
assoc = 2
|
assoc = 2
|
||||||
forward_snoops = False
|
forward_snoops = False
|
||||||
is_read_only = True
|
is_read_only = True
|
||||||
|
# Writeback clean lines as well
|
||||||
|
writeback_clean = True
|
||||||
|
|
||||||
# Data Cache
|
# Data Cache
|
||||||
class O3_ARM_v7a_DCache(Cache):
|
class O3_ARM_v7a_DCache(Cache):
|
||||||
|
@ -161,6 +163,8 @@ class O3_ARM_v7a_DCache(Cache):
|
||||||
size = '32kB'
|
size = '32kB'
|
||||||
assoc = 2
|
assoc = 2
|
||||||
write_buffers = 16
|
write_buffers = 16
|
||||||
|
# Consider the L2 a victim cache also for clean lines
|
||||||
|
writeback_clean = True
|
||||||
|
|
||||||
# TLB Cache
|
# TLB Cache
|
||||||
# Use a cache as a L2 TLB
|
# Use a cache as a L2 TLB
|
||||||
|
@ -174,6 +178,8 @@ class O3_ARM_v7aWalkCache(Cache):
|
||||||
write_buffers = 16
|
write_buffers = 16
|
||||||
forward_snoops = False
|
forward_snoops = False
|
||||||
is_read_only = True
|
is_read_only = True
|
||||||
|
# Writeback clean lines as well
|
||||||
|
writeback_clean = True
|
||||||
|
|
||||||
# L2 Cache
|
# L2 Cache
|
||||||
class O3_ARM_v7aL2(Cache):
|
class O3_ARM_v7aL2(Cache):
|
||||||
|
|
|
@ -329,7 +329,7 @@ AbstractMemory::access(PacketPtr pkt)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pkt->cmd == MemCmd::CleanEvict) {
|
if (pkt->cmd == MemCmd::CleanEvict || pkt->cmd == MemCmd::WritebackClean) {
|
||||||
DPRINTF(MemoryAccess, "CleanEvict on 0x%x: not responding\n",
|
DPRINTF(MemoryAccess, "CleanEvict on 0x%x: not responding\n",
|
||||||
pkt->getAddr());
|
pkt->getAddr());
|
||||||
return;
|
return;
|
||||||
|
|
8
src/mem/cache/Cache.py
vendored
8
src/mem/cache/Cache.py
vendored
|
@ -103,3 +103,11 @@ class Cache(BaseCache):
|
||||||
# cache a line is dropped for a mostly exclusive cache.
|
# cache a line is dropped for a mostly exclusive cache.
|
||||||
clusivity = Param.Clusivity('mostly_incl',
|
clusivity = Param.Clusivity('mostly_incl',
|
||||||
"Clusivity with upstream cache")
|
"Clusivity with upstream cache")
|
||||||
|
|
||||||
|
# Determine if this cache sends out writebacks for clean lines, or
|
||||||
|
# simply clean evicts. In cases where a downstream cache is mostly
|
||||||
|
# exclusive with respect to this cache (acting as a victim cache),
|
||||||
|
# the clean writebacks are essential for performance. In general
|
||||||
|
# this should be set to True for anything but the last-level
|
||||||
|
# cache.
|
||||||
|
writeback_clean = Param.Bool(False, "Writeback clean lines")
|
||||||
|
|
3
src/mem/cache/base.hh
vendored
3
src/mem/cache/base.hh
vendored
|
@ -521,9 +521,6 @@ class BaseCache : public MemObject
|
||||||
// should only see writes or clean evicts here
|
// should only see writes or clean evicts here
|
||||||
assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);
|
assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);
|
||||||
|
|
||||||
// if this is a read-only cache we should never see any writes
|
|
||||||
assert(!(isReadOnly && pkt->isWrite()));
|
|
||||||
|
|
||||||
return allocateBufferInternal(&writeBuffer,
|
return allocateBufferInternal(&writeBuffer,
|
||||||
blockAlign(pkt->getAddr()), blkSize,
|
blockAlign(pkt->getAddr()), blkSize,
|
||||||
pkt, time, true);
|
pkt, time, true);
|
||||||
|
|
161
src/mem/cache/cache.cc
vendored
161
src/mem/cache/cache.cc
vendored
|
@ -70,6 +70,7 @@ Cache::Cache(const CacheParams *p)
|
||||||
doFastWrites(true),
|
doFastWrites(true),
|
||||||
prefetchOnAccess(p->prefetch_on_access),
|
prefetchOnAccess(p->prefetch_on_access),
|
||||||
clusivity(p->clusivity),
|
clusivity(p->clusivity),
|
||||||
|
writebackClean(p->writeback_clean),
|
||||||
tempBlockWriteback(nullptr),
|
tempBlockWriteback(nullptr),
|
||||||
writebackTempBlockAtomicEvent(this, false,
|
writebackTempBlockAtomicEvent(this, false,
|
||||||
EventBase::Delayed_Writeback_Pri)
|
EventBase::Delayed_Writeback_Pri)
|
||||||
|
@ -317,7 +318,7 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
|
||||||
// flush and invalidate any existing block
|
// flush and invalidate any existing block
|
||||||
CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
|
CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
|
||||||
if (old_blk && old_blk->isValid()) {
|
if (old_blk && old_blk->isValid()) {
|
||||||
if (old_blk->isDirty())
|
if (old_blk->isDirty() || writebackClean)
|
||||||
writebacks.push_back(writebackBlk(old_blk));
|
writebacks.push_back(writebackBlk(old_blk));
|
||||||
else
|
else
|
||||||
writebacks.push_back(cleanEvictBlk(old_blk));
|
writebacks.push_back(cleanEvictBlk(old_blk));
|
||||||
|
@ -343,7 +344,7 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
|
||||||
blk ? "hit " + blk->print() : "miss");
|
blk ? "hit " + blk->print() : "miss");
|
||||||
|
|
||||||
|
|
||||||
if (pkt->evictingBlock()) {
|
if (pkt->isEviction()) {
|
||||||
// We check for presence of block in above caches before issuing
|
// We check for presence of block in above caches before issuing
|
||||||
// Writeback or CleanEvict to write buffer. Therefore the only
|
// Writeback or CleanEvict to write buffer. Therefore the only
|
||||||
// possible cases can be of a CleanEvict packet coming from above
|
// possible cases can be of a CleanEvict packet coming from above
|
||||||
|
@ -356,26 +357,49 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
|
||||||
if (writeBuffer.findMatches(pkt->getAddr(), pkt->isSecure(),
|
if (writeBuffer.findMatches(pkt->getAddr(), pkt->isSecure(),
|
||||||
outgoing)) {
|
outgoing)) {
|
||||||
assert(outgoing.size() == 1);
|
assert(outgoing.size() == 1);
|
||||||
PacketPtr wbPkt = outgoing[0]->getTarget()->pkt;
|
MSHR *wb_entry = outgoing[0];
|
||||||
assert(pkt->cmd == MemCmd::CleanEvict &&
|
assert(wb_entry->getNumTargets() == 1);
|
||||||
wbPkt->cmd == MemCmd::Writeback);
|
PacketPtr wbPkt = wb_entry->getTarget()->pkt;
|
||||||
// As the CleanEvict is coming from above, it would have snooped
|
assert(wbPkt->isWriteback());
|
||||||
// into other peer caches of the same level while traversing the
|
|
||||||
// crossbar. If a copy of the block had been found, the CleanEvict
|
if (pkt->isCleanEviction()) {
|
||||||
// would have been deleted in the crossbar. Now that the
|
// The CleanEvict and WritebackClean snoops into other
|
||||||
// CleanEvict is here we can be sure none of the other upper level
|
// peer caches of the same level while traversing the
|
||||||
// caches connected to this cache have the block, so we can clear
|
// crossbar. If a copy of the block is found, the
|
||||||
// the BLOCK_CACHED flag in the Writeback if set and discard the
|
// packet is deleted in the crossbar. Hence, none of
|
||||||
// CleanEvict by returning true.
|
// the other upper level caches connected to this
|
||||||
wbPkt->clearBlockCached();
|
// cache have the block, so we can clear the
|
||||||
return true;
|
// BLOCK_CACHED flag in the Writeback if set and
|
||||||
|
// discard the CleanEvict by returning true.
|
||||||
|
wbPkt->clearBlockCached();
|
||||||
|
return true;
|
||||||
|
} else {
|
||||||
|
assert(pkt->cmd == MemCmd::WritebackDirty);
|
||||||
|
// Dirty writeback from above trumps our clean
|
||||||
|
// writeback... discard here
|
||||||
|
// Note: markInService will remove entry from writeback buffer.
|
||||||
|
markInService(wb_entry, false);
|
||||||
|
delete wbPkt;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Writeback handling is special case. We can write the block into
|
// Writeback handling is special case. We can write the block into
|
||||||
// the cache without having a writeable copy (or any copy at all).
|
// the cache without having a writeable copy (or any copy at all).
|
||||||
if (pkt->cmd == MemCmd::Writeback) {
|
if (pkt->isWriteback()) {
|
||||||
assert(blkSize == pkt->getSize());
|
assert(blkSize == pkt->getSize());
|
||||||
|
|
||||||
|
// we could get a clean writeback while we are having
|
||||||
|
// outstanding accesses to a block, do the simple thing for
|
||||||
|
// now and drop the clean writeback so that we do not upset
|
||||||
|
// any ordering/decisions about ownership already taken
|
||||||
|
if (pkt->cmd == MemCmd::WritebackClean &&
|
||||||
|
mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
|
||||||
|
DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
|
||||||
|
"dropping\n", pkt->getAddr());
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
if (blk == NULL) {
|
if (blk == NULL) {
|
||||||
// need to do a replacement
|
// need to do a replacement
|
||||||
blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
|
blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
|
||||||
|
@ -391,7 +415,11 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
|
||||||
blk->status |= BlkSecure;
|
blk->status |= BlkSecure;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
blk->status |= BlkDirty;
|
// only mark the block dirty if we got a writeback command,
|
||||||
|
// and leave it as is for a clean writeback
|
||||||
|
if (pkt->cmd == MemCmd::WritebackDirty) {
|
||||||
|
blk->status |= BlkDirty;
|
||||||
|
}
|
||||||
// if shared is not asserted we got the writeback in modified
|
// if shared is not asserted we got the writeback in modified
|
||||||
// state, if it is asserted we are in the owned state
|
// state, if it is asserted we are in the owned state
|
||||||
if (!pkt->sharedAsserted()) {
|
if (!pkt->sharedAsserted()) {
|
||||||
|
@ -463,7 +491,13 @@ Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
|
||||||
// this is a non-snoop request packet which does not require a
|
// this is a non-snoop request packet which does not require a
|
||||||
// response.
|
// response.
|
||||||
delete wbPkt;
|
delete wbPkt;
|
||||||
|
} else if (wbPkt->cmd == MemCmd::WritebackClean) {
|
||||||
|
// clean writeback, do not send since the block is
|
||||||
|
// still cached above
|
||||||
|
assert(writebackClean);
|
||||||
|
delete wbPkt;
|
||||||
} else {
|
} else {
|
||||||
|
assert(wbPkt->cmd == MemCmd::WritebackDirty);
|
||||||
// Set BLOCK_CACHED flag in Writeback and send below, so that
|
// Set BLOCK_CACHED flag in Writeback and send below, so that
|
||||||
// the Writeback does not reset the bit corresponding to this
|
// the Writeback does not reset the bit corresponding to this
|
||||||
// address in the snoop filter below.
|
// address in the snoop filter below.
|
||||||
|
@ -490,7 +524,7 @@ Cache::doWritebacksAtomic(PacketList& writebacks)
|
||||||
// isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
|
// isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
|
||||||
// and discard CleanEvicts.
|
// and discard CleanEvicts.
|
||||||
if (isCachedAbove(wbPkt, false)) {
|
if (isCachedAbove(wbPkt, false)) {
|
||||||
if (wbPkt->cmd == MemCmd::Writeback) {
|
if (wbPkt->cmd == MemCmd::WritebackDirty) {
|
||||||
// Set BLOCK_CACHED flag in Writeback and send below,
|
// Set BLOCK_CACHED flag in Writeback and send below,
|
||||||
// so that the Writeback does not reset the bit
|
// so that the Writeback does not reset the bit
|
||||||
// corresponding to this address in the snoop filter
|
// corresponding to this address in the snoop filter
|
||||||
|
@ -694,6 +728,10 @@ Cache::recvTimingReq(PacketPtr pkt)
|
||||||
// by access(), that calls accessBlock() function.
|
// by access(), that calls accessBlock() function.
|
||||||
cpuSidePort->schedTimingResp(pkt, request_time, true);
|
cpuSidePort->schedTimingResp(pkt, request_time, true);
|
||||||
} else {
|
} else {
|
||||||
|
DPRINTF(Cache, "%s satisfied %s addr %#llx, no response needed\n",
|
||||||
|
__func__, pkt->cmdString(), pkt->getAddr(),
|
||||||
|
pkt->getSize());
|
||||||
|
|
||||||
// queue the packet for deletion, as the sending cache is
|
// queue the packet for deletion, as the sending cache is
|
||||||
// still relying on it; if the block is found in access(),
|
// still relying on it; if the block is found in access(),
|
||||||
// CleanEvict and Writeback messages will be deleted
|
// CleanEvict and Writeback messages will be deleted
|
||||||
|
@ -765,9 +803,9 @@ Cache::recvTimingReq(PacketPtr pkt)
|
||||||
|
|
||||||
// Coalesce unless it was a software prefetch (see above).
|
// Coalesce unless it was a software prefetch (see above).
|
||||||
if (pkt) {
|
if (pkt) {
|
||||||
assert(pkt->cmd != MemCmd::Writeback);
|
assert(!pkt->isWriteback());
|
||||||
// CleanEvicts corresponding to blocks which have outstanding
|
// CleanEvicts corresponding to blocks which have
|
||||||
// requests in MSHRs can be deleted here.
|
// outstanding requests in MSHRs are simply sunk here
|
||||||
if (pkt->cmd == MemCmd::CleanEvict) {
|
if (pkt->cmd == MemCmd::CleanEvict) {
|
||||||
pendingDelete.reset(pkt);
|
pendingDelete.reset(pkt);
|
||||||
} else {
|
} else {
|
||||||
|
@ -820,7 +858,7 @@ Cache::recvTimingReq(PacketPtr pkt)
|
||||||
mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
|
mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pkt->evictingBlock() ||
|
if (pkt->isEviction() ||
|
||||||
(pkt->req->isUncacheable() && pkt->isWrite())) {
|
(pkt->req->isUncacheable() && pkt->isWrite())) {
|
||||||
// We use forward_time here because there is an
|
// We use forward_time here because there is an
|
||||||
// uncached memory write, forwarded to WriteBuffer.
|
// uncached memory write, forwarded to WriteBuffer.
|
||||||
|
@ -888,7 +926,7 @@ Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
|
||||||
|
|
||||||
if (!blkValid &&
|
if (!blkValid &&
|
||||||
(cpu_pkt->isUpgrade() ||
|
(cpu_pkt->isUpgrade() ||
|
||||||
cpu_pkt->evictingBlock())) {
|
cpu_pkt->isEviction())) {
|
||||||
// Writebacks that weren't allocated in access() and upgrades
|
// Writebacks that weren't allocated in access() and upgrades
|
||||||
// from upper-level caches that missed completely just go
|
// from upper-level caches that missed completely just go
|
||||||
// through.
|
// through.
|
||||||
|
@ -1108,8 +1146,8 @@ Cache::recvAtomic(PacketPtr pkt)
|
||||||
schedule(writebackTempBlockAtomicEvent, curTick());
|
schedule(writebackTempBlockAtomicEvent, curTick());
|
||||||
}
|
}
|
||||||
|
|
||||||
tempBlockWriteback = blk->isDirty() ? writebackBlk(blk) :
|
tempBlockWriteback = (blk->isDirty() || writebackClean) ?
|
||||||
cleanEvictBlk(blk);
|
writebackBlk(blk) : cleanEvictBlk(blk);
|
||||||
blk->invalidate();
|
blk->invalidate();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1458,7 +1496,7 @@ Cache::recvTimingResp(PacketPtr pkt)
|
||||||
// Writebacks/CleanEvicts to write buffer. It specifies the latency to
|
// Writebacks/CleanEvicts to write buffer. It specifies the latency to
|
||||||
// allocate an internal buffer and to schedule an event to the
|
// allocate an internal buffer and to schedule an event to the
|
||||||
// queued port.
|
// queued port.
|
||||||
if (blk->isDirty()) {
|
if (blk->isDirty() || writebackClean) {
|
||||||
PacketPtr wbPkt = writebackBlk(blk);
|
PacketPtr wbPkt = writebackBlk(blk);
|
||||||
allocateWriteBuffer(wbPkt, forward_time);
|
allocateWriteBuffer(wbPkt, forward_time);
|
||||||
// Set BLOCK_CACHED flag if cached above.
|
// Set BLOCK_CACHED flag if cached above.
|
||||||
|
@ -1484,41 +1522,50 @@ Cache::recvTimingResp(PacketPtr pkt)
|
||||||
PacketPtr
|
PacketPtr
|
||||||
Cache::writebackBlk(CacheBlk *blk)
|
Cache::writebackBlk(CacheBlk *blk)
|
||||||
{
|
{
|
||||||
chatty_assert(!isReadOnly, "Writeback from read-only cache");
|
chatty_assert(!isReadOnly || writebackClean,
|
||||||
assert(blk && blk->isValid() && blk->isDirty());
|
"Writeback from read-only cache");
|
||||||
|
assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));
|
||||||
|
|
||||||
writebacks[Request::wbMasterId]++;
|
writebacks[Request::wbMasterId]++;
|
||||||
|
|
||||||
Request *writebackReq =
|
Request *req = new Request(tags->regenerateBlkAddr(blk->tag, blk->set),
|
||||||
new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0,
|
blkSize, 0, Request::wbMasterId);
|
||||||
Request::wbMasterId);
|
|
||||||
if (blk->isSecure())
|
if (blk->isSecure())
|
||||||
writebackReq->setFlags(Request::SECURE);
|
req->setFlags(Request::SECURE);
|
||||||
|
|
||||||
writebackReq->taskId(blk->task_id);
|
req->taskId(blk->task_id);
|
||||||
blk->task_id= ContextSwitchTaskId::Unknown;
|
blk->task_id= ContextSwitchTaskId::Unknown;
|
||||||
blk->tickInserted = curTick();
|
blk->tickInserted = curTick();
|
||||||
|
|
||||||
PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback);
|
PacketPtr pkt =
|
||||||
|
new Packet(req, blk->isDirty() ?
|
||||||
|
MemCmd::WritebackDirty : MemCmd::WritebackClean);
|
||||||
|
|
||||||
|
DPRINTF(Cache, "Create Writeback %#llx writable: %d, dirty: %d\n",
|
||||||
|
pkt->getAddr(), blk->isWritable(), blk->isDirty());
|
||||||
|
|
||||||
if (blk->isWritable()) {
|
if (blk->isWritable()) {
|
||||||
// not asserting shared means we pass the block in modified
|
// not asserting shared means we pass the block in modified
|
||||||
// state, mark our own block non-writeable
|
// state, mark our own block non-writeable
|
||||||
blk->status &= ~BlkWritable;
|
blk->status &= ~BlkWritable;
|
||||||
} else {
|
} else {
|
||||||
// we are in the owned state, tell the receiver
|
// we are in the owned state, tell the receiver
|
||||||
writeback->assertShared();
|
pkt->assertShared();
|
||||||
}
|
}
|
||||||
|
|
||||||
writeback->allocate();
|
// make sure the block is not marked dirty
|
||||||
std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
|
|
||||||
|
|
||||||
blk->status &= ~BlkDirty;
|
blk->status &= ~BlkDirty;
|
||||||
return writeback;
|
|
||||||
|
pkt->allocate();
|
||||||
|
std::memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize);
|
||||||
|
|
||||||
|
return pkt;
|
||||||
}
|
}
|
||||||
|
|
||||||
PacketPtr
|
PacketPtr
|
||||||
Cache::cleanEvictBlk(CacheBlk *blk)
|
Cache::cleanEvictBlk(CacheBlk *blk)
|
||||||
{
|
{
|
||||||
|
assert(!writebackClean);
|
||||||
assert(blk && blk->isValid() && !blk->isDirty());
|
assert(blk && blk->isValid() && !blk->isDirty());
|
||||||
// Creating a zero sized write, a message to the snoop filter
|
// Creating a zero sized write, a message to the snoop filter
|
||||||
Request *req =
|
Request *req =
|
||||||
|
@ -1628,7 +1675,7 @@ Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
|
||||||
|
|
||||||
// Will send up Writeback/CleanEvict snoops via isCachedAbove
|
// Will send up Writeback/CleanEvict snoops via isCachedAbove
|
||||||
// when pushing this writeback list into the write buffer.
|
// when pushing this writeback list into the write buffer.
|
||||||
if (blk->isDirty()) {
|
if (blk->isDirty() || writebackClean) {
|
||||||
// Save writeback packet for handling by caller
|
// Save writeback packet for handling by caller
|
||||||
writebacks.push_back(writebackBlk(blk));
|
writebacks.push_back(writebackBlk(blk));
|
||||||
} else {
|
} else {
|
||||||
|
@ -2051,9 +2098,9 @@ Cache::recvTimingSnoopReq(PacketPtr pkt)
|
||||||
// Writebacks/CleanEvicts.
|
// Writebacks/CleanEvicts.
|
||||||
assert(wb_entry->getNumTargets() == 1);
|
assert(wb_entry->getNumTargets() == 1);
|
||||||
PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
|
PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
|
||||||
assert(wb_pkt->evictingBlock());
|
assert(wb_pkt->isEviction());
|
||||||
|
|
||||||
if (pkt->evictingBlock()) {
|
if (pkt->isEviction()) {
|
||||||
// if the block is found in the write queue, set the BLOCK_CACHED
|
// if the block is found in the write queue, set the BLOCK_CACHED
|
||||||
// flag for Writeback/CleanEvict snoop. On return the snoop will
|
// flag for Writeback/CleanEvict snoop. On return the snoop will
|
||||||
// propagate the BLOCK_CACHED flag in Writeback packets and prevent
|
// propagate the BLOCK_CACHED flag in Writeback packets and prevent
|
||||||
|
@ -2064,7 +2111,7 @@ Cache::recvTimingSnoopReq(PacketPtr pkt)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (wb_pkt->cmd == MemCmd::Writeback) {
|
if (wb_pkt->cmd == MemCmd::WritebackDirty) {
|
||||||
assert(!pkt->memInhibitAsserted());
|
assert(!pkt->memInhibitAsserted());
|
||||||
pkt->assertMemInhibit();
|
pkt->assertMemInhibit();
|
||||||
if (!pkt->needsExclusive()) {
|
if (!pkt->needsExclusive()) {
|
||||||
|
@ -2082,18 +2129,26 @@ Cache::recvTimingSnoopReq(PacketPtr pkt)
|
||||||
doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
|
doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
|
||||||
false, false);
|
false, false);
|
||||||
} else {
|
} else {
|
||||||
assert(wb_pkt->cmd == MemCmd::CleanEvict);
|
// on hitting a clean writeback we play it safe and do not
|
||||||
|
// provide a response, the block may be dirty somewhere
|
||||||
|
// else
|
||||||
|
assert(wb_pkt->isCleanEviction());
|
||||||
// The cache technically holds the block until the
|
// The cache technically holds the block until the
|
||||||
// corresponding CleanEvict message reaches the crossbar
|
// corresponding message reaches the crossbar
|
||||||
// below. Therefore when a snoop encounters a CleanEvict
|
// below. Therefore when a snoop encounters a CleanEvict
|
||||||
// message we must set assertShared (just like when it
|
// or WritebackClean message we must set assertShared
|
||||||
// encounters a Writeback) to avoid the snoop filter
|
// (just like when it encounters a Writeback) to avoid the
|
||||||
// prematurely clearing the holder bit in the crossbar
|
// snoop filter prematurely clearing the holder bit in the
|
||||||
// below
|
// crossbar below
|
||||||
if (!pkt->needsExclusive())
|
if (!pkt->needsExclusive()) {
|
||||||
pkt->assertShared();
|
pkt->assertShared();
|
||||||
else
|
// the writeback is no longer passing exclusivity (the
|
||||||
|
// receiving cache should consider the block owned
|
||||||
|
// rather than modified)
|
||||||
|
wb_pkt->assertShared();
|
||||||
|
} else {
|
||||||
assert(pkt->isInvalidate());
|
assert(pkt->isInvalidate());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pkt->isInvalidate()) {
|
if (pkt->isInvalidate()) {
|
||||||
|
@ -2243,7 +2298,7 @@ Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const
|
||||||
// Assert that packet is either Writeback or CleanEvict and not a
|
// Assert that packet is either Writeback or CleanEvict and not a
|
||||||
// prefetch request because prefetch requests need an MSHR and may
|
// prefetch request because prefetch requests need an MSHR and may
|
||||||
// generate a snoop response.
|
// generate a snoop response.
|
||||||
assert(pkt->evictingBlock());
|
assert(pkt->isEviction());
|
||||||
snoop_pkt.senderState = NULL;
|
snoop_pkt.senderState = NULL;
|
||||||
cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
|
cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
|
||||||
// Writeback/CleanEvict snoops do not generate a snoop response.
|
// Writeback/CleanEvict snoops do not generate a snoop response.
|
||||||
|
@ -2312,7 +2367,7 @@ Cache::getTimingPacket()
|
||||||
mshr->blkAddr);
|
mshr->blkAddr);
|
||||||
|
|
||||||
// Deallocate the mshr target
|
// Deallocate the mshr target
|
||||||
if (tgt_pkt->cmd != MemCmd::Writeback) {
|
if (!tgt_pkt->isWriteback()) {
|
||||||
if (mshr->queue->forceDeallocateTarget(mshr)) {
|
if (mshr->queue->forceDeallocateTarget(mshr)) {
|
||||||
// Clear block if this deallocation resulted freed an
|
// Clear block if this deallocation resulted freed an
|
||||||
// mshr when all had previously been utilized
|
// mshr when all had previously been utilized
|
||||||
|
|
9
src/mem/cache/cache.hh
vendored
9
src/mem/cache/cache.hh
vendored
|
@ -202,6 +202,15 @@ class Cache : public BaseCache
|
||||||
*/
|
*/
|
||||||
const Enums::Clusivity clusivity;
|
const Enums::Clusivity clusivity;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Determine if clean lines should be written back or not. In
|
||||||
|
* cases where a downstream cache is mostly inclusive we likely
|
||||||
|
* want it to act as a victim cache also for lines that have not
|
||||||
|
* been modified. Hence, we cannot simply drop the line (or send a
|
||||||
|
* clean evict), but rather need to send the actual data.
|
||||||
|
*/
|
||||||
|
const bool writebackClean;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Upstream caches need this packet until true is returned, so
|
* Upstream caches need this packet until true is returned, so
|
||||||
* hold it for deletion until a subsequent call
|
* hold it for deletion until a subsequent call
|
||||||
|
|
|
@ -199,7 +199,7 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
|
||||||
pkt->cmdString(), pkt->getAddr(), sf_res.first.size(),
|
pkt->cmdString(), pkt->getAddr(), sf_res.first.size(),
|
||||||
sf_res.second);
|
sf_res.second);
|
||||||
|
|
||||||
if (pkt->evictingBlock()) {
|
if (pkt->isEviction()) {
|
||||||
// for block-evicting packets, i.e. writebacks and
|
// for block-evicting packets, i.e. writebacks and
|
||||||
// clean evictions, there is no need to snoop up, as
|
// clean evictions, there is no need to snoop up, as
|
||||||
// all we do is determine if the block is cached or
|
// all we do is determine if the block is cached or
|
||||||
|
@ -220,10 +220,11 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
|
||||||
}
|
}
|
||||||
|
|
||||||
// forwardTiming snooped into peer caches of the sender, and if
|
// forwardTiming snooped into peer caches of the sender, and if
|
||||||
// this is a clean evict, but the packet is found in a cache, do
|
// this is a clean evict or clean writeback, but the packet is
|
||||||
// not forward it
|
// found in a cache, do not forward it
|
||||||
if (pkt->cmd == MemCmd::CleanEvict && pkt->isBlockCached()) {
|
if ((pkt->cmd == MemCmd::CleanEvict ||
|
||||||
DPRINTF(CoherentXBar, "recvTimingReq: Clean evict 0x%x still cached, "
|
pkt->cmd == MemCmd::WritebackClean) && pkt->isBlockCached()) {
|
||||||
|
DPRINTF(CoherentXBar, "Clean evict/writeback %#llx still cached, "
|
||||||
"not forwarding\n", pkt->getAddr());
|
"not forwarding\n", pkt->getAddr());
|
||||||
|
|
||||||
// update the layer state and schedule an idle event
|
// update the layer state and schedule an idle event
|
||||||
|
@ -634,8 +635,9 @@ CoherentXBar::recvAtomic(PacketPtr pkt, PortID slave_port_id)
|
||||||
// forwardAtomic snooped into peer caches of the sender, and if
|
// forwardAtomic snooped into peer caches of the sender, and if
|
||||||
// this is a clean evict, but the packet is found in a cache, do
|
// this is a clean evict, but the packet is found in a cache, do
|
||||||
// not forward it
|
// not forward it
|
||||||
if (pkt->cmd == MemCmd::CleanEvict && pkt->isBlockCached()) {
|
if ((pkt->cmd == MemCmd::CleanEvict ||
|
||||||
DPRINTF(CoherentXBar, "recvAtomic: Clean evict 0x%x still cached, "
|
pkt->cmd == MemCmd::WritebackClean) && pkt->isBlockCached()) {
|
||||||
|
DPRINTF(CoherentXBar, "Clean evict/writeback %#llx still cached, "
|
||||||
"not forwarding\n", pkt->getAddr());
|
"not forwarding\n", pkt->getAddr());
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -84,11 +84,16 @@ MemCmd::commandInfo[] =
|
||||||
WriteResp, "WriteReq" },
|
WriteResp, "WriteReq" },
|
||||||
/* WriteResp */
|
/* WriteResp */
|
||||||
{ SET3(IsWrite, NeedsExclusive, IsResponse), InvalidCmd, "WriteResp" },
|
{ SET3(IsWrite, NeedsExclusive, IsResponse), InvalidCmd, "WriteResp" },
|
||||||
/* Writeback */
|
/* WritebackDirty */
|
||||||
{ SET4(IsWrite, NeedsExclusive, IsRequest, HasData),
|
{ SET4(IsWrite, IsRequest, IsEviction, HasData),
|
||||||
InvalidCmd, "Writeback" },
|
InvalidCmd, "WritebackDirty" },
|
||||||
|
/* WritebackClean - This allows the upstream cache to writeback a
|
||||||
|
* line to the downstream cache without it being considered
|
||||||
|
* dirty. */
|
||||||
|
{ SET4(IsWrite, IsRequest, IsEviction, HasData),
|
||||||
|
InvalidCmd, "WritebackClean" },
|
||||||
/* CleanEvict */
|
/* CleanEvict */
|
||||||
{ SET1(IsRequest), InvalidCmd, "CleanEvict" },
|
{ SET2(IsRequest, IsEviction), InvalidCmd, "CleanEvict" },
|
||||||
/* SoftPFReq */
|
/* SoftPFReq */
|
||||||
{ SET4(IsRead, IsRequest, IsSWPrefetch, NeedsResponse),
|
{ SET4(IsRead, IsRequest, IsSWPrefetch, NeedsResponse),
|
||||||
SoftPFResp, "SoftPFReq" },
|
SoftPFResp, "SoftPFReq" },
|
||||||
|
|
|
@ -86,7 +86,8 @@ class MemCmd
|
||||||
ReadRespWithInvalidate,
|
ReadRespWithInvalidate,
|
||||||
WriteReq,
|
WriteReq,
|
||||||
WriteResp,
|
WriteResp,
|
||||||
Writeback,
|
WritebackDirty,
|
||||||
|
WritebackClean,
|
||||||
CleanEvict,
|
CleanEvict,
|
||||||
SoftPFReq,
|
SoftPFReq,
|
||||||
HardPFReq,
|
HardPFReq,
|
||||||
|
@ -144,6 +145,7 @@ class MemCmd
|
||||||
IsRequest, //!< Issued by requester
|
IsRequest, //!< Issued by requester
|
||||||
IsResponse, //!< Issue by responder
|
IsResponse, //!< Issue by responder
|
||||||
NeedsResponse, //!< Requester needs response from target
|
NeedsResponse, //!< Requester needs response from target
|
||||||
|
IsEviction,
|
||||||
IsSWPrefetch,
|
IsSWPrefetch,
|
||||||
IsHWPrefetch,
|
IsHWPrefetch,
|
||||||
IsLlsc, //!< Alpha/MIPS LL or SC access
|
IsLlsc, //!< Alpha/MIPS LL or SC access
|
||||||
|
@ -192,6 +194,13 @@ class MemCmd
|
||||||
bool needsExclusive() const { return testCmdAttrib(NeedsExclusive); }
|
bool needsExclusive() const { return testCmdAttrib(NeedsExclusive); }
|
||||||
bool needsResponse() const { return testCmdAttrib(NeedsResponse); }
|
bool needsResponse() const { return testCmdAttrib(NeedsResponse); }
|
||||||
bool isInvalidate() const { return testCmdAttrib(IsInvalidate); }
|
bool isInvalidate() const { return testCmdAttrib(IsInvalidate); }
|
||||||
|
bool isEviction() const { return testCmdAttrib(IsEviction); }
|
||||||
|
|
||||||
|
/**
|
||||||
|
* A writeback is an eviction that carries data.
|
||||||
|
*/
|
||||||
|
bool isWriteback() const { return testCmdAttrib(IsEviction) &&
|
||||||
|
testCmdAttrib(HasData); }
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Check if this particular packet type carries payload data. Note
|
* Check if this particular packet type carries payload data. Note
|
||||||
|
@ -491,6 +500,8 @@ class Packet : public Printable
|
||||||
bool needsExclusive() const { return cmd.needsExclusive(); }
|
bool needsExclusive() const { return cmd.needsExclusive(); }
|
||||||
bool needsResponse() const { return cmd.needsResponse(); }
|
bool needsResponse() const { return cmd.needsResponse(); }
|
||||||
bool isInvalidate() const { return cmd.isInvalidate(); }
|
bool isInvalidate() const { return cmd.isInvalidate(); }
|
||||||
|
bool isEviction() const { return cmd.isEviction(); }
|
||||||
|
bool isWriteback() const { return cmd.isWriteback(); }
|
||||||
bool hasData() const { return cmd.hasData(); }
|
bool hasData() const { return cmd.hasData(); }
|
||||||
bool isLLSC() const { return cmd.isLLSC(); }
|
bool isLLSC() const { return cmd.isLLSC(); }
|
||||||
bool isError() const { return cmd.isError(); }
|
bool isError() const { return cmd.isError(); }
|
||||||
|
@ -1007,16 +1018,6 @@ class Packet : public Printable
|
||||||
other->getPtr<uint8_t>() : NULL);
|
other->getPtr<uint8_t>() : NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Is this request notification of a clean or dirty eviction from the cache.
|
|
||||||
**/
|
|
||||||
bool
|
|
||||||
evictingBlock() const
|
|
||||||
{
|
|
||||||
return (cmd == MemCmd::Writeback ||
|
|
||||||
cmd == MemCmd::CleanEvict);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Does the request need to check for cached copies of the same block
|
* Does the request need to check for cached copies of the same block
|
||||||
* in the memory hierarchy above.
|
* in the memory hierarchy above.
|
||||||
|
@ -1024,8 +1025,17 @@ class Packet : public Printable
|
||||||
bool
|
bool
|
||||||
mustCheckAbove() const
|
mustCheckAbove() const
|
||||||
{
|
{
|
||||||
return (cmd == MemCmd::HardPFReq ||
|
return cmd == MemCmd::HardPFReq || isEviction();
|
||||||
evictingBlock());
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Is this packet a clean eviction, including both actual clean
|
||||||
|
* evict packets, but also clean writebacks.
|
||||||
|
*/
|
||||||
|
bool
|
||||||
|
isCleanEviction() const
|
||||||
|
{
|
||||||
|
return cmd == MemCmd::CleanEvict || cmd == MemCmd::WritebackClean;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
@ -128,7 +128,7 @@ SnoopFilter::lookupRequest(const Packet* cpkt, const SlavePort& slave_port)
|
||||||
__func__, sf_item.requested, sf_item.holder);
|
__func__, sf_item.requested, sf_item.holder);
|
||||||
}
|
}
|
||||||
} else { // if (!cpkt->needsResponse())
|
} else { // if (!cpkt->needsResponse())
|
||||||
assert(cpkt->evictingBlock());
|
assert(cpkt->isEviction());
|
||||||
// make sure that the sender actually had the line
|
// make sure that the sender actually had the line
|
||||||
panic_if(!(sf_item.holder & req_port), "requester %x is not a " \
|
panic_if(!(sf_item.holder & req_port), "requester %x is not a " \
|
||||||
"holder :( SF value %x.%x\n", req_port,
|
"holder :( SF value %x.%x\n", req_port,
|
||||||
|
@ -207,7 +207,7 @@ SnoopFilter::lookupSnoop(const Packet* cpkt)
|
||||||
// not the invalidation. Previously Writebacks did not generate upward
|
// not the invalidation. Previously Writebacks did not generate upward
|
||||||
// snoops so this was never an aissue. Now that Writebacks generate snoops
|
// snoops so this was never an aissue. Now that Writebacks generate snoops
|
||||||
// we need to special case for Writebacks.
|
// we need to special case for Writebacks.
|
||||||
assert(cpkt->cmd == MemCmd::Writeback || cpkt->req->isUncacheable() ||
|
assert(cpkt->isWriteback() || cpkt->req->isUncacheable() ||
|
||||||
(cpkt->isInvalidate() == cpkt->needsExclusive()));
|
(cpkt->isInvalidate() == cpkt->needsExclusive()));
|
||||||
if (cpkt->isInvalidate() && !sf_item.requested) {
|
if (cpkt->isInvalidate() && !sf_item.requested) {
|
||||||
// Early clear of the holder, if no other request is currently going on
|
// Early clear of the holder, if no other request is currently going on
|
||||||
|
@ -270,7 +270,7 @@ SnoopFilter::updateSnoopResponse(const Packet* cpkt,
|
||||||
//assert(sf_item.holder == 0);
|
//assert(sf_item.holder == 0);
|
||||||
sf_item.holder = 0;
|
sf_item.holder = 0;
|
||||||
}
|
}
|
||||||
assert(cpkt->cmd != MemCmd::Writeback);
|
assert(!cpkt->isWriteback());
|
||||||
sf_item.holder |= req_mask;
|
sf_item.holder |= req_mask;
|
||||||
sf_item.requested &= ~req_mask;
|
sf_item.requested &= ~req_mask;
|
||||||
assert(sf_item.requested | sf_item.holder);
|
assert(sf_item.requested | sf_item.holder);
|
||||||
|
|
Loading…
Reference in a new issue