mem: Make cache terminology easier to understand

This patch changes the name of a bunch of packet flags and MSHR member
functions and variables to make the coherency protocol easier to
understand. In addition the patch adds and updates lots of
descriptions, explicitly spelling out assumptions.

The following name changes are made:

* the packet memInhibit flag is renamed to cacheResponding

* the packet sharedAsserted flag is renamed to hasSharers

* the packet NeedsExclusive attribute is renamed to NeedsWritable

* the packet isSupplyExclusive is renamed to responderHadWritable

* the MSHR pendingDirty is renamed to pendingModified

The cache states Modified, Owned, Exclusive, and Shared are also called
out in the cache and MSHR code to make it easier to understand.
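
As a rough illustration of the renamed interface (a standalone sketch, not the gem5 Packet class; the member names follow the list above, everything else is hypothetical), a snooping cache that holds the block dirty would now mark the packet roughly like this:

// Hypothetical stand-in for the renamed packet coherence flags.
struct PacketFlags {
    bool cacheResponding = false;      // was memInhibit
    bool hasSharers = false;           // was sharedAsserted
    bool needsWritable = false;        // was needsExclusive
    bool responderHadWritable = false; // was isSupplyExclusive
};

// A cache that snoops a request and holds the block in Modified or Owned
// state commits to responding; if it also had write permission it lets the
// requester know the line was Modified.
void snoopHit(PacketFlags &pkt, bool blockDirty, bool blockWritable)
{
    if (blockDirty) {
        pkt.cacheResponding = true;          // we will supply the data
        if (blockWritable)
            pkt.responderHadWritable = true; // line was in Modified state
    }
    if (!pkt.needsWritable)
        pkt.hasSharers = true;               // requester gets a Shared copy
}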
Andreas Hansson 2015-12-31 09:32:58 -05:00
parent a317764577
commit 0fcb376e5f
28 changed files with 538 additions and 374 deletions


@ -92,9 +92,9 @@ bool
FullO3CPU<Impl>::IcachePort::recvTimingResp(PacketPtr pkt)
{
DPRINTF(O3CPU, "Fetch unit received timing\n");
// We shouldn't ever get a cacheable block in ownership state
// We shouldn't ever get a cacheable block in Modified state
assert(pkt->req->isUncacheable() ||
!(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
!(pkt->cacheResponding() && !pkt->hasSharers()));
fetch->processCacheCompletion(pkt);
return true;


@ -105,9 +105,9 @@ DmaPort::handleResp(PacketPtr pkt, Tick delay)
bool
DmaPort::recvTimingResp(PacketPtr pkt)
{
// We shouldn't ever get a cacheable block in ownership state
// We shouldn't ever get a cacheable block in Modified state
assert(pkt->req->isUncacheable() ||
!(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
!(pkt->cacheResponding() && !pkt->hasSharers()));
handleResp(pkt);


@ -323,8 +323,8 @@ AbstractMemory::checkLockedAddrList(PacketPtr pkt)
void
AbstractMemory::access(PacketPtr pkt)
{
if (pkt->memInhibitAsserted()) {
DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
if (pkt->cacheResponding()) {
DPRINTF(MemoryAccess, "Cache responding to %#llx: not responding\n",
pkt->getAddr());
return;
}


@ -116,16 +116,15 @@ AddrMapper::recvTimingReq(PacketPtr pkt)
{
Addr orig_addr = pkt->getAddr();
bool needsResponse = pkt->needsResponse();
bool memInhibitAsserted = pkt->memInhibitAsserted();
bool cacheResponding = pkt->cacheResponding();
if (needsResponse && !memInhibitAsserted) {
if (needsResponse && !cacheResponding) {
pkt->pushSenderState(new AddrMapperSenderState(orig_addr));
}
pkt->setAddr(remapAddr(orig_addr));
// Attempt to send the packet (always succeeds for inhibited
// packets)
// Attempt to send the packet
bool successful = masterPort.sendTimingReq(pkt);
// If not successful, restore the address and sender state


@ -154,9 +154,9 @@ Bridge::BridgeSlavePort::recvTimingReq(PacketPtr pkt)
DPRINTF(Bridge, "recvTimingReq: %s addr 0x%x\n",
pkt->cmdString(), pkt->getAddr());
// sink inhibited packets without further action, also discard any
// packet that is not a read or a write
if (pkt->memInhibitAsserted() ||
// if a cache is responding, sink the packet without further
// action, also discard any packet that is not a read or a write
if (pkt->cacheResponding() ||
!(pkt->isWrite() || pkt->isRead())) {
assert(!pkt->needsResponse());
pendingDelete.reset(pkt);


@ -224,11 +224,11 @@ class BaseCache : public MemObject
return mshr;
}
void markInServiceInternal(MSHR *mshr, bool pending_dirty_resp)
void markInServiceInternal(MSHR *mshr, bool pending_modified_resp)
{
MSHRQueue *mq = mshr->queue;
bool wasFull = mq->isFull();
mq->markInService(mshr, pending_dirty_resp);
mq->markInService(mshr, pending_modified_resp);
if (wasFull && !mq->isFull()) {
clearBlocked((BlockedCause)mq->index);
}

src/mem/cache/blk.hh

@ -281,7 +281,7 @@ class CacheBlk
/**
* Pretty-print a tag, and interpret state bits to readable form
* including mapping to a MOESI stat.
* including mapping to a MOESI state.
*
* @return string with basic state information
*/
@ -299,6 +299,17 @@ class CacheBlk
* E 1 0 1
* S 0 0 1
* I 0 0 0
*
* Note that only one cache ever has a block in Modified or
* Owned state, i.e., only one cache owns the block, or
* equivalently has the BlkDirty bit set. However, multiple
* caches on the same path to memory can have a block in the
* Exclusive state (despite the name). Exclusive means this
* cache has the only copy at this level of the hierarchy,
* i.e., there may be copies in caches above this cache (in
* various states), but there are no peers that have copies on
* this branch of the hierarchy, and no caches at or above
* this level on any other branch have copies either.
**/
unsigned state = isWritable() << 2 | isDirty() << 1 | isValid();
char s = '?';
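
As a standalone sketch (not the blk.hh code), the three status bits above decode to a MOESI letter as follows:

// Decode (writable, dirty, valid) into a MOESI state letter, following the
// encoding table in the comment above. Illustrative only.
char moesiState(bool writable, bool dirty, bool valid)
{
    unsigned state = writable << 2 | dirty << 1 | valid;
    switch (state) {
      case 0x7: return 'M'; // writable, dirty, valid
      case 0x3: return 'O'; // dirty, valid
      case 0x5: return 'E'; // writable, valid
      case 0x1: return 'S'; // valid only
      case 0x0: return 'I'; // invalid
      default:  return '?'; // encodings not used by the protocol
    }
}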

src/mem/cache/cache.cc

@ -157,7 +157,7 @@ Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
// can satisfy a following ReadEx anyway since we can rely on the
// Read requester(s) to have buffered the ReadEx snoop and to
// invalidate their blocks after receiving them.
// assert(!pkt->needsExclusive() || blk->isWritable());
// assert(!pkt->needsWritable() || blk->isWritable());
assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
// Check RMW operations first since both isRead() and
@ -165,15 +165,19 @@ Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
if (pkt->cmd == MemCmd::SwapReq) {
cmpAndSwap(blk, pkt);
} else if (pkt->isWrite()) {
// we have the block in a writable state and can go ahead,
// note that the line may be also be considered writable in
// downstream caches along the path to memory, but always
// Exclusive, and never Modified
assert(blk->isWritable());
// Write or WriteLine at the first cache with block in Exclusive
// Write or WriteLine at the first cache with block in writable state
if (blk->checkWrite(pkt)) {
pkt->writeDataToBlock(blk->data, blkSize);
}
// Always mark the line as dirty even if we are a failed
// StoreCond so we supply data to any snoops that have
// appended themselves to this cache before knowing the store
// will fail.
// Always mark the line as dirty (and thus transition to the
// Modified state) even if we are a failed StoreCond so we
// supply data to any snoops that have appended themselves to
// this cache before knowing the store will fail.
blk->status |= BlkDirty;
DPRINTF(Cache, "%s for %s addr %#llx size %d (write)\n", __func__,
pkt->cmdString(), pkt->getAddr(), pkt->getSize());
@ -193,79 +197,80 @@ Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
assert(pkt->getSize() == blkSize);
// special handling for coherent block requests from
// upper-level caches
if (pkt->needsExclusive()) {
if (pkt->needsWritable()) {
// sanity check
assert(pkt->cmd == MemCmd::ReadExReq ||
pkt->cmd == MemCmd::SCUpgradeFailReq);
// if we have a dirty copy, make sure the recipient
// keeps it marked dirty
// keeps it marked dirty (in the modified state)
if (blk->isDirty()) {
pkt->assertMemInhibit();
pkt->setCacheResponding();
}
// on ReadExReq we give up our copy unconditionally,
// even if this cache is mostly inclusive, we may want
// to revisit this
invalidateBlock(blk);
} else if (blk->isWritable() && !pending_downgrade &&
!pkt->sharedAsserted() &&
!pkt->hasSharers() &&
pkt->cmd != MemCmd::ReadCleanReq) {
// we can give the requester an exclusive copy (by not
// asserting shared line) on a read request if:
// - we have an exclusive copy at this level (& below)
// we can give the requester a writable copy on a read
// request if:
// - we have a writable copy at this level (& below)
// - we don't have a pending snoop from below
// signaling another read request
// - no other cache above has a copy (otherwise it
// would have asseretd shared line on request)
// - we are not satisfying an instruction fetch (this
// prevents dirty data in the i-cache)
// would have set hasSharers flag when
// snooping the packet)
// - the read has explicitly asked for a clean
// copy of the line
if (blk->isDirty()) {
// special considerations if we're owner:
if (!deferred_response) {
// if we are responding immediately and can
// signal that we're transferring ownership
// (inhibit set) along with exclusivity
// (shared not set), do so
pkt->assertMemInhibit();
// respond with the line in Modified state
// (cacheResponding set, hasSharers not set)
pkt->setCacheResponding();
// if this cache is mostly inclusive, we keep
// the block as writable (exclusive), and pass
// it upwards as writable and dirty
// (modified), hence we have multiple caches
// considering the same block writable,
// something that we get away with due to the
// fact that: 1) this cache has been
// considered the ordering points and
// responded to all snoops up till now, and 2)
// we always snoop upwards before consulting
// the local cache, both on a normal request
// (snooping done by the crossbar), and on a
// snoop
blk->status &= ~BlkDirty;
// if this cache is mostly exclusive with
// respect to the cache above, drop the block
if (clusivity == Enums::mostly_excl) {
// if this cache is mostly exclusive with
// respect to the cache above, drop the
// block, no need to first unset the dirty
// bit
invalidateBlock(blk);
} else {
// if this cache is mostly inclusive, we
// keep the block in the Exclusive state,
// and pass it upwards as Modified
// (writable and dirty), hence we have
// multiple caches, all on the same path
// towards memory, all considering the
// same block writable, but only one
// considering it Modified
// we get away with multiple caches (on
// the same path to memory) considering
// the block writeable as we always enter
// the cache hierarchy through a cache,
// and first snoop upwards in all other
// branches
blk->status &= ~BlkDirty;
}
} else {
// if we're responding after our own miss,
// there's a window where the recipient didn't
// know it was getting ownership and may not
// have responded to snoops correctly, so we
// can't pass off ownership *or* exclusivity
pkt->assertShared();
// have to respond with a shared line
pkt->setHasSharers();
}
}
} else {
// otherwise only respond with a shared copy
pkt->assertShared();
pkt->setHasSharers();
}
}
} else {
// Upgrade or Invalidate, since we have it Exclusively (E or
// M), we ack then invalidate.
// Upgrade or Invalidate
assert(pkt->isUpgrade() || pkt->isInvalidate());
// for invalidations we could be looking at the temp block
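
Condensed, the read-hit handling above looks roughly like this (a standalone sketch under the stated simplifications, not Cache::satisfyCpuSideRequest; deferred responses and clusivity are reduced to booleans):

// Simplified decision for serving a read from an upstream cache.
// Inputs model the conditions discussed above; outputs model packet flags.
struct ReadHitOutcome {
    bool cacheResponding = false; // we pass the line dirty (Modified)
    bool hasSharers = false;      // requester only gets a Shared copy
    bool dropOurCopy = false;     // mostly-exclusive clusivity drops the block
};

ReadHitOutcome serveReadHit(bool blkWritable, bool blkDirty,
                            bool pendingDowngrade, bool reqHasSharers,
                            bool readClean, bool deferredResponse,
                            bool mostlyExclusive)
{
    ReadHitOutcome out;
    bool canPassWritable = blkWritable && !pendingDowngrade &&
                           !reqHasSharers && !readClean;
    if (canPassWritable) {
        if (blkDirty) {
            if (!deferredResponse) {
                // respond with the line in Modified state; our own copy is
                // either dropped (mostly exclusive) or kept as Exclusive
                out.cacheResponding = true;
                out.dropOurCopy = mostlyExclusive;
            } else {
                // responding after our own miss: only safe to pass Shared
                out.hasSharers = true;
            }
        }
        // clean and writable: requester gets an Exclusive copy (no flags set)
    } else {
        out.hasSharers = true; // only a Shared copy is handed out
    }
    return out;
}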
@ -285,9 +290,9 @@ Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
void
Cache::markInService(MSHR *mshr, bool pending_dirty_resp)
Cache::markInService(MSHR *mshr, bool pending_modified_resp)
{
markInServiceInternal(mshr, pending_dirty_resp);
markInServiceInternal(mshr, pending_modified_resp);
}
/////////////////////////////////////////////////////
@ -420,9 +425,10 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
if (pkt->cmd == MemCmd::WritebackDirty) {
blk->status |= BlkDirty;
}
// if shared is not asserted we got the writeback in modified
// state, if it is asserted we are in the owned state
if (!pkt->sharedAsserted()) {
// if the packet does not have sharers, it is passing
// writable, and we got the writeback in Modified or Exclusive
// state, if not we are in the Owned or Shared state
if (!pkt->hasSharers()) {
blk->status |= BlkWritable;
}
// nothing else to do; writeback doesn't expect response
@ -445,8 +451,7 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
// go to next level.
return false;
} else if ((blk != NULL) &&
(pkt->needsExclusive() ? blk->isWritable()
: blk->isReadable())) {
(pkt->needsWritable() ? blk->isWritable() : blk->isReadable())) {
// OK to satisfy access
incHitCount(pkt);
satisfyCpuSideRequest(pkt, blk);
@ -454,7 +459,7 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
}
// Can't satisfy access normally... either no block (blk == NULL)
// or have block but need exclusive & only have shared.
// or have block but need writable
incMissCount(pkt);
@ -607,18 +612,32 @@ Cache::recvTimingReq(PacketPtr pkt)
promoteWholeLineWrites(pkt);
if (pkt->memInhibitAsserted()) {
if (pkt->cacheResponding()) {
// a cache above us (but not where the packet came from) is
// responding to the request
DPRINTF(Cache, "mem inhibited on addr %#llx (%s): not responding\n",
// responding to the request, in other words it has the line
// in Modified or Owned state
DPRINTF(Cache, "Cache above responding to %#llx (%s): "
"not responding\n",
pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
// if the packet needs exclusive, and the cache that has
// promised to respond (setting the inhibit flag) is not
// providing exclusive (it is in O vs M state), we know that
// there may be other shared copies in the system; go out and
// invalidate them all
if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
// if the packet needs the block to be writable, and the cache
// that has promised to respond (setting the cache responding
// flag) is not providing writable (it is in Owned rather than
// the Modified state), we know that there may be other Shared
// copies in the system; go out and invalidate them all
if (pkt->needsWritable() && !pkt->responderHadWritable()) {
// an upstream cache that had the line in Owned state
// (dirty, but not writable), is responding and thus
// transferring the dirty line from one branch of the
// cache hierarchy to another
// send out an express snoop and invalidate all other
// copies (snooping a packet that needs writable is the
// same as an invalidation), thus turning the Owned line
// into a Modified line, note that we don't invalidate the
// block in the current cache or any other cache on the
// path to memory
// create a downstream express snoop with cleared packet
// flags, there is no need to allocate any data as the
// packet is merely used to co-ordinate state transitions
@ -629,11 +648,12 @@ Cache::recvTimingReq(PacketPtr pkt)
snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;
// make this an instantaneous express snoop, and let the
// other caches in the system know that the packet is
// inhibited, because we have found the authorative copy
// (O) that will supply the right data
// other caches in the system know that another cache
// is responding, because we have found the authoritative
// copy (Modified or Owned) that will supply the right
// data
snoop_pkt->setExpressSnoop();
snoop_pkt->assertMemInhibit();
snoop_pkt->setCacheResponding();
// this express snoop travels towards the memory, and at
// every crossbar it is snooped upwards thus reaching
@ -642,17 +662,20 @@ Cache::recvTimingReq(PacketPtr pkt)
// express snoops always succeed
assert(success);
// main memory will delete the packet
// main memory will delete the snoop packet
}
// queue for deletion, as the sending cache is still relying
// on the packet
// queue for deletion, as opposed to immediate deletion, as
// the sending cache is still relying on the packet
pendingDelete.reset(pkt);
// no need to take any action in this particular cache as the
// caches along the path to memory are allowed to keep lines
// in a shared state, and a cache above us already committed
// to responding
// no need to take any action in this particular cache as an
// upstream cache has already committed to responding, and
// either the packet does not need writable (and we can let
// the cache that set the cache responding flag pass on the
// line without any need for intervention), or if the packet
// needs writable it is provided, or we have already sent out
// any express snoops in the section above
return true;
}
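
The express-snoop decision above boils down to the following check (standalone sketch, illustrative names only):

// Given that an upstream cache has set cacheResponding, do we need to send
// a downstream express snoop to invalidate remaining Shared copies?
bool needsExpressInvalidate(bool needsWritable, bool responderHadWritable)
{
    // responderHadWritable means the responder held the line in Modified
    // state; if it only had it Owned and the requester needs a writable
    // copy, other Shared copies may exist and must be invalidated, turning
    // the Owned line into a Modified one
    return needsWritable && !responderHadWritable;
}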
@ -872,7 +895,7 @@ Cache::recvTimingReq(PacketPtr pkt)
// internally, and have a sufficiently weak memory
// model, this is probably unnecessary, but at some
// point it must have seemed like we needed it...
assert(pkt->needsExclusive());
assert(pkt->needsWritable());
assert(!blk->isWritable());
blk->status &= ~BlkReadable;
}
@ -900,7 +923,7 @@ Cache::recvTimingReq(PacketPtr pkt)
// See comment in cache.hh.
PacketPtr
Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
bool needsExclusive) const
bool needsWritable) const
{
bool blkValid = blk && blk->isValid();
@ -931,9 +954,9 @@ Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
// which will clobber the owned copy.
const bool useUpgrades = true;
if (blkValid && useUpgrades) {
// only reason to be here is that blk is shared
// (read-only) and we need exclusive
assert(needsExclusive);
// only reason to be here is that blk is read only and we need
// it to be writable
assert(needsWritable);
assert(!blk->isWritable());
cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
} else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
@ -945,24 +968,27 @@ Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
cmd = MemCmd::SCUpgradeFailReq;
} else if (cpu_pkt->cmd == MemCmd::WriteLineReq) {
// forward as invalidate to all other caches, this gives us
// the line in exclusive state, and invalidates all other
// the line in Exclusive state, and invalidates all other
// copies
cmd = MemCmd::InvalidateReq;
} else {
// block is invalid
cmd = needsExclusive ? MemCmd::ReadExReq :
cmd = needsWritable ? MemCmd::ReadExReq :
(isReadOnly ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
}
PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);
// if there are sharers in the upper levels, pass that info downstream
if (cpu_pkt->sharedAsserted()) {
// if there are upstream caches that have already marked the
// packet as having sharers (not passing writable), pass that info
// downstream
if (cpu_pkt->hasSharers()) {
// note that cpu_pkt may have spent a considerable time in the
// MSHR queue and that the information could possibly be out
// of date, however, there is no harm in conservatively
// assuming the block is shared
pkt->assertShared();
DPRINTF(Cache, "%s passing shared from %s to %s addr %#llx size %d\n",
// assuming the block has sharers
pkt->setHasSharers();
DPRINTF(Cache, "%s passing hasSharers from %s to %s addr %#llx "
"size %d\n",
__func__, cpu_pkt->cmdString(), pkt->cmdString(),
pkt->getAddr(), pkt->getSize());
}
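
The command selection above can be summarised as follows (a standalone sketch with a hypothetical enum rather than the real MemCmd values; LLSC and failed store-conditional cases are left out):

enum class BusCmd { Upgrade, Invalidate, ReadEx, ReadClean, ReadShared };

// Choose the downstream command for a miss, mirroring the reasoning above.
BusCmd chooseBusCmd(bool blkValid, bool needsWritable,
                    bool wholeLineWrite, bool readOnlyCache)
{
    if (blkValid) {
        // the only reason to be here with a valid block is that it is read
        // only and the request needs it writable
        return BusCmd::Upgrade;
    } else if (wholeLineWrite) {
        // invalidate all other copies; the line ends up Exclusive here
        return BusCmd::Invalidate;
    } else if (needsWritable) {
        return BusCmd::ReadEx;
    } else {
        // a read-only cache must never end up holding dirty data
        return readOnlyCache ? BusCmd::ReadClean : BusCmd::ReadShared;
    }
}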
@ -992,7 +1018,7 @@ Cache::recvAtomic(PacketPtr pkt)
promoteWholeLineWrites(pkt);
if (pkt->memInhibitAsserted()) {
if (pkt->cacheResponding()) {
// have to invalidate ourselves and any lower caches even if
// upper cache will be responding
if (pkt->isInvalidate()) {
@ -1000,19 +1026,21 @@ Cache::recvAtomic(PacketPtr pkt)
if (blk && blk->isValid()) {
tags->invalidate(blk);
blk->invalidate();
DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx (%s):"
DPRINTF(Cache, "Other cache responding to %s on %#llx (%s):"
" invalidating\n",
pkt->cmdString(), pkt->getAddr(),
pkt->isSecure() ? "s" : "ns");
}
if (!last_level_cache) {
DPRINTF(Cache, "forwarding mem-inhibited %s on %#llx (%s)\n",
DPRINTF(Cache, "Other cache responding to %s on %#llx (%s):"
" forwarding\n",
pkt->cmdString(), pkt->getAddr(),
pkt->isSecure() ? "s" : "ns");
lat += ticksToCycles(memSidePort->sendAtomic(pkt));
}
} else {
DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx: not responding\n",
DPRINTF(Cache, "Other cache responding to %s on %#llx: "
"not responding\n",
pkt->cmdString(), pkt->getAddr());
}
@ -1034,7 +1062,7 @@ Cache::recvAtomic(PacketPtr pkt)
if (!satisfied) {
// MISS
PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsWritable());
bool is_forward = (bus_pkt == NULL);
@ -1181,11 +1209,11 @@ Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide)
&& pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize,
blk->data);
// data we have is dirty if marked as such or if valid & ownership
// pending due to outstanding UpgradeReq
// data we have is dirty if marked as such or if we have an
// in-service MSHR that is pending a modified line
bool have_dirty =
have_data && (blk->isDirty() ||
(mshr && mshr->inService && mshr->isPendingDirty()));
(mshr && mshr->inService && mshr->isPendingModified()));
bool done = have_dirty
|| cpuSidePort->checkFunctional(pkt)
@ -1281,9 +1309,10 @@ Cache::recvTimingResp(PacketPtr pkt)
miss_latency;
}
// upgrade deferred targets if we got exclusive
if (!pkt->sharedAsserted()) {
mshr->promoteExclusive();
// upgrade deferred targets if the response has no sharers, and is
// thus passing writable
if (!pkt->hasSharers()) {
mshr->promoteWritable();
}
bool is_fill = !mshr->isForward &&
@ -1335,9 +1364,9 @@ Cache::recvTimingResp(PacketPtr pkt)
// from above.
if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
assert(!is_error);
// we got the block in exclusive state, so promote any
// deferred targets if possible
mshr->promoteExclusive();
// we got the block in a writable state, so promote
// any deferred targets if possible
mshr->promoteWritable();
// NB: we use the original packet here and not the response!
blk = handleFill(tgt_pkt, blk, writebacks, mshr->allocOnFill);
assert(blk != NULL);
@ -1538,8 +1567,8 @@ Cache::writebackBlk(CacheBlk *blk)
// state, mark our own block non-writeable
blk->status &= ~BlkWritable;
} else {
// we are in the owned state, tell the receiver
pkt->assertShared();
// we are in the Owned state, tell the receiver
pkt->setHasSharers();
}
// make sure the block is not marked dirty
@ -1652,7 +1681,7 @@ Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
// must be an outstanding upgrade request
// on a block we're about to replace...
assert(!blk->isWritable() || blk->isDirty());
assert(repl_mshr->needsExclusive());
assert(repl_mshr->needsWritable());
// too hard to replace block with transient state
// allocation failed, block not inserted
return NULL;
@ -1753,27 +1782,30 @@ Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
// marked as writable as part of the fill, and then later marked
// dirty as part of satisfyCpuSideRequest
if (pkt->cmd == MemCmd::WriteLineReq) {
assert(!pkt->sharedAsserted());
assert(!pkt->hasSharers());
// at the moment other caches do not respond to the
// invalidation requests corresponding to a whole-line write
assert(!pkt->memInhibitAsserted());
assert(!pkt->cacheResponding());
}
if (!pkt->sharedAsserted()) {
// we could get non-shared responses from memory (rather than
// a cache) even in a read-only cache, note that we set this
// bit even for a read-only cache as we use it to represent
// the exclusive state
// here we deal with setting the appropriate state of the line,
// and we start by looking at the hasSharers flag, and ignore the
// cacheResponding flag (normally signalling dirty data) if the
// packet has sharers, thus the line is never allocated as Owned
// (dirty but not writable), and always ends up being either
// Shared, Exclusive or Modified, see Packet::setCacheResponding
// for more details
if (!pkt->hasSharers()) {
// we could get a writable line from memory (rather than a
// cache) even in a read-only cache, note that we set this bit
// even for a read-only cache, possibly revisit this decision
blk->status |= BlkWritable;
// If we got this via cache-to-cache transfer (i.e., from a
// cache that was an owner) and took away that owner's copy,
// then we need to write it back. Normally this happens
// anyway as a side effect of getting a copy to write it, but
// there are cases (such as failed store conditionals or
// compare-and-swaps) where we'll demand an exclusive copy but
// end up not writing it.
if (pkt->memInhibitAsserted()) {
// check if we got this via cache-to-cache transfer (i.e., from a
// cache that had the block in Modified or Owned state)
if (pkt->cacheResponding()) {
// we got the block in Modified state, and invalidated the
// owners copy
blk->status |= BlkDirty;
chatty_assert(!isReadOnly, "Should never see dirty snoop response "
@ -1827,7 +1859,7 @@ Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
pkt = new Packet(req_pkt, false, req_pkt->isRead());
assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
pkt->sharedAsserted());
pkt->hasSharers());
pkt->makeTimingResponse();
if (pkt->isRead()) {
pkt->setDataFromBlock(blk_data, blkSize);
@ -1835,11 +1867,11 @@ Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
// Assume we defer a response to a read from a far-away cache
// A, then later defer a ReadExcl from a cache B on the same
// bus as us. We'll assert MemInhibit in both cases, but in
// the latter case MemInhibit will keep the invalidation from
// reaching cache A. This special response tells cache A that
// it gets the block to satisfy its read, but must immediately
// invalidate it.
// bus as us. We'll assert cacheResponding in both cases, but
// in the latter case cacheResponding will keep the
// invalidation from reaching cache A. This special response
// tells cache A that it gets the block to satisfy its read,
// but must immediately invalidate it.
pkt->cmd = MemCmd::ReadRespWithInvalidate;
}
// Here we consider forward_time, paying for just forward latency and
@ -1870,7 +1902,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
// responds in atomic mode, so remember a few things about the
// original packet up front
bool invalidate = pkt->isInvalidate();
bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
bool M5_VAR_USED needs_writable = pkt->needsWritable();
uint32_t snoop_delay = 0;
@ -1878,7 +1910,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
// first propagate snoop upward to see if anyone above us wants to
// handle it. save & restore packet src since it will get
// rewritten to be relative to cpu-side bus (if any)
bool alreadyResponded = pkt->memInhibitAsserted();
bool alreadyResponded = pkt->cacheResponding();
if (is_timing) {
// copy the packet so that we can clear any flags before
// forwarding it upwards, we also allocate data (passing
@ -1896,13 +1928,15 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
// cache
snoop_delay += snoopPkt.headerDelay;
if (snoopPkt.memInhibitAsserted()) {
if (snoopPkt.cacheResponding()) {
// cache-to-cache response from some upper cache
assert(!alreadyResponded);
pkt->assertMemInhibit();
pkt->setCacheResponding();
}
if (snoopPkt.sharedAsserted()) {
pkt->assertShared();
// upstream cache has the block, or has an outstanding
// MSHR, pass the flag on
if (snoopPkt.hasSharers()) {
pkt->setHasSharers();
}
// If this request is a prefetch or clean evict and an upper level
// signals block present, make sure to propagate the block
@ -1912,7 +1946,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
}
} else {
cpuSidePort->sendAtomicSnoop(pkt);
if (!alreadyResponded && pkt->memInhibitAsserted()) {
if (!alreadyResponded && pkt->cacheResponding()) {
// cache-to-cache response from some upper cache:
// forward response to original requester
assert(pkt->isResponse());
@ -1941,7 +1975,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
// invalidation itself is taken care of below.
bool respond = blk->isDirty() && pkt->needsResponse() &&
pkt->cmd != MemCmd::InvalidateReq;
bool have_exclusive = blk->isWritable();
bool have_writable = blk->isWritable();
// Invalidate any prefetch's from below that would strip write permissions
// MemCmd::HardPFReq is only observed by upstream caches. After missing
@ -1955,31 +1989,37 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
}
if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
// reading non-exclusive shared data, note that we retain
// the block in owned state if it is dirty, with the response
// taken care of below, and otherwhise simply downgrade to
// shared
assert(!needs_exclusive);
pkt->assertShared();
// reading without requiring the line in a writable state,
// note that we retain the block as Owned if it is Modified
// (dirty data), with the response taken care of below, and
// otherwise simply downgrade from Exclusive to Shared (or
// remain in Shared)
assert(!needs_writable);
pkt->setHasSharers();
blk->status &= ~BlkWritable;
}
if (respond) {
// prevent anyone else from responding, cache as well as
// memory, and also prevent any memory from even seeing the
// request (with current inhibited semantics), note that this
// applies both to reads and writes and that for writes it
// works thanks to the fact that we still have dirty data and
// will write it back at a later point
assert(!pkt->memInhibitAsserted());
pkt->assertMemInhibit();
if (have_exclusive) {
// request
pkt->setCacheResponding();
if (have_writable) {
// inform the cache hierarchy that this cache had the line
// in the Modified state so that we avoid unnecessary
// invalidations (see Packet::setResponderHadWritable)
pkt->setResponderHadWritable();
// in the case of an uncacheable request there is no point
// in setting the exclusive flag, but since the recipient
// does not care there is no harm in doing so, in any case
// it is just a hint
pkt->setSupplyExclusive();
// in setting the responderHadWritable flag, but since the
// recipient does not care there is no harm in doing so
} else {
// if the packet has needsWritable set we invalidate our
// copy below and all other copies will be invalidated
// through express snoops, and if needsWritable is not set
// we already called setHasSharers above
}
if (is_timing) {
doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
} else {
@ -2090,18 +2130,19 @@ Cache::recvTimingSnoopReq(PacketPtr pkt)
}
if (wb_pkt->cmd == MemCmd::WritebackDirty) {
assert(!pkt->memInhibitAsserted());
pkt->assertMemInhibit();
if (!pkt->needsExclusive()) {
pkt->assertShared();
// the writeback is no longer passing exclusivity (the
// receiving cache should consider the block owned
// rather than modified)
wb_pkt->assertShared();
// we have dirty data, and so will proceed to respond
pkt->setCacheResponding();
if (!pkt->needsWritable()) {
// the packet should end up in the Shared state (non
// writable) on the completion of the fill
pkt->setHasSharers();
// similarly, the writeback is no longer passing
// writeable (the receiving cache should consider the
// block Owned rather than Modified)
wb_pkt->setHasSharers();
} else {
// if we're not asserting the shared line, we need to
// invalidate our copy. we'll do that below as long as
// the packet's invalidate flag is set...
// we need to invalidate our copy. we do that
// below.
assert(pkt->isInvalidate());
}
doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
@ -2114,16 +2155,16 @@ Cache::recvTimingSnoopReq(PacketPtr pkt)
// The cache technically holds the block until the
// corresponding message reaches the crossbar
// below. Therefore when a snoop encounters a CleanEvict
// or WritebackClean message we must set assertShared
// (just like when it encounters a Writeback) to avoid the
// snoop filter prematurely clearing the holder bit in the
// crossbar below
if (!pkt->needsExclusive()) {
pkt->assertShared();
// the writeback is no longer passing exclusivity (the
// receiving cache should consider the block owned
// rather than modified)
wb_pkt->assertShared();
// or WritebackClean message we must call
// setHasSharers (just like when it encounters a
// Writeback) to avoid the snoop filter prematurely
// clearing the holder bit in the crossbar below
if (!pkt->needsWritable()) {
pkt->setHasSharers();
// the writeback is no longer passing writeable (the
// receiving cache should consider the block Owned
// rather than Modified)
wb_pkt->setHasSharers();
} else {
assert(pkt->isInvalidate());
}
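
A compact model of the writeback-snoop interaction above (standalone sketch; the two flag structs stand in for the snoop packet and the queued writeback packet, and the actual data response is omitted):

// Minimal stand-ins for the relevant packet flags.
struct SnoopFlags { bool cacheResponding = false; bool hasSharers = false; };
struct WritebackFlags { bool hasSharers = false; };

// A snoop hits a writeback still queued in this cache.
void snoopHitsWriteback(SnoopFlags &snoop, WritebackFlags &wb,
                        bool writebackDirty, bool snoopNeedsWritable)
{
    if (writebackDirty) {
        // we hold dirty data and will respond to the snoop
        snoop.cacheResponding = true;
    }
    if (!snoopNeedsWritable) {
        // the requester ends up with a Shared copy...
        snoop.hasSharers = true;
        // ...and the writeback no longer passes writable, so the cache
        // below should allocate the line as Owned rather than Modified
        wb.hasSharers = true;
    }
    // if the snoop does need a writable copy, our copy (the writeback) is
    // invalidated instead; not modelled here
}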
@ -2280,7 +2321,7 @@ Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const
snoop_pkt.senderState = NULL;
cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
// Writeback/CleanEvict snoops do not generate a snoop response.
assert(!(snoop_pkt.memInhibitAsserted()));
assert(!(snoop_pkt.cacheResponding()));
return snoop_pkt.isBlockCached();
} else {
cpuSidePort->sendAtomicSnoop(pkt);
@ -2327,18 +2368,21 @@ Cache::getTimingPacket()
// the MSHRs and when it was selected to be sent or if the
// prefetch was squashed by an upper cache.
// It is important to check memInhibitAsserted before
// prefetchSquashed. If another cache has asserted MEM_INGIBIT, it
// will be sending a response which will arrive at the MSHR
// allocated ofr this request. Checking the prefetchSquash first
// may result in the MSHR being prematurely deallocated.
if (snoop_pkt.memInhibitAsserted()) {
// It is important to check cacheResponding before
// prefetchSquashed. If another cache has committed to
// responding, it will be sending a dirty response which will
// arrive at the MSHR allocated for this request. Checking the
// prefetchSquash first may result in the MSHR being
// prematurely deallocated.
if (snoop_pkt.cacheResponding()) {
auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
assert(r.second);
// If we are getting a non-shared response it is dirty
bool pending_dirty_resp = !snoop_pkt.sharedAsserted();
markInService(mshr, pending_dirty_resp);
// if we are getting a snoop response with no sharers it
// will be allocated as Modified
bool pending_modified_resp = !snoop_pkt.hasSharers();
markInService(mshr, pending_modified_resp);
DPRINTF(Cache, "Upward snoop of prefetch for addr"
" %#x (%s) hit\n",
tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
@ -2364,7 +2408,7 @@ Cache::getTimingPacket()
assert(tags->findBlock(mshr->blkAddr, mshr->isSecure) == NULL);
pkt = tgt_pkt;
} else {
pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
pkt = getBusPacket(tgt_pkt, blk, mshr->needsWritable());
mshr->isForward = (pkt == NULL);
@ -2454,10 +2498,11 @@ Cache::CpuSidePort::recvTimingReq(PacketPtr pkt)
bool success = false;
// always let inhibited requests through, even if blocked,
// ultimately we should check if this is an express snoop, but at
// the moment that flag is only set in the cache itself
if (pkt->memInhibitAsserted()) {
// always let packets through if an upstream cache has committed
// to responding, even if blocked (we should technically look at
// the isExpressSnoop flag, but it is set by the cache itself, and
// consequently we have to rely on the cacheResponding flag)
if (pkt->cacheResponding()) {
// do not change the current retry state
bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt);
assert(bypass_success);
@ -2597,18 +2642,16 @@ Cache::CacheReqPacketQueue::sendDeferredPacket()
// it gets retried
} else {
// As part of the call to sendTimingReq the packet is
// forwarded to all neighbouring caches (and any
// caches above them) as a snoop. The packet is also
// sent to any potential cache below as the
// interconnect is not allowed to buffer the
// packet. Thus at this point we know if any of the
// neighbouring, or the downstream cache is
// responding, and if so, if it is with a dirty line
// or not.
bool pending_dirty_resp = !pkt->sharedAsserted() &&
pkt->memInhibitAsserted();
// forwarded to all neighbouring caches (and any caches
// above them) as a snoop. Thus at this point we know if
// any of the neighbouring caches are responding, and if
// so, we know it is dirty, and we can determine if it is
// being passed as Modified, making our MSHR the ordering
// point
bool pending_modified_resp = !pkt->hasSharers() &&
pkt->cacheResponding();
cache.markInService(mshr, pending_dirty_resp);
cache.markInService(mshr, pending_modified_resp);
}
}
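
The flag combination above, as a standalone sketch:

// After the request has snooped all neighbouring caches, decide whether the
// eventual response carries the line in Modified state, which makes this
// MSHR the ordering point. Illustrative only.
bool responseWillBeModified(bool hasSharers, bool cacheResponding)
{
    // a cache is responding with dirty data, and no cache kept a copy that
    // would force the line down to Owned or Shared
    return cacheResponding && !hasSharers;
}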


@ -472,13 +472,14 @@ class Cache : public BaseCache
PacketPtr getTimingPacket();
/**
* Marks a request as in service (sent on the bus). This can have
* side effect since storage for no response commands is
* deallocated once they are successfully sent. Also remember if
* we are expecting a dirty response from another cache,
* effectively making this MSHR the ordering point.
* Marks a request as in service (sent downstream in the memory
* system). This can have side effect since storage for no
* response commands is deallocated once they are successfully
* sent. Also remember if we are expecting a Modified (dirty and
* writable) response from another cache, effectively making this
* MSHR the ordering point.
*/
void markInService(MSHR *mshr, bool pending_dirty_resp);
void markInService(MSHR *mshr, bool pending_modified_resp);
/**
* Return whether there are any outstanding misses.

src/mem/cache/mshr.cc

@ -62,7 +62,7 @@
using namespace std;
MSHR::MSHR() : readyTime(0), _isUncacheable(false), downstreamPending(false),
pendingDirty(false),
pendingModified(false),
postInvalidate(false), postDowngrade(false),
queue(NULL), order(0), blkAddr(0),
blkSize(0), isSecure(false), inService(false),
@ -73,7 +73,7 @@ MSHR::MSHR() : readyTime(0), _isUncacheable(false), downstreamPending(false),
MSHR::TargetList::TargetList()
: needsExclusive(false), hasUpgrade(false)
: needsWritable(false), hasUpgrade(false)
{}
@ -82,8 +82,8 @@ MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
Counter order, Target::Source source, bool markPending)
{
if (source != Target::FromSnoop) {
if (pkt->needsExclusive()) {
needsExclusive = true;
if (pkt->needsWritable()) {
needsWritable = true;
}
// StoreCondReq is effectively an upgrade if it's in an MSHR
@ -238,7 +238,7 @@ MSHR::clearDownstreamPending()
}
bool
MSHR::markInService(bool pending_dirty_resp)
MSHR::markInService(bool pending_modified_resp)
{
assert(!inService);
if (isForwardNoResponse()) {
@ -250,7 +250,7 @@ MSHR::markInService(bool pending_dirty_resp)
}
inService = true;
pendingDirty = targets.needsExclusive || pending_dirty_resp;
pendingModified = targets.needsWritable || pending_modified_resp;
postInvalidate = postDowngrade = false;
if (!downstreamPending) {
@ -297,14 +297,14 @@ MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order,
// - there are other targets already deferred
// - there's a pending invalidate to be applied after the response
// comes back (but before this target is processed)
// - this target requires an exclusive block and either we're not
// getting an exclusive block back or we have already snooped
// another read request that will downgrade our exclusive block
// to shared
// - this target requires a writable block and either we're not
// getting a writable block back or we have already snooped
// another read request that will downgrade our writable block
// to non-writable (Shared or Owned)
if (inService &&
(!deferredTargets.empty() || hasPostInvalidate() ||
(pkt->needsExclusive() &&
(!isPendingDirty() || hasPostDowngrade() || isForward)))) {
(pkt->needsWritable() &&
(!isPendingModified() || hasPostDowngrade() || isForward)))) {
// need to put on deferred list
if (hasPostInvalidate())
replaceUpgrade(pkt);
@ -324,11 +324,11 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
pkt->cmdString(), pkt->getAddr(), pkt->getSize());
// when we snoop packets the needsExclusive and isInvalidate flags
// when we snoop packets the needsWritable and isInvalidate flags
// should always be the same, however, this assumes that we never
// snoop writes as they are currently not marked as invalidations
panic_if(pkt->needsExclusive() != pkt->isInvalidate(),
"%s got snoop %s to addr %#llx where needsExclusive, "
panic_if(pkt->needsWritable() != pkt->isInvalidate(),
"%s got snoop %s to addr %#llx where needsWritable, "
"does not match isInvalidate", name(), pkt->cmdString(),
pkt->getAddr());
@ -346,7 +346,7 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
// That is, even though the upper-level cache got out on its
// local bus first, some other invalidating transaction
// reached the global bus before the upgrade did.
if (pkt->needsExclusive()) {
if (pkt->needsWritable()) {
targets.replaceUpgrades();
deferredTargets.replaceUpgrades();
}
@ -356,7 +356,7 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
// From here on down, the request issued by this MSHR logically
// precedes the request we're snooping.
if (pkt->needsExclusive()) {
if (pkt->needsWritable()) {
// snooped request still precedes the re-request we'll have to
// issue for deferred targets, if any...
deferredTargets.replaceUpgrades();
@ -369,17 +369,18 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
return true;
}
if (isPendingDirty() || pkt->isInvalidate()) {
if (isPendingModified() || pkt->isInvalidate()) {
// We need to save and replay the packet in two cases:
// 1. We're awaiting an exclusive copy, so ownership is pending,
// and we need to deal with the snoop after we receive data.
// 1. We're awaiting a writable copy (Modified or Exclusive),
// so this MSHR is the ordering point, and we need to respond
// after we receive data.
// 2. It's an invalidation (e.g., UpgradeReq), and we need
// to forward the snoop up the hierarchy after the current
// transaction completes.
// Start by determining if we will eventually respond or not,
// matching the conditions checked in Cache::handleSnoop
bool will_respond = isPendingDirty() && pkt->needsResponse() &&
bool will_respond = isPendingModified() && pkt->needsResponse() &&
pkt->cmd != MemCmd::InvalidateReq;
// The packet we are snooping may be deleted by the time we
@ -395,29 +396,39 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
PacketPtr cp_pkt = will_respond ? new Packet(pkt, true, true) :
new Packet(new Request(*pkt->req), pkt->cmd);
if (isPendingDirty()) {
// The new packet will need to get the response from the
// MSHR already queued up here
pkt->assertMemInhibit();
if (isPendingModified()) {
// we are the ordering point, and will consequently
// respond, and depending on whether the packet
// needsWritable or not we either pass a Shared line or a
// Modified line
pkt->setCacheResponding();
// inform the cache hierarchy that this cache had the line
// in the Modified state, even if the response is passed
// as Shared (and thus non-writable)
pkt->setResponderHadWritable();
// in the case of an uncacheable request there is no need
// to set the exclusive flag, but since the recipient does
// not care there is no harm in doing so
pkt->setSupplyExclusive();
// to set the responderHadWritable flag, but since the
// recipient does not care there is no harm in doing so
}
targets.add(cp_pkt, curTick(), _order, Target::FromSnoop,
downstreamPending && targets.needsExclusive);
downstreamPending && targets.needsWritable);
if (pkt->needsExclusive()) {
if (pkt->needsWritable()) {
// This transaction will take away our pending copy
postInvalidate = true;
}
}
if (!pkt->needsExclusive() && !pkt->req->isUncacheable()) {
if (!pkt->needsWritable() && !pkt->req->isUncacheable()) {
// This transaction will get a read-shared copy, downgrading
// our copy if we had an exclusive one
// our copy if we had a writable one
postDowngrade = true;
pkt->assertShared();
// make sure that any downstream cache does not respond with a
// writable (and dirty) copy even if it has one, unless it was
// explicitly asked for one
pkt->setHasSharers();
}
return true;
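
A minimal model of how an in-service MSHR reacts to a snoop, following the flow above (all types and names are illustrative, not the MSHR class; the save-and-replay of the snoop packet is left out):

struct MshrSnoopResult {
    bool cacheResponding = false;      // this MSHR is the ordering point
    bool responderHadWritable = false; // line will arrive here as Modified
    bool hasSharers = false;           // downstream must not pass writable
    bool postInvalidate = false;       // give up our copy once it arrives
    bool postDowngrade = false;        // downgrade our copy once it arrives
};

MshrSnoopResult mshrSnoop(bool pendingModified, bool snoopNeedsWritable,
                          bool snoopUncacheable)
{
    MshrSnoopResult r;
    if (pendingModified) {
        // we will receive ownership, so we respond to the snoop later
        r.cacheResponding = true;
        r.responderHadWritable = true;
    }
    if (snoopNeedsWritable) {
        // the snooped transaction takes away our pending copy
        r.postInvalidate = true;
    } else if (!snoopUncacheable) {
        // the snooper only reads: keep our copy but downgrade it, and make
        // sure no downstream cache hands out a writable copy
        r.postDowngrade = true;
        r.hasSharers = true;
    }
    return r;
}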
@ -446,20 +457,19 @@ MSHR::promoteDeferredTargets()
void
MSHR::promoteExclusive()
MSHR::promoteWritable()
{
if (deferredTargets.needsExclusive &&
if (deferredTargets.needsWritable &&
!(hasPostInvalidate() || hasPostDowngrade())) {
// We got an exclusive response, but we have deferred targets
// which are waiting to request an exclusive copy (not because
// We got a writable response, but we have deferred targets
// which are waiting to request a writable copy (not because
// of a pending invalidate). This can happen if the original
// request was for a read-only (non-exclusive) block, but we
// got an exclusive copy anyway because of the E part of the
// MOESI/MESI protocol. Since we got the exclusive copy
// there's no need to defer the targets, so move them up to
// the regular target list.
assert(!targets.needsExclusive);
targets.needsExclusive = true;
// request was for a read-only block, but we got a writable
// response anyway. Since we got the writable copy there's no
// need to defer the targets, so move them up to the regular
// target list.
assert(!targets.needsWritable);
targets.needsWritable = true;
// if any of the deferred targets were upper-level cache
// requests marked downstreamPending, need to clear that
assert(!downstreamPending); // not pending here anymore
@ -496,7 +506,7 @@ MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
isForward ? "Forward" : "",
allocOnFill ? "AllocOnFill" : "",
isForwardNoResponse() ? "ForwNoResp" : "",
needsExclusive() ? "Excl" : "",
needsWritable() ? "Wrtbl" : "",
_isUncacheable ? "Unc" : "",
inService ? "InSvc" : "",
downstreamPending ? "DwnPend" : "",

src/mem/cache/mshr.hh

@ -80,8 +80,31 @@ class MSHR : public Packet::SenderState, public Printable
/** Flag set by downstream caches */
bool downstreamPending;
/** Will we have a dirty copy after this request? */
bool pendingDirty;
/**
* Here we use one flag to track both if:
*
* 1. We are going to become owner or not, i.e., we will get the
* block in an ownership state (Owned or Modified) with BlkDirty
* set. This determines whether or not we are going to become the
* responder and ordering point for future requests that we snoop.
*
* 2. We know that we are going to get a writable block, i.e. we
* will get the block in writable state (Exclusive or Modified
* state) with BlkWritable set. That determines whether additional
* targets with needsWritable set will be able to be satisfied, or
* if not should be put on the deferred list to possibly wait for
* another request that does give us writable access.
*
* Condition 2 is actually just a shortcut that saves us from
* possibly building a deferred target list and calling
* promoteWritable() every time we get a writable block. Condition
* 1, tracking ownership, is what is important. However, we never
* receive ownership without marking the block dirty, and
* consequently use pendingModified to track both ownership and
* writability rather than having separate pendingDirty and
* pendingWritable flags.
*/
bool pendingModified;
/** Did we snoop an invalidate while waiting for data? */
bool postInvalidate;
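
Two small sketches of how the flag is used (illustrative only; the full conditions live in MSHR::markInService and MSHR::allocateTarget, shown elsewhere in this diff):

// Set when the MSHR goes in service: either our own targets asked for a
// writable copy, or a peer cache has committed to respond with the line in
// Modified state.
bool computePendingModified(bool targetsNeedWritable, bool pendingModifiedResp)
{
    return targetsNeedWritable || pendingModifiedResp;
}

// Checked when a later target arrives: a target that needs a writable copy
// must be deferred if the response in flight is not going to be writable
// (other deferral conditions are omitted here).
bool deferWritableTarget(bool targetNeedsWritable, bool pendingModified,
                         bool postDowngrade)
{
    return targetNeedsWritable && (!pendingModified || postDowngrade);
}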
@ -118,12 +141,12 @@ class MSHR : public Packet::SenderState, public Printable
class TargetList : public std::list<Target> {
public:
bool needsExclusive;
bool needsWritable;
bool hasUpgrade;
TargetList();
void resetFlags() { needsExclusive = hasUpgrade = false; }
bool isReset() const { return !needsExclusive && !hasUpgrade; }
void resetFlags() { needsWritable = hasUpgrade = false; }
bool isReset() const { return !needsWritable && !hasUpgrade; }
void add(PacketPtr pkt, Tick readyTime, Counter order,
Target::Source source, bool markPending);
void replaceUpgrades();
@ -169,11 +192,11 @@ class MSHR : public Packet::SenderState, public Printable
* flags are accessed improperly.
*/
/** True if we need to get an exclusive copy of the block. */
bool needsExclusive() const { return targets.needsExclusive; }
/** True if we need to get a writable copy of the block. */
bool needsWritable() const { return targets.needsWritable; }
bool isPendingDirty() const {
assert(inService); return pendingDirty;
bool isPendingModified() const {
assert(inService); return pendingModified;
}
bool hasPostInvalidate() const {
@ -223,7 +246,7 @@ class MSHR : public Packet::SenderState, public Printable
void allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
Tick when_ready, Counter _order, bool alloc_on_fill);
bool markInService(bool pending_dirty_resp);
bool markInService(bool pending_modified_resp);
void clearDownstreamPending();
@ -284,7 +307,7 @@ class MSHR : public Packet::SenderState, public Printable
bool promoteDeferredTargets();
void promoteExclusive();
void promoteWritable();
bool checkFunctional(PacketPtr pkt);


@ -200,9 +200,9 @@ MSHRQueue::moveToFront(MSHR *mshr)
}
void
MSHRQueue::markInService(MSHR *mshr, bool pending_dirty_resp)
MSHRQueue::markInService(MSHR *mshr, bool pending_modified_resp)
{
if (mshr->markInService(pending_dirty_resp)) {
if (mshr->markInService(pending_modified_resp)) {
deallocate(mshr);
} else {
readyList.erase(mshr->readyIter);


@ -188,10 +188,10 @@ class MSHRQueue : public Drainable
* readyList or deallocates the MSHR if it does not expect a response.
*
* @param mshr The MSHR to mark in service.
* @param pending_dirty_resp Whether we expect a dirty response
* from another cache
* @param pending_modified_resp Whether we expect a modified response
* from another cache
*/
void markInService(MSHR *mshr, bool pending_dirty_resp);
void markInService(MSHR *mshr, bool pending_modified_resp);
/**
* Mark an in service entry as pending, used to resend a request.


@ -145,10 +145,10 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
// remember if the packet is an express snoop
bool is_express_snoop = pkt->isExpressSnoop();
bool is_inhibited = pkt->memInhibitAsserted();
bool cache_responding = pkt->cacheResponding();
// for normal requests, going downstream, the express snoop flag
// and the inhibited flag should always be the same
assert(is_express_snoop == is_inhibited);
// and the cache responding flag should always be the same
assert(is_express_snoop == cache_responding);
// determine the destination based on the address
PortID master_port_id = findPort(pkt->getAddr());
@ -236,10 +236,12 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
return true;
}
// remember if the packet will generate a snoop response
const bool expect_snoop_resp = !is_inhibited && pkt->memInhibitAsserted();
// remember if the packet will generate a snoop response by
// checking if a cache set the cacheResponding flag during the
// snooping above
const bool expect_snoop_resp = !cache_responding && pkt->cacheResponding();
const bool expect_response = pkt->needsResponse() &&
!pkt->memInhibitAsserted();
!pkt->cacheResponding();
// since it is a normal request, attempt to send the packet
bool success = masterPorts[master_port_id]->sendTimingReq(pkt);
@ -251,10 +253,8 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
// check if we were successful in sending the packet onwards
if (!success) {
// express snoops and inhibited packets should never be forced
// to retry
// express snoops should never be forced to retry
assert(!is_express_snoop);
assert(!pkt->memInhibitAsserted());
// restore the header delay
pkt->headerDelay = old_header_delay;
@ -386,8 +386,9 @@ CoherentXBar::recvTimingSnoopReq(PacketPtr pkt, PortID master_port_id)
// @todo Assess the choice of latency further
calcPacketTiming(pkt, forwardLatency * clockPeriod());
// remeber if the packet is inhibited so we can see if it changes
const bool is_inhibited = pkt->memInhibitAsserted();
// remember if a cache has already committed to responding so we
// can see if it changes during the snooping
const bool cache_responding = pkt->cacheResponding();
assert(pkt->snoopDelay == 0);
@ -414,7 +415,7 @@ CoherentXBar::recvTimingSnoopReq(PacketPtr pkt, PortID master_port_id)
pkt->snoopDelay = 0;
// if we can expect a response, remember how to route it
if (!is_inhibited && pkt->memInhibitAsserted()) {
if (!cache_responding && pkt->cacheResponding()) {
assert(routeTo.find(pkt->req) == routeTo.end());
routeTo[pkt->req] = master_port_id;
}
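
In essence, the crossbar only has to compare the flag before and after the snoop phase (standalone sketch):

// The cacheResponding flag can only be set, never cleared, while snooping,
// so a change means a cache on this crossbar committed to responding and a
// snoop response has to be routed back. Illustrative only.
bool snoopProducedResponse(bool respondingBefore, bool respondingAfter)
{
    return !respondingBefore && respondingAfter;
}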
@ -760,7 +761,7 @@ CoherentXBar::forwardAtomic(PacketPtr pkt, PortID exclude_slave_port_id,
// response from snoop agent
assert(pkt->cmd != orig_cmd);
assert(pkt->memInhibitAsserted());
assert(pkt->cacheResponding());
// should only happen once
assert(snoop_response_cmd == MemCmd::InvalidCmd);
// save response state


@ -144,18 +144,17 @@ CommMonitor::recvTimingReq(PacketPtr pkt)
const bool is_read = pkt->isRead();
const bool is_write = pkt->isWrite();
const bool expects_response(
pkt->needsResponse() && !pkt->memInhibitAsserted());
pkt->needsResponse() && !pkt->cacheResponding());
// If a cache miss is served by a cache, a monitor near the memory
// would see a request which needs a response, but this response
// would be inhibited and not come back from the memory. Therefore
// we additionally have to check the inhibit flag.
// would not come back from the memory. Therefore we additionally
// have to check the cacheResponding flag
if (expects_response && !stats.disableLatencyHists) {
pkt->pushSenderState(new CommMonitorSenderState(curTick()));
}
// Attempt to send the packet (always succeeds for inhibited
// packets)
// Attempt to send the packet
bool successful = masterPort.sendTimingReq(pkt);
// If not successful, restore the sender state


@ -277,7 +277,7 @@ DRAMCtrl::recvAtomic(PacketPtr pkt)
access(pkt);
Tick latency = 0;
if (!pkt->memInhibitAsserted() && pkt->hasData()) {
if (!pkt->cacheResponding() && pkt->hasData()) {
// this value is not supposed to be accurate, just enough to
// keep things going, mimic a closed page
latency = tRP + tRCD + tCL;
@ -590,8 +590,8 @@ DRAMCtrl::recvTimingReq(PacketPtr pkt)
DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
pkt->cmdString(), pkt->getAddr(), pkt->getSize());
// sink inhibited packets without further action
if (pkt->memInhibitAsserted()) {
// if a cache is responding, sink the packet without further action
if (pkt->cacheResponding()) {
pendingDelete.reset(pkt);
return true;
}


@ -155,7 +155,7 @@ DRAMSim2::recvAtomic(PacketPtr pkt)
access(pkt);
// 50 ns is just an arbitrary value at this point
return pkt->memInhibitAsserted() ? 0 : 50000;
return pkt->cacheResponding() ? 0 : 50000;
}
void
@ -175,8 +175,8 @@ DRAMSim2::recvFunctional(PacketPtr pkt)
bool
DRAMSim2::recvTimingReq(PacketPtr pkt)
{
// sink inhibited packets without further action
if (pkt->memInhibitAsserted()) {
// if a cache is responding, sink the packet without further action
if (pkt->cacheResponding()) {
pendingDelete.reset(pkt);
return true;
}


@ -81,15 +81,12 @@ bool HMCController::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
// before forwarding the packet (and possibly altering it),
// remember if we are expecting a response
const bool expect_response = pkt->needsResponse() &&
!pkt->memInhibitAsserted();
!pkt->cacheResponding();
// since it is a normal request, attempt to send the packet
bool success = masterPorts[master_port_id]->sendTimingReq(pkt);
if (!success) {
// inhibited packets should never be forced to retry
assert(!pkt->memInhibitAsserted());
DPRINTF(HMCController, "recvTimingReq: src %s %s 0x%x RETRY\n",
src_port->name(), pkt->cmdString(), pkt->getAddr());


@ -156,7 +156,7 @@ MemCheckerMonitor::recvTimingReq(PacketPtr pkt)
bool is_write = pkt->isWrite();
unsigned size = pkt->getSize();
Addr addr = pkt->getAddr();
bool expects_response = pkt->needsResponse() && !pkt->memInhibitAsserted();
bool expects_response = pkt->needsResponse() && !pkt->cacheResponding();
std::unique_ptr<uint8_t> pkt_data;
MemCheckerMonitorSenderState* state = NULL;
@ -170,15 +170,14 @@ MemCheckerMonitor::recvTimingReq(PacketPtr pkt)
// If a cache miss is served by a cache, a monitor near the memory
// would see a request which needs a response, but this response
// would be inhibited and not come back from the memory. Therefore
// would not come back from the memory. Therefore
// we additionally have to check the inhibit flag.
if (expects_response && (is_read || is_write)) {
state = new MemCheckerMonitorSenderState(0);
pkt->pushSenderState(state);
}
// Attempt to send the packet (always succeeds for inhibited
// packets)
// Attempt to send the packet
bool successful = masterPort.sendTimingReq(pkt);
// If not successful, restore the sender state
@ -227,7 +226,8 @@ MemCheckerMonitor::recvTimingReq(PacketPtr pkt)
}
} else if (successful) {
DPRINTF(MemCheckerMonitor,
"Forwarded inhibited request: addr = %#llx\n", addr);
"Forwarded request marked for cache response: addr = %#llx\n",
addr);
}
return successful;


@ -142,15 +142,12 @@ NoncoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
// before forwarding the packet (and possibly altering it),
// remember if we are expecting a response
const bool expect_response = pkt->needsResponse() &&
!pkt->memInhibitAsserted();
!pkt->cacheResponding();
// since it is a normal request, attempt to send the packet
bool success = masterPorts[master_port_id]->sendTimingReq(pkt);
if (!success) {
// inhibited packets should never be forced to retry
assert(!pkt->memInhibitAsserted());
DPRINTF(NoncoherentXBar, "recvTimingReq: src %s %s 0x%x RETRY\n",
src_port->name(), pkt->cmdString(), pkt->getAddr());


@ -80,10 +80,10 @@ MemCmd::commandInfo[] =
{ SET4(IsRead, IsResponse, HasData, IsInvalidate),
InvalidCmd, "ReadRespWithInvalidate" },
/* WriteReq */
{ SET5(IsWrite, NeedsExclusive, IsRequest, NeedsResponse, HasData),
{ SET5(IsWrite, NeedsWritable, IsRequest, NeedsResponse, HasData),
WriteResp, "WriteReq" },
/* WriteResp */
{ SET3(IsWrite, NeedsExclusive, IsResponse), InvalidCmd, "WriteResp" },
{ SET3(IsWrite, NeedsWritable, IsResponse), InvalidCmd, "WriteResp" },
/* WritebackDirty */
{ SET4(IsWrite, IsRequest, IsEviction, HasData),
InvalidCmd, "WritebackDirty" },
@ -107,34 +107,34 @@ MemCmd::commandInfo[] =
{ SET4(IsRead, IsResponse, IsHWPrefetch, HasData),
InvalidCmd, "HardPFResp" },
/* WriteLineReq */
{ SET5(IsWrite, NeedsExclusive, IsRequest, NeedsResponse, HasData),
{ SET5(IsWrite, NeedsWritable, IsRequest, NeedsResponse, HasData),
WriteResp, "WriteLineReq" },
/* UpgradeReq */
{ SET5(IsInvalidate, NeedsExclusive, IsUpgrade, IsRequest, NeedsResponse),
{ SET5(IsInvalidate, NeedsWritable, IsUpgrade, IsRequest, NeedsResponse),
UpgradeResp, "UpgradeReq" },
/* SCUpgradeReq: response could be UpgradeResp or UpgradeFailResp */
{ SET6(IsInvalidate, NeedsExclusive, IsUpgrade, IsLlsc,
{ SET6(IsInvalidate, NeedsWritable, IsUpgrade, IsLlsc,
IsRequest, NeedsResponse),
UpgradeResp, "SCUpgradeReq" },
/* UpgradeResp */
{ SET3(NeedsExclusive, IsUpgrade, IsResponse),
{ SET3(NeedsWritable, IsUpgrade, IsResponse),
InvalidCmd, "UpgradeResp" },
/* SCUpgradeFailReq: generates UpgradeFailResp but still gets the data */
{ SET6(IsRead, NeedsExclusive, IsInvalidate,
{ SET6(IsRead, NeedsWritable, IsInvalidate,
IsLlsc, IsRequest, NeedsResponse),
UpgradeFailResp, "SCUpgradeFailReq" },
/* UpgradeFailResp - Behaves like a ReadExReq, but notifies an SC
* that it has failed, acquires line as Dirty*/
{ SET4(IsRead, NeedsExclusive, IsResponse, HasData),
{ SET4(IsRead, NeedsWritable, IsResponse, HasData),
InvalidCmd, "UpgradeFailResp" },
/* ReadExReq - Read issued by a cache, always cache-line aligned,
* and the response is guaranteed to be writeable (exclusive or
* even modified) */
{ SET5(IsRead, NeedsExclusive, IsInvalidate, IsRequest, NeedsResponse),
{ SET5(IsRead, NeedsWritable, IsInvalidate, IsRequest, NeedsResponse),
ReadExResp, "ReadExReq" },
/* ReadExResp - Response matching a read exclusive, as we check
* the need for exclusive also on responses */
{ SET4(IsRead, NeedsExclusive, IsResponse, HasData),
{ SET4(IsRead, NeedsWritable, IsResponse, HasData),
InvalidCmd, "ReadExResp" },
/* ReadCleanReq - Read issued by a cache, always cache-line
* aligned, and the response is guaranteed to not contain dirty data
@ -149,21 +149,21 @@ MemCmd::commandInfo[] =
{ SET4(IsRead, IsLlsc, IsRequest, NeedsResponse),
ReadResp, "LoadLockedReq" },
/* StoreCondReq */
{ SET6(IsWrite, NeedsExclusive, IsLlsc,
{ SET6(IsWrite, NeedsWritable, IsLlsc,
IsRequest, NeedsResponse, HasData),
StoreCondResp, "StoreCondReq" },
/* StoreCondFailReq: generates failing StoreCondResp */
{ SET6(IsWrite, NeedsExclusive, IsLlsc,
{ SET6(IsWrite, NeedsWritable, IsLlsc,
IsRequest, NeedsResponse, HasData),
StoreCondResp, "StoreCondFailReq" },
/* StoreCondResp */
{ SET4(IsWrite, NeedsExclusive, IsLlsc, IsResponse),
{ SET4(IsWrite, NeedsWritable, IsLlsc, IsResponse),
InvalidCmd, "StoreCondResp" },
/* SwapReq -- for Swap ldstub type operations */
{ SET6(IsRead, IsWrite, NeedsExclusive, IsRequest, HasData, NeedsResponse),
{ SET6(IsRead, IsWrite, NeedsWritable, IsRequest, HasData, NeedsResponse),
SwapResp, "SwapReq" },
/* SwapResp -- for Swap ldstub type operations */
{ SET5(IsRead, IsWrite, NeedsExclusive, IsResponse, HasData),
{ SET5(IsRead, IsWrite, NeedsWritable, IsResponse, HasData),
InvalidCmd, "SwapResp" },
/* IntReq -- for interrupts */
{ SET4(IsWrite, IsRequest, NeedsResponse, HasData),
@ -185,12 +185,12 @@ MemCmd::commandInfo[] =
/* PrintReq */
{ SET2(IsRequest, IsPrint), InvalidCmd, "PrintReq" },
/* Flush Request */
{ SET3(IsRequest, IsFlush, NeedsExclusive), InvalidCmd, "FlushReq" },
{ SET3(IsRequest, IsFlush, NeedsWritable), InvalidCmd, "FlushReq" },
/* Invalidation Request */
{ SET4(IsInvalidate, IsRequest, NeedsExclusive, NeedsResponse),
{ SET4(IsInvalidate, IsRequest, NeedsWritable, NeedsResponse),
InvalidateResp, "InvalidateReq" },
/* Invalidation Response */
{ SET3(IsInvalidate, IsResponse, NeedsExclusive),
{ SET3(IsInvalidate, IsResponse, NeedsWritable),
InvalidCmd, "InvalidateResp" }
};
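The commandInfo changes above are a mechanical rename of the NeedsExclusive attribute to NeedsWritable in each command's attribute set. As a hypothetical illustration of how such an attribute bitmask separates a command that must obtain a writable copy (ReadExReq) from one that only needs readable data (ReadCleanReq), here is a self-contained sketch (not the gem5 MemCmd implementation; the types and values are invented):

#include <cstdint>
#include <iostream>
#include <string>

// Invented attribute bits in the spirit of the table above.
enum Attribute : uint32_t {
    IsRead        = 1u << 0,
    IsWrite       = 1u << 1,
    IsInvalidate  = 1u << 2,
    NeedsWritable = 1u << 3,
    NeedsResponse = 1u << 4,
};

struct CommandInfo {
    uint32_t attributes;
    std::string name;
};

bool needsWritable(const CommandInfo &cmd)
{
    return cmd.attributes & NeedsWritable;
}

int main()
{
    // A store miss must fetch a writable copy and invalidate other copies;
    // a clean fetch only needs readable data.
    const CommandInfo readExReq{IsRead | NeedsWritable | IsInvalidate |
                                NeedsResponse, "ReadExReq"};
    const CommandInfo readCleanReq{IsRead | NeedsResponse, "ReadCleanReq"};

    for (const auto &cmd : {readExReq, readCleanReq})
        std::cout << cmd.name << " needsWritable: "
                  << needsWritable(cmd) << "\n";
    return 0;
}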

View file

@ -139,7 +139,7 @@ class MemCmd
IsWrite, //!< Data flows from requester to responder
IsUpgrade,
IsInvalidate,
NeedsExclusive, //!< Requires exclusive copy to complete in-cache
NeedsWritable, //!< Requires writable copy to complete in-cache
IsRequest, //!< Issued by requester
IsResponse, //!< Issued by responder
NeedsResponse, //!< Requester needs response from target
@ -189,7 +189,7 @@ class MemCmd
bool isUpgrade() const { return testCmdAttrib(IsUpgrade); }
bool isRequest() const { return testCmdAttrib(IsRequest); }
bool isResponse() const { return testCmdAttrib(IsResponse); }
bool needsExclusive() const { return testCmdAttrib(NeedsExclusive); }
bool needsWritable() const { return testCmdAttrib(NeedsWritable); }
bool needsResponse() const { return testCmdAttrib(NeedsResponse); }
bool isInvalidate() const { return testCmdAttrib(IsInvalidate); }
bool isEviction() const { return testCmdAttrib(IsEviction); }
@ -252,15 +252,22 @@ class Packet : public Printable
// Flags to transfer across when copying a packet
COPY_FLAGS = 0x0000000F,
SHARED = 0x00000001,
// Does this packet have sharers (which means it should not be
// considered writable) or not. See setHasSharers below.
HAS_SHARERS = 0x00000001,
// Special control flags
/// Special timing-mode atomic snoop for multi-level coherence.
EXPRESS_SNOOP = 0x00000002,
/// Does supplier have exclusive copy?
/// Useful for multi-level coherence.
SUPPLY_EXCLUSIVE = 0x00000004,
// Snoop response flags
MEM_INHIBIT = 0x00000008,
/// Allow a responding cache to inform the cache hierarchy
/// that it had a writable copy before responding. See
/// setResponderHadWritable below.
RESPONDER_HAD_WRITABLE = 0x00000004,
// Snoop co-ordination flag to indicate that a cache is
// responding to a snoop. See setCacheResponding below.
CACHE_RESPONDING = 0x00000008,
/// Are the 'addr' and 'size' fields valid?
VALID_ADDR = 0x00000100,
@ -495,7 +502,7 @@ class Packet : public Printable
bool isUpgrade() const { return cmd.isUpgrade(); }
bool isRequest() const { return cmd.isRequest(); }
bool isResponse() const { return cmd.isResponse(); }
bool needsExclusive() const { return cmd.needsExclusive(); }
bool needsWritable() const { return cmd.needsWritable(); }
bool needsResponse() const { return cmd.needsResponse(); }
bool isInvalidate() const { return cmd.isInvalidate(); }
bool isEviction() const { return cmd.isEviction(); }
@ -506,22 +513,94 @@ class Packet : public Printable
bool isPrint() const { return cmd.isPrint(); }
bool isFlush() const { return cmd.isFlush(); }
// Snoop flags
void assertMemInhibit()
//@{
/// Snoop flags
/**
* Set the cacheResponding flag. This is used by the caches to
* signal another cache that they are responding to a request. A
* cache will only respond to snoops if it has the line in either
* Modified or Owned state. Note that on snoop hits we always pass
* the line as Modified and never Owned. In the case of an Owned
* line we proceed to invalidate all other copies.
*
* On a cache fill (see Cache::handleFill), we check hasSharers
* first, ignoring the cacheResponding flag if hasSharers is set.
* A line is consequently allocated as:
*
* hasSharers  cacheResponding  state
* true        false            Shared
* true        true             Shared
* false       false            Exclusive
* false       true             Modified
*/
void setCacheResponding()
{
assert(isRequest());
assert(!flags.isSet(MEM_INHIBIT));
flags.set(MEM_INHIBIT);
assert(!flags.isSet(CACHE_RESPONDING));
flags.set(CACHE_RESPONDING);
}
bool memInhibitAsserted() const { return flags.isSet(MEM_INHIBIT); }
void assertShared() { flags.set(SHARED); }
bool sharedAsserted() const { return flags.isSet(SHARED); }
bool cacheResponding() const { return flags.isSet(CACHE_RESPONDING); }
/**
* On fills, the hasSharers flag is used by the caches in
* combination with the cacheResponding flag, as clarified
* above. If the hasSharers flag is not set, the packet is passing
* writable. Thus, a response from a memory passes the line as
* writable by default.
*
* The hasSharers flag is also used by upstream caches to inform a
* downstream cache that they have the block (by calling
* setHasSharers on snoop request packets that hit in upstream
* caches' tags or MSHRs). If the snoop packet has sharers, a
* downstream cache is prevented from passing a dirty line upwards
* if it was not explicitly asked for a writable copy. See
* Cache::satisfyCpuSideRequest.
*
* The hasSharers flag is also used on writebacks, in
* combination with the WritebackClean or WritebackDirty commands,
* to allocate the block downstream either as:
*
* command         hasSharers  state
* WritebackDirty  false       Modified
* WritebackDirty  true        Owned
* WritebackClean  false       Exclusive
* WritebackClean  true        Shared
*/
void setHasSharers() { flags.set(HAS_SHARERS); }
bool hasSharers() const { return flags.isSet(HAS_SHARERS); }
//@}
/**
* The express snoop flag is used for two purposes. Firstly, it is
* used to bypass flow control for normal (non-snoop) requests
* going downstream in the memory system. In cases where a cache
* is responding to a snoop from another cache (it had a dirty
* line), but the line is not writable (and there are possibly
* other copies), the express snoop flag is set by the downstream
* cache to invalidate all other copies in zero time. Secondly,
* the express snoop flag is also set to be able to distinguish
* snoop packets that came from a downstream cache, rather than
* snoop packets from neighbouring caches.
*/
void setExpressSnoop() { flags.set(EXPRESS_SNOOP); }
bool isExpressSnoop() const { return flags.isSet(EXPRESS_SNOOP); }
/**
* On responding to a snoop request (which only happens for
* Modified or Owned lines), make sure that we can transform an
* Owned response to a Modified one. If this flag is not set, the
* responding cache had the line in the Owned state, and there are
* possibly other Shared copies in the memory system. A downstream
* cache helps in orchestrating the invalidation of these copies
* by sending out the appropriate express snoops.
*/
void setResponderHadWritable()
{
assert(cacheResponding());
flags.set(RESPONDER_HAD_WRITABLE);
}
bool responderHadWritable() const
{ return flags.isSet(RESPONDER_HAD_WRITABLE); }
// Special control flags
void setExpressSnoop() { flags.set(EXPRESS_SNOOP); }
bool isExpressSnoop() const { return flags.isSet(EXPRESS_SNOOP); }
void setSupplyExclusive() { flags.set(SUPPLY_EXCLUSIVE); }
bool isSupplyExclusive() const { return flags.isSet(SUPPLY_EXCLUSIVE); }
void setSuppressFuncError() { flags.set(SUPPRESS_FUNC_ERROR); }
bool suppressFuncError() const { return flags.isSet(SUPPRESS_FUNC_ERROR); }
void setBlockCached() { flags.set(BLOCK_CACHED); }
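The new setCacheResponding and setHasSharers comments above boil down to two small decision tables: the state in which a cache allocates a filled line, and the state in which a downstream cache allocates a written-back line. The following stand-alone sketch (illustration only, not gem5 code; State, fillState and writebackState are invented names) encodes both tables directly:

#include <iostream>

// Hypothetical MOESI-style states used only for this illustration.
enum class State { Modified, Owned, Exclusive, Shared };

const char *name(State s)
{
    switch (s) {
      case State::Modified:  return "Modified";
      case State::Owned:     return "Owned";
      case State::Exclusive: return "Exclusive";
      default:               return "Shared";
    }
}

// Fill table from the setCacheResponding/setHasSharers comments: sharers
// force Shared; otherwise a responding cache hands over dirty data
// (Modified), and a response from memory is clean and unique (Exclusive).
State fillState(bool hasSharers, bool cacheResponding)
{
    if (hasSharers)
        return State::Shared;
    return cacheResponding ? State::Modified : State::Exclusive;
}

// Writeback table: dirty data with sharers stays Owned, without sharers it
// becomes Modified; clean data is Shared or Exclusive accordingly.
State writebackState(bool writebackDirty, bool hasSharers)
{
    if (writebackDirty)
        return hasSharers ? State::Owned : State::Modified;
    return hasSharers ? State::Shared : State::Exclusive;
}

int main()
{
    for (bool sharers : {false, true})
        for (bool responding : {false, true})
            std::cout << "fill: hasSharers=" << sharers
                      << " cacheResponding=" << responding << " -> "
                      << name(fillState(sharers, responding)) << "\n";
    std::cout << "writeback dirty, no sharers -> "
              << name(writebackState(true, false)) << "\n";
    return 0;
}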

View file

@ -89,8 +89,9 @@ DMASequencer::MemSlavePort::recvTimingReq(PacketPtr pkt)
pkt->getAddr(), id);
DMASequencer *seq = static_cast<DMASequencer *>(&owner);
if (pkt->memInhibitAsserted())
panic("DMASequencer should never see an inhibited request\n");
if (pkt->cacheResponding())
panic("DMASequencer should never see a request with the "
"cacheResponding flag set\n");
assert(isPhysMemAddress(pkt->getAddr()));
assert(getOffset(pkt->getAddr()) + pkt->getSize() <=

View file

@ -231,8 +231,9 @@ RubyPort::MemSlavePort::recvTimingReq(PacketPtr pkt)
pkt->getAddr(), id);
RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
if (pkt->memInhibitAsserted())
panic("RubyPort should never see an inhibited request\n");
if (pkt->cacheResponding())
panic("RubyPort should never see request with the "
"cacheResponding flag set\n");
// Check for pio requests and directly send them to the dedicated
// pio port.

View file

@ -185,7 +185,7 @@ SerialLink::SerialLinkSlavePort::recvTimingReq(PacketPtr pkt)
} else if ( !retryReq ) {
// look at the response queue if we expect to see a response
bool expects_response = pkt->needsResponse() &&
!pkt->memInhibitAsserted();
!pkt->cacheResponding();
if (expects_response) {
if (respQueueFull()) {
DPRINTF(SerialLink, "Response queue full\n");

View file

@ -73,7 +73,7 @@ Tick
SimpleMemory::recvAtomic(PacketPtr pkt)
{
access(pkt);
return pkt->memInhibitAsserted() ? 0 : getLatency();
return pkt->cacheResponding() ? 0 : getLatency();
}
void
@ -97,8 +97,8 @@ SimpleMemory::recvFunctional(PacketPtr pkt)
bool
SimpleMemory::recvTimingReq(PacketPtr pkt)
{
// sink inhibited packets without further action
if (pkt->memInhibitAsserted()) {
// if a cache is responding, sink the packet without further action
if (pkt->cacheResponding()) {
pendingDelete.reset(pkt);
return true;
}
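SimpleMemory above and DRAMSim2 earlier apply the same rule at the memory: once a cache has signalled that it is responding, the memory sinks the packet in timing mode and adds no latency in atomic mode. A minimal stand-alone sketch of that rule (not gem5 code; DemoPacket, memRecvTimingReq and memRecvAtomic are invented):

#include <iostream>
#include <memory>

// Invented stand-in with only the flag the memory-side checks need.
struct DemoPacket {
    bool cacheIsResponding = false;
};

// Timing mode: a request another cache answers is simply sunk.
bool memRecvTimingReq(std::unique_ptr<DemoPacket> pkt)
{
    if (pkt->cacheIsResponding)
        return true;              // nothing to do, the packet is dropped here
    // ... normal enqueue into the memory controller would go here ...
    return true;
}

// Atomic mode: such a request contributes no memory latency either.
int memRecvAtomic(const DemoPacket &pkt, int latencyTicks)
{
    return pkt.cacheIsResponding ? 0 : latencyTicks;
}

int main()
{
    std::cout << memRecvAtomic(DemoPacket{true}, 30) << " "
              << memRecvAtomic(DemoPacket{false}, 30) << "\n";  // "0 30"
    memRecvTimingReq(std::make_unique<DemoPacket>());
    return 0;
}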

View file

@ -108,7 +108,7 @@ SnoopFilter::lookupRequest(const Packet* cpkt, const SlavePort& slave_port)
lookupLatency);
if (cpkt->needsResponse()) {
if (!cpkt->memInhibitAsserted()) {
if (!cpkt->cacheResponding()) {
// Max one request per address per port
panic_if(sf_item.requested & req_port, "double request :( " \
"SF value %x.%x\n", sf_item.requested, sf_item.holder);
@ -208,7 +208,7 @@ SnoopFilter::lookupSnoop(const Packet* cpkt)
// snoops so this was never an issue. Now that Writebacks generate snoops
// we need to special case for Writebacks.
assert(cpkt->isWriteback() || cpkt->req->isUncacheable() ||
(cpkt->isInvalidate() == cpkt->needsExclusive()));
(cpkt->isInvalidate() == cpkt->needsWritable()));
if (cpkt->isInvalidate() && !sf_item.requested) {
// Early clear of the holder, if no other request is currently going on
// @todo: This should possibly be updated even though we do not filter
@ -233,7 +233,7 @@ SnoopFilter::updateSnoopResponse(const Packet* cpkt,
cpkt->cmdString());
assert(cpkt->isResponse());
assert(cpkt->memInhibitAsserted());
assert(cpkt->cacheResponding());
// Ultimately we should check if the packet came from an
// allocating source, not just if the port is snooping
@ -258,10 +258,10 @@ SnoopFilter::updateSnoopResponse(const Packet* cpkt,
"the original request\n", sf_item.requested, sf_item.holder);
// Update the residency of the cache line.
if (cpkt->needsExclusive() || !cpkt->sharedAsserted()) {
DPRINTF(SnoopFilter, "%s: dropping %x because needs: %i shared: %i "\
if (cpkt->needsWritable() || !cpkt->hasSharers()) {
DPRINTF(SnoopFilter, "%s: dropping %x because needs: %i writable: %i "\
"SF val: %x.%x\n", __func__, rsp_mask,
cpkt->needsExclusive(), cpkt->sharedAsserted(),
cpkt->needsWritable(), !cpkt->hasSharers(),
sf_item.requested, sf_item.holder);
sf_item.holder &= ~rsp_mask;
@ -287,7 +287,7 @@ SnoopFilter::updateSnoopForward(const Packet* cpkt,
cpkt->cmdString());
assert(cpkt->isResponse());
assert(cpkt->memInhibitAsserted());
assert(cpkt->cacheResponding());
Addr line_addr = cpkt->getBlockAddr(linesize);
auto sf_it = cachedLocations.find(line_addr);
@ -305,7 +305,7 @@ SnoopFilter::updateSnoopForward(const Packet* cpkt,
// Remote (to this snoop filter) snoops update the filter
// already when they arrive from below, because we may not see
// any response.
if (cpkt->needsExclusive()) {
if (cpkt->needsWritable()) {
// If the request to this snoop response hit an in-flight
// transaction,
// the holder was not reset -> no assertion & do that here, now!
@ -345,7 +345,7 @@ SnoopFilter::updateResponse(const Packet* cpkt, const SlavePort& slave_port)
// Update the residency of the cache line. Here we assume that the
// line has been zapped in all caches that are not the responder.
if (cpkt->needsExclusive() || !cpkt->sharedAsserted())
if (cpkt->needsWritable() || !cpkt->hasSharers())
sf_item.holder = 0;
sf_item.holder |= slave_mask;
sf_item.requested &= ~slave_mask;
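The updateResponse hunk above keeps the existing holder-clearing rule, only expressed through the renamed accessors: if the response passes a writable copy, or passes the line without sharers, every other copy has been invalidated and only the requesting port remains a holder. A toy sketch of that rule (illustration only; SnoopFilterItem and updateResponse here are invented stand-ins, not the gem5 snoop filter):

#include <cstdint>
#include <iostream>

// Invented one-entry snoop-filter item.
struct SnoopFilterItem {
    uint32_t holder = 0;     // bit per port currently holding the line
    uint32_t requested = 0;  // bit per port with an outstanding request
};

// If the response passes a writable copy, or passes the line without
// sharers, all other copies were invalidated: only the requester remains.
void updateResponse(SnoopFilterItem &item, uint32_t slaveMask,
                    bool needsWritable, bool hasSharers)
{
    if (needsWritable || !hasSharers)
        item.holder = 0;
    item.holder |= slaveMask;
    item.requested &= ~slaveMask;
}

int main()
{
    SnoopFilterItem item;
    item.holder = 0x6;     // two other ports hold the line
    item.requested = 0x1;  // port 0 has requested it

    updateResponse(item, 0x1, true /* needsWritable */, false /* hasSharers */);
    std::cout << std::hex << "holder=0x" << item.holder
              << " requested=0x" << item.requested << "\n";  // holder=0x1 requested=0x0
    return 0;
}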

View file

@ -63,9 +63,11 @@ bool
SimpleTimingPort::recvTimingReq(PacketPtr pkt)
{
// the SimpleTimingPort should not be used anywhere where there is
// a need to deal with inhibited packets
if (pkt->memInhibitAsserted())
panic("SimpleTimingPort should never see an inhibited request\n");
// a need to deal with snoop responses and their flow control
// requirements
if (pkt->cacheResponding())
panic("SimpleTimingPort should never see packets with the "
"cacheResponding flag set\n");
bool needsResponse = pkt->needsResponse();
Tick latency = recvAtomic(pkt);
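SimpleTimingPort, RubyPort and DMASequencer all gain the same clarified guard: ports outside the snooping part of the hierarchy should never see a request that a cache has already claimed, so such a packet is treated as fatal. A stand-alone sketch of that guard (not gem5 code; the names are invented):

#include <cstdlib>
#include <iostream>

// Invented stand-in packet with only the flag the guard inspects.
struct DemoPacket {
    bool cacheIsResponding = false;
};

// Ports outside the snooping hierarchy treat a claimed request as fatal.
void recvTimingReqGuard(const DemoPacket &pkt)
{
    if (pkt.cacheIsResponding) {
        std::cerr << "panic: port should never see a request with the "
                     "cacheResponding flag set\n";
        std::abort();
    }
    // ... normal request handling would follow here ...
}

int main()
{
    recvTimingReqGuard(DemoPacket{});  // flag not set, nothing happens
    return 0;
}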