cache: coherence protocol enhancements & bug fixes

Allow lower-level caches (e.g., L2 or L3) to pass exclusive
copies to higher levels (e.g., L1).  This eliminates many
unnecessary upgrade transactions on read-then-write sequences
to non-shared data.

Also includes some cleanup of the MSHR coherence handling and
several bug fixes.
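
A minimal stand-alone sketch of the grant-exclusive decision that satisfyCpuSideRequest() gains in the diff below, for the read-request path. CacheLine and Grant are illustrative stand-ins, not gem5 types; the real code operates on CacheBlk status bits and Packet flags:

#include <cassert>
#include <cstdio>

// Illustrative stand-ins; gem5 works on CacheBlk status bits and Packet flags.
struct CacheLine {
    bool valid, writable, dirty;
};

enum class Grant { Shared, Exclusive, ExclusiveOwned };

// What can a lower-level cache hand to a read requester above it?
//  - An exclusive (possibly owned) copy if it holds a writable copy,
//    no pending snoop will downgrade it, and no other upper-level
//    cache asserted the shared line.
//  - Otherwise only a shared copy, downgrading its own copy.
Grant respondToRead(CacheLine &blk, bool pending_downgrade,
                    bool shared_asserted, bool deferred_response)
{
    assert(blk.valid);
    if (blk.writable && !pending_downgrade && !shared_asserted) {
        if (!blk.dirty)
            return Grant::Exclusive;      // clean exclusive copy
        if (!deferred_response) {
            blk.dirty = false;            // transfer ownership upward too
            return Grant::ExclusiveOwned;
        }
        // Responding after our own miss: snoops in that window may not
        // have been answered as owner, so keep ownership and respond
        // with a shared copy instead.
        return Grant::Shared;
    }
    blk.writable = false;                 // some other cache has a copy
    return Grant::Shared;
}

int main()
{
    CacheLine blk{true, true, false};
    // A read to non-shared data now comes back exclusive, so the CPU's
    // following write hits locally instead of issuing an upgrade.
    std::printf("exclusive granted: %d\n",
                respondToRead(blk, false, false, false) == Grant::Exclusive);
    return 0;
}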
Steve Reinhardt, 2010-09-09 14:40:18 -04:00
parent 7c4dc4491a
commit 71aca6d29e
7 changed files with 146 additions and 75 deletions

@@ -170,11 +170,11 @@ class BaseCache : public MemObject
return mshr;
}
void markInServiceInternal(MSHR *mshr)
void markInServiceInternal(MSHR *mshr, PacketPtr pkt)
{
MSHRQueue *mq = mshr->queue;
bool wasFull = mq->isFull();
mq->markInService(mshr);
mq->markInService(mshr, pkt);
if (wasFull && !mq->isFull()) {
clearBlocked((BlockedCause)mq->index);
}

@@ -178,7 +178,9 @@ class Cache : public BaseCache
BlkType *handleFill(PacketPtr pkt, BlkType *blk,
PacketList &writebacks);
void satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk);
void satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
bool deferred_response = false,
bool pending_downgrade = false);
bool satisfyMSHR(MSHR *mshr, PacketPtr pkt, BlkType *blk);
void doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
@@ -292,7 +294,7 @@ class Cache : public BaseCache
* are successfully sent.
* @param pkt The request that was sent on the bus.
*/
void markInService(MSHR *mshr);
void markInService(MSHR *mshr, PacketPtr pkt = 0);
/**
* Perform the given writeback request.

@@ -165,16 +165,18 @@ Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
template<class TagStore>
void
Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
bool deferred_response,
bool pending_downgrade)
{
assert(blk);
assert(blk && blk->isValid());
// Occasionally this is not true... if we are a lower-level cache
// satisfying a string of Read and ReadEx requests from
// upper-level caches, a Read will mark the block as shared but we
// can satisfy a following ReadEx anyway since we can rely on the
// Read requester(s) to have buffered the ReadEx snoop and to
// invalidate their blocks after receiving them.
// assert(pkt->needsExclusive() ? blk->isWritable() : blk->isValid());
// assert(!pkt->needsExclusive() || blk->isWritable());
assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
// Check RMW operations first since both isRead() and
@@ -195,13 +197,43 @@ Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
// special handling for coherent block requests from
// upper-level caches
if (pkt->needsExclusive()) {
// on ReadExReq we give up our copy
// if we have a dirty copy, make sure the recipient
// keeps it marked dirty
if (blk->isDirty()) {
pkt->assertMemInhibit();
}
// on ReadExReq we give up our copy unconditionally
tags->invalidateBlk(blk);
} else if (blk->isWritable() && !pending_downgrade
&& !pkt->sharedAsserted()) {
// we can give the requester an exclusive copy (by not
// asserting shared line) on a read request if:
// - we have an exclusive copy at this level (& below)
// - we don't have a pending snoop from below
// signaling another read request
// - no other cache above has a copy (otherwise it
// would have asserted shared line on request)
if (blk->isDirty()) {
// special considerations if we're owner:
if (!deferred_response) {
// if we are responding immediately and can
// signal that we're transferring ownership
// along with exclusivity, do so
pkt->assertMemInhibit();
blk->status &= ~BlkDirty;
} else {
// on ReadReq we create shareable copies here and in
// the requester
// if we're responding after our own miss,
// there's a window where the recipient didn't
// know it was getting ownership and may not
// have responded to snoops correctly, so we
// can't pass off ownership *or* exclusivity
pkt->assertShared();
}
}
} else {
// otherwise only respond with a shared copy
pkt->assertShared();
blk->status &= ~BlkWritable;
}
}
} else {
@@ -223,9 +255,9 @@ Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
template<class TagStore>
void
Cache<TagStore>::markInService(MSHR *mshr)
Cache<TagStore>::markInService(MSHR *mshr, PacketPtr pkt)
{
markInServiceInternal(mshr);
markInServiceInternal(mshr, pkt);
#if 0
if (mshr->originalCmd == MemCmd::HardPFReq) {
DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
@@ -829,7 +861,8 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
case MSHR::Target::FromCPU:
Tick completion_time;
if (is_fill) {
satisfyCpuSideRequest(target->pkt, blk);
satisfyCpuSideRequest(target->pkt, blk,
true, mshr->hasPostDowngrade());
// How many bytes past the first request is this one
int transfer_offset =
target->pkt->getOffset(blkSize) - initial_offset;
@@ -860,12 +893,11 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
// if this packet is an error copy that to the new packet
if (is_error)
target->pkt->copyError(pkt);
if (pkt->isInvalidate()) {
if (target->pkt->cmd == MemCmd::ReadResp &&
(pkt->isInvalidate() || mshr->hasPostInvalidate())) {
// If intermediate cache got ReadRespWithInvalidate,
// propagate that. Response should not have
// isInvalidate() set otherwise.
assert(target->pkt->cmd == MemCmd::ReadResp);
assert(pkt->cmd == MemCmd::ReadRespWithInvalidate);
target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
}
cpuSidePort->respond(target->pkt, completion_time);
@@ -884,8 +916,9 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
assert(!is_error);
// response to snoop request
DPRINTF(Cache, "processing deferred snoop...\n");
assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
handleSnoop(target->pkt, blk, true, true,
mshr->pendingInvalidate || pkt->isInvalidate());
mshr->hasPostInvalidate());
break;
default:
@@ -895,14 +928,20 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
mshr->popTarget();
}
if (pkt->isInvalidate()) {
if (blk) {
if (pkt->isInvalidate() || mshr->hasPostInvalidate()) {
tags->invalidateBlk(blk);
} else if (mshr->hasPostDowngrade()) {
blk->status &= ~BlkWritable;
}
}
if (mshr->promoteDeferredTargets()) {
// avoid later read getting stale data while write miss is
// outstanding.. see comment in timingAccess()
if (blk) {
blk->status &= ~BlkReadable;
}
MSHRQueue *mq = mshr->queue;
mq->markPending(mshr);
requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
@@ -1017,14 +1056,19 @@ Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
tags->insertBlock(pkt->getAddr(), blk, id);
}
// starting from scratch with a new block
blk->status = 0;
} else {
// existing block... probably an upgrade
assert(blk->tag == tags->extractTag(addr));
// either we're getting new data or the block should already be valid
assert(pkt->hasData() || blk->isValid());
// don't clear block status... if block is already dirty we
// don't want to lose that
}
blk->status = BlkValid | BlkReadable;
blk->status |= BlkValid | BlkReadable;
if (!pkt->sharedAsserted()) {
blk->status |= BlkWritable;
@@ -1587,7 +1631,7 @@ Cache<TagStore>::MemSidePort::sendPacket()
delete pkt;
}
} else {
myCache()->markInService(mshr);
myCache()->markInService(mshr, pkt);
}
}
}
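
The handleFill hunk above contains a related subtlety: when the fill hits an already-valid block (e.g., an upgrade), the status word is now OR-ed into rather than overwritten, so an existing dirty bit is not silently dropped. A tiny illustration, using made-up bit values rather than gem5's actual encoding:

#include <cassert>
#include <cstdint>

// Made-up status bits for illustration; gem5 defines its own.
enum : std::uint8_t {
    BlkValid    = 0x01,
    BlkWritable = 0x02,
    BlkReadable = 0x04,
    BlkDirty    = 0x08
};

int main()
{
    // An existing block that is valid, writable, and dirty receives
    // a second fill (e.g., an upgrade response).
    std::uint8_t status = BlkValid | BlkReadable | BlkWritable | BlkDirty;

    // Old behavior: plain assignment wipes BlkDirty (and BlkWritable),
    // losing track of the block's modified state.
    std::uint8_t old_status = BlkValid | BlkReadable;
    assert(!(old_status & BlkDirty));

    // New behavior: OR-ing in Valid/Readable preserves whatever state
    // the block already had.
    status |= BlkValid | BlkReadable;
    assert(status & BlkDirty);
    return 0;
}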

src/mem/cache/mshr.cc

@@ -89,6 +89,19 @@ MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
}
static void
replaceUpgrade(PacketPtr pkt)
{
if (pkt->cmd == MemCmd::UpgradeReq) {
pkt->cmd = MemCmd::ReadExReq;
DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
} else if (pkt->cmd == MemCmd::SCUpgradeReq) {
pkt->cmd = MemCmd::SCUpgradeFailReq;
DPRINTF(Cache, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
}
}
void
MSHR::TargetList::replaceUpgrades()
{
@@ -97,13 +110,7 @@ MSHR::TargetList::replaceUpgrades()
Iterator end_i = end();
for (Iterator i = begin(); i != end_i; ++i) {
if (i->pkt->cmd == MemCmd::UpgradeReq) {
i->pkt->cmd = MemCmd::ReadExReq;
DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
} else if (i->pkt->cmd == MemCmd::SCUpgradeReq) {
i->pkt->cmd = MemCmd::SCUpgradeFailReq;
DPRINTF(Cache, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
}
replaceUpgrade(i->pkt);
}
hasUpgrade = false;
@@ -180,8 +187,6 @@ MSHR::allocate(Addr _addr, int _size, PacketPtr target,
Target::FromPrefetcher : Target::FromCPU;
targets->add(target, whenReady, _order, source, true);
assert(deferredTargets->isReset());
pendingInvalidate = false;
pendingShared = false;
data = NULL;
}
@@ -197,7 +202,7 @@ MSHR::clearDownstreamPending()
}
bool
MSHR::markInService()
MSHR::markInService(PacketPtr pkt)
{
assert(!inService);
if (isForwardNoResponse()) {
@@ -208,6 +213,10 @@ MSHR::markInService()
return true;
}
inService = true;
pendingDirty = (targets->needsExclusive ||
(!pkt->sharedAsserted() && pkt->memInhibitAsserted()));
postInvalidate = postDowngrade = false;
if (!downstreamPending) {
// let upstream caches know that the request has made it to a
// level where it's going to get a response
@@ -225,8 +234,6 @@ MSHR::deallocate()
assert(deferredTargets->isReset());
assert(ntargets == 0);
inService = false;
//allocIter = NULL;
//readyIter = NULL;
}
/*
@@ -241,17 +248,22 @@ MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
// - there are other targets already deferred
// - there's a pending invalidate to be applied after the response
// comes back (but before this target is processed)
// - the outstanding request is for a non-exclusive block and this
// target requires an exclusive block
// - this target requires an exclusive block and either we're not
// getting an exclusive block back or we have already snooped
// another read request that will downgrade our exclusive block
// to shared
// assume we'd never issue a prefetch when we've got an
// outstanding miss
assert(pkt->cmd != MemCmd::HardPFReq);
if (inService &&
(!deferredTargets->empty() || pendingInvalidate ||
(!targets->needsExclusive && pkt->needsExclusive()))) {
(!deferredTargets->empty() || hasPostInvalidate() ||
(pkt->needsExclusive() &&
(!isPendingDirty() || hasPostDowngrade() || isForward)))) {
// need to put on deferred list
if (hasPostInvalidate())
replaceUpgrade(pkt);
deferredTargets->add(pkt, whenReady, _order, Target::FromCPU, true);
} else {
// No request outstanding, or still OK to append to
@@ -291,49 +303,49 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
// From here on down, the request issued by this MSHR logically
// precedes the request we're snooping.
if (pkt->needsExclusive()) {
// snooped request still precedes the re-request we'll have to
// issue for deferred targets, if any...
deferredTargets->replaceUpgrades();
}
if (pendingInvalidate) {
if (hasPostInvalidate()) {
// a prior snoop has already appended an invalidation, so
// logically we don't have the block anymore; no need for
// further snooping.
return true;
}
if (targets->needsExclusive || pkt->needsExclusive()) {
// actual target device (typ. PhysicalMemory) will delete the
// packet on reception, so we need to save a copy here
if (isPendingDirty() || pkt->isInvalidate()) {
// We need to save and replay the packet in two cases:
// 1. We're awaiting an exclusive copy, so ownership is pending,
// and we need to respond after we receive data.
// 2. It's an invalidation (e.g., UpgradeReq), and we need
// to forward the snoop up the hierarchy after the current
// transaction completes.
// Actual target device (typ. PhysicalMemory) will delete the
// packet on reception, so we need to save a copy here.
PacketPtr cp_pkt = new Packet(pkt, true);
targets->add(cp_pkt, curTick, _order, Target::FromSnoop,
downstreamPending && targets->needsExclusive);
++ntargets;
if (targets->needsExclusive) {
// We're awaiting an exclusive copy, so ownership is pending.
// It's up to us to respond once the data arrives.
if (isPendingDirty()) {
pkt->assertMemInhibit();
pkt->setSupplyExclusive();
} else {
// Someone else may respond before we get around to
// processing this snoop, which means the copied request
// pointer will no longer be valid
cp_pkt->req = NULL;
}
if (pkt->needsExclusive()) {
// This transaction will take away our pending copy
pendingInvalidate = true;
postInvalidate = true;
}
} else {
// Read to a read: no conflict, so no need to record as
// target, but make sure neither reader thinks he's getting an
// exclusive copy
pendingShared = true;
}
if (!pkt->needsExclusive()) {
// This transaction will get a read-shared copy, downgrading
// our copy if we had an exclusive one
postDowngrade = true;
pkt->assertShared();
}
@@ -359,8 +371,6 @@ MSHR::promoteDeferredTargets()
// clear deferredTargets flags
deferredTargets->resetFlags();
pendingInvalidate = false;
pendingShared = false;
order = targets->front().order;
readyTime = std::max(curTick, targets->front().readyTime);
@@ -371,13 +381,8 @@ MSHR::handleFill(Packet *pkt, CacheBlk *blk)
void
MSHR::handleFill(Packet *pkt, CacheBlk *blk)
{
if (pendingShared) {
// we snooped another read while this read was in
// service... assert shared line on its behalf
pkt->assertShared();
}
if (!pkt->sharedAsserted() && !pendingInvalidate
if (!pkt->sharedAsserted()
&& !(hasPostInvalidate() || hasPostDowngrade())
&& deferredTargets->needsExclusive) {
// We got an exclusive response, but we have deferred targets
// which are waiting to request an exclusive copy (not because
@@ -427,8 +432,8 @@ MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
_isUncacheable ? "Unc" : "",
inService ? "InSvc" : "",
downstreamPending ? "DwnPend" : "",
pendingInvalidate ? "PendInv" : "",
pendingShared ? "PendShared" : "");
hasPostInvalidate() ? "PostInv" : "",
hasPostDowngrade() ? "PostDowngr" : "");
ccprintf(os, "%s Targets:\n", prefix);
targets->print(os, verbosity, prefix + " ");
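
Taken together, the MSHR changes in this file track three pieces of in-flight state: pendingDirty (will we own the data when the fill arrives), postInvalidate (an invalidating snoop arrived while we waited), and postDowngrade (a read snoop will force the incoming copy to shared). The following simplified stand-alone model mirrors that bookkeeping; the names, signatures, and return semantics are illustrative only, not the real MSHR interface:

#include <cassert>
#include <cstdio>

// Simplified stand-alone model of the MSHR's new in-flight bookkeeping.
struct MiniMSHR {
    bool inService = false;
    bool pendingDirty = false;   // will we own (dirty) the block on fill?
    bool postInvalidate = false; // snooped an invalidation while waiting
    bool postDowngrade = false;  // snooped a read while waiting

    void markInService(bool needsExclusive, bool respShared,
                       bool respMemInhibit)
    {
        inService = true;
        // We will end up owning the block if we asked for an exclusive
        // copy, or if another cache is supplying owned (non-shared) data.
        pendingDirty = needsExclusive || (!respShared && respMemInhibit);
        postInvalidate = postDowngrade = false;
    }

    // Returns true if the snoop has to be remembered and replayed once
    // our own response arrives: either we will be the owner and must
    // answer it, or it invalidates and must be forwarded upward later.
    bool snoop(bool snoopNeedsExclusive, bool snoopInvalidates)
    {
        assert(inService);
        if (postInvalidate)
            return false;            // block is logically gone already
        bool replay = pendingDirty || snoopInvalidates;
        if (replay && snoopNeedsExclusive)
            postInvalidate = true;   // snoop takes away our pending copy
        if (!snoopNeedsExclusive)
            postDowngrade = true;    // our pending copy drops to shared
        return replay;
    }
};

int main()
{
    MiniMSHR m;
    m.markInService(/*needsExclusive=*/true, /*respShared=*/false,
                    /*respMemInhibit=*/false);
    // A read snoop arriving while the exclusive fill is outstanding:
    // it must be replayed (we will own the data) and it downgrades the
    // copy we are about to receive.
    std::printf("replay=%d postDowngrade=%d\n",
                m.snoop(false, false), m.postDowngrade);
    return 0;
}

handleResponse() then consults hasPostInvalidate()/hasPostDowngrade() after servicing the deferred targets to invalidate or downgrade the block, as shown in the handleResponse hunk earlier.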

src/mem/cache/mshr.hh

@@ -134,8 +134,28 @@ class MSHR : public Packet::SenderState, public Printable
bool downstreamPending;
bool pendingInvalidate;
bool pendingShared;
/** The pending* and post* flags are only valid if inService is
* true. Using the accessor functions lets us detect if these
* flags are accessed improperly.
*/
/** Will we have a dirty copy after this request? */
bool pendingDirty;
bool isPendingDirty() const {
assert(inService); return pendingDirty;
}
/** Did we snoop an invalidate while waiting for data? */
bool postInvalidate;
bool hasPostInvalidate() const {
assert(inService); return postInvalidate;
}
/** Did we snoop a read while waiting for data? */
bool postDowngrade;
bool hasPostDowngrade() const {
assert(inService); return postDowngrade;
}
/** Thread number of the miss. */
ThreadID threadNum;
@@ -180,7 +200,7 @@ public:
void allocate(Addr addr, int size, PacketPtr pkt,
Tick when, Counter _order);
bool markInService();
bool markInService(PacketPtr pkt);
void clearDownstreamPending();
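
The comment block above gives the rationale for wrapping the raw flags: they are meaningless until markInService() runs, so the accessors assert inService and turn a misuse into an immediate failure instead of a silent read of stale state. A minimal sketch of the pattern, with hypothetical names rather than the actual gem5 class:

#include <cassert>

// Hypothetical illustration of the guarded-accessor pattern.
class InFlightState {
    bool inService = false;
    bool postInvalidate = false;  // only meaningful once in service

  public:
    void markInService() { inService = true; postInvalidate = false; }
    void noteInvalidatingSnoop() { assert(inService); postInvalidate = true; }

    // Reading the flag before the request is in service is a protocol
    // bug, so fail loudly instead of returning stale state.
    bool hasPostInvalidate() const { assert(inService); return postInvalidate; }
};

int main()
{
    InFlightState s;
    // s.hasPostInvalidate();   // would assert: flag not yet valid
    s.markInService();
    return s.hasPostInvalidate() ? 1 : 0;
}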

@@ -197,9 +197,9 @@ MSHRQueue::moveToFront(MSHR *mshr)
}
void
MSHRQueue::markInService(MSHR *mshr)
MSHRQueue::markInService(MSHR *mshr, PacketPtr pkt)
{
if (mshr->markInService()) {
if (mshr->markInService(pkt)) {
deallocate(mshr);
} else {
readyList.erase(mshr->readyIter);

@@ -160,7 +160,7 @@ class MSHRQueue
* readyList. Deallocates the MSHR if it does not expect a response.
* @param mshr The MSHR to mark in service.
*/
void markInService(MSHR *mshr);
void markInService(MSHR *mshr, PacketPtr pkt);
/**
* Mark an in service entry as pending, used to resend a request.