/*
 * Copyright (c) 2012-2013, 2015-2016 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 */

/**
 * @file
 * Miss Status and Handling Register (MSHR) definitions.
 */

#include "mem/cache/mshr.hh"

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "mem/cache/cache.hh"
#include "sim/core.hh"

using namespace std;

MSHR::MSHR() : downstreamPending(false),
               pendingModified(false),
               postInvalidate(false), postDowngrade(false),
               isForward(false)
{
}

MSHR::TargetList::TargetList()
    : needsWritable(false), hasUpgrade(false), allocOnFill(false)
{}

void
MSHR::TargetList::updateFlags(PacketPtr pkt, Target::Source source,
                              bool alloc_on_fill)
{
    if (source != Target::FromSnoop) {
        if (pkt->needsWritable()) {
            needsWritable = true;
        }

        // StoreCondReq is effectively an upgrade if it's in an MSHR
        // since it would have been failed already if we didn't have a
        // read-only copy
        if (pkt->isUpgrade() || pkt->cmd == MemCmd::StoreCondReq) {
            hasUpgrade = true;
        }

        // potentially re-evaluate whether we should allocate on a fill or
        // not
        allocOnFill = allocOnFill || alloc_on_fill;
    }
}
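
// Recompute the summary flags (needsWritable, hasUpgrade, allocOnFill)
// from scratch by folding updateFlags() over every target in the list;
// used after targets have been removed or moved, when the incrementally
// maintained flags may be stale.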
void
MSHR::TargetList::populateFlags()
{
    resetFlags();
    for (auto& t: *this) {
        updateFlags(t.pkt, t.source, t.allocOnFill);
    }
}

inline void
MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
                      Counter order, Target::Source source, bool markPending,
                      bool alloc_on_fill)
{
    updateFlags(pkt, source, alloc_on_fill);
    if (markPending) {
        // Iterate over the SenderState stack and see if we find
        // an MSHR entry. If we do, set the downstreamPending
        // flag. Otherwise, do nothing.
        MSHR *mshr = pkt->findNextSenderState<MSHR>();
        if (mshr != nullptr) {
            assert(!mshr->downstreamPending);
            mshr->downstreamPending = true;
        } else {
            // No need to clear downstreamPending later
            markPending = false;
        }
    }

    emplace_back(pkt, readyTime, order, source, markPending, alloc_on_fill);
}
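
// Convert an upgrade-style command into one that carries a full data
// response. This is needed once an invalidating snoop means the
// requester no longer holds a valid copy to upgrade, so a plain
// ownership upgrade can no longer succeed.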
static void
replaceUpgrade(PacketPtr pkt)
{
    // remember if the current packet has data allocated
    bool has_data = pkt->hasData() || pkt->hasRespData();

    if (pkt->cmd == MemCmd::UpgradeReq) {
        pkt->cmd = MemCmd::ReadExReq;
        DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
    } else if (pkt->cmd == MemCmd::SCUpgradeReq) {
        pkt->cmd = MemCmd::SCUpgradeFailReq;
        DPRINTF(Cache, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
    } else if (pkt->cmd == MemCmd::StoreCondReq) {
        pkt->cmd = MemCmd::StoreCondFailReq;
        DPRINTF(Cache, "Replacing StoreCondReq with StoreCondFailReq\n");
    }

    if (!has_data) {
        // there is no sensible way of setting the data field if the
        // new command actually would carry data
        assert(!pkt->hasData());

        if (pkt->hasRespData()) {
            // we went from a packet that had no data (neither request,
            // nor response), to one that does, and therefore we need to
            // actually allocate space for the data payload
            pkt->allocate();
        }
    }
}

void
MSHR::TargetList::replaceUpgrades()
{
    if (!hasUpgrade)
        return;

    for (auto& t : *this) {
        replaceUpgrade(t.pkt);
    }

    hasUpgrade = false;
}

void
MSHR::TargetList::clearDownstreamPending()
{
    for (auto& t : *this) {
        if (t.markedPending) {
            // Iterate over the SenderState stack and see if we find
            // an MSHR entry. If we find one, clear the
            // downstreamPending flag by calling
            // clearDownstreamPending(). This recursively clears the
            // downstreamPending flag in all caches this packet has
            // passed through.
            MSHR *mshr = t.pkt->findNextSenderState<MSHR>();
            if (mshr != nullptr) {
                mshr->clearDownstreamPending();
            }
            t.markedPending = false;
        }
    }
}

bool
MSHR::TargetList::checkFunctional(PacketPtr pkt)
{
    for (auto& t : *this) {
        if (pkt->checkFunctional(t.pkt)) {
            return true;
        }
    }

    return false;
}

void
MSHR::TargetList::print(std::ostream &os, int verbosity,
                        const std::string &prefix) const
{
    for (auto& t : *this) {
        const char *s;
        switch (t.source) {
          case Target::FromCPU:
            s = "FromCPU";
            break;
          case Target::FromSnoop:
            s = "FromSnoop";
            break;
          case Target::FromPrefetcher:
            s = "FromPrefetcher";
            break;
          default:
            s = "";
            break;
        }
        ccprintf(os, "%s%s: ", prefix, s);
        t.pkt->print(os, verbosity, "");
        ccprintf(os, "\n");
    }
}

void
MSHR::allocate(Addr blk_addr, unsigned blk_size, PacketPtr target,
               Tick when_ready, Counter _order, bool alloc_on_fill)
{
    blkAddr = blk_addr;
    blkSize = blk_size;
    isSecure = target->isSecure();
    readyTime = when_ready;
    order = _order;
    assert(target);
    isForward = false;
    _isUncacheable = target->req->isUncacheable();
    inService = false;
    downstreamPending = false;
    assert(targets.isReset());
    // Don't know of a case where we would allocate a new MSHR for a
    // snoop (mem-side request), so set source according to request here
    Target::Source source = (target->cmd == MemCmd::HardPFReq) ?
        Target::FromPrefetcher : Target::FromCPU;
    targets.add(target, when_ready, _order, source, true, alloc_on_fill);
    assert(deferredTargets.isReset());
}

void
MSHR::clearDownstreamPending()
{
    assert(downstreamPending);
    downstreamPending = false;
    // recursively clear flag on any MSHRs we will be forwarding
    // responses to
    targets.clearDownstreamPending();
}
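
// Mark this MSHR as having been sent downstream: record whether the
// eventual response is expected to provide a writable (Modified) copy,
// and reset the post-snoop state bits for the new outstanding request.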
void
MSHR::markInService(bool pending_modified_resp)
{
    assert(!inService);

    inService = true;
    pendingModified = targets.needsWritable || pending_modified_resp;
    postInvalidate = postDowngrade = false;

    if (!downstreamPending) {
        // let upstream caches know that the request has made it to a
        // level where it's going to get a response
        targets.clearDownstreamPending();
    }
}

void
MSHR::deallocate()
{
    assert(targets.empty());
    targets.resetFlags();
    assert(deferredTargets.isReset());
    inService = false;
}

/*
 * Adds a target to an MSHR
 */
void
MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order,
                     bool alloc_on_fill)
{
    // assume we'd never issue a prefetch when we've got an
    // outstanding miss
    assert(pkt->cmd != MemCmd::HardPFReq);

    // uncacheable accesses always allocate a new MSHR, and cacheable
    // accesses ignore any uncacheable MSHRs, thus we should never
    // have targets added if originally allocated uncacheable
    assert(!_isUncacheable);

    // if there's a request already in service for this MSHR, we will
    // have to defer the new target until after the response if any of
    // the following are true:
    // - there are other targets already deferred
    // - there's a pending invalidate to be applied after the response
    //   comes back (but before this target is processed)
    // - this target requires a writable block and either we're not
    //   getting a writable block back or we have already snooped
    //   another read request that will downgrade our writable block
    //   to non-writable (Shared or Owned)
    if (inService &&
        (!deferredTargets.empty() || hasPostInvalidate() ||
         (pkt->needsWritable() &&
          (!isPendingModified() || hasPostDowngrade() || isForward)))) {
        // need to put on deferred list
        if (hasPostInvalidate())
            replaceUpgrade(pkt);
        deferredTargets.add(pkt, whenReady, _order, Target::FromCPU, true,
                            alloc_on_fill);
    } else {
        // No request outstanding, or still OK to append to
        // outstanding request: append to regular target list.  Only
        // mark pending if current request hasn't been issued yet
        // (isn't in service).
        targets.add(pkt, whenReady, _order, Target::FromCPU, !inService,
                    alloc_on_fill);
    }
}
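
// Handle a snoop observed while this MSHR is outstanding. The return
// value indicates whether the snoop is ordered after our request and
// has therefore been absorbed here (true), or logically precedes it
// and must still be handled by the cache in the normal way (false).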
bool
MSHR::handleSnoop(PacketPtr pkt, Counter _order)
{
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

    // when we snoop packets the needsWritable and isInvalidate flags
    // should always be the same, however, this assumes that we never
    // snoop writes as they are currently not marked as invalidations
    panic_if(pkt->needsWritable() != pkt->isInvalidate(),
             "%s got snoop %s where needsWritable, "
             "does not match isInvalidate", name(), pkt->print());

    if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
        // Request has not been issued yet, or it's been issued
        // locally but is buffered unissued at some downstream cache
        // which is forwarding us this snoop.  Either way, the packet
        // we're snooping logically precedes this MSHR's request, so
        // the snoop has no impact on the MSHR, but must be processed
        // in the standard way by the cache.  The only exception is
        // that if we're an L2+ cache buffering an UpgradeReq from a
        // higher-level cache, and the snoop is invalidating, then our
        // buffered upgrades must be converted to read exclusives,
        // since the upper-level cache no longer has a valid copy.
        // That is, even though the upper-level cache got out on its
        // local bus first, some other invalidating transaction
        // reached the global bus before the upgrade did.
        if (pkt->needsWritable()) {
            targets.replaceUpgrades();
            deferredTargets.replaceUpgrades();
        }

        return false;
    }

    // From here on down, the request issued by this MSHR logically
    // precedes the request we're snooping.
    if (pkt->needsWritable()) {
        // snooped request still precedes the re-request we'll have to
        // issue for deferred targets, if any...
        deferredTargets.replaceUpgrades();
    }

    if (hasPostInvalidate()) {
        // a prior snoop has already appended an invalidation, so
        // logically we don't have the block anymore; no need for
        // further snooping.
        return true;
    }

    if (isPendingModified() || pkt->isInvalidate()) {
        // We need to save and replay the packet in two cases:
        // 1. We're awaiting a writable copy (Modified or Exclusive),
        //    so this MSHR is the ordering point, and we need to respond
        //    after we receive data.
        // 2. It's an invalidation (e.g., UpgradeReq), and we need
        //    to forward the snoop up the hierarchy after the current
        //    transaction completes.

        // Start by determining if we will eventually respond or not,
        // matching the conditions checked in Cache::handleSnoop
        bool will_respond = isPendingModified() && pkt->needsResponse();

        // The packet we are snooping may be deleted by the time we
        // actually process the target, and we consequently need to
        // save a copy here. Clear flags and also allocate new data as
        // the original packet data storage may have been deleted by
        // the time we get to process this packet. In the cases where
        // we are not responding after handling the snoop we also need
        // to create a copy of the request to be on the safe side. In
        // the latter case the cache is responsible for deleting both
        // the packet and the request as part of handling the deferred
        // snoop.
        PacketPtr cp_pkt = will_respond ? new Packet(pkt, true, true) :
            new Packet(new Request(*pkt->req), pkt->cmd);

        if (will_respond) {
            // we are the ordering point, and will consequently
            // respond, and depending on whether the packet
            // needsWritable or not we either pass a Shared line or a
            // Modified line
            pkt->setCacheResponding();

            // inform the cache hierarchy that this cache had the line
            // in the Modified state, even if the response is passed
            // as Shared (and thus non-writable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no need
            // to set the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        }
        targets.add(cp_pkt, curTick(), _order, Target::FromSnoop,
                    downstreamPending && targets.needsWritable, false);

        if (pkt->needsWritable()) {
            // This transaction will take away our pending copy
            postInvalidate = true;
        }
    }

    if (!pkt->needsWritable() && !pkt->req->isUncacheable()) {
        // This transaction will get a read-shared copy, downgrading
        // our copy if we had a writable one
        postDowngrade = true;
        // make sure that any downstream cache does not respond with a
        // writable (and dirty) copy even if it has one, unless it was
        // explicitly asked for one
        pkt->setHasSharers();
    }

    return true;
}
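
// Pop and return the targets that the incoming response can service,
// leaving any targets that must instead be re-requested in place.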
MSHR::TargetList
MSHR::extractServiceableTargets(PacketPtr pkt)
{
    TargetList ready_targets;
    // If the downstream MSHR got an invalidation request then we only
    // service the first of the FromCPU targets and any other
    // non-FromCPU target. This way the remaining FromCPU targets
    // issue a new request and get a fresh copy of the block and we
    // avoid memory consistency violations.
    if (pkt->cmd == MemCmd::ReadRespWithInvalidate) {
        auto it = targets.begin();
        assert(it->source == Target::FromCPU);
        ready_targets.push_back(*it);
        it = targets.erase(it);
        while (it != targets.end()) {
            if (it->source == Target::FromCPU) {
                it++;
            } else {
                assert(it->source == Target::FromSnoop);
                ready_targets.push_back(*it);
                it = targets.erase(it);
            }
        }
        ready_targets.populateFlags();
    } else {
        std::swap(ready_targets, targets);
    }
    targets.populateFlags();

    return ready_targets;
}
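
// Promote the deferred targets onto the active target list so that a
// new downstream request can be scheduled; returns false only when
// there are no targets of either kind to service.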
bool
MSHR::promoteDeferredTargets()
{
    if (targets.empty()) {
        if (deferredTargets.empty()) {
            return false;
        }

        std::swap(targets, deferredTargets);
    } else {
        // If the targets list is not empty then we move the deferred
        // targets to the back of the targets list. A new request will
        // then service the whole targets list.
        targets.splice(targets.end(), deferredTargets);
        targets.populateFlags();
    }

    // clear deferredTargets flags
    deferredTargets.resetFlags();

    order = targets.front().order;
    readyTime = std::max(curTick(), targets.front().readyTime);

    return true;
}

void
MSHR::promoteWritable()
{
    if (deferredTargets.needsWritable &&
        !(hasPostInvalidate() || hasPostDowngrade())) {
        // We got a writable response, but we have deferred targets
        // which are waiting to request a writable copy (not because
        // of a pending invalidate).  This can happen if the original
        // request was for a read-only block, but we got a writable
        // response anyway. Since we got the writable copy there's no
        // need to defer the targets, so move them up to the regular
        // target list.
        assert(!targets.needsWritable);
        targets.needsWritable = true;
        // if any of the deferred targets were upper-level cache
        // requests marked downstreamPending, need to clear that
        assert(!downstreamPending);  // not pending here anymore
        deferredTargets.clearDownstreamPending();
        // this clears out deferredTargets too
        targets.splice(targets.end(), deferredTargets);
        deferredTargets.resetFlags();
    }
}

bool
MSHR::checkFunctional(PacketPtr pkt)
{
    // For printing, we treat the MSHR as a whole, as a single entity.
    // For other requests, we iterate over the individual targets
    // since that's where the actual data lies.
    if (pkt->isPrint()) {
        pkt->checkFunctional(this, blkAddr, isSecure, blkSize, nullptr);
        return false;
    } else {
        return (targets.checkFunctional(pkt) ||
                deferredTargets.checkFunctional(pkt));
    }
}
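
// Hook used when this MSHR is scheduled to send its request
// downstream; delegate to the cache, which knows how to turn the
// MSHR into a packet.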
bool
MSHR::sendPacket(Cache &cache)
{
    return cache.sendMSHRQueuePacket(this);
}

void
MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
{
    ccprintf(os, "%s[%#llx:%#llx](%s) %s %s %s state: %s %s %s %s %s\n",
             prefix, blkAddr, blkAddr + blkSize - 1,
             isSecure ? "s" : "ns",
             isForward ? "Forward" : "",
             allocOnFill() ? "AllocOnFill" : "",
             needsWritable() ? "Wrtbl" : "",
             _isUncacheable ? "Unc" : "",
             inService ? "InSvc" : "",
             downstreamPending ? "DwnPend" : "",
             postInvalidate ? "PostInv" : "",
             postDowngrade ? "PostDowngr" : "");

    if (!targets.empty()) {
        ccprintf(os, "%s  Targets:\n", prefix);
        targets.print(os, verbosity, prefix + "    ");
    }
    if (!deferredTargets.empty()) {
        ccprintf(os, "%s  Deferred Targets:\n", prefix);
        deferredTargets.print(os, verbosity, prefix + "    ");
    }
}

std::string
MSHR::print() const
{
    ostringstream str;
    print(str);
    return str.str();
}