/*
 * Copyright (c) 2013-2016 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Stephan Diestelhorst
 */

/**
 * @file
 * Implementation of a snoop filter.
 */
|
2016-11-09 21:27:37 +01:00
|
|
|
#include "mem/snoop_filter.hh"
|
|
|
|
|
2014-09-20 23:18:26 +02:00
|
|
|
#include "base/misc.hh"
|
|
|
|
#include "base/trace.hh"
|
|
|
|
#include "debug/SnoopFilter.hh"
|
|
|
|
#include "sim/system.hh"
|
|
|
|
|
2015-09-25 13:26:57 +02:00
|
|
|
void
|
|
|
|
SnoopFilter::eraseIfNullEntry(SnoopFilterCache::iterator& sf_it)
|
|
|
|
{
|
|
|
|
SnoopItem& sf_item = sf_it->second;
|
|
|
|
if (!(sf_item.requested | sf_item.holder)) {
|
|
|
|
cachedLocations.erase(sf_it);
|
|
|
|
DPRINTF(SnoopFilter, "%s: Removed SF entry.\n",
|
|
|
|
__func__);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-09-20 23:18:26 +02:00
|
|
|
/**
 * Look up a request arriving from a slave port and return the list of
 * ports that must be snooped (all interested ports except the
 * requester itself), together with the filter's lookup latency.
 *
 * A filter entry is only allocated for cacheable requests coming from
 * a snooping cache. In-flight requests are recorded in
 * sf_item.requested so the matching response can later be accounted
 * for; the pre-lookup entry value is stashed in retryItem so
 * finishRequest can roll back on a send retry.
 */
std::pair<SnoopFilter::SnoopList, Cycles>
SnoopFilter::lookupRequest(const Packet* cpkt, const SlavePort& slave_port)
{
    DPRINTF(SnoopFilter, "%s: src %s packet %s\n", __func__,
            slave_port.name(), cpkt->print());

    // check if the packet came from a cache
    bool allocate = !cpkt->req->isUncacheable() && slave_port.isSnooping() &&
        cpkt->fromCache();
    Addr line_addr = cpkt->getBlockAddr(linesize);
    // Secure and non-secure lines are tracked as distinct entries by
    // folding the secure flag into the key.
    if (cpkt->isSecure()) {
        line_addr |= LineSecure;
    }
    SnoopMask req_port = portToMask(slave_port);
    // The iterator is kept in a member so finishRequest can find the
    // same entry again without a second lookup.
    reqLookupResult = cachedLocations.find(line_addr);
    bool is_hit = (reqLookupResult != cachedLocations.end());

    // If the snoop filter has no entry, and we should not allocate,
    // do not create a new snoop filter entry, simply return a NULL
    // portlist.
    if (!is_hit && !allocate)
        return snoopDown(lookupLatency);

    // If no hit in snoop filter create a new element and update iterator
    if (!is_hit)
        reqLookupResult = cachedLocations.emplace(line_addr, SnoopItem()).first;
    SnoopItem& sf_item = reqLookupResult->second;
    SnoopMask interested = sf_item.holder | sf_item.requested;

    // Store unmodified value of snoop filter item in temp storage in
    // case we need to revert because of a send retry in
    // updateRequest.
    retryItem = sf_item;

    totRequests++;
    if (is_hit) {
        // Single bit set -> value is a power of two
        if (isPow2(interested))
            hitSingleRequests++;
        else
            hitMultiRequests++;
    }

    DPRINTF(SnoopFilter, "%s: SF value %x.%x\n",
            __func__, sf_item.requested, sf_item.holder);

    // If we are not allocating, we are done
    if (!allocate)
        return snoopSelected(maskToPortList(interested & ~req_port),
                             lookupLatency);

    if (cpkt->needsResponse()) {
        if (!cpkt->cacheResponding()) {
            // Max one request per address per port
            panic_if(sf_item.requested & req_port, "double request :( " \
                     "SF value %x.%x\n", sf_item.requested, sf_item.holder);

            // Mark in-flight requests to distinguish later on
            sf_item.requested |= req_port;
            DPRINTF(SnoopFilter, "%s: new SF value %x.%x\n",
                    __func__, sf_item.requested, sf_item.holder);
        } else {
            // NOTE: The memInhibit might have been asserted by a cache closer
            // to the CPU, already -> the response will not be seen by this
            // filter -> we do not need to keep the in-flight request, but make
            // sure that we know that that cluster has a copy
            panic_if(!(sf_item.holder & req_port), "Need to hold the value!");
            DPRINTF(SnoopFilter,
                    "%s: not marking request. SF value %x.%x\n",
                    __func__, sf_item.requested, sf_item.holder);
        }
    } else { // if (!cpkt->needsResponse())
        // Requests that need no response are evictions (CleanEvict /
        // writebacks) here.
        assert(cpkt->isEviction());
        // make sure that the sender actually had the line
        panic_if(!(sf_item.holder & req_port), "requester %x is not a " \
                 "holder :( SF value %x.%x\n", req_port,
                 sf_item.requested, sf_item.holder);
        // CleanEvicts and Writebacks -> the sender and all caches above
        // it may not have the line anymore.
        if (!cpkt->isBlockCached()) {
            sf_item.holder &= ~req_port;
            DPRINTF(SnoopFilter, "%s: new SF value %x.%x\n",
                    __func__, sf_item.requested, sf_item.holder);
        }
    }

    return snoopSelected(maskToPortList(interested & ~req_port), lookupLatency);
}
|
|
|
|
|
|
|
|
/**
 * Finalize the entry touched by the preceding lookupRequest. If the
 * request will be retried, restore the entry to the pre-lookup value
 * saved in retryItem; in either case drop the entry if it no longer
 * tracks any holder or in-flight request.
 */
void
SnoopFilter::finishRequest(bool will_retry, Addr addr, bool is_secure)
{
    if (reqLookupResult != cachedLocations.end()) {
        // since we rely on the caller, do a basic check to ensure
        // that finishRequest is being called following lookupRequest
        // NOTE: this mask assumes linesize is a power of two, matching
        // getBlockAddr() used in lookupRequest.
        Addr line_addr = (addr & ~(Addr(linesize - 1)));
        if (is_secure) {
            line_addr |= LineSecure;
        }
        assert(reqLookupResult->first == line_addr);
        if (will_retry) {
            // Undo any changes made in lookupRequest to the snoop filter
            // entry if the request will come again. retryItem holds
            // the previous value of the snoopfilter entry.
            reqLookupResult->second = retryItem;

            DPRINTF(SnoopFilter, "%s: restored SF value %x.%x\n",
                    __func__, retryItem.requested, retryItem.holder);
        }

        // A rolled-back (or never-populated) entry may now be empty.
        eraseIfNullEntry(reqLookupResult);
    }
}
|
|
|
|
|
|
|
|
/**
 * Look up a snoop coming from below and return the list of ports that
 * are interested in the line (holders plus in-flight requesters),
 * together with the filter's lookup latency. Never allocates: a miss
 * simply forwards the snoop downward with an empty port list.
 */
std::pair<SnoopFilter::SnoopList, Cycles>
SnoopFilter::lookupSnoop(const Packet* cpkt)
{
    DPRINTF(SnoopFilter, "%s: packet %s\n", __func__, cpkt->print());

    assert(cpkt->isRequest());

    Addr line_addr = cpkt->getBlockAddr(linesize);
    // Secure and non-secure lines are tracked as distinct entries.
    if (cpkt->isSecure()) {
        line_addr |= LineSecure;
    }
    auto sf_it = cachedLocations.find(line_addr);
    bool is_hit = (sf_it != cachedLocations.end());

    // Capacity is only policed here since snoops are the point where
    // the filter could otherwise grow without bound.
    panic_if(!is_hit && (cachedLocations.size() >= maxEntryCount),
             "snoop filter exceeded capacity of %d cache blocks\n",
             maxEntryCount);

    // If the snoop filter has no entry, simply return a NULL
    // portlist, there is no point creating an entry only to remove it
    // later
    if (!is_hit)
        return snoopDown(lookupLatency);

    SnoopItem& sf_item = sf_it->second;

    DPRINTF(SnoopFilter, "%s: old SF value %x.%x\n",
            __func__, sf_item.requested, sf_item.holder);

    SnoopMask interested = (sf_item.holder | sf_item.requested);

    totSnoops++;
    // Single bit set -> value is a power of two
    if (isPow2(interested))
        hitSingleSnoops++;
    else
        hitMultiSnoops++;

    // ReadEx and Writes require both invalidation and exclusivity, while reads
    // require neither. Writebacks on the other hand require exclusivity but
    // not the invalidation. Previously Writebacks did not generate upward
    // snoops so this was never an issue. Now that Writebacks generate snoops
    // we need to special case for Writebacks.
    assert(cpkt->isWriteback() || cpkt->req->isUncacheable() ||
           (cpkt->isInvalidate() == cpkt->needsWritable()));
    if (cpkt->isInvalidate() && !sf_item.requested) {
        // Early clear of the holder, if no other request is currently going on
        // @todo: This should possibly be updated even though we do not filter
        // upward snoops
        sf_item.holder = 0;
    }

    // Emit the trace before potentially erasing the entry:
    // eraseIfNullEntry may invalidate sf_item, and reading it
    // afterwards would be a use-after-erase. The printed values are
    // identical either way since erasing does not modify them.
    DPRINTF(SnoopFilter, "%s: new SF value %x.%x interest: %x \n",
            __func__, sf_item.requested, sf_item.holder, interested);

    eraseIfNullEntry(sf_it);

    return snoopSelected(maskToPortList(interested), lookupLatency);
}
|
|
|
|
|
|
|
|
/**
 * Update the filter when a snoop is answered by a cache on rsp_port
 * and the response is routed to the original requester on req_port:
 * ownership bookkeeping moves from the responder to the requester,
 * and the requester's in-flight request bit is cleared.
 */
void
SnoopFilter::updateSnoopResponse(const Packet* cpkt,
                                 const SlavePort& rsp_port,
                                 const SlavePort& req_port)
{
    DPRINTF(SnoopFilter, "%s: rsp %s req %s packet %s\n",
            __func__, rsp_port.name(), req_port.name(), cpkt->print());

    assert(cpkt->isResponse());
    assert(cpkt->cacheResponding());

    // if this snoop response is due to an uncacheable request, or is
    // being turned into a normal response, there is nothing more to
    // do
    if (cpkt->req->isUncacheable() || !req_port.isSnooping()) {
        return;
    }

    Addr line_addr = cpkt->getBlockAddr(linesize);
    if (cpkt->isSecure()) {
        line_addr |= LineSecure;
    }
    SnoopMask rsp_mask = portToMask(rsp_port);
    SnoopMask req_mask = portToMask(req_port);
    // operator[] is intentional: the entry must already exist here
    // (checked by the panic_ifs below), and a lookup miss would be a
    // protocol violation.
    SnoopItem& sf_item = cachedLocations[line_addr];

    DPRINTF(SnoopFilter, "%s: old SF value %x.%x\n",
            __func__, sf_item.requested, sf_item.holder);

    // The source should have the line
    panic_if(!(sf_item.holder & rsp_mask), "SF value %x.%x does not have "\
             "the line\n", sf_item.requested, sf_item.holder);

    // The destination should have had a request in
    panic_if(!(sf_item.requested & req_mask), "SF value %x.%x missing "\
             "the original request\n", sf_item.requested, sf_item.holder);

    // If the snoop response has no sharers the line is passed in
    // Modified state, and we know that there are no other copies, or
    // they will all be invalidated imminently
    if (!cpkt->hasSharers()) {
        DPRINTF(SnoopFilter,
                "%s: dropping %x because non-shared snoop "
                "response SF val: %x.%x\n", __func__, rsp_mask,
                sf_item.requested, sf_item.holder);
        sf_item.holder = 0;
    }
    assert(!cpkt->isWriteback());
    // @todo Deal with invalidating responses
    sf_item.holder |= req_mask;
    sf_item.requested &= ~req_mask;
    // The requester now holds the line, so the entry cannot be empty.
    assert(sf_item.requested | sf_item.holder);
    DPRINTF(SnoopFilter, "%s: new SF value %x.%x\n",
            __func__, sf_item.requested, sf_item.holder);
}
|
|
|
|
|
|
|
|
/**
 * Update the filter when a snoop response is forwarded downward
 * through a master port rather than to a peer cache: only the holder
 * information can change (cleared when the line leaves in Modified
 * state); no requester bits are involved.
 */
void
SnoopFilter::updateSnoopForward(const Packet* cpkt,
        const SlavePort& rsp_port, const MasterPort& req_port)
{
    DPRINTF(SnoopFilter, "%s: rsp %s req %s packet %s\n",
            __func__, rsp_port.name(), req_port.name(), cpkt->print());

    assert(cpkt->isResponse());
    assert(cpkt->cacheResponding());

    Addr line_addr = cpkt->getBlockAddr(linesize);
    if (cpkt->isSecure()) {
        line_addr |= LineSecure;
    }
    auto sf_it = cachedLocations.find(line_addr);
    bool is_hit = sf_it != cachedLocations.end();

    // Nothing to do if it is not a hit
    if (!is_hit)
        return;

    SnoopItem& sf_item = sf_it->second;

    DPRINTF(SnoopFilter, "%s: old SF value %x.%x\n",
            __func__, sf_item.requested, sf_item.holder);

    // If the snoop response has no sharers the line is passed in
    // Modified state, and we know that there are no other copies, or
    // they will all be invalidated imminently
    if (!cpkt->hasSharers()) {
        sf_item.holder = 0;
    }
    DPRINTF(SnoopFilter, "%s: new SF value %x.%x\n",
            __func__, sf_item.requested, sf_item.holder);
    // Clearing the holder may have emptied the entry.
    eraseIfNullEntry(sf_it);

}
|
|
|
|
|
|
|
|
/**
 * Update the filter when a normal (non-snoop) response returns to a
 * cache on slave_port: mark that port as a holder of the line and
 * clear its in-flight request bit, provided an entry was allocated by
 * the matching lookupRequest.
 */
void
SnoopFilter::updateResponse(const Packet* cpkt, const SlavePort& slave_port)
{
    DPRINTF(SnoopFilter, "%s: src %s packet %s\n",
            __func__, slave_port.name(), cpkt->print());

    assert(cpkt->isResponse());

    // we only allocate if the packet actually came from a cache, but
    // start by checking if the port is snooping
    if (cpkt->req->isUncacheable() || !slave_port.isSnooping())
        return;

    // next check if we actually allocated an entry
    Addr line_addr = cpkt->getBlockAddr(linesize);
    if (cpkt->isSecure()) {
        line_addr |= LineSecure;
    }
    auto sf_it = cachedLocations.find(line_addr);
    if (sf_it == cachedLocations.end())
        return;

    SnoopMask slave_mask = portToMask(slave_port);
    SnoopItem& sf_item = sf_it->second;

    DPRINTF(SnoopFilter, "%s: old SF value %x.%x\n",
            __func__, sf_item.requested, sf_item.holder);

    // Make sure we have seen the actual request, too
    panic_if(!(sf_item.requested & slave_mask), "SF value %x.%x missing "\
             "request bit\n", sf_item.requested, sf_item.holder);

    // Update the residency of the cache line.
    sf_item.holder |= slave_mask;
    sf_item.requested &= ~slave_mask;
    // The responding port is now a holder, so the entry is non-empty.
    assert(sf_item.holder | sf_item.requested);
    DPRINTF(SnoopFilter, "%s: new SF value %x.%x\n",
            __func__, sf_item.requested, sf_item.holder);
}
|
|
|
|
|
2014-04-25 13:36:16 +02:00
|
|
|
void
|
|
|
|
SnoopFilter::regStats()
|
|
|
|
{
|
2016-06-06 18:16:43 +02:00
|
|
|
SimObject::regStats();
|
|
|
|
|
2014-04-25 13:36:16 +02:00
|
|
|
totRequests
|
|
|
|
.name(name() + ".tot_requests")
|
|
|
|
.desc("Total number of requests made to the snoop filter.");
|
|
|
|
|
|
|
|
hitSingleRequests
|
|
|
|
.name(name() + ".hit_single_requests")
|
|
|
|
.desc("Number of requests hitting in the snoop filter with a single "\
|
|
|
|
"holder of the requested data.");
|
|
|
|
|
|
|
|
hitMultiRequests
|
|
|
|
.name(name() + ".hit_multi_requests")
|
|
|
|
.desc("Number of requests hitting in the snoop filter with multiple "\
|
|
|
|
"(>1) holders of the requested data.");
|
|
|
|
|
|
|
|
totSnoops
|
|
|
|
.name(name() + ".tot_snoops")
|
|
|
|
.desc("Total number of snoops made to the snoop filter.");
|
|
|
|
|
|
|
|
hitSingleSnoops
|
|
|
|
.name(name() + ".hit_single_snoops")
|
|
|
|
.desc("Number of snoops hitting in the snoop filter with a single "\
|
|
|
|
"holder of the requested data.");
|
|
|
|
|
|
|
|
hitMultiSnoops
|
|
|
|
.name(name() + ".hit_multi_snoops")
|
|
|
|
.desc("Number of snoops hitting in the snoop filter with multiple "\
|
|
|
|
"(>1) holders of the requested data.");
|
|
|
|
}
|
|
|
|
|
2014-09-20 23:18:26 +02:00
|
|
|
SnoopFilter *
SnoopFilterParams::create()
{
    // Factory hook: build the SnoopFilter SimObject from this
    // parameter struct.
    SnoopFilter *filter = new SnoopFilter(this);
    return filter;
}
|