mem: Snoop into caches on uncacheable accesses

This patch takes a last step in fixing issues related to uncacheable
accesses. We do not distinguish uncacheable memory from uncacheable
devices, and when the target really is memory there are valid scenarios
where we need to snoop, since we do not support cache maintenance
instructions (yet). On snooping an uncacheable access we therefore
provide data if possible. In essence this makes uncacheable accesses IO
coherent.

The snoop filter is still queried to steer the snoops, but it is not
updated, since uncacheable accesses do not allocate a block.
commit 36f29496a0 (parent 554ddc7c07)
Author: Andreas Hansson
Date:   2015-05-05 03:22:29 -04:00

8 changed files with 65 additions and 49 deletions
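Before the per-file hunks, a minimal stand-alone sketch of the intended behaviour (simplified, hypothetical types; deliberately not the gem5 Packet/CacheBlk API): a cache that holds valid data answers an uncacheable read snoop, but neither its own block state nor the snoop filter is updated, because the uncacheable access never allocates a block.

#include <cstdint>

// Illustrative stand-ins only, not gem5 classes.
struct SnoopPkt {
    bool isRead = false;
    bool isUncacheable = false;
    bool memInhibit = false;          // set when a cache promises to supply the data
    std::uint8_t data[64] = {};
};

struct Blk {
    bool valid = false;
    std::uint8_t data[64] = {};
};

// IO-coherent handling of an uncacheable read snoop: supply data if we
// have it, change nothing else.
void snoopUncacheable(SnoopPkt &pkt, const Blk &blk)
{
    if (!pkt.isUncacheable || !pkt.isRead || !blk.valid)
        return;
    pkt.memInhibit = true;            // we respond instead of memory
    for (int i = 0; i < 64; ++i)
        pkt.data[i] = blk.data[i];    // hand over the (possibly dirty) data
    // no downgrade, no invalidation, no snoop-filter update: the requester
    // does not allocate, so there is no new holder to track
}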


@@ -92,8 +92,9 @@ bool
 FullO3CPU<Impl>::IcachePort::recvTimingResp(PacketPtr pkt)
 {
     DPRINTF(O3CPU, "Fetch unit received timing\n");
-    // We shouldn't ever get a block in ownership state
-    assert(!(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
+    // We shouldn't ever get a cacheable block in ownership state
+    assert(pkt->req->isUncacheable() ||
+           !(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));

     fetch->processCacheCompletion(pkt);
     return true;


@@ -104,8 +104,9 @@ DmaPort::handleResp(PacketPtr pkt, Tick delay)
 bool
 DmaPort::recvTimingResp(PacketPtr pkt)
 {
-    // We shouldn't ever get a block in ownership state
-    assert(!(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
+    // We shouldn't ever get a cacheable block in ownership state
+    assert(pkt->req->isUncacheable() ||
+           !(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));

     handleResp(pkt);

src/mem/cache/base.cc

@@ -153,6 +153,17 @@ BaseCache::getSlavePort(const std::string &if_name, PortID idx)
     }
 }

+bool
+BaseCache::inRange(Addr addr) const
+{
+    for (const auto& r : addrRanges) {
+        if (r.contains(addr)) {
+            return true;
+        }
+    }
+    return false;
+}
+
 void
 BaseCache::regStats()
 {

src/mem/cache/base.hh

@@ -262,6 +262,16 @@ class BaseCache : public MemObject
      */
     virtual bool isDirty() const = 0;

+    /**
+     * Determine if an address is in the ranges covered by this
+     * cache. This is useful to filter snoops.
+     *
+     * @param addr Address to check against
+     *
+     * @return If the address in question is in range
+     */
+    bool inRange(Addr addr) const;
+
     /** Block size of this cache */
     const unsigned blkSize;
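As a rough, self-contained illustration of what the new helper computes (a stand-in Range type and free function, purely hypothetical; the real implementation is the BaseCache::inRange shown above operating on gem5 AddrRange objects):

#include <cstdint>
#include <vector>

using Addr = std::uint64_t;

// Minimal stand-in for an address range, half-open [start, end).
struct Range {
    Addr start, end;
    bool contains(Addr a) const { return a >= start && a < end; }
};

// Linear scan over the ranges the cache is responsible for; the snoop entry
// points later in this commit use this to discard out-of-range snoops before
// doing any tag lookup.
bool inRange(const std::vector<Range> &addrRanges, Addr addr)
{
    for (const auto &r : addrRanges)
        if (r.contains(addr))
            return true;
    return false;
}

// e.g. with addrRanges = {{0x0, 0x80000000}}, a snoop to 0x100000000 is
// simply ignored.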


@@ -475,7 +475,6 @@ Cache::recvTimingReq(PacketPtr pkt)
         // responding to the request
         DPRINTF(Cache, "mem inhibited on addr %#llx (%s): not responding\n",
                 pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
-        assert(!pkt->req->isUncacheable());

         // if the packet needs exclusive, and the cache that has
         // promised to respond (setting the inhibit flag) is not
@@ -856,7 +855,6 @@ Cache::recvAtomic(PacketPtr pkt)
     promoteWholeLineWrites(pkt);

     if (pkt->memInhibitAsserted()) {
-        assert(!pkt->req->isUncacheable());
         // have to invalidate ourselves and any lower caches even if
         // upper cache will be responding
         if (pkt->isInvalidate()) {
@@ -1560,7 +1558,8 @@ Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
     // responses)
     pkt = new Packet(req_pkt, false, req_pkt->isRead());

-    assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
+    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
+           pkt->sharedAsserted());
     pkt->makeTimingResponse();
     if (pkt->isRead()) {
         pkt->setDataFromBlock(blk_data, blkSize);
@@ -1676,7 +1675,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
         return;
     }

-    if (pkt->isRead() && !invalidate) {
+    if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
         assert(!needs_exclusive);
         pkt->assertShared();
         int bits_to_clear = BlkWritable;
@@ -1699,6 +1698,9 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
         // will write it back at a later point
         pkt->assertMemInhibit();
         if (have_exclusive) {
+            // in the case of an uncacheable request there is no need
+            // to set the exclusive flag, but since the recipient does
+            // not care there is no harm in doing so
             pkt->setSupplyExclusive();
         }
         if (is_timing) {
@@ -1707,7 +1709,9 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
             pkt->makeAtomicResponse();
             pkt->setDataFromBlock(blk->data, blkSize);
         }
-    } else if (is_timing && is_deferred) {
+    }
+
+    if (!respond && is_timing && is_deferred) {
         // if it's a deferred timing snoop then we've made a copy of
         // the packet, and so if we're not using that copy to respond
         // then we need to delete it here.
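A note on the restructuring just above: replacing the else-if with an explicit !respond guard leaves the deferred-copy cleanup behaviour unchanged while decoupling it from the response branch, presumably to keep the conditions explicit now that uncacheable snoops can also reach the respond path. Condensed control flow (identifiers taken from the hunk, sketch only, not compilable in isolation):

if (respond) {
    pkt->assertMemInhibit();          // supply data; uncacheable snoops may now land here
    // ... timing or atomic response as in the hunk above ...
}
if (!respond && is_timing && is_deferred) {
    delete pkt;                       // deferred copy not used for a response
}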
@@ -1735,25 +1739,8 @@ Cache::recvTimingSnoopReq(PacketPtr pkt)
     // Snoops shouldn't happen when bypassing caches
     assert(!system->bypassCaches());

-    // check if the packet is for an address range covered by this
-    // cache, partly to not waste time looking for it, but also to
-    // ensure that we only forward the snoop upwards if it is within
-    // our address ranges
-    bool in_range = false;
-    for (AddrRangeList::const_iterator r = addrRanges.begin();
-         r != addrRanges.end(); ++r) {
-        if (r->contains(pkt->getAddr())) {
-            in_range = true;
-            break;
-        }
-    }
-
-    // Note that some deferred snoops don't have requests, since the
-    // original access may have already completed
-    if ((pkt->req && pkt->req->isUncacheable()) ||
-        pkt->cmd == MemCmd::Writeback || !in_range) {
-        //Can't get a hit on an uncacheable address
-        //Revisit this for multi level coherence
+    // no need to snoop writebacks or requests that are not in range
+    if (pkt->cmd == MemCmd::Writeback || !inRange(pkt->getAddr())) {
         return;
     }
@@ -1843,9 +1830,8 @@ Cache::recvAtomicSnoop(PacketPtr pkt)
     // Snoops shouldn't happen when bypassing caches
     assert(!system->bypassCaches());

-    if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
-        // Can't get a hit on an uncacheable address
-        // Revisit this for multi level coherence
+    // no need to snoop writebacks or requests that are not in range
+    if (pkt->cmd == MemCmd::Writeback || !inRange(pkt->getAddr())) {
         return 0;
     }


@@ -371,6 +371,9 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
         if (isPendingDirty()) {
             pkt->assertMemInhibit();
+            // in the case of an uncacheable request there is no need
+            // to set the exclusive flag, but since the recipient does
+            // not care there is no harm in doing so
             pkt->setSupplyExclusive();
         }
@@ -380,7 +383,7 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
         }
     }

-    if (!pkt->needsExclusive()) {
+    if (!pkt->needsExclusive() && !pkt->req->isUncacheable()) {
         // This transaction will get a read-shared copy, downgrading
         // our copy if we had an exclusive one
         postDowngrade = true;


@@ -180,8 +180,7 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
     // determine how long to be crossbar layer is busy
     Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;

-    // uncacheable requests need never be snooped
-    if (!pkt->req->isUncacheable() && !system->bypassCaches()) {
+    if (!system->bypassCaches()) {
         // the packet is a memory-mapped request and should be
         // broadcasted to our snoopers but the source
         if (snoopFilter) {
@@ -213,8 +212,7 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
     // since it is a normal request, attempt to send the packet
     bool success = masterPorts[master_port_id]->sendTimingReq(pkt);

-    if (snoopFilter && !pkt->req->isUncacheable()
-        && !system->bypassCaches()) {
+    if (snoopFilter && !system->bypassCaches()) {
         // The packet may already be overwritten by the sendTimingReq function.
         // The snoop filter needs to see the original request *and* the return
         // status of the send operation, so we need to recreate the original
@@ -323,7 +321,7 @@ CoherentXBar::recvTimingResp(PacketPtr pkt, PortID master_port_id)
     // determine how long to be crossbar layer is busy
     Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;

-    if (snoopFilter && !pkt->req->isUncacheable() && !system->bypassCaches()) {
+    if (snoopFilter && !system->bypassCaches()) {
         // let the snoop filter inspect the response and update its state
         snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
     }
@@ -578,8 +576,7 @@ CoherentXBar::recvAtomic(PacketPtr pkt, PortID slave_port_id)
     MemCmd snoop_response_cmd = MemCmd::InvalidCmd;
     Tick snoop_response_latency = 0;

-    // uncacheable requests need never be snooped
-    if (!pkt->req->isUncacheable() && !system->bypassCaches()) {
+    if (!system->bypassCaches()) {
         // forward to all snoopers but the source
         std::pair<MemCmd, Tick> snoop_result;
         if (snoopFilter) {
@@ -613,8 +610,7 @@ CoherentXBar::recvAtomic(PacketPtr pkt, PortID slave_port_id)
     Tick response_latency = masterPorts[master_port_id]->sendAtomic(pkt);

     // Lower levels have replied, tell the snoop filter
-    if (snoopFilter && !pkt->req->isUncacheable() && !system->bypassCaches() &&
-        pkt->isResponse()) {
+    if (snoopFilter && !system->bypassCaches() && pkt->isResponse()) {
         snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
     }
@@ -764,8 +760,7 @@ CoherentXBar::recvFunctional(PacketPtr pkt, PortID slave_port_id)
                 pkt->cmdString());
     }

-    // uncacheable requests need never be snooped
-    if (!pkt->req->isUncacheable() && !system->bypassCaches()) {
+    if (!system->bypassCaches()) {
         // forward to all snoopers but the source
         forwardFunctional(pkt, slave_port_id);
     }


@@ -74,7 +74,7 @@ SnoopFilter::lookupRequest(const Packet* cpkt, const SlavePort& slave_port)
     DPRINTF(SnoopFilter, "%s: SF value %x.%x\n",
             __func__, sf_item.requested, sf_item.holder);

-    if (cpkt->needsResponse()) {
+    if (!cpkt->req->isUncacheable() && cpkt->needsResponse()) {
         if (!cpkt->memInhibitAsserted()) {
             // Max one request per address per port
             panic_if(sf_item.requested & req_port, "double request :( "\
@@ -104,6 +104,9 @@ SnoopFilter::updateRequest(const Packet* cpkt, const SlavePort& slave_port,
     DPRINTF(SnoopFilter, "%s: packet src %s addr 0x%x cmd %s\n",
             __func__, slave_port.name(), cpkt->getAddr(), cpkt->cmdString());

+    if (cpkt->req->isUncacheable())
+        return;
+
     Addr line_addr = cpkt->getAddr() & ~(linesize - 1);
     SnoopMask req_port = portToMask(slave_port);
     SnoopItem& sf_item = cachedLocations[line_addr];
@@ -195,14 +198,17 @@ SnoopFilter::updateSnoopResponse(const Packet* cpkt,
             __func__, rsp_port.name(), req_port.name(), cpkt->getAddr(),
             cpkt->cmdString());

+    assert(cpkt->isResponse());
+    assert(cpkt->memInhibitAsserted());
+
+    if (cpkt->req->isUncacheable())
+        return;
+
     Addr line_addr = cpkt->getAddr() & ~(linesize - 1);
     SnoopMask rsp_mask = portToMask(rsp_port);
     SnoopMask req_mask = portToMask(req_port);
     SnoopItem& sf_item = cachedLocations[line_addr];

-    assert(cpkt->isResponse());
-    assert(cpkt->memInhibitAsserted());
-
     DPRINTF(SnoopFilter, "%s: old SF value %x.%x\n",
             __func__, sf_item.requested, sf_item.holder);
@@ -270,12 +276,15 @@ SnoopFilter::updateResponse(const Packet* cpkt, const SlavePort& slave_port)
     DPRINTF(SnoopFilter, "%s: packet src %s addr 0x%x cmd %s\n",
             __func__, slave_port.name(), cpkt->getAddr(), cpkt->cmdString());

+    assert(cpkt->isResponse());
+
+    if (cpkt->req->isUncacheable())
+        return;
+
     Addr line_addr = cpkt->getAddr() & ~(linesize - 1);
     SnoopMask slave_mask = portToMask(slave_port);
     SnoopItem& sf_item = cachedLocations[line_addr];

-    assert(cpkt->isResponse());
-
     DPRINTF(SnoopFilter, "%s: old SF value %x.%x\n",
             __func__, sf_item.requested, sf_item.holder);
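Taken together, the snoop filter hunks implement a deliberately asymmetric policy: lookups still steer uncacheable snoops towards likely holders, but none of the update paths record them. A compact stand-alone sketch of that asymmetry (hypothetical simplified types, not the SnoopFilter API):

#include <cstdint>
#include <unordered_map>

using Addr = std::uint64_t;
using PortMask = std::uint32_t;

struct MiniSnoopFilter {
    std::unordered_map<Addr, PortMask> holders;  // ports that may hold a line

    // Lookup is always allowed: an uncacheable request is steered to the
    // ports that may have the data, or broadcast if nothing is known.
    PortMask lookup(Addr line_addr) const {
        auto it = holders.find(line_addr);
        return it != holders.end() ? it->second : ~PortMask(0);
    }

    // Updates bail out for uncacheable requests: they never allocate a
    // block, so there is no new holder state to record.
    void update(Addr line_addr, PortMask port, bool uncacheable) {
        if (uncacheable)
            return;
        holders[line_addr] |= port;
    }
};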