mem: Align rules for sinking inhibited packets at the slave

This patch aligns how the memory-system slaves, i.e. the various
memory controllers and the bridge, identify and sink inhibited
packets, which are only useful within the coherent part of the
memory system.

In the future we could shift the onus to the crossbar, and add a
parameter "is_point_of_coherence" that would allow it to sink the
aforementioned packets.
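
After this change, these slaves share the same shape at the top of their
recvTimingReq implementations: sink inhibited packets first (keeping the
packet alive in pendingDelete until the call returns, since the upstream
caches still need it), and only then consult the retry state. The sketch
below illustrates that shared pattern; ExampleSlave and canAccept() are
placeholder names rather than actual gem5 classes, and the individual
slaves add their own checks (the bridge, for instance, also discards
packets that are neither reads nor writes).

bool
ExampleSlave::recvTimingReq(PacketPtr pkt)
{
    // sink inhibited packets without further action; a snooping cache
    // will supply the data based on its copy of the packet, but the
    // upstream caches still need this packet until true is returned,
    // so hold it for deletion until a subsequent call
    if (pkt->memInhibitAsserted()) {
        pendingDelete.reset(pkt);
        return true;
    }

    // we should not get a new request after committing to retry the
    // current one, but unfortunately the CPU violates this rule, so
    // simply ignore it for now
    if (retryReq)
        return false;

    // slave-specific admission check; if the request cannot be
    // accepted now, remember to send a retry once progress is made
    if (!canAccept(pkt)) {
        retryReq = true;
        return false;
    }

    // ... normal request handling ...
    return true;
}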
Andreas Hansson 2015-11-06 03:26:35 -05:00
parent 8e55d51aaa
commit 8bc925e36d
5 changed files with 35 additions and 20 deletions

src/mem/bridge.cc

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2013 ARM Limited
+ * Copyright (c) 2011-2013, 2015 ARM Limited
  * All rights reserved
  *
  * The license below extends only to copyright in the software and shall
@@ -150,8 +150,20 @@ Bridge::BridgeSlavePort::recvTimingReq(PacketPtr pkt)
     DPRINTF(Bridge, "recvTimingReq: %s addr 0x%x\n",
             pkt->cmdString(), pkt->getAddr());
 
-    // we should not see a timing request if we are already in a retry
-    assert(!retryReq);
+    // sink inhibited packets without further action, also discard any
+    // packet that is not a read or a write
+    if (pkt->memInhibitAsserted() ||
+        !(pkt->isWrite() || pkt->isRead())) {
+        assert(!pkt->needsResponse());
+        pendingDelete.reset(pkt);
+        return true;
+    }
+
+    // we should not get a new request after committing to retry the
+    // current one, but unfortunately the CPU violates this rule, so
+    // simply ignore it for now
+    if (retryReq)
+        return false;
 
     DPRINTF(Bridge, "Response queue size: %d outresp: %d\n",
             transmitList.size(), outstandingResponses);
@@ -162,8 +174,7 @@ Bridge::BridgeSlavePort::recvTimingReq(PacketPtr pkt)
             retryReq = true;
         } else {
             // look at the response queue if we expect to see a response
-            bool expects_response = pkt->needsResponse() &&
-                !pkt->memInhibitAsserted();
+            bool expects_response = pkt->needsResponse();
             if (expects_response) {
                 if (respQueueFull()) {
                     DPRINTF(Bridge, "Response queue full\n");

src/mem/bridge.hh

@@ -135,6 +135,12 @@ class Bridge : public MemObject
         /** Max queue size for reserved responses. */
         unsigned int respQueueLimit;
 
+        /**
+         * Upstream caches need this packet until true is returned, so
+         * hold it for deletion until a subsequent call
+         */
+        std::unique_ptr<Packet> pendingDelete;
+
         /**
          * Is this side blocked from accepting new response packets.
          *

src/mem/dram_ctrl.cc

@@ -590,10 +590,8 @@ DRAMCtrl::recvTimingReq(PacketPtr pkt)
     DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
             pkt->cmdString(), pkt->getAddr(), pkt->getSize());
 
-    // simply drop inhibited packets and clean evictions
-    if (pkt->memInhibitAsserted() ||
-        pkt->cmd == MemCmd::CleanEvict) {
-        DPRINTF(DRAM, "Inhibited packet or clean evict -- Dropping it now\n");
+    // sink inhibited packets without further action
+    if (pkt->memInhibitAsserted()) {
         pendingDelete.reset(pkt);
         return true;
     }

src/mem/dramsim2.cc

@@ -175,16 +175,18 @@ DRAMSim2::recvFunctional(PacketPtr pkt)
 bool
 DRAMSim2::recvTimingReq(PacketPtr pkt)
 {
-    // we should never see a new request while in retry
-    assert(!retryReq);
-
+    // sink inhibited packets without further action
     if (pkt->memInhibitAsserted()) {
-        // snooper will supply based on copy of packet
-        // still target's responsibility to delete packet
         pendingDelete.reset(pkt);
         return true;
     }
 
+    // we should not get a new request after committing to retry the
+    // current one, but unfortunately the CPU violates this rule, so
+    // simply ignore it for now
+    if (retryReq)
+        return false;
+
     // if we cannot accept we need to send a retry once progress can
     // be made
     bool can_accept = nbrOutstanding() < wrapper.queueSize();

src/mem/simple_mem.cc

@@ -97,17 +97,15 @@ SimpleMemory::recvFunctional(PacketPtr pkt)
 bool
 SimpleMemory::recvTimingReq(PacketPtr pkt)
 {
+    // sink inhibited packets without further action
     if (pkt->memInhibitAsserted()) {
-        // snooper will supply based on copy of packet
-        // still target's responsibility to delete packet
         pendingDelete.reset(pkt);
         return true;
     }
 
-    // we should never get a new request after committing to retry the
-    // current one, the bus violates the rule as it simply sends a
-    // retry to the next one waiting on the retry list, so simply
-    // ignore it
+    // we should not get a new request after committing to retry the
+    // current one, but unfortunately the CPU violates this rule, so
+    // simply ignore it for now
     if (retryReq)
         return false;