MEM: Separate requests and responses for timing accesses

This patch moves send/recvTiming and send/recvTimingSnoop from the
Port base class to the MasterPort and SlavePort, and also splits them
into separate member functions for requests and responses:
send/recvTimingReq, send/recvTimingResp, and send/recvTimingSnoopReq,
send/recvTimingSnoopResp. A master port sends requests and receives
responses, and also receives snoop requests and sends snoop
responses. A slave port has the reciprocal behaviour as it receives
requests and sends responses, and sends snoop requests and receives
snoop responses.

For all MemObjects that have only master ports or slave ports (but not
both), e.g. a CPU, or a PIO device, this patch merely adds more
clarity to what kind of access is taking place. For example, a CPU
port used to call sendTiming, and will now call
sendTimingReq. Similarly, a response previously came back through
recvTiming, which is now recvTimingResp. For the modules that have
both master and slave ports, e.g. the bus, the behaviour was
previously relying on branches based on pkt->isRequest(), and this is
now replaced with a direct call to the appropriate member function
depending on the type of access. Please note that send/recvRetry is
still shared by all the timing accessors and remains in the Port base
class for now (to maintain the current bus functionality and avoid
changing the statistics of all regressions).

The packet queue is split into a MasterPort and SlavePort version to
facilitate the use of the new timing accessors. All uses of the
PacketQueue are updated accordingly.

With this patch, the type of packet (request or response) is now well
defined for each type of access, and asserts on pkt->isRequest() and
pkt->isResponse() are now moved to the appropriate send member
functions. It is also worth noting that sendTimingSnoopReq no longer
returns a boolean, as the semantics do not allow snoop requests to be
rejected or stalled. All these assumptions are now explicitly part of
the port interface itself.
This commit is contained in:
Andreas Hansson 2012-05-01 13:40:42 -04:00
parent 8966e6d36d
commit 3fea59e162
47 changed files with 546 additions and 424 deletions

View file

@ -114,15 +114,14 @@ Walker::startFunctional(ThreadContext * _tc, Addr &addr, unsigned &logBytes,
} }
bool bool
Walker::WalkerPort::recvTiming(PacketPtr pkt) Walker::WalkerPort::recvTimingResp(PacketPtr pkt)
{ {
return walker->recvTiming(pkt); return walker->recvTimingResp(pkt);
} }
bool bool
Walker::recvTiming(PacketPtr pkt) Walker::recvTimingResp(PacketPtr pkt)
{ {
assert(pkt->isResponse());
WalkerSenderState * senderState = WalkerSenderState * senderState =
dynamic_cast<WalkerSenderState *>(pkt->senderState); dynamic_cast<WalkerSenderState *>(pkt->senderState);
pkt->senderState = senderState->saved; pkt->senderState = senderState->saved;
@ -171,7 +170,7 @@ Walker::recvRetry()
bool Walker::sendTiming(WalkerState* sendingState, PacketPtr pkt) bool Walker::sendTiming(WalkerState* sendingState, PacketPtr pkt)
{ {
pkt->senderState = new WalkerSenderState(sendingState, pkt->senderState); pkt->senderState = new WalkerSenderState(sendingState, pkt->senderState);
return port.sendTiming(pkt); return port.sendTimingReq(pkt);
} }
MasterPort & MasterPort &

View file

@ -70,12 +70,12 @@ namespace X86ISA
protected: protected:
Walker *walker; Walker *walker;
bool recvTiming(PacketPtr pkt); bool recvTimingResp(PacketPtr pkt);
/** /**
* Snooping a coherence request, do nothing. * Snooping a coherence request, do nothing.
*/ */
bool recvTimingSnoop(PacketPtr pkt) { return true; } void recvTimingSnoopReq(PacketPtr pkt) { }
Tick recvAtomicSnoop(PacketPtr pkt) { return 0; } Tick recvAtomicSnoop(PacketPtr pkt) { return 0; }
void recvFunctionalSnoop(PacketPtr pkt) { } void recvFunctionalSnoop(PacketPtr pkt) { }
void recvRetry(); void recvRetry();
@ -179,7 +179,7 @@ namespace X86ISA
MasterID masterId; MasterID masterId;
// Functions for dealing with packets. // Functions for dealing with packets.
bool recvTiming(PacketPtr pkt); bool recvTimingResp(PacketPtr pkt);
void recvRetry(); void recvRetry();
bool sendTiming(WalkerState * sendingState, PacketPtr pkt); bool sendTiming(WalkerState * sendingState, PacketPtr pkt);

View file

@ -532,7 +532,7 @@ BaseCPU::traceFunctionsInternal(Addr pc)
} }
bool bool
BaseCPU::CpuPort::recvTiming(PacketPtr pkt) BaseCPU::CpuPort::recvTimingResp(PacketPtr pkt)
{ {
panic("BaseCPU doesn't expect recvTiming!\n"); panic("BaseCPU doesn't expect recvTiming!\n");
return true; return true;

View file

@ -133,7 +133,7 @@ class BaseCPU : public MemObject
protected: protected:
virtual bool recvTiming(PacketPtr pkt); virtual bool recvTimingResp(PacketPtr pkt);
virtual void recvRetry(); virtual void recvRetry();

View file

@ -88,10 +88,8 @@ InOrderCPU::CachePort::CachePort(CacheUnit *_cacheUnit) :
{ } { }
bool bool
InOrderCPU::CachePort::recvTiming(Packet *pkt) InOrderCPU::CachePort::recvTimingResp(Packet *pkt)
{ {
assert(pkt->isResponse());
if (pkt->isError()) if (pkt->isError())
DPRINTF(InOrderCachePort, "Got error packet back for address: %x\n", DPRINTF(InOrderCachePort, "Got error packet back for address: %x\n",
pkt->getAddr()); pkt->getAddr());

View file

@ -170,13 +170,13 @@ class InOrderCPU : public BaseCPU
protected: protected:
/** Timing version of receive */ /** Timing version of receive */
bool recvTiming(PacketPtr pkt); bool recvTimingResp(PacketPtr pkt);
/** Handles doing a retry of a failed timing request. */ /** Handles doing a retry of a failed timing request. */
void recvRetry(); void recvRetry();
/** Ignoring snoops for now. */ /** Ignoring snoops for now. */
bool recvTimingSnoop(PacketPtr pkt) { return true; } void recvTimingSnoopReq(PacketPtr pkt) { }
}; };
/** Define TickEvent for the CPU */ /** Define TickEvent for the CPU */

View file

@ -873,7 +873,7 @@ CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res,
tid, inst->seqNum, cache_req->dataPkt->getAddr()); tid, inst->seqNum, cache_req->dataPkt->getAddr());
if (do_access) { if (do_access) {
if (!cachePort->sendTiming(cache_req->dataPkt)) { if (!cachePort->sendTimingReq(cache_req->dataPkt)) {
DPRINTF(InOrderCachePort, DPRINTF(InOrderCachePort,
"[tid:%i] [sn:%i] cannot access cache, because port " "[tid:%i] [sn:%i] cannot access cache, because port "
"is blocked. now waiting to retry request\n", tid, "is blocked. now waiting to retry request\n", tid,

View file

@ -87,9 +87,8 @@ BaseO3CPU::regStats()
template<class Impl> template<class Impl>
bool bool
FullO3CPU<Impl>::IcachePort::recvTiming(PacketPtr pkt) FullO3CPU<Impl>::IcachePort::recvTimingResp(PacketPtr pkt)
{ {
assert(pkt->isResponse());
DPRINTF(O3CPU, "Fetch unit received timing\n"); DPRINTF(O3CPU, "Fetch unit received timing\n");
// We shouldn't ever get a block in ownership state // We shouldn't ever get a block in ownership state
assert(!(pkt->memInhibitAsserted() && !pkt->sharedAsserted())); assert(!(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
@ -107,18 +106,16 @@ FullO3CPU<Impl>::IcachePort::recvRetry()
template <class Impl> template <class Impl>
bool bool
FullO3CPU<Impl>::DcachePort::recvTiming(PacketPtr pkt) FullO3CPU<Impl>::DcachePort::recvTimingResp(PacketPtr pkt)
{ {
assert(pkt->isResponse()); return lsq->recvTimingResp(pkt);
return lsq->recvTiming(pkt);
} }
template <class Impl> template <class Impl>
bool void
FullO3CPU<Impl>::DcachePort::recvTimingSnoop(PacketPtr pkt) FullO3CPU<Impl>::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
{ {
assert(pkt->isRequest()); lsq->recvTimingSnoopReq(pkt);
return lsq->recvTimingSnoop(pkt);
} }
template <class Impl> template <class Impl>

View file

@ -148,8 +148,8 @@ class FullO3CPU : public BaseO3CPU
/** Timing version of receive. Handles setting fetch to the /** Timing version of receive. Handles setting fetch to the
* proper status to start fetching. */ * proper status to start fetching. */
virtual bool recvTiming(PacketPtr pkt); virtual bool recvTimingResp(PacketPtr pkt);
virtual bool recvTimingSnoop(PacketPtr pkt) { return true; } virtual void recvTimingSnoopReq(PacketPtr pkt) { }
/** Handles doing a retry of a failed fetch. */ /** Handles doing a retry of a failed fetch. */
virtual void recvRetry(); virtual void recvRetry();
@ -176,8 +176,8 @@ class FullO3CPU : public BaseO3CPU
/** Timing version of receive. Handles writing back and /** Timing version of receive. Handles writing back and
* completing the load or store that has returned from * completing the load or store that has returned from
* memory. */ * memory. */
virtual bool recvTiming(PacketPtr pkt); virtual bool recvTimingResp(PacketPtr pkt);
virtual bool recvTimingSnoop(PacketPtr pkt); virtual void recvTimingSnoopReq(PacketPtr pkt);
/** Handles doing a retry of the previous send. */ /** Handles doing a retry of the previous send. */
virtual void recvRetry(); virtual void recvRetry();

View file

@ -621,7 +621,7 @@ DefaultFetch<Impl>::finishTranslation(Fault fault, RequestPtr mem_req)
fetchedCacheLines++; fetchedCacheLines++;
// Access the cache. // Access the cache.
if (!cpu->getInstPort().sendTiming(data_pkt)) { if (!cpu->getInstPort().sendTimingReq(data_pkt)) {
assert(retryPkt == NULL); assert(retryPkt == NULL);
assert(retryTid == InvalidThreadID); assert(retryTid == InvalidThreadID);
DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid); DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid);
@ -1356,7 +1356,7 @@ DefaultFetch<Impl>::recvRetry()
assert(retryTid != InvalidThreadID); assert(retryTid != InvalidThreadID);
assert(fetchStatus[retryTid] == IcacheWaitRetry); assert(fetchStatus[retryTid] == IcacheWaitRetry);
if (cpu->getInstPort().sendTiming(retryPkt)) { if (cpu->getInstPort().sendTimingReq(retryPkt)) {
fetchStatus[retryTid] = IcacheWaitResponse; fetchStatus[retryTid] = IcacheWaitResponse;
retryPkt = NULL; retryPkt = NULL;
retryTid = InvalidThreadID; retryTid = InvalidThreadID;

View file

@ -297,9 +297,9 @@ class LSQ {
* *
* @param pkt Response packet from the memory sub-system * @param pkt Response packet from the memory sub-system
*/ */
bool recvTiming(PacketPtr pkt); bool recvTimingResp(PacketPtr pkt);
bool recvTimingSnoop(PacketPtr pkt); void recvTimingSnoopReq(PacketPtr pkt);
/** The CPU pointer. */ /** The CPU pointer. */
O3CPU *cpu; O3CPU *cpu;

View file

@ -319,9 +319,8 @@ LSQ<Impl>::recvRetry()
template <class Impl> template <class Impl>
bool bool
LSQ<Impl>::recvTiming(PacketPtr pkt) LSQ<Impl>::recvTimingResp(PacketPtr pkt)
{ {
assert(pkt->isResponse());
if (pkt->isError()) if (pkt->isError())
DPRINTF(LSQ, "Got error packet back for address: %#X\n", DPRINTF(LSQ, "Got error packet back for address: %#X\n",
pkt->getAddr()); pkt->getAddr());
@ -330,10 +329,9 @@ LSQ<Impl>::recvTiming(PacketPtr pkt)
} }
template <class Impl> template <class Impl>
bool void
LSQ<Impl>::recvTimingSnoop(PacketPtr pkt) LSQ<Impl>::recvTimingSnoopReq(PacketPtr pkt)
{ {
assert(pkt->isRequest());
DPRINTF(LSQ, "received pkt for addr:%#x %s\n", pkt->getAddr(), DPRINTF(LSQ, "received pkt for addr:%#x %s\n", pkt->getAddr(),
pkt->cmdString()); pkt->cmdString());
@ -345,9 +343,6 @@ LSQ<Impl>::recvTimingSnoop(PacketPtr pkt)
thread[tid].checkSnoop(pkt); thread[tid].checkSnoop(pkt);
} }
} }
// to provide stronger consistency model
return true;
} }
template<class Impl> template<class Impl>

View file

@ -801,7 +801,7 @@ LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
state->mainPkt = data_pkt; state->mainPkt = data_pkt;
} }
if (!dcachePort->sendTiming(fst_data_pkt)) { if (!dcachePort->sendTimingReq(fst_data_pkt)) {
// Delete state and data packet because a load retry // Delete state and data packet because a load retry
// initiates a pipeline restart; it does not retry. // initiates a pipeline restart; it does not retry.
delete state; delete state;
@ -830,7 +830,7 @@ LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
// The first packet will return in completeDataAccess and be // The first packet will return in completeDataAccess and be
// handled there. // handled there.
++usedPorts; ++usedPorts;
if (!dcachePort->sendTiming(snd_data_pkt)) { if (!dcachePort->sendTimingReq(snd_data_pkt)) {
// The main packet will be deleted in completeDataAccess. // The main packet will be deleted in completeDataAccess.
delete snd_data_pkt->req; delete snd_data_pkt->req;

View file

@ -1180,7 +1180,7 @@ template <class Impl>
bool bool
LSQUnit<Impl>::sendStore(PacketPtr data_pkt) LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
{ {
if (!dcachePort->sendTiming(data_pkt)) { if (!dcachePort->sendTimingReq(data_pkt)) {
// Need to handle becoming blocked on a store. // Need to handle becoming blocked on a store.
isStoreBlocked = true; isStoreBlocked = true;
++lsqCacheBlocked; ++lsqCacheBlocked;
@ -1203,7 +1203,7 @@ LSQUnit<Impl>::recvRetry()
LSQSenderState *state = LSQSenderState *state =
dynamic_cast<LSQSenderState *>(retryPkt->senderState); dynamic_cast<LSQSenderState *>(retryPkt->senderState);
if (dcachePort->sendTiming(retryPkt)) { if (dcachePort->sendTimingReq(retryPkt)) {
// Don't finish the store unless this is the last packet. // Don't finish the store unless this is the last packet.
if (!TheISA::HasUnalignedMemAcc || !state->pktToSend || if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
state->pendingPacket == retryPkt) { state->pendingPacket == retryPkt) {

View file

@ -234,7 +234,7 @@ TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
new IprEvent(pkt, this, nextCycle(curTick() + delay)); new IprEvent(pkt, this, nextCycle(curTick() + delay));
_status = DcacheWaitResponse; _status = DcacheWaitResponse;
dcache_pkt = NULL; dcache_pkt = NULL;
} else if (!dcachePort.sendTiming(pkt)) { } else if (!dcachePort.sendTimingReq(pkt)) {
_status = DcacheRetry; _status = DcacheRetry;
dcache_pkt = pkt; dcache_pkt = pkt;
} else { } else {
@ -449,7 +449,7 @@ TimingSimpleCPU::handleWritePacket()
new IprEvent(dcache_pkt, this, nextCycle(curTick() + delay)); new IprEvent(dcache_pkt, this, nextCycle(curTick() + delay));
_status = DcacheWaitResponse; _status = DcacheWaitResponse;
dcache_pkt = NULL; dcache_pkt = NULL;
} else if (!dcachePort.sendTiming(dcache_pkt)) { } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
_status = DcacheRetry; _status = DcacheRetry;
} else { } else {
_status = DcacheWaitResponse; _status = DcacheWaitResponse;
@ -581,7 +581,7 @@ TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
ifetch_pkt->dataStatic(&inst); ifetch_pkt->dataStatic(&inst);
DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr()); DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());
if (!icachePort.sendTiming(ifetch_pkt)) { if (!icachePort.sendTimingReq(ifetch_pkt)) {
// Need to wait for retry // Need to wait for retry
_status = IcacheRetry; _status = IcacheRetry;
} else { } else {
@ -715,9 +715,8 @@ TimingSimpleCPU::IcachePort::ITickEvent::process()
} }
bool bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt) TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{ {
assert(pkt->isResponse());
if (!pkt->wasNacked()) { if (!pkt->wasNacked()) {
DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr()); DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
// delay processing of returned data until next CPU clock edge // delay processing of returned data until next CPU clock edge
@ -732,7 +731,7 @@ TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
} else { } else {
assert(cpu->_status == IcacheWaitResponse); assert(cpu->_status == IcacheWaitResponse);
pkt->reinitNacked(); pkt->reinitNacked();
if (!sendTiming(pkt)) { if (!sendTimingReq(pkt)) {
cpu->_status = IcacheRetry; cpu->_status = IcacheRetry;
cpu->ifetch_pkt = pkt; cpu->ifetch_pkt = pkt;
} }
@ -749,7 +748,7 @@ TimingSimpleCPU::IcachePort::recvRetry()
assert(cpu->ifetch_pkt != NULL); assert(cpu->ifetch_pkt != NULL);
assert(cpu->_status == IcacheRetry); assert(cpu->_status == IcacheRetry);
PacketPtr tmp = cpu->ifetch_pkt; PacketPtr tmp = cpu->ifetch_pkt;
if (sendTiming(tmp)) { if (sendTimingReq(tmp)) {
cpu->_status = IcacheWaitResponse; cpu->_status = IcacheWaitResponse;
cpu->ifetch_pkt = NULL; cpu->ifetch_pkt = NULL;
} }
@ -836,9 +835,8 @@ TimingSimpleCPU::completeDrain()
} }
bool bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt) TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{ {
assert(pkt->isResponse());
if (!pkt->wasNacked()) { if (!pkt->wasNacked()) {
// delay processing of returned data until next CPU clock edge // delay processing of returned data until next CPU clock edge
Tick next_tick = cpu->nextCycle(curTick()); Tick next_tick = cpu->nextCycle(curTick());
@ -862,7 +860,7 @@ TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
} else { } else {
assert(cpu->_status == DcacheWaitResponse); assert(cpu->_status == DcacheWaitResponse);
pkt->reinitNacked(); pkt->reinitNacked();
if (!sendTiming(pkt)) { if (!sendTimingReq(pkt)) {
cpu->_status = DcacheRetry; cpu->_status = DcacheRetry;
cpu->dcache_pkt = pkt; cpu->dcache_pkt = pkt;
} }
@ -896,7 +894,7 @@ TimingSimpleCPU::DcachePort::recvRetry()
dynamic_cast<SplitMainSenderState *>(big_pkt->senderState); dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
assert(main_send_state); assert(main_send_state);
if (sendTiming(tmp)) { if (sendTimingReq(tmp)) {
// If we were able to send without retrying, record that fact // If we were able to send without retrying, record that fact
// and try sending the other fragment. // and try sending the other fragment.
send_state->clearFromParent(); send_state->clearFromParent();
@ -914,7 +912,7 @@ TimingSimpleCPU::DcachePort::recvRetry()
cpu->dcache_pkt = NULL; cpu->dcache_pkt = NULL;
} }
} }
} else if (sendTiming(tmp)) { } else if (sendTimingReq(tmp)) {
cpu->_status = DcacheWaitResponse; cpu->_status = DcacheWaitResponse;
// memory system takes ownership of packet // memory system takes ownership of packet
cpu->dcache_pkt = NULL; cpu->dcache_pkt = NULL;

View file

@ -156,7 +156,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
/** /**
* Snooping a coherence request, do nothing. * Snooping a coherence request, do nothing.
*/ */
virtual bool recvTimingSnoop(PacketPtr pkt) { return true; } virtual void recvTimingSnoopReq(PacketPtr pkt) { }
TimingSimpleCPU* cpu; TimingSimpleCPU* cpu;
@ -185,7 +185,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
protected: protected:
virtual bool recvTiming(PacketPtr pkt); virtual bool recvTimingResp(PacketPtr pkt);
virtual void recvRetry(); virtual void recvRetry();
@ -212,7 +212,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
protected: protected:
virtual bool recvTiming(PacketPtr pkt); virtual bool recvTimingResp(PacketPtr pkt);
virtual void recvRetry(); virtual void recvRetry();

View file

@ -80,7 +80,7 @@ InvalidateGenerator::initiate()
*dummyData = 0; *dummyData = 0;
pkt->dataDynamic(dummyData); pkt->dataDynamic(dummyData);
if (port->sendTiming(pkt)) { if (port->sendTimingReq(pkt)) {
DPRINTF(DirectedTest, "initiating request - successful\n"); DPRINTF(DirectedTest, "initiating request - successful\n");
if (m_status == InvalidateGeneratorStatus_Load_Waiting) { if (m_status == InvalidateGeneratorStatus_Load_Waiting) {
m_status = InvalidateGeneratorStatus_Load_Pending; m_status = InvalidateGeneratorStatus_Load_Pending;

View file

@ -91,7 +91,7 @@ RubyDirectedTester::getMasterPort(const std::string &if_name, int idx)
} }
bool bool
RubyDirectedTester::CpuPort::recvTiming(PacketPtr pkt) RubyDirectedTester::CpuPort::recvTimingResp(PacketPtr pkt)
{ {
tester->hitCallback(id, pkt->getAddr()); tester->hitCallback(id, pkt->getAddr());

View file

@ -59,7 +59,7 @@ class RubyDirectedTester : public MemObject
{} {}
protected: protected:
virtual bool recvTiming(PacketPtr pkt); virtual bool recvTimingResp(PacketPtr pkt);
virtual void recvRetry() virtual void recvRetry()
{ panic("%s does not expect a retry\n", name()); } { panic("%s does not expect a retry\n", name()); }
}; };

View file

@ -70,7 +70,7 @@ SeriesRequestGenerator::initiate()
*dummyData = 0; *dummyData = 0;
pkt->dataDynamic(dummyData); pkt->dataDynamic(dummyData);
if (port->sendTiming(pkt)) { if (port->sendTimingReq(pkt)) {
DPRINTF(DirectedTest, "initiating request - successful\n"); DPRINTF(DirectedTest, "initiating request - successful\n");
m_status = SeriesRequestGeneratorStatus_Request_Pending; m_status = SeriesRequestGeneratorStatus_Request_Pending;
return true; return true;

View file

@ -53,9 +53,8 @@ using namespace std;
int TESTER_ALLOCATOR=0; int TESTER_ALLOCATOR=0;
bool bool
MemTest::CpuPort::recvTiming(PacketPtr pkt) MemTest::CpuPort::recvTimingResp(PacketPtr pkt)
{ {
assert(pkt->isResponse());
memtest->completeRequest(pkt); memtest->completeRequest(pkt);
return true; return true;
} }
@ -72,7 +71,7 @@ MemTest::sendPkt(PacketPtr pkt) {
cachePort.sendAtomic(pkt); cachePort.sendAtomic(pkt);
completeRequest(pkt); completeRequest(pkt);
} }
else if (!cachePort.sendTiming(pkt)) { else if (!cachePort.sendTimingReq(pkt)) {
DPRINTF(MemTest, "accessRetry setting to true\n"); DPRINTF(MemTest, "accessRetry setting to true\n");
// //
@ -379,7 +378,7 @@ MemTest::tick()
void void
MemTest::doRetry() MemTest::doRetry()
{ {
if (cachePort.sendTiming(retryPkt)) { if (cachePort.sendTimingReq(retryPkt)) {
DPRINTF(MemTest, "accessRetry setting to false\n"); DPRINTF(MemTest, "accessRetry setting to false\n");
accessRetry = false; accessRetry = false;
retryPkt = NULL; retryPkt = NULL;

View file

@ -97,9 +97,9 @@ class MemTest : public MemObject
protected: protected:
virtual bool recvTiming(PacketPtr pkt); virtual bool recvTimingResp(PacketPtr pkt);
virtual bool recvTimingSnoop(PacketPtr pkt) { return true; } virtual void recvTimingSnoopReq(PacketPtr pkt) { }
virtual Tick recvAtomicSnoop(PacketPtr pkt) { return 0; } virtual Tick recvAtomicSnoop(PacketPtr pkt) { return 0; }

View file

@ -51,9 +51,8 @@ using namespace std;
int TESTER_NETWORK=0; int TESTER_NETWORK=0;
bool bool
NetworkTest::CpuPort::recvTiming(PacketPtr pkt) NetworkTest::CpuPort::recvTimingResp(PacketPtr pkt)
{ {
assert(pkt->isResponse());
networktest->completeRequest(pkt); networktest->completeRequest(pkt);
return true; return true;
} }
@ -67,7 +66,7 @@ NetworkTest::CpuPort::recvRetry()
void void
NetworkTest::sendPkt(PacketPtr pkt) NetworkTest::sendPkt(PacketPtr pkt)
{ {
if (!cachePort.sendTiming(pkt)) { if (!cachePort.sendTimingReq(pkt)) {
retryPkt = pkt; // RubyPort will retry sending retryPkt = pkt; // RubyPort will retry sending
} }
numPacketsSent++; numPacketsSent++;
@ -269,7 +268,7 @@ NetworkTest::generatePkt()
void void
NetworkTest::doRetry() NetworkTest::doRetry()
{ {
if (cachePort.sendTiming(retryPkt)) { if (cachePort.sendTimingReq(retryPkt)) {
retryPkt = NULL; retryPkt = NULL;
} }
} }

View file

@ -92,7 +92,7 @@ class NetworkTest : public MemObject
protected: protected:
virtual bool recvTiming(PacketPtr pkt); virtual bool recvTimingResp(PacketPtr pkt);
virtual void recvRetry(); virtual void recvRetry();
}; };

View file

@ -114,7 +114,7 @@ Check::initiatePrefetch()
pkt->senderState = pkt->senderState =
new SenderState(m_address, req->getSize(), pkt->senderState); new SenderState(m_address, req->getSize(), pkt->senderState);
if (port->sendTiming(pkt)) { if (port->sendTimingReq(pkt)) {
DPRINTF(RubyTest, "successfully initiated prefetch.\n"); DPRINTF(RubyTest, "successfully initiated prefetch.\n");
} else { } else {
// If the packet did not issue, must delete // If the packet did not issue, must delete
@ -154,7 +154,7 @@ Check::initiateFlush()
pkt->senderState = pkt->senderState =
new SenderState(m_address, req->getSize(), pkt->senderState); new SenderState(m_address, req->getSize(), pkt->senderState);
if (port->sendTiming(pkt)) { if (port->sendTimingReq(pkt)) {
DPRINTF(RubyTest, "initiating Flush - successful\n"); DPRINTF(RubyTest, "initiating Flush - successful\n");
} }
} }
@ -201,7 +201,7 @@ Check::initiateAction()
pkt->senderState = pkt->senderState =
new SenderState(writeAddr, req->getSize(), pkt->senderState); new SenderState(writeAddr, req->getSize(), pkt->senderState);
if (port->sendTiming(pkt)) { if (port->sendTimingReq(pkt)) {
DPRINTF(RubyTest, "initiating action - successful\n"); DPRINTF(RubyTest, "initiating action - successful\n");
DPRINTF(RubyTest, "status before action update: %s\n", DPRINTF(RubyTest, "status before action update: %s\n",
(TesterStatus_to_string(m_status)).c_str()); (TesterStatus_to_string(m_status)).c_str());
@ -253,7 +253,7 @@ Check::initiateCheck()
pkt->senderState = pkt->senderState =
new SenderState(m_address, req->getSize(), pkt->senderState); new SenderState(m_address, req->getSize(), pkt->senderState);
if (port->sendTiming(pkt)) { if (port->sendTimingReq(pkt)) {
DPRINTF(RubyTest, "initiating check - successful\n"); DPRINTF(RubyTest, "initiating check - successful\n");
DPRINTF(RubyTest, "status before check update: %s\n", DPRINTF(RubyTest, "status before check update: %s\n",
TesterStatus_to_string(m_status).c_str()); TesterStatus_to_string(m_status).c_str());

View file

@ -145,7 +145,7 @@ RubyTester::getMasterPort(const std::string &if_name, int idx)
} }
bool bool
RubyTester::CpuPort::recvTiming(PacketPtr pkt) RubyTester::CpuPort::recvTimingResp(PacketPtr pkt)
{ {
// retrieve the subblock and call hitCallback // retrieve the subblock and call hitCallback
RubyTester::SenderState* senderState = RubyTester::SenderState* senderState =

View file

@ -62,7 +62,7 @@ class RubyTester : public MemObject
{} {}
protected: protected:
virtual bool recvTiming(PacketPtr pkt); virtual bool recvTimingResp(PacketPtr pkt);
virtual void recvRetry() virtual void recvRetry()
{ panic("%s does not expect a retry\n", name()); } { panic("%s does not expect a retry\n", name()); }
}; };

View file

@ -131,9 +131,8 @@ DmaPort::DmaPort(MemObject *dev, System *s, Tick min_backoff, Tick max_backoff,
{ } { }
bool bool
DmaPort::recvTiming(PacketPtr pkt) DmaPort::recvTimingResp(PacketPtr pkt)
{ {
assert(pkt->isResponse());
if (pkt->wasNacked()) { if (pkt->wasNacked()) {
DPRINTF(DMA, "Received nacked %s addr %#x\n", DPRINTF(DMA, "Received nacked %s addr %#x\n",
pkt->cmdString(), pkt->getAddr()); pkt->cmdString(), pkt->getAddr());
@ -234,7 +233,7 @@ DmaPort::recvRetry()
PacketPtr pkt = transmitList.front(); PacketPtr pkt = transmitList.front();
DPRINTF(DMA, "Retry on %s addr %#x\n", DPRINTF(DMA, "Retry on %s addr %#x\n",
pkt->cmdString(), pkt->getAddr()); pkt->cmdString(), pkt->getAddr());
result = sendTiming(pkt); result = sendTimingReq(pkt);
if (result) { if (result) {
DPRINTF(DMA, "-- Done\n"); DPRINTF(DMA, "-- Done\n");
transmitList.pop_front(); transmitList.pop_front();
@ -320,7 +319,7 @@ DmaPort::sendDma()
bool result; bool result;
do { do {
result = sendTiming(pkt); result = sendTimingReq(pkt);
if (result) { if (result) {
transmitList.pop_front(); transmitList.pop_front();
DPRINTF(DMA, "-- Done\n"); DPRINTF(DMA, "-- Done\n");

View file

@ -146,13 +146,12 @@ class DmaPort : public MasterPort
/** Port accesses a cache which requires snooping */ /** Port accesses a cache which requires snooping */
bool recvSnoops; bool recvSnoops;
virtual bool recvTiming(PacketPtr pkt); virtual bool recvTimingResp(PacketPtr pkt);
virtual bool recvTimingSnoop(PacketPtr pkt) virtual void recvTimingSnoopReq(PacketPtr pkt)
{ {
if (!recvSnoops) if (!recvSnoops)
panic("%s was not expecting a snoop\n", name()); panic("%s was not expecting a snoop\n", name());
return true;
} }
virtual Tick recvAtomicSnoop(PacketPtr pkt) virtual Tick recvAtomicSnoop(PacketPtr pkt)

View file

@ -137,11 +137,8 @@ Bridge::BridgeMasterPort::reqQueueFull()
} }
bool bool
Bridge::BridgeMasterPort::recvTiming(PacketPtr pkt) Bridge::BridgeMasterPort::recvTimingResp(PacketPtr pkt)
{ {
// should only see responses on the master side
assert(pkt->isResponse());
// all checks are done when the request is accepted on the slave // all checks are done when the request is accepted on the slave
// side, so we are guaranteed to have space for the response // side, so we are guaranteed to have space for the response
DPRINTF(BusBridge, "recvTiming: response %s addr 0x%x\n", DPRINTF(BusBridge, "recvTiming: response %s addr 0x%x\n",
@ -155,12 +152,8 @@ Bridge::BridgeMasterPort::recvTiming(PacketPtr pkt)
} }
bool bool
Bridge::BridgeSlavePort::recvTiming(PacketPtr pkt) Bridge::BridgeSlavePort::recvTimingReq(PacketPtr pkt)
{ {
// should only see requests on the slave side
assert(pkt->isRequest());
DPRINTF(BusBridge, "recvTiming: request %s addr 0x%x\n", DPRINTF(BusBridge, "recvTiming: request %s addr 0x%x\n",
pkt->cmdString(), pkt->getAddr()); pkt->cmdString(), pkt->getAddr());
@ -318,7 +311,7 @@ Bridge::BridgeMasterPort::trySend()
if (!buf->expectResponse) if (!buf->expectResponse)
pkt->senderState = NULL; pkt->senderState = NULL;
if (sendTiming(pkt)) { if (sendTimingReq(pkt)) {
// send successful // send successful
requestQueue.pop_front(); requestQueue.pop_front();
// we no longer own packet, so it's not safe to look at it // we no longer own packet, so it's not safe to look at it
@ -365,7 +358,7 @@ Bridge::BridgeSlavePort::trySend()
// no need to worry about the sender state since we are not // no need to worry about the sender state since we are not
// modifying it // modifying it
if (sendTiming(pkt)) { if (sendTimingResp(pkt)) {
DPRINTF(BusBridge, " successful\n"); DPRINTF(BusBridge, " successful\n");
// send successful // send successful
responseQueue.pop_front(); responseQueue.pop_front();

View file

@ -230,7 +230,7 @@ class Bridge : public MemObject
/** When receiving a timing request from the peer port, /** When receiving a timing request from the peer port,
pass it to the bridge. */ pass it to the bridge. */
virtual bool recvTiming(PacketPtr pkt); virtual bool recvTimingReq(PacketPtr pkt);
/** When receiving a retry request from the peer port, /** When receiving a retry request from the peer port,
pass it to the bridge. */ pass it to the bridge. */
@ -353,7 +353,7 @@ class Bridge : public MemObject
/** When receiving a timing request from the peer port, /** When receiving a timing request from the peer port,
pass it to the bridge. */ pass it to the bridge. */
virtual bool recvTiming(PacketPtr pkt); virtual bool recvTimingResp(PacketPtr pkt);
/** When receiving a retry request from the peer port, /** When receiving a retry request from the peer port,
pass it to the bridge. */ pass it to the bridge. */

View file

@ -198,90 +198,65 @@ Bus::isOccupied(PacketPtr pkt, Port* port)
} }
bool bool
Bus::recvTiming(PacketPtr pkt) Bus::recvTimingReq(PacketPtr pkt)
{ {
// get the source id // determine the source port based on the id
Packet::NodeID src_id = pkt->getSrc(); SlavePort *src_port = slavePorts[pkt->getSrc()];
// determine the source port based on the id and direction
Port *src_port = NULL;
if (pkt->isRequest())
src_port = slavePorts[src_id];
else
src_port = masterPorts[src_id];
// test if the bus should be considered occupied for the current // test if the bus should be considered occupied for the current
// packet, and exclude express snoops from the check // packet, and exclude express snoops from the check
if (!pkt->isExpressSnoop() && isOccupied(pkt, src_port)) { if (!pkt->isExpressSnoop() && isOccupied(pkt, src_port)) {
DPRINTF(Bus, "recvTiming: src %s %s 0x%x BUSY\n", DPRINTF(Bus, "recvTimingReq: src %s %s 0x%x BUSY\n",
src_port->name(), pkt->cmdString(), pkt->getAddr()); src_port->name(), pkt->cmdString(), pkt->getAddr());
return false; return false;
} }
DPRINTF(Bus, "recvTiming: src %s %s 0x%x\n", DPRINTF(Bus, "recvTimingReq: src %s %s 0x%x\n",
src_port->name(), pkt->cmdString(), pkt->getAddr()); src_port->name(), pkt->cmdString(), pkt->getAddr());
Tick headerFinishTime = pkt->isExpressSnoop() ? 0 : calcPacketTiming(pkt); Tick headerFinishTime = pkt->isExpressSnoop() ? 0 : calcPacketTiming(pkt);
Tick packetFinishTime = pkt->isExpressSnoop() ? 0 : pkt->finishTime; Tick packetFinishTime = pkt->isExpressSnoop() ? 0 : pkt->finishTime;
// decide what to do based on the direction // the packet is a memory-mapped request and should be
if (pkt->isRequest()) { // broadcasted to our snoopers but the source
// the packet is a memory-mapped request and should be forwardTiming(pkt, pkt->getSrc());
// broadcasted to our snoopers but the source
forwardTiming(pkt, src_id);
// remember if we add an outstanding req so we can undo it if // remember if we add an outstanding req so we can undo it if
// necessary, if the packet needs a response, we should add it // necessary, if the packet needs a response, we should add it
// as outstanding and express snoops never fail so there is // as outstanding and express snoops never fail so there is
// not need to worry about them // not need to worry about them
bool add_outstanding = !pkt->isExpressSnoop() && pkt->needsResponse(); bool add_outstanding = !pkt->isExpressSnoop() && pkt->needsResponse();
// keep track that we have an outstanding request packet // keep track that we have an outstanding request packet
// matching this request, this is used by the coherency // matching this request, this is used by the coherency
// mechanism in determining what to do with snoop responses // mechanism in determining what to do with snoop responses
// (in recvTimingSnoop) // (in recvTimingSnoop)
if (add_outstanding) { if (add_outstanding) {
// we should never have an exsiting request outstanding // we should never have an exsiting request outstanding
assert(outstandingReq.find(pkt->req) == outstandingReq.end()); assert(outstandingReq.find(pkt->req) == outstandingReq.end());
outstandingReq.insert(pkt->req); outstandingReq.insert(pkt->req);
} }
// since it is a normal request, determine the destination // since it is a normal request, determine the destination
// based on the address and attempt to send the packet // based on the address and attempt to send the packet
bool success = masterPorts[findPort(pkt->getAddr())]->sendTiming(pkt); bool success = masterPorts[findPort(pkt->getAddr())]->sendTimingReq(pkt);
if (!success) { if (!success) {
// inhibited packets should never be forced to retry // inhibited packets should never be forced to retry
assert(!pkt->memInhibitAsserted()); assert(!pkt->memInhibitAsserted());
// if it was added as outstanding and the send failed, then // if it was added as outstanding and the send failed, then
// erase it again // erase it again
if (add_outstanding) if (add_outstanding)
outstandingReq.erase(pkt->req); outstandingReq.erase(pkt->req);
DPRINTF(Bus, "recvTiming: src %s %s 0x%x RETRY\n", DPRINTF(Bus, "recvTimingReq: src %s %s 0x%x RETRY\n",
src_port->name(), pkt->cmdString(), pkt->getAddr()); src_port->name(), pkt->cmdString(), pkt->getAddr());
addToRetryList(src_port); addToRetryList(src_port);
occupyBus(headerFinishTime); occupyBus(headerFinishTime);
return false; return false;
}
} else {
// the packet is a normal response to a request that we should
// have seen passing through the bus
assert(outstandingReq.find(pkt->req) != outstandingReq.end());
// remove it as outstanding
outstandingReq.erase(pkt->req);
// send the packet to the destination through one of our slave
// ports, as determined by the destination field
bool success M5_VAR_USED = slavePorts[pkt->getDest()]->sendTiming(pkt);
// currently it is illegal to block responses... can lead to
// deadlock
assert(success);
} }
succeededTiming(packetFinishTime); succeededTiming(packetFinishTime);
@ -290,94 +265,132 @@ Bus::recvTiming(PacketPtr pkt)
} }
bool bool
Bus::recvTimingSnoop(PacketPtr pkt) Bus::recvTimingResp(PacketPtr pkt)
{ {
// get the source id // determine the source port based on the id
Packet::NodeID src_id = pkt->getSrc(); MasterPort *src_port = masterPorts[pkt->getSrc()];
if (pkt->isRequest()) { // test if the bus should be considered occupied for the current
DPRINTF(Bus, "recvTimingSnoop: src %d %s 0x%x\n", // packet
src_id, pkt->cmdString(), pkt->getAddr()); if (isOccupied(pkt, src_port)) {
DPRINTF(Bus, "recvTimingResp: src %s %s 0x%x BUSY\n",
// the packet is an express snoop request and should be
// broadcasted to our snoopers
assert(pkt->isExpressSnoop());
// forward to all snoopers
forwardTiming(pkt, Port::INVALID_PORT_ID);
// a snoop request came from a connected slave device (one of
// our master ports), and if it is not coming from the slave
// device responsible for the address range something is
// wrong, hence there is nothing further to do as the packet
// would be going back to where it came from
assert(src_id == findPort(pkt->getAddr()));
// this is an express snoop and is never forced to retry
assert(!inRetry);
return true;
} else {
// determine the source port based on the id
SlavePort* src_port = slavePorts[src_id];
if (isOccupied(pkt, src_port)) {
DPRINTF(Bus, "recvTimingSnoop: src %s %s 0x%x BUSY\n",
src_port->name(), pkt->cmdString(), pkt->getAddr());
return false;
}
DPRINTF(Bus, "recvTimingSnoop: src %s %s 0x%x\n",
src_port->name(), pkt->cmdString(), pkt->getAddr()); src_port->name(), pkt->cmdString(), pkt->getAddr());
return false;
// get the destination from the packet
Packet::NodeID dest = pkt->getDest();
// responses are never express snoops
assert(!pkt->isExpressSnoop());
calcPacketTiming(pkt);
Tick packetFinishTime = pkt->finishTime;
// determine if the response is from a snoop request we
// created as the result of a normal request (in which case it
// should be in the outstandingReq), or if we merely forwarded
// someone else's snoop request
if (outstandingReq.find(pkt->req) == outstandingReq.end()) {
// this is a snoop response to a snoop request we
// forwarded, e.g. coming from the L1 and going to the L2
// this should be forwarded as a snoop response
bool success M5_VAR_USED = masterPorts[dest]->sendTimingSnoop(pkt);
assert(success);
} else {
// we got a snoop response on one of our slave ports,
// i.e. from a coherent master connected to the bus, and
// since we created the snoop request as part of
// recvTiming, this should now be a normal response again
outstandingReq.erase(pkt->req);
// this is a snoop response from a coherent master, with a
// destination field set on its way through the bus as
// request, hence it should never go back to where the
// snoop response came from, but instead to where the
// original request came from
assert(src_id != dest);
// as a normal response, it should go back to a master
// through one of our slave ports
bool success M5_VAR_USED = slavePorts[dest]->sendTiming(pkt);
// currently it is illegal to block responses... can lead
// to deadlock
assert(success);
}
succeededTiming(packetFinishTime);
return true;
} }
DPRINTF(Bus, "recvTimingResp: src %s %s 0x%x\n",
src_port->name(), pkt->cmdString(), pkt->getAddr());
calcPacketTiming(pkt);
Tick packetFinishTime = pkt->finishTime;
// the packet is a normal response to a request that we should
// have seen passing through the bus
assert(outstandingReq.find(pkt->req) != outstandingReq.end());
// remove it as outstanding
outstandingReq.erase(pkt->req);
// send the packet to the destination through one of our slave
// ports, as determined by the destination field
bool success M5_VAR_USED = slavePorts[pkt->getDest()]->sendTimingResp(pkt);
// currently it is illegal to block responses... can lead to
// deadlock
assert(success);
succeededTiming(packetFinishTime);
return true;
} }
void
Bus::recvTimingSnoopReq(PacketPtr pkt)
{
DPRINTF(Bus, "recvTimingSnoopReq: src %s %s 0x%x\n",
masterPorts[pkt->getSrc()]->name(), pkt->cmdString(),
pkt->getAddr());
// we should only see express snoops from caches
assert(pkt->isExpressSnoop());
// forward to all snoopers
forwardTiming(pkt, Port::INVALID_PORT_ID);
// a snoop request came from a connected slave device (one of
// our master ports), and if it is not coming from the slave
// device responsible for the address range something is
// wrong, hence there is nothing further to do as the packet
// would be going back to where it came from
assert(pkt->getSrc() == findPort(pkt->getAddr()));
// this is an express snoop and is never forced to retry
assert(!inRetry);
}
bool
Bus::recvTimingSnoopResp(PacketPtr pkt)
{
// determine the source port based on the id
SlavePort* src_port = slavePorts[pkt->getSrc()];
if (isOccupied(pkt, src_port)) {
DPRINTF(Bus, "recvTimingSnoopResp: src %s %s 0x%x BUSY\n",
src_port->name(), pkt->cmdString(), pkt->getAddr());
return false;
}
DPRINTF(Bus, "recvTimingSnoop: src %s %s 0x%x\n",
src_port->name(), pkt->cmdString(), pkt->getAddr());
// get the destination from the packet
Packet::NodeID dest = pkt->getDest();
// responses are never express snoops
assert(!pkt->isExpressSnoop());
calcPacketTiming(pkt);
Tick packetFinishTime = pkt->finishTime;
// determine if the response is from a snoop request we
// created as the result of a normal request (in which case it
// should be in the outstandingReq), or if we merely forwarded
// someone else's snoop request
if (outstandingReq.find(pkt->req) == outstandingReq.end()) {
// this is a snoop response to a snoop request we
// forwarded, e.g. coming from the L1 and going to the L2
// this should be forwarded as a snoop response
bool success M5_VAR_USED = masterPorts[dest]->sendTimingSnoopResp(pkt);
assert(success);
} else {
// we got a snoop response on one of our slave ports,
// i.e. from a coherent master connected to the bus, and
// since we created the snoop request as part of
// recvTiming, this should now be a normal response again
outstandingReq.erase(pkt->req);
// this is a snoop response from a coherent master, with a
// destination field set on its way through the bus as
// request, hence it should never go back to where the
// snoop response came from, but instead to where the
// original request came from
assert(pkt->getSrc() != dest);
// as a normal response, it should go back to a master
// through one of our slave ports
bool success M5_VAR_USED = slavePorts[dest]->sendTimingResp(pkt);
// currently it is illegal to block responses... can lead
// to deadlock
assert(success);
}
succeededTiming(packetFinishTime);
return true;
}
void void
Bus::succeededTiming(Tick busy_time) Bus::succeededTiming(Tick busy_time)
{ {
@ -405,8 +418,7 @@ Bus::forwardTiming(PacketPtr pkt, int exclude_slave_port_id)
if (exclude_slave_port_id == Port::INVALID_PORT_ID || if (exclude_slave_port_id == Port::INVALID_PORT_ID ||
p->getId() != exclude_slave_port_id) { p->getId() != exclude_slave_port_id) {
// cache is not allowed to refuse snoop // cache is not allowed to refuse snoop
bool success M5_VAR_USED = p->sendTimingSnoop(pkt); p->sendTimingSnoopReq(pkt);
assert(success);
} }
} }
} }
@ -531,9 +543,6 @@ Bus::recvAtomic(PacketPtr pkt)
slavePorts[pkt->getSrc()]->name(), pkt->getAddr(), slavePorts[pkt->getSrc()]->name(), pkt->getAddr(),
pkt->cmdString()); pkt->cmdString());
// we should always see a request routed based on the address
assert(pkt->isRequest());
// forward to all snoopers but the source // forward to all snoopers but the source
std::pair<MemCmd, Tick> snoop_result = forwardAtomic(pkt, pkt->getSrc()); std::pair<MemCmd, Tick> snoop_result = forwardAtomic(pkt, pkt->getSrc());
MemCmd snoop_response_cmd = snoop_result.first; MemCmd snoop_response_cmd = snoop_result.first;
@ -565,9 +574,6 @@ Bus::recvAtomicSnoop(PacketPtr pkt)
masterPorts[pkt->getSrc()]->name(), pkt->getAddr(), masterPorts[pkt->getSrc()]->name(), pkt->getAddr(),
pkt->cmdString()); pkt->cmdString());
// we should always see a request routed based on the address
assert(pkt->isRequest());
// forward to all snoopers // forward to all snoopers
std::pair<MemCmd, Tick> snoop_result = std::pair<MemCmd, Tick> snoop_result =
forwardAtomic(pkt, Port::INVALID_PORT_ID); forwardAtomic(pkt, Port::INVALID_PORT_ID);
@ -637,9 +643,6 @@ Bus::recvFunctional(PacketPtr pkt)
pkt->cmdString()); pkt->cmdString());
} }
// we should always see a request routed based on the address
assert(pkt->isRequest());
// forward to all snoopers but the source // forward to all snoopers but the source
forwardFunctional(pkt, pkt->getSrc()); forwardFunctional(pkt, pkt->getSrc());
@ -663,9 +666,6 @@ Bus::recvFunctionalSnoop(PacketPtr pkt)
pkt->cmdString()); pkt->cmdString());
} }
// we should always see a request routed based on the address
assert(pkt->isRequest());
// forward to all snoopers // forward to all snoopers
forwardFunctional(pkt, Port::INVALID_PORT_ID); forwardFunctional(pkt, Port::INVALID_PORT_ID);
} }

View file

@ -89,14 +89,14 @@ class Bus : public MemObject
/** /**
* When receiving a timing request, pass it to the bus. * When receiving a timing request, pass it to the bus.
*/ */
virtual bool recvTiming(PacketPtr pkt) virtual bool recvTimingReq(PacketPtr pkt)
{ pkt->setSrc(id); return bus->recvTiming(pkt); } { pkt->setSrc(id); return bus->recvTimingReq(pkt); }
/** /**
* When receiving a timing snoop response, pass it to the bus. * When receiving a timing snoop response, pass it to the bus.
*/ */
virtual bool recvTimingSnoop(PacketPtr pkt) virtual bool recvTimingSnoopResp(PacketPtr pkt)
{ pkt->setSrc(id); return bus->recvTimingSnoop(pkt); } { pkt->setSrc(id); return bus->recvTimingSnoopResp(pkt); }
/** /**
* When receiving an atomic request, pass it to the bus. * When receiving an atomic request, pass it to the bus.
@ -163,14 +163,14 @@ class Bus : public MemObject
/** /**
* When receiving a timing response, pass it to the bus. * When receiving a timing response, pass it to the bus.
*/ */
virtual bool recvTiming(PacketPtr pkt) virtual bool recvTimingResp(PacketPtr pkt)
{ pkt->setSrc(id); return bus->recvTiming(pkt); } { pkt->setSrc(id); return bus->recvTimingResp(pkt); }
/** /**
* When receiving a timing snoop request, pass it to the bus. * When receiving a timing snoop request, pass it to the bus.
*/ */
virtual bool recvTimingSnoop(PacketPtr pkt) virtual void recvTimingSnoopReq(PacketPtr pkt)
{ pkt->setSrc(id); return bus->recvTimingSnoop(pkt); } { pkt->setSrc(id); return bus->recvTimingSnoopReq(pkt); }
/** /**
* When receiving an atomic snoop request, pass it to the bus. * When receiving an atomic snoop request, pass it to the bus.
@ -228,12 +228,20 @@ class Bus : public MemObject
std::set<RequestPtr> outstandingReq; std::set<RequestPtr> outstandingReq;
/** Function called by the port when the bus is recieving a Timing /** Function called by the port when the bus is recieving a Timing
transaction.*/ request packet.*/
bool recvTiming(PacketPtr pkt); bool recvTimingReq(PacketPtr pkt);
/** Function called by the port when the bus is recieving a Timing
response packet.*/
bool recvTimingResp(PacketPtr pkt);
/** Function called by the port when the bus is recieving a timing /** Function called by the port when the bus is recieving a timing
snoop transaction.*/ snoop request.*/
bool recvTimingSnoop(PacketPtr pkt); void recvTimingSnoopReq(PacketPtr pkt);
/** Function called by the port when the bus is recieving a timing
snoop response.*/
bool recvTimingSnoopResp(PacketPtr pkt);
/** /**
* Forward a timing packet to our snoopers, potentially excluding * Forward a timing packet to our snoopers, potentially excluding

View file

@ -148,7 +148,7 @@ class BaseCache : public MemObject
protected: protected:
CacheMasterPort(const std::string &_name, BaseCache *_cache, CacheMasterPort(const std::string &_name, BaseCache *_cache,
PacketQueue &_queue) : MasterPacketQueue &_queue) :
QueuedMasterPort(_name, _cache, _queue) QueuedMasterPort(_name, _cache, _queue)
{ } { }
@ -196,7 +196,7 @@ class BaseCache : public MemObject
const std::string &_label); const std::string &_label);
/** A normal packet queue used to store responses. */ /** A normal packet queue used to store responses. */
PacketQueue queue; SlavePacketQueue queue;
bool blocked; bool blocked;

View file

@ -90,9 +90,9 @@ class Cache : public BaseCache
protected: protected:
virtual bool recvTimingSnoop(PacketPtr pkt); virtual bool recvTimingSnoopResp(PacketPtr pkt);
virtual bool recvTiming(PacketPtr pkt); virtual bool recvTimingReq(PacketPtr pkt);
virtual Tick recvAtomic(PacketPtr pkt); virtual Tick recvAtomic(PacketPtr pkt);
@ -116,7 +116,7 @@ class Cache : public BaseCache
* current MSHR status. This queue has a pointer to our specific * current MSHR status. This queue has a pointer to our specific
* cache implementation and is used by the MemSidePort. * cache implementation and is used by the MemSidePort.
*/ */
class MemSidePacketQueue : public PacketQueue class MemSidePacketQueue : public MasterPacketQueue
{ {
protected: protected:
@ -125,9 +125,9 @@ class Cache : public BaseCache
public: public:
MemSidePacketQueue(Cache<TagStore> &cache, Port &port, MemSidePacketQueue(Cache<TagStore> &cache, MasterPort &port,
const std::string &label) : const std::string &label) :
PacketQueue(cache, port, label), cache(cache) { } MasterPacketQueue(cache, port, label), cache(cache) { }
/** /**
* Override the normal sendDeferredPacket and do not only * Override the normal sendDeferredPacket and do not only
@ -154,9 +154,9 @@ class Cache : public BaseCache
protected: protected:
virtual bool recvTimingSnoop(PacketPtr pkt); virtual void recvTimingSnoopReq(PacketPtr pkt);
virtual bool recvTiming(PacketPtr pkt); virtual bool recvTimingResp(PacketPtr pkt);
virtual Tick recvAtomicSnoop(PacketPtr pkt); virtual Tick recvAtomicSnoop(PacketPtr pkt);

View file

@ -417,7 +417,7 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
Packet *snoopPkt = new Packet(pkt, true); // clear flags Packet *snoopPkt = new Packet(pkt, true); // clear flags
snoopPkt->setExpressSnoop(); snoopPkt->setExpressSnoop();
snoopPkt->assertMemInhibit(); snoopPkt->assertMemInhibit();
memSidePort->sendTiming(snoopPkt); memSidePort->sendTimingReq(snoopPkt);
// main memory will delete snoopPkt // main memory will delete snoopPkt
} }
// since we're the official target but we aren't responding, // since we're the official target but we aren't responding,
@ -1181,7 +1181,7 @@ Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
Packet snoopPkt(pkt, true); // clear flags Packet snoopPkt(pkt, true); // clear flags
snoopPkt.setExpressSnoop(); snoopPkt.setExpressSnoop();
snoopPkt.senderState = new ForwardResponseRecord(pkt, this); snoopPkt.senderState = new ForwardResponseRecord(pkt, this);
cpuSidePort->sendTimingSnoop(&snoopPkt); cpuSidePort->sendTimingSnoopReq(&snoopPkt);
if (snoopPkt.memInhibitAsserted()) { if (snoopPkt.memInhibitAsserted()) {
// cache-to-cache response from some upper cache // cache-to-cache response from some upper cache
assert(!alreadyResponded); assert(!alreadyResponded);
@ -1336,11 +1336,9 @@ Cache<TagStore>::snoopTiming(PacketPtr pkt)
template<class TagStore> template<class TagStore>
bool bool
Cache<TagStore>::CpuSidePort::recvTimingSnoop(PacketPtr pkt) Cache<TagStore>::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
{ {
// Express snoop responses from master to slave, e.g., from L1 to L2 // Express snoop responses from master to slave, e.g., from L1 to L2
assert(pkt->isResponse());
cache->timingAccess(pkt); cache->timingAccess(pkt);
return true; return true;
} }
@ -1492,7 +1490,7 @@ Cache<TagStore>::getTimingPacket()
PacketPtr snoop_pkt = new Packet(tgt_pkt, true); PacketPtr snoop_pkt = new Packet(tgt_pkt, true);
snoop_pkt->setExpressSnoop(); snoop_pkt->setExpressSnoop();
snoop_pkt->senderState = mshr; snoop_pkt->senderState = mshr;
cpuSidePort->sendTimingSnoop(snoop_pkt); cpuSidePort->sendTimingSnoopReq(snoop_pkt);
if (snoop_pkt->memInhibitAsserted()) { if (snoop_pkt->memInhibitAsserted()) {
markInService(mshr, snoop_pkt); markInService(mshr, snoop_pkt);
@ -1557,9 +1555,8 @@ Cache<TagStore>::CpuSidePort::getAddrRanges()
template<class TagStore> template<class TagStore>
bool bool
Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt) Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt)
{ {
assert(pkt->isRequest());
// always let inhibited requests through even if blocked // always let inhibited requests through even if blocked
if (!pkt->memInhibitAsserted() && blocked) { if (!pkt->memInhibitAsserted() && blocked) {
DPRINTF(Cache,"Scheduling a retry while blocked\n"); DPRINTF(Cache,"Scheduling a retry while blocked\n");
@ -1575,7 +1572,6 @@ template<class TagStore>
Tick Tick
Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt) Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
{ {
assert(pkt->isRequest());
// atomic request // atomic request
return cache->atomicAccess(pkt); return cache->atomicAccess(pkt);
} }
@ -1584,7 +1580,6 @@ template<class TagStore>
void void
Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt) Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
{ {
assert(pkt->isRequest());
// functional request // functional request
cache->functionalAccess(pkt, true); cache->functionalAccess(pkt, true);
} }
@ -1605,7 +1600,7 @@ CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
template<class TagStore> template<class TagStore>
bool bool
Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt) Cache<TagStore>::MemSidePort::recvTimingResp(PacketPtr pkt)
{ {
// this needs to be fixed so that the cache updates the mshr and sends the // this needs to be fixed so that the cache updates the mshr and sends the
// packet back out on the link, but it probably won't happen so until this // packet back out on the link, but it probably won't happen so until this
@ -1613,27 +1608,23 @@ Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
if (pkt->wasNacked()) if (pkt->wasNacked())
panic("Need to implement cache resending nacked packets!\n"); panic("Need to implement cache resending nacked packets!\n");
assert(pkt->isResponse());
cache->handleResponse(pkt); cache->handleResponse(pkt);
return true; return true;
} }
// Express snooping requests to memside port // Express snooping requests to memside port
template<class TagStore> template<class TagStore>
bool void
Cache<TagStore>::MemSidePort::recvTimingSnoop(PacketPtr pkt) Cache<TagStore>::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
{ {
// handle snooping requests // handle snooping requests
assert(pkt->isRequest());
cache->snoopTiming(pkt); cache->snoopTiming(pkt);
return true;
} }
template<class TagStore> template<class TagStore>
Tick Tick
Cache<TagStore>::MemSidePort::recvAtomicSnoop(PacketPtr pkt) Cache<TagStore>::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
{ {
assert(pkt->isRequest());
// atomic snoop // atomic snoop
return cache->snoopAtomic(pkt); return cache->snoopAtomic(pkt);
} }
@ -1642,7 +1633,6 @@ template<class TagStore>
void void
Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt) Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
{ {
assert(pkt->isRequest());
// functional snoop (note that in contrast to atomic we don't have // functional snoop (note that in contrast to atomic we don't have
// a specific functionalSnoop method, as they have the same // a specific functionalSnoop method, as they have the same
// behaviour regardless) // behaviour regardless)
@ -1668,7 +1658,7 @@ Cache<TagStore>::MemSidePacketQueue::sendDeferredPacket()
} else { } else {
MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState); MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
waitingOnRetry = !port.sendTiming(pkt); waitingOnRetry = !masterPort.sendTimingReq(pkt);
if (waitingOnRetry) { if (waitingOnRetry) {
DPRINTF(CachePort, "now waiting on a retry\n"); DPRINTF(CachePort, "now waiting on a retry\n");

View file

@ -82,12 +82,12 @@ class MessageMasterPort : public QueuedMasterPort
virtual ~MessageMasterPort() virtual ~MessageMasterPort()
{} {}
bool recvTiming(PacketPtr pkt) { recvResponse(pkt); return true; } bool recvTimingResp(PacketPtr pkt) { recvResponse(pkt); return true; }
protected: protected:
/** A packet queue for outgoing packets. */ /** A packet queue for outgoing packets. */
PacketQueue queue; MasterPacketQueue queue;
// Accept and ignore responses. // Accept and ignore responses.
virtual Tick recvResponse(PacketPtr pkt) virtual Tick recvResponse(PacketPtr pkt)

View file

@ -46,9 +46,8 @@
using namespace std; using namespace std;
PacketQueue::PacketQueue(EventManager& _em, Port& _port, PacketQueue::PacketQueue(EventManager& _em, const std::string& _label)
const std::string _label) : em(_em), sendEvent(this), drainEvent(NULL), label(_label),
: em(_em), label(_label), sendEvent(this), drainEvent(NULL), port(_port),
waitingOnRetry(false) waitingOnRetry(false)
{ {
} }
@ -142,11 +141,10 @@ void PacketQueue::trySendTiming()
DeferredPacket dp = transmitList.front(); DeferredPacket dp = transmitList.front();
transmitList.pop_front(); transmitList.pop_front();
// attempt to send the packet and remember the outcome // use the appropriate implementation of sendTiming based on the
if (!dp.sendAsSnoop) // type of port associated with the queue, and whether the packet
waitingOnRetry = !port.sendTiming(dp.pkt); // is to be sent as a snoop or not
else waitingOnRetry = !sendTiming(dp.pkt, dp.sendAsSnoop);
waitingOnRetry = !port.sendTimingSnoop(dp.pkt);
if (waitingOnRetry) { if (waitingOnRetry) {
// put the packet back at the front of the list (packet should // put the packet back at the front of the list (packet should
@ -206,3 +204,33 @@ PacketQueue::drain(Event *de)
drainEvent = de; drainEvent = de;
return 1; return 1;
} }
MasterPacketQueue::MasterPacketQueue(EventManager& _em, MasterPort& _masterPort,
const std::string _label)
: PacketQueue(_em, _label), masterPort(_masterPort)
{
}
bool
MasterPacketQueue::sendTiming(PacketPtr pkt, bool send_as_snoop)
{
// attempt to send the packet and return according to the outcome
if (!send_as_snoop)
return masterPort.sendTimingReq(pkt);
else
return masterPort.sendTimingSnoopResp(pkt);
}
SlavePacketQueue::SlavePacketQueue(EventManager& _em, SlavePort& _slavePort,
const std::string _label)
: PacketQueue(_em, _label), slavePort(_slavePort)
{
}
bool
SlavePacketQueue::sendTiming(PacketPtr pkt, bool send_as_snoop)
{
// we should never have queued snoop requests
assert(!send_as_snoop);
return slavePort.sendTimingResp(pkt);
}

View file

@ -86,9 +86,6 @@ class PacketQueue
/** The manager which is used for the event queue */ /** The manager which is used for the event queue */
EventManager& em; EventManager& em;
/** Label to use for print request packets label stack. */
const std::string label;
/** This function attempts to send deferred packets. Scheduled to /** This function attempts to send deferred packets. Scheduled to
* be called in the future via SendEvent. */ * be called in the future via SendEvent. */
void processSendEvent(); void processSendEvent();
@ -104,8 +101,8 @@ class PacketQueue
protected: protected:
/** The port used to send the packets. */ /** Label to use for print request packets label stack. */
Port& port; const std::string label;
/** Remember whether we're awaiting a retry from the bus. */ /** Remember whether we're awaiting a retry from the bus. */
bool waitingOnRetry; bool waitingOnRetry;
@ -134,6 +131,11 @@ class PacketQueue
*/ */
void trySendTiming(); void trySendTiming();
/**
*
*/
virtual bool sendTiming(PacketPtr pkt, bool send_as_snoop) = 0;
/** /**
* Based on the transmit list, or the provided time, schedule a * Based on the transmit list, or the provided time, schedule a
* send event if there are packets to send. If we are idle and * send event if there are packets to send. If we are idle and
@ -152,31 +154,28 @@ class PacketQueue
*/ */
virtual void recvRangeChange() { } virtual void recvRangeChange() { }
public:
/** /**
* Create a packet queue, linked to an event manager, a port used * Create a packet queue, linked to an event manager, and a label
* to send the packets, and potentially give it a label that will * that will be used for functional print request packets.
* be used for functional print request packets.
* *
* @param _em Event manager used for scheduling this queue * @param _em Event manager used for scheduling this queue
* @param _port Port used to send the packets
* @param _label Label to push on the label stack for print request packets * @param _label Label to push on the label stack for print request packets
*/ */
PacketQueue(EventManager& _em, Port& _port, PacketQueue(EventManager& _em, const std::string& _label);
const std::string _label = "PacketQueue");
/** /**
* Virtual desctructor since the class may be used as a base class. * Virtual desctructor since the class may be used as a base class.
*/ */
virtual ~PacketQueue(); virtual ~PacketQueue();
public:
/** /**
* Provide a name to simplify debugging. Base it on the port. * Provide a name to simplify debugging.
* *
* @return A complete name, appended to module and port * @return A complete name, appended to module and port
*/ */
const std::string name() const { return port.name() + "-queue"; } virtual const std::string name() const = 0;
/** Check the list of buffered packets against the supplied /** Check the list of buffered packets against the supplied
* functional request. */ * functional request. */
@ -217,4 +216,63 @@ class PacketQueue
unsigned int drain(Event *de); unsigned int drain(Event *de);
}; };
class MasterPacketQueue : public PacketQueue
{
protected:
MasterPort& masterPort;
public:
/**
* Create a master packet queue, linked to an event manager, a
* master port, and a label that will be used for functional print
* request packets.
*
* @param _em Event manager used for scheduling this queue
* @param _masterPort Master port used to send the packets
* @param _label Label to push on the label stack for print request packets
*/
MasterPacketQueue(EventManager& _em, MasterPort& _masterPort,
const std::string _label = "MasterPacketQueue");
virtual ~MasterPacketQueue() { }
const std::string name() const
{ return masterPort.name() + "-" + label; }
bool sendTiming(PacketPtr pkt, bool send_as_snoop);
};
class SlavePacketQueue : public PacketQueue
{
protected:
SlavePort& slavePort;
public:
/**
* Create a slave packet queue, linked to an event manager, a
* slave port, and a label that will be used for functional print
* request packets.
*
* @param _em Event manager used for scheduling this queue
* @param _slavePort Slave port used to send the packets
* @param _label Label to push on the label stack for print request packets
*/
SlavePacketQueue(EventManager& _em, SlavePort& _slavePort,
const std::string _label = "SlavePacketQueue");
virtual ~SlavePacketQueue() { }
const std::string name() const
{ return slavePort.name() + "-" + label; }
bool sendTiming(PacketPtr pkt, bool send_as_snoop);
};
#endif // __MEM_PACKET_QUEUE_HH__ #endif // __MEM_PACKET_QUEUE_HH__

View file

@ -107,15 +107,31 @@ MasterPort::peerBlockSize() const
Tick Tick
MasterPort::sendAtomic(PacketPtr pkt) MasterPort::sendAtomic(PacketPtr pkt)
{ {
assert(pkt->isRequest());
return _slavePort->recvAtomic(pkt); return _slavePort->recvAtomic(pkt);
} }
void void
MasterPort::sendFunctional(PacketPtr pkt) MasterPort::sendFunctional(PacketPtr pkt)
{ {
assert(pkt->isRequest());
return _slavePort->recvFunctional(pkt); return _slavePort->recvFunctional(pkt);
} }
bool
MasterPort::sendTimingReq(PacketPtr pkt)
{
assert(pkt->isRequest());
return _slavePort->recvTimingReq(pkt);
}
bool
MasterPort::sendTimingSnoopResp(PacketPtr pkt)
{
assert(pkt->isResponse());
return _slavePort->recvTimingSnoopResp(pkt);
}
void void
MasterPort::printAddr(Addr a) MasterPort::printAddr(Addr a)
{ {
@ -171,11 +187,27 @@ SlavePort::isConnected() const
Tick Tick
SlavePort::sendAtomicSnoop(PacketPtr pkt) SlavePort::sendAtomicSnoop(PacketPtr pkt)
{ {
assert(pkt->isRequest());
return _masterPort->recvAtomicSnoop(pkt); return _masterPort->recvAtomicSnoop(pkt);
} }
void void
SlavePort::sendFunctionalSnoop(PacketPtr pkt) SlavePort::sendFunctionalSnoop(PacketPtr pkt)
{ {
assert(pkt->isRequest());
return _masterPort->recvFunctionalSnoop(pkt); return _masterPort->recvFunctionalSnoop(pkt);
} }
bool
SlavePort::sendTimingResp(PacketPtr pkt)
{
assert(pkt->isResponse());
return _masterPort->recvTimingResp(pkt);
}
void
SlavePort::sendTimingSnoopReq(PacketPtr pkt)
{
assert(pkt->isRequest());
_masterPort->recvTimingSnoopReq(pkt);
}

View file

@ -73,8 +73,7 @@ class MemObject;
* opposite role. * opposite role.
* *
* Each port has a name and an owner, and enables three basic types of * Each port has a name and an owner, and enables three basic types of
* accesses to the peer port: sendFunctional, sendAtomic and * accesses to the peer port: functional, atomic and timing.
* sendTiming.
*/ */
class Port class Port
{ {
@ -130,61 +129,18 @@ class Port
protected: protected:
/** These functions are protected because they should only be
* called by a peer port, never directly by any outside object. */
/** /**
* Receive a timing request or response packet from the peer port. * Called by a peer port if sendTimingReq, sendTimingResp or
*/ * sendTimingSnoopResp was unsuccesful, and had to wait.
virtual bool recvTiming(PacketPtr pkt) = 0;
/**
* Receive a timing snoop request or snoop response packet from
* the peer port.
*/
virtual bool recvTimingSnoop(PacketPtr pkt)
{
panic("%s was not expecting a timing snoop\n", name());
return false;
}
/**
* Called by a peer port if sendTiming or sendTimingSnoop was
* unsuccesful, and had to wait.
*/ */
virtual void recvRetry() = 0; virtual void recvRetry() = 0;
public: public:
/**
* Attempt to send a timing request or response packet to the peer
* port by calling its receive function. If the send does not
* succeed, as indicated by the return value, then the sender must
* wait for a recvRetry at which point it can re-issue a
* sendTiming.
*
* @param pkt Packet to send.
*
* @return If the send was succesful or not.
*/
bool sendTiming(PacketPtr pkt) { return peer->recvTiming(pkt); }
/**
* Attempt to send a timing snoop request or snoop response packet
* to the peer port by calling its receive function. If the send
* does not succeed, as indicated by the return value, then the
* sender must wait for a recvRetry at which point it can re-issue
* a sendTimingSnoop.
*
* @param pkt Packet to send.
*
* @return If the send was succesful or not.
*/
bool sendTimingSnoop(PacketPtr pkt) { return peer->recvTimingSnoop(pkt); }
/** /**
* Send a retry to a peer port that previously attempted a * Send a retry to a peer port that previously attempted a
* sendTiming or sendTimingSnoop which was unsuccessful. * sendTimingReq, sendTimingResp or sendTimingSnoopResp which was
* unsuccessful.
*/ */
void sendRetry() { return peer->recvRetry(); } void sendRetry() { return peer->recvRetry(); }
@ -202,6 +158,8 @@ class SlavePort;
class MasterPort : public Port class MasterPort : public Port
{ {
friend class SlavePort;
private: private:
SlavePort* _slavePort; SlavePort* _slavePort;
@ -237,30 +195,28 @@ class MasterPort : public Port
void sendFunctional(PacketPtr pkt); void sendFunctional(PacketPtr pkt);
/** /**
* Receive an atomic snoop request packet from the slave port. * Attempt to send a timing request to the slave port by calling
*/ * its corresponding receive function. If the send does not
virtual Tick recvAtomicSnoop(PacketPtr pkt) * succeed, as indicated by the return value, then the sender must
{ * wait for a recvRetry at which point it can re-issue a
panic("%s was not expecting an atomic snoop\n", name()); * sendTimingReq.
return 0; *
} * @param pkt Packet to send.
*
* @return If the send was succesful or not.
*/
bool sendTimingReq(PacketPtr pkt);
/** /**
* Receive a functional snoop request packet from the slave port. * Attempt to send a timing snoop response packet to the slave
* port by calling its corresponding receive function. If the send
* does not succeed, as indicated by the return value, then the
* sender must wait for a recvRetry at which point it can re-issue
* a sendTimingSnoopResp.
*
* @param pkt Packet to send.
*/ */
virtual void recvFunctionalSnoop(PacketPtr pkt) bool sendTimingSnoopResp(PacketPtr pkt);
{
panic("%s was not expecting a functional snoop\n", name());
}
/**
* Called to receive an address range change from the peer slave
* port. the default implementation ignored the change and does
* nothing. Override this function in a derived class if the owner
* needs to be aware of he laesddress ranges, e.g. in an
* interconnect component like a bus.
*/
virtual void recvRangeChange() { }
/** /**
* Determine if this master port is snooping or not. The default * Determine if this master port is snooping or not. The default
@ -288,6 +244,47 @@ class MasterPort : public Port
* that address throughout the memory system. For debugging. * that address throughout the memory system. For debugging.
*/ */
void printAddr(Addr a); void printAddr(Addr a);
protected:
/**
* Receive an atomic snoop request packet from the slave port.
*/
virtual Tick recvAtomicSnoop(PacketPtr pkt)
{
panic("%s was not expecting an atomic snoop request\n", name());
return 0;
}
/**
* Receive a functional snoop request packet from the slave port.
*/
virtual void recvFunctionalSnoop(PacketPtr pkt)
{
panic("%s was not expecting a functional snoop request\n", name());
}
/**
* Receive a timing response from the slave port.
*/
virtual bool recvTimingResp(PacketPtr pkt) = 0;
/**
* Receive a timing snoop request from the slave port.
*/
virtual void recvTimingSnoopReq(PacketPtr pkt)
{
panic("%s was not expecting a timing snoop request\n", name());
}
/**
* Called to receive an address range change from the peer slave
* port. the default implementation ignored the change and does
* nothing. Override this function in a derived class if the owner
* needs to be aware of he laesddress ranges, e.g. in an
* interconnect component like a bus.
*/
virtual void recvRangeChange() { }
}; };
/** /**
@ -299,6 +296,8 @@ class MasterPort : public Port
class SlavePort : public Port class SlavePort : public Port
{ {
friend class MasterPort;
private: private:
MasterPort* _masterPort; MasterPort* _masterPort;
@ -334,14 +333,26 @@ class SlavePort : public Port
void sendFunctionalSnoop(PacketPtr pkt); void sendFunctionalSnoop(PacketPtr pkt);
/** /**
* Receive an atomic request packet from the master port. * Attempt to send a timing response to the master port by calling
*/ * its corresponding receive function. If the send does not
virtual Tick recvAtomic(PacketPtr pkt) = 0; * succeed, as indicated by the return value, then the sender must
* wait for a recvRetry at which point it can re-issue a
* sendTimingResp.
*
* @param pkt Packet to send.
*
* @return If the send was succesful or not.
*/
bool sendTimingResp(PacketPtr pkt);
/** /**
* Receive a functional request packet from the master port. * Attempt to send a timing snoop request packet to the master port
* by calling its corresponding receive function. Snoop requests
* always succeed and hence no return value is needed.
*
* @param pkt Packet to send.
*/ */
virtual void recvFunctional(PacketPtr pkt) = 0; void sendTimingSnoopReq(PacketPtr pkt);
/** /**
* Called by a peer port in order to determine the block size of * Called by a peer port in order to determine the block size of
@ -367,6 +378,32 @@ class SlavePort : public Port
* @return a list of ranges responded to * @return a list of ranges responded to
*/ */
virtual AddrRangeList getAddrRanges() = 0; virtual AddrRangeList getAddrRanges() = 0;
protected:
/**
* Receive an atomic request packet from the master port.
*/
virtual Tick recvAtomic(PacketPtr pkt) = 0;
/**
* Receive a functional request packet from the master port.
*/
virtual void recvFunctional(PacketPtr pkt) = 0;
/**
* Receive a timing request from the master port.
*/
virtual bool recvTimingReq(PacketPtr pkt) = 0;
/**
* Receive a timing snoop response from the master port.
*/
virtual bool recvTimingSnoopResp(PacketPtr pkt)
{
panic("%s was not expecting a timing snoop response\n", name());
}
}; };
#endif //__MEM_PORT_HH__ #endif //__MEM_PORT_HH__

View file

@ -62,7 +62,7 @@ class QueuedSlavePort : public SlavePort
protected: protected:
/** Packet queue used to store outgoing requests and responses. */ /** Packet queue used to store outgoing requests and responses. */
PacketQueue &queue; SlavePacketQueue &queue;
/** This function is notification that the device should attempt to send a /** This function is notification that the device should attempt to send a
* packet again. */ * packet again. */
@ -78,7 +78,7 @@ class QueuedSlavePort : public SlavePort
* QueuePort constructor. * QueuePort constructor.
*/ */
QueuedSlavePort(const std::string& name, MemObject* owner, QueuedSlavePort(const std::string& name, MemObject* owner,
PacketQueue &queue) : SlavePacketQueue &queue) :
SlavePort(name, owner), queue(queue) SlavePort(name, owner), queue(queue)
{ } { }
@ -103,7 +103,7 @@ class QueuedMasterPort : public MasterPort
protected: protected:
/** Packet queue used to store outgoing requests and responses. */ /** Packet queue used to store outgoing requests and responses. */
PacketQueue &queue; MasterPacketQueue &queue;
/** This function is notification that the device should attempt to send a /** This function is notification that the device should attempt to send a
* packet again. */ * packet again. */
@ -119,7 +119,7 @@ class QueuedMasterPort : public MasterPort
* QueuePort constructor. * QueuePort constructor.
*/ */
QueuedMasterPort(const std::string& name, MemObject* owner, QueuedMasterPort(const std::string& name, MemObject* owner,
PacketQueue &queue) : MasterPacketQueue &queue) :
MasterPort(name, owner), queue(queue) MasterPort(name, owner), queue(queue)
{ } { }

View file

@ -141,14 +141,12 @@ RubyPort::M5Port::recvAtomic(PacketPtr pkt)
bool bool
RubyPort::PioPort::recvTiming(PacketPtr pkt) RubyPort::PioPort::recvTimingResp(PacketPtr pkt)
{ {
// In FS mode, ruby memory will receive pio responses from devices // In FS mode, ruby memory will receive pio responses from devices
// and it must forward these responses back to the particular CPU. // and it must forward these responses back to the particular CPU.
DPRINTF(RubyPort, "Pio response for address %#x\n", pkt->getAddr()); DPRINTF(RubyPort, "Pio response for address %#x\n", pkt->getAddr());
assert(pkt->isResponse());
// First we must retrieve the request port from the sender State // First we must retrieve the request port from the sender State
RubyPort::SenderState *senderState = RubyPort::SenderState *senderState =
safe_cast<RubyPort::SenderState *>(pkt->senderState); safe_cast<RubyPort::SenderState *>(pkt->senderState);
@ -159,24 +157,23 @@ RubyPort::PioPort::recvTiming(PacketPtr pkt)
pkt->senderState = senderState->saved; pkt->senderState = senderState->saved;
delete senderState; delete senderState;
port->sendTiming(pkt); port->sendTimingResp(pkt);
return true; return true;
} }
bool bool
RubyPort::M5Port::recvTiming(PacketPtr pkt) RubyPort::M5Port::recvTimingReq(PacketPtr pkt)
{ {
DPRINTF(RubyPort, DPRINTF(RubyPort,
"Timing access caught for address %#x\n", pkt->getAddr()); "Timing access caught for address %#x\n", pkt->getAddr());
//dsm: based on SimpleTimingPort::recvTiming(pkt); //dsm: based on SimpleTimingPort::recvTimingReq(pkt);
// The received packets should only be M5 requests, which should never // The received packets should only be M5 requests, which should never
// get nacked. There used to be code to hanldle nacks here, but // get nacked. There used to be code to hanldle nacks here, but
// I'm pretty sure it didn't work correctly with the drain code, // I'm pretty sure it didn't work correctly with the drain code,
// so that would need to be fixed if we ever added it back. // so that would need to be fixed if we ever added it back.
assert(pkt->isRequest());
if (pkt->memInhibitAsserted()) { if (pkt->memInhibitAsserted()) {
warn("memInhibitAsserted???"); warn("memInhibitAsserted???");

View file

@ -62,7 +62,7 @@ class RubyPort : public MemObject
{ {
private: private:
PacketQueue queue; SlavePacketQueue queue;
RubyPort *ruby_port; RubyPort *ruby_port;
RubySystem* ruby_system; RubySystem* ruby_system;
bool _onRetryList; bool _onRetryList;
@ -83,7 +83,7 @@ class RubyPort : public MemObject
{ _onRetryList = newVal; } { _onRetryList = newVal; }
protected: protected:
virtual bool recvTiming(PacketPtr pkt); virtual bool recvTimingReq(PacketPtr pkt);
virtual Tick recvAtomic(PacketPtr pkt); virtual Tick recvAtomic(PacketPtr pkt);
virtual void recvFunctional(PacketPtr pkt); virtual void recvFunctional(PacketPtr pkt);
virtual AddrRangeList getAddrRanges(); virtual AddrRangeList getAddrRanges();
@ -100,7 +100,7 @@ class RubyPort : public MemObject
{ {
private: private:
PacketQueue queue; MasterPacketQueue queue;
RubyPort *ruby_port; RubyPort *ruby_port;
@ -109,7 +109,7 @@ class RubyPort : public MemObject
bool sendNextCycle(PacketPtr pkt); bool sendNextCycle(PacketPtr pkt);
protected: protected:
virtual bool recvTiming(PacketPtr pkt); virtual bool recvTimingResp(PacketPtr pkt);
}; };
friend class PioPort; friend class PioPort;

View file

@ -53,7 +53,6 @@ SimpleTimingPort::SimpleTimingPort(const std::string& _name,
void void
SimpleTimingPort::recvFunctional(PacketPtr pkt) SimpleTimingPort::recvFunctional(PacketPtr pkt)
{ {
assert(pkt->isRequest());
if (!queue.checkFunctional(pkt)) { if (!queue.checkFunctional(pkt)) {
// do an atomic access and throw away the returned latency // do an atomic access and throw away the returned latency
recvAtomic(pkt); recvAtomic(pkt);
@ -61,11 +60,8 @@ SimpleTimingPort::recvFunctional(PacketPtr pkt)
} }
bool bool
SimpleTimingPort::recvTiming(PacketPtr pkt) SimpleTimingPort::recvTimingReq(PacketPtr pkt)
{ {
// the port is a slave and should hence only get timing requests
assert(pkt->isRequest());
if (pkt->memInhibitAsserted()) { if (pkt->memInhibitAsserted()) {
// snooper will supply based on copy of packet // snooper will supply based on copy of packet
// still target's responsibility to delete packet // still target's responsibility to delete packet

View file

@ -54,7 +54,7 @@
/** /**
* The simple timing port uses a queued port to implement * The simple timing port uses a queued port to implement
* recvFunctional and recvTiming through recvAtomic. It is always a * recvFunctional and recvTimingReq through recvAtomic. It is always a
* slave port. * slave port.
*/ */
class SimpleTimingPort : public QueuedSlavePort class SimpleTimingPort : public QueuedSlavePort
@ -63,13 +63,13 @@ class SimpleTimingPort : public QueuedSlavePort
protected: protected:
/** The packet queue used to store outgoing responses. */ /** The packet queue used to store outgoing responses. */
PacketQueue queue; SlavePacketQueue queue;
/** Implemented using recvAtomic(). */ /** Implemented using recvAtomic(). */
void recvFunctional(PacketPtr pkt); void recvFunctional(PacketPtr pkt);
/** Implemented using recvAtomic(). */ /** Implemented using recvAtomic(). */
bool recvTiming(PacketPtr pkt); bool recvTimingReq(PacketPtr pkt);
virtual Tick recvAtomic(PacketPtr pkt) = 0; virtual Tick recvAtomic(PacketPtr pkt) = 0;
@ -77,7 +77,7 @@ class SimpleTimingPort : public QueuedSlavePort
/** /**
* Create a new SimpleTimingPort that relies on a packet queue to * Create a new SimpleTimingPort that relies on a packet queue to
* hold responses, and implements recvTiming and recvFunctional * hold responses, and implements recvTimingReq and recvFunctional
* through calls to recvAtomic. Once a request arrives, it is * through calls to recvAtomic. Once a request arrives, it is
* passed to recvAtomic, and in the case of a timing access any * passed to recvAtomic, and in the case of a timing access any
* response is scheduled to be sent after the delay of the atomic * response is scheduled to be sent after the delay of the atomic

View file

@ -88,7 +88,7 @@ class System : public MemObject
SystemPort(const std::string &_name, MemObject *_owner) SystemPort(const std::string &_name, MemObject *_owner)
: MasterPort(_name, _owner) : MasterPort(_name, _owner)
{ } { }
bool recvTiming(PacketPtr pkt) bool recvTimingResp(PacketPtr pkt)
{ panic("SystemPort does not receive timing!\n"); return false; } { panic("SystemPort does not receive timing!\n"); return false; }
void recvRetry() void recvRetry()
{ panic("SystemPort does not expect retry!\n"); } { panic("SystemPort does not expect retry!\n"); }