MEM: Separate requests and responses for timing accesses
This patch moves send/recvTiming and send/recvTimingSnoop from the Port base class to the MasterPort and SlavePort, and also splits them into separate member functions for requests and responses: send/recvTimingReq, send/recvTimingResp, and send/recvTimingSnoopReq, send/recvTimingSnoopResp. A master port sends requests and receives responses, and also receives snoop requests and sends snoop responses. A slave port has the reciprocal behaviour as it receives requests and sends responses, and sends snoop requests and receives snoop responses. For all MemObjects that have only master ports or slave ports (but not both), e.g. a CPU, or a PIO device, this patch merely adds more clarity to what kind of access is taking place. For example, a CPU port used to call sendTiming, and will now call sendTimingReq. Similarly, a response previously came back through recvTiming, which is now recvTimingResp. For the modules that have both master and slave ports, e.g. the bus, the behaviour was previously relying on branches based on pkt->isRequest(), and this is now replaced with a direct call to the appropriate member function depending on the type of access. Please note that send/recvRetry is still shared by all the timing accessors and remains in the Port base class for now (to maintain the current bus functionality and avoid changing the statistics of all regressions). The packet queue is split into a MasterPort and SlavePort version to facilitate the use of the new timing accessors. All uses of the PacketQueue are updated accordingly. With this patch, the type of packet (request or response) is now well defined for each type of access, and asserts on pkt->isRequest() and pkt->isResponse() are now moved to the appropriate send member functions. It is also worth noting that sendTimingSnoopReq no longer returns a boolean, as the semantics do not allow snoop requests to be rejected or stalled. All these assumptions are now explicitly part of the port interface itself.
This commit is contained in:
parent
8966e6d36d
commit
3fea59e162
47 changed files with 546 additions and 424 deletions
|
@ -114,15 +114,14 @@ Walker::startFunctional(ThreadContext * _tc, Addr &addr, unsigned &logBytes,
|
|||
}
|
||||
|
||||
bool
|
||||
Walker::WalkerPort::recvTiming(PacketPtr pkt)
|
||||
Walker::WalkerPort::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
return walker->recvTiming(pkt);
|
||||
return walker->recvTimingResp(pkt);
|
||||
}
|
||||
|
||||
bool
|
||||
Walker::recvTiming(PacketPtr pkt)
|
||||
Walker::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isResponse());
|
||||
WalkerSenderState * senderState =
|
||||
dynamic_cast<WalkerSenderState *>(pkt->senderState);
|
||||
pkt->senderState = senderState->saved;
|
||||
|
@ -171,7 +170,7 @@ Walker::recvRetry()
|
|||
bool Walker::sendTiming(WalkerState* sendingState, PacketPtr pkt)
|
||||
{
|
||||
pkt->senderState = new WalkerSenderState(sendingState, pkt->senderState);
|
||||
return port.sendTiming(pkt);
|
||||
return port.sendTimingReq(pkt);
|
||||
}
|
||||
|
||||
MasterPort &
|
||||
|
|
|
@ -70,12 +70,12 @@ namespace X86ISA
|
|||
protected:
|
||||
Walker *walker;
|
||||
|
||||
bool recvTiming(PacketPtr pkt);
|
||||
bool recvTimingResp(PacketPtr pkt);
|
||||
|
||||
/**
|
||||
* Snooping a coherence request, do nothing.
|
||||
*/
|
||||
bool recvTimingSnoop(PacketPtr pkt) { return true; }
|
||||
void recvTimingSnoopReq(PacketPtr pkt) { }
|
||||
Tick recvAtomicSnoop(PacketPtr pkt) { return 0; }
|
||||
void recvFunctionalSnoop(PacketPtr pkt) { }
|
||||
void recvRetry();
|
||||
|
@ -179,7 +179,7 @@ namespace X86ISA
|
|||
MasterID masterId;
|
||||
|
||||
// Functions for dealing with packets.
|
||||
bool recvTiming(PacketPtr pkt);
|
||||
bool recvTimingResp(PacketPtr pkt);
|
||||
void recvRetry();
|
||||
bool sendTiming(WalkerState * sendingState, PacketPtr pkt);
|
||||
|
||||
|
|
|
@ -532,7 +532,7 @@ BaseCPU::traceFunctionsInternal(Addr pc)
|
|||
}
|
||||
|
||||
bool
|
||||
BaseCPU::CpuPort::recvTiming(PacketPtr pkt)
|
||||
BaseCPU::CpuPort::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
panic("BaseCPU doesn't expect recvTiming!\n");
|
||||
return true;
|
||||
|
|
|
@ -133,7 +133,7 @@ class BaseCPU : public MemObject
|
|||
|
||||
protected:
|
||||
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
|
||||
virtual void recvRetry();
|
||||
|
||||
|
|
|
@ -88,10 +88,8 @@ InOrderCPU::CachePort::CachePort(CacheUnit *_cacheUnit) :
|
|||
{ }
|
||||
|
||||
bool
|
||||
InOrderCPU::CachePort::recvTiming(Packet *pkt)
|
||||
InOrderCPU::CachePort::recvTimingResp(Packet *pkt)
|
||||
{
|
||||
assert(pkt->isResponse());
|
||||
|
||||
if (pkt->isError())
|
||||
DPRINTF(InOrderCachePort, "Got error packet back for address: %x\n",
|
||||
pkt->getAddr());
|
||||
|
|
|
@ -170,13 +170,13 @@ class InOrderCPU : public BaseCPU
|
|||
protected:
|
||||
|
||||
/** Timing version of receive */
|
||||
bool recvTiming(PacketPtr pkt);
|
||||
bool recvTimingResp(PacketPtr pkt);
|
||||
|
||||
/** Handles doing a retry of a failed timing request. */
|
||||
void recvRetry();
|
||||
|
||||
/** Ignoring snoops for now. */
|
||||
bool recvTimingSnoop(PacketPtr pkt) { return true; }
|
||||
void recvTimingSnoopReq(PacketPtr pkt) { }
|
||||
};
|
||||
|
||||
/** Define TickEvent for the CPU */
|
||||
|
|
|
@ -873,7 +873,7 @@ CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res,
|
|||
tid, inst->seqNum, cache_req->dataPkt->getAddr());
|
||||
|
||||
if (do_access) {
|
||||
if (!cachePort->sendTiming(cache_req->dataPkt)) {
|
||||
if (!cachePort->sendTimingReq(cache_req->dataPkt)) {
|
||||
DPRINTF(InOrderCachePort,
|
||||
"[tid:%i] [sn:%i] cannot access cache, because port "
|
||||
"is blocked. now waiting to retry request\n", tid,
|
||||
|
|
|
@ -87,9 +87,8 @@ BaseO3CPU::regStats()
|
|||
|
||||
template<class Impl>
|
||||
bool
|
||||
FullO3CPU<Impl>::IcachePort::recvTiming(PacketPtr pkt)
|
||||
FullO3CPU<Impl>::IcachePort::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isResponse());
|
||||
DPRINTF(O3CPU, "Fetch unit received timing\n");
|
||||
// We shouldn't ever get a block in ownership state
|
||||
assert(!(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
|
||||
|
@ -107,18 +106,16 @@ FullO3CPU<Impl>::IcachePort::recvRetry()
|
|||
|
||||
template <class Impl>
|
||||
bool
|
||||
FullO3CPU<Impl>::DcachePort::recvTiming(PacketPtr pkt)
|
||||
FullO3CPU<Impl>::DcachePort::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isResponse());
|
||||
return lsq->recvTiming(pkt);
|
||||
return lsq->recvTimingResp(pkt);
|
||||
}
|
||||
|
||||
template <class Impl>
|
||||
bool
|
||||
FullO3CPU<Impl>::DcachePort::recvTimingSnoop(PacketPtr pkt)
|
||||
void
|
||||
FullO3CPU<Impl>::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
return lsq->recvTimingSnoop(pkt);
|
||||
lsq->recvTimingSnoopReq(pkt);
|
||||
}
|
||||
|
||||
template <class Impl>
|
||||
|
|
|
@ -148,8 +148,8 @@ class FullO3CPU : public BaseO3CPU
|
|||
|
||||
/** Timing version of receive. Handles setting fetch to the
|
||||
* proper status to start fetching. */
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingSnoop(PacketPtr pkt) { return true; }
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
virtual void recvTimingSnoopReq(PacketPtr pkt) { }
|
||||
|
||||
/** Handles doing a retry of a failed fetch. */
|
||||
virtual void recvRetry();
|
||||
|
@ -176,8 +176,8 @@ class FullO3CPU : public BaseO3CPU
|
|||
/** Timing version of receive. Handles writing back and
|
||||
* completing the load or store that has returned from
|
||||
* memory. */
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingSnoop(PacketPtr pkt);
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
virtual void recvTimingSnoopReq(PacketPtr pkt);
|
||||
|
||||
/** Handles doing a retry of the previous send. */
|
||||
virtual void recvRetry();
|
||||
|
|
|
@ -621,7 +621,7 @@ DefaultFetch<Impl>::finishTranslation(Fault fault, RequestPtr mem_req)
|
|||
fetchedCacheLines++;
|
||||
|
||||
// Access the cache.
|
||||
if (!cpu->getInstPort().sendTiming(data_pkt)) {
|
||||
if (!cpu->getInstPort().sendTimingReq(data_pkt)) {
|
||||
assert(retryPkt == NULL);
|
||||
assert(retryTid == InvalidThreadID);
|
||||
DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid);
|
||||
|
@ -1356,7 +1356,7 @@ DefaultFetch<Impl>::recvRetry()
|
|||
assert(retryTid != InvalidThreadID);
|
||||
assert(fetchStatus[retryTid] == IcacheWaitRetry);
|
||||
|
||||
if (cpu->getInstPort().sendTiming(retryPkt)) {
|
||||
if (cpu->getInstPort().sendTimingReq(retryPkt)) {
|
||||
fetchStatus[retryTid] = IcacheWaitResponse;
|
||||
retryPkt = NULL;
|
||||
retryTid = InvalidThreadID;
|
||||
|
|
|
@ -297,9 +297,9 @@ class LSQ {
|
|||
*
|
||||
* @param pkt Response packet from the memory sub-system
|
||||
*/
|
||||
bool recvTiming(PacketPtr pkt);
|
||||
bool recvTimingResp(PacketPtr pkt);
|
||||
|
||||
bool recvTimingSnoop(PacketPtr pkt);
|
||||
void recvTimingSnoopReq(PacketPtr pkt);
|
||||
|
||||
/** The CPU pointer. */
|
||||
O3CPU *cpu;
|
||||
|
|
|
@ -319,9 +319,8 @@ LSQ<Impl>::recvRetry()
|
|||
|
||||
template <class Impl>
|
||||
bool
|
||||
LSQ<Impl>::recvTiming(PacketPtr pkt)
|
||||
LSQ<Impl>::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isResponse());
|
||||
if (pkt->isError())
|
||||
DPRINTF(LSQ, "Got error packet back for address: %#X\n",
|
||||
pkt->getAddr());
|
||||
|
@ -330,10 +329,9 @@ LSQ<Impl>::recvTiming(PacketPtr pkt)
|
|||
}
|
||||
|
||||
template <class Impl>
|
||||
bool
|
||||
LSQ<Impl>::recvTimingSnoop(PacketPtr pkt)
|
||||
void
|
||||
LSQ<Impl>::recvTimingSnoopReq(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
DPRINTF(LSQ, "received pkt for addr:%#x %s\n", pkt->getAddr(),
|
||||
pkt->cmdString());
|
||||
|
||||
|
@ -345,9 +343,6 @@ LSQ<Impl>::recvTimingSnoop(PacketPtr pkt)
|
|||
thread[tid].checkSnoop(pkt);
|
||||
}
|
||||
}
|
||||
|
||||
// to provide stronger consistency model
|
||||
return true;
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
|
|
|
@ -801,7 +801,7 @@ LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
|
|||
state->mainPkt = data_pkt;
|
||||
}
|
||||
|
||||
if (!dcachePort->sendTiming(fst_data_pkt)) {
|
||||
if (!dcachePort->sendTimingReq(fst_data_pkt)) {
|
||||
// Delete state and data packet because a load retry
|
||||
// initiates a pipeline restart; it does not retry.
|
||||
delete state;
|
||||
|
@ -830,7 +830,7 @@ LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
|
|||
// The first packet will return in completeDataAccess and be
|
||||
// handled there.
|
||||
++usedPorts;
|
||||
if (!dcachePort->sendTiming(snd_data_pkt)) {
|
||||
if (!dcachePort->sendTimingReq(snd_data_pkt)) {
|
||||
|
||||
// The main packet will be deleted in completeDataAccess.
|
||||
delete snd_data_pkt->req;
|
||||
|
|
|
@ -1180,7 +1180,7 @@ template <class Impl>
|
|||
bool
|
||||
LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
|
||||
{
|
||||
if (!dcachePort->sendTiming(data_pkt)) {
|
||||
if (!dcachePort->sendTimingReq(data_pkt)) {
|
||||
// Need to handle becoming blocked on a store.
|
||||
isStoreBlocked = true;
|
||||
++lsqCacheBlocked;
|
||||
|
@ -1203,7 +1203,7 @@ LSQUnit<Impl>::recvRetry()
|
|||
LSQSenderState *state =
|
||||
dynamic_cast<LSQSenderState *>(retryPkt->senderState);
|
||||
|
||||
if (dcachePort->sendTiming(retryPkt)) {
|
||||
if (dcachePort->sendTimingReq(retryPkt)) {
|
||||
// Don't finish the store unless this is the last packet.
|
||||
if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
|
||||
state->pendingPacket == retryPkt) {
|
||||
|
|
|
@ -234,7 +234,7 @@ TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
|
|||
new IprEvent(pkt, this, nextCycle(curTick() + delay));
|
||||
_status = DcacheWaitResponse;
|
||||
dcache_pkt = NULL;
|
||||
} else if (!dcachePort.sendTiming(pkt)) {
|
||||
} else if (!dcachePort.sendTimingReq(pkt)) {
|
||||
_status = DcacheRetry;
|
||||
dcache_pkt = pkt;
|
||||
} else {
|
||||
|
@ -449,7 +449,7 @@ TimingSimpleCPU::handleWritePacket()
|
|||
new IprEvent(dcache_pkt, this, nextCycle(curTick() + delay));
|
||||
_status = DcacheWaitResponse;
|
||||
dcache_pkt = NULL;
|
||||
} else if (!dcachePort.sendTiming(dcache_pkt)) {
|
||||
} else if (!dcachePort.sendTimingReq(dcache_pkt)) {
|
||||
_status = DcacheRetry;
|
||||
} else {
|
||||
_status = DcacheWaitResponse;
|
||||
|
@ -581,7 +581,7 @@ TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
|
|||
ifetch_pkt->dataStatic(&inst);
|
||||
DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());
|
||||
|
||||
if (!icachePort.sendTiming(ifetch_pkt)) {
|
||||
if (!icachePort.sendTimingReq(ifetch_pkt)) {
|
||||
// Need to wait for retry
|
||||
_status = IcacheRetry;
|
||||
} else {
|
||||
|
@ -715,9 +715,8 @@ TimingSimpleCPU::IcachePort::ITickEvent::process()
|
|||
}
|
||||
|
||||
bool
|
||||
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
|
||||
TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isResponse());
|
||||
if (!pkt->wasNacked()) {
|
||||
DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
|
||||
// delay processing of returned data until next CPU clock edge
|
||||
|
@ -732,7 +731,7 @@ TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
|
|||
} else {
|
||||
assert(cpu->_status == IcacheWaitResponse);
|
||||
pkt->reinitNacked();
|
||||
if (!sendTiming(pkt)) {
|
||||
if (!sendTimingReq(pkt)) {
|
||||
cpu->_status = IcacheRetry;
|
||||
cpu->ifetch_pkt = pkt;
|
||||
}
|
||||
|
@ -749,7 +748,7 @@ TimingSimpleCPU::IcachePort::recvRetry()
|
|||
assert(cpu->ifetch_pkt != NULL);
|
||||
assert(cpu->_status == IcacheRetry);
|
||||
PacketPtr tmp = cpu->ifetch_pkt;
|
||||
if (sendTiming(tmp)) {
|
||||
if (sendTimingReq(tmp)) {
|
||||
cpu->_status = IcacheWaitResponse;
|
||||
cpu->ifetch_pkt = NULL;
|
||||
}
|
||||
|
@ -836,9 +835,8 @@ TimingSimpleCPU::completeDrain()
|
|||
}
|
||||
|
||||
bool
|
||||
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
|
||||
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isResponse());
|
||||
if (!pkt->wasNacked()) {
|
||||
// delay processing of returned data until next CPU clock edge
|
||||
Tick next_tick = cpu->nextCycle(curTick());
|
||||
|
@ -862,7 +860,7 @@ TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
|
|||
} else {
|
||||
assert(cpu->_status == DcacheWaitResponse);
|
||||
pkt->reinitNacked();
|
||||
if (!sendTiming(pkt)) {
|
||||
if (!sendTimingReq(pkt)) {
|
||||
cpu->_status = DcacheRetry;
|
||||
cpu->dcache_pkt = pkt;
|
||||
}
|
||||
|
@ -896,7 +894,7 @@ TimingSimpleCPU::DcachePort::recvRetry()
|
|||
dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
|
||||
assert(main_send_state);
|
||||
|
||||
if (sendTiming(tmp)) {
|
||||
if (sendTimingReq(tmp)) {
|
||||
// If we were able to send without retrying, record that fact
|
||||
// and try sending the other fragment.
|
||||
send_state->clearFromParent();
|
||||
|
@ -914,7 +912,7 @@ TimingSimpleCPU::DcachePort::recvRetry()
|
|||
cpu->dcache_pkt = NULL;
|
||||
}
|
||||
}
|
||||
} else if (sendTiming(tmp)) {
|
||||
} else if (sendTimingReq(tmp)) {
|
||||
cpu->_status = DcacheWaitResponse;
|
||||
// memory system takes ownership of packet
|
||||
cpu->dcache_pkt = NULL;
|
||||
|
|
|
@ -156,7 +156,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
|
|||
/**
|
||||
* Snooping a coherence request, do nothing.
|
||||
*/
|
||||
virtual bool recvTimingSnoop(PacketPtr pkt) { return true; }
|
||||
virtual void recvTimingSnoopReq(PacketPtr pkt) { }
|
||||
|
||||
TimingSimpleCPU* cpu;
|
||||
|
||||
|
@ -185,7 +185,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
|
|||
|
||||
protected:
|
||||
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
|
||||
virtual void recvRetry();
|
||||
|
||||
|
@ -212,7 +212,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
|
|||
|
||||
protected:
|
||||
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
|
||||
virtual void recvRetry();
|
||||
|
||||
|
|
|
@ -80,7 +80,7 @@ InvalidateGenerator::initiate()
|
|||
*dummyData = 0;
|
||||
pkt->dataDynamic(dummyData);
|
||||
|
||||
if (port->sendTiming(pkt)) {
|
||||
if (port->sendTimingReq(pkt)) {
|
||||
DPRINTF(DirectedTest, "initiating request - successful\n");
|
||||
if (m_status == InvalidateGeneratorStatus_Load_Waiting) {
|
||||
m_status = InvalidateGeneratorStatus_Load_Pending;
|
||||
|
|
|
@ -91,7 +91,7 @@ RubyDirectedTester::getMasterPort(const std::string &if_name, int idx)
|
|||
}
|
||||
|
||||
bool
|
||||
RubyDirectedTester::CpuPort::recvTiming(PacketPtr pkt)
|
||||
RubyDirectedTester::CpuPort::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
tester->hitCallback(id, pkt->getAddr());
|
||||
|
||||
|
|
|
@ -59,7 +59,7 @@ class RubyDirectedTester : public MemObject
|
|||
{}
|
||||
|
||||
protected:
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
virtual void recvRetry()
|
||||
{ panic("%s does not expect a retry\n", name()); }
|
||||
};
|
||||
|
|
|
@ -70,7 +70,7 @@ SeriesRequestGenerator::initiate()
|
|||
*dummyData = 0;
|
||||
pkt->dataDynamic(dummyData);
|
||||
|
||||
if (port->sendTiming(pkt)) {
|
||||
if (port->sendTimingReq(pkt)) {
|
||||
DPRINTF(DirectedTest, "initiating request - successful\n");
|
||||
m_status = SeriesRequestGeneratorStatus_Request_Pending;
|
||||
return true;
|
||||
|
|
|
@ -53,9 +53,8 @@ using namespace std;
|
|||
int TESTER_ALLOCATOR=0;
|
||||
|
||||
bool
|
||||
MemTest::CpuPort::recvTiming(PacketPtr pkt)
|
||||
MemTest::CpuPort::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isResponse());
|
||||
memtest->completeRequest(pkt);
|
||||
return true;
|
||||
}
|
||||
|
@ -72,7 +71,7 @@ MemTest::sendPkt(PacketPtr pkt) {
|
|||
cachePort.sendAtomic(pkt);
|
||||
completeRequest(pkt);
|
||||
}
|
||||
else if (!cachePort.sendTiming(pkt)) {
|
||||
else if (!cachePort.sendTimingReq(pkt)) {
|
||||
DPRINTF(MemTest, "accessRetry setting to true\n");
|
||||
|
||||
//
|
||||
|
@ -379,7 +378,7 @@ MemTest::tick()
|
|||
void
|
||||
MemTest::doRetry()
|
||||
{
|
||||
if (cachePort.sendTiming(retryPkt)) {
|
||||
if (cachePort.sendTimingReq(retryPkt)) {
|
||||
DPRINTF(MemTest, "accessRetry setting to false\n");
|
||||
accessRetry = false;
|
||||
retryPkt = NULL;
|
||||
|
|
|
@ -97,9 +97,9 @@ class MemTest : public MemObject
|
|||
|
||||
protected:
|
||||
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
|
||||
virtual bool recvTimingSnoop(PacketPtr pkt) { return true; }
|
||||
virtual void recvTimingSnoopReq(PacketPtr pkt) { }
|
||||
|
||||
virtual Tick recvAtomicSnoop(PacketPtr pkt) { return 0; }
|
||||
|
||||
|
|
|
@ -51,9 +51,8 @@ using namespace std;
|
|||
int TESTER_NETWORK=0;
|
||||
|
||||
bool
|
||||
NetworkTest::CpuPort::recvTiming(PacketPtr pkt)
|
||||
NetworkTest::CpuPort::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isResponse());
|
||||
networktest->completeRequest(pkt);
|
||||
return true;
|
||||
}
|
||||
|
@ -67,7 +66,7 @@ NetworkTest::CpuPort::recvRetry()
|
|||
void
|
||||
NetworkTest::sendPkt(PacketPtr pkt)
|
||||
{
|
||||
if (!cachePort.sendTiming(pkt)) {
|
||||
if (!cachePort.sendTimingReq(pkt)) {
|
||||
retryPkt = pkt; // RubyPort will retry sending
|
||||
}
|
||||
numPacketsSent++;
|
||||
|
@ -269,7 +268,7 @@ NetworkTest::generatePkt()
|
|||
void
|
||||
NetworkTest::doRetry()
|
||||
{
|
||||
if (cachePort.sendTiming(retryPkt)) {
|
||||
if (cachePort.sendTimingReq(retryPkt)) {
|
||||
retryPkt = NULL;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -92,7 +92,7 @@ class NetworkTest : public MemObject
|
|||
|
||||
protected:
|
||||
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
|
||||
virtual void recvRetry();
|
||||
};
|
||||
|
|
|
@ -114,7 +114,7 @@ Check::initiatePrefetch()
|
|||
pkt->senderState =
|
||||
new SenderState(m_address, req->getSize(), pkt->senderState);
|
||||
|
||||
if (port->sendTiming(pkt)) {
|
||||
if (port->sendTimingReq(pkt)) {
|
||||
DPRINTF(RubyTest, "successfully initiated prefetch.\n");
|
||||
} else {
|
||||
// If the packet did not issue, must delete
|
||||
|
@ -154,7 +154,7 @@ Check::initiateFlush()
|
|||
pkt->senderState =
|
||||
new SenderState(m_address, req->getSize(), pkt->senderState);
|
||||
|
||||
if (port->sendTiming(pkt)) {
|
||||
if (port->sendTimingReq(pkt)) {
|
||||
DPRINTF(RubyTest, "initiating Flush - successful\n");
|
||||
}
|
||||
}
|
||||
|
@ -201,7 +201,7 @@ Check::initiateAction()
|
|||
pkt->senderState =
|
||||
new SenderState(writeAddr, req->getSize(), pkt->senderState);
|
||||
|
||||
if (port->sendTiming(pkt)) {
|
||||
if (port->sendTimingReq(pkt)) {
|
||||
DPRINTF(RubyTest, "initiating action - successful\n");
|
||||
DPRINTF(RubyTest, "status before action update: %s\n",
|
||||
(TesterStatus_to_string(m_status)).c_str());
|
||||
|
@ -253,7 +253,7 @@ Check::initiateCheck()
|
|||
pkt->senderState =
|
||||
new SenderState(m_address, req->getSize(), pkt->senderState);
|
||||
|
||||
if (port->sendTiming(pkt)) {
|
||||
if (port->sendTimingReq(pkt)) {
|
||||
DPRINTF(RubyTest, "initiating check - successful\n");
|
||||
DPRINTF(RubyTest, "status before check update: %s\n",
|
||||
TesterStatus_to_string(m_status).c_str());
|
||||
|
|
|
@ -145,7 +145,7 @@ RubyTester::getMasterPort(const std::string &if_name, int idx)
|
|||
}
|
||||
|
||||
bool
|
||||
RubyTester::CpuPort::recvTiming(PacketPtr pkt)
|
||||
RubyTester::CpuPort::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
// retrieve the subblock and call hitCallback
|
||||
RubyTester::SenderState* senderState =
|
||||
|
|
|
@ -62,7 +62,7 @@ class RubyTester : public MemObject
|
|||
{}
|
||||
|
||||
protected:
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
virtual void recvRetry()
|
||||
{ panic("%s does not expect a retry\n", name()); }
|
||||
};
|
||||
|
|
|
@ -131,9 +131,8 @@ DmaPort::DmaPort(MemObject *dev, System *s, Tick min_backoff, Tick max_backoff,
|
|||
{ }
|
||||
|
||||
bool
|
||||
DmaPort::recvTiming(PacketPtr pkt)
|
||||
DmaPort::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isResponse());
|
||||
if (pkt->wasNacked()) {
|
||||
DPRINTF(DMA, "Received nacked %s addr %#x\n",
|
||||
pkt->cmdString(), pkt->getAddr());
|
||||
|
@ -234,7 +233,7 @@ DmaPort::recvRetry()
|
|||
PacketPtr pkt = transmitList.front();
|
||||
DPRINTF(DMA, "Retry on %s addr %#x\n",
|
||||
pkt->cmdString(), pkt->getAddr());
|
||||
result = sendTiming(pkt);
|
||||
result = sendTimingReq(pkt);
|
||||
if (result) {
|
||||
DPRINTF(DMA, "-- Done\n");
|
||||
transmitList.pop_front();
|
||||
|
@ -320,7 +319,7 @@ DmaPort::sendDma()
|
|||
|
||||
bool result;
|
||||
do {
|
||||
result = sendTiming(pkt);
|
||||
result = sendTimingReq(pkt);
|
||||
if (result) {
|
||||
transmitList.pop_front();
|
||||
DPRINTF(DMA, "-- Done\n");
|
||||
|
|
|
@ -146,13 +146,12 @@ class DmaPort : public MasterPort
|
|||
/** Port accesses a cache which requires snooping */
|
||||
bool recvSnoops;
|
||||
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
|
||||
virtual bool recvTimingSnoop(PacketPtr pkt)
|
||||
virtual void recvTimingSnoopReq(PacketPtr pkt)
|
||||
{
|
||||
if (!recvSnoops)
|
||||
panic("%s was not expecting a snoop\n", name());
|
||||
return true;
|
||||
}
|
||||
|
||||
virtual Tick recvAtomicSnoop(PacketPtr pkt)
|
||||
|
|
|
@ -137,11 +137,8 @@ Bridge::BridgeMasterPort::reqQueueFull()
|
|||
}
|
||||
|
||||
bool
|
||||
Bridge::BridgeMasterPort::recvTiming(PacketPtr pkt)
|
||||
Bridge::BridgeMasterPort::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
// should only see responses on the master side
|
||||
assert(pkt->isResponse());
|
||||
|
||||
// all checks are done when the request is accepted on the slave
|
||||
// side, so we are guaranteed to have space for the response
|
||||
DPRINTF(BusBridge, "recvTiming: response %s addr 0x%x\n",
|
||||
|
@ -155,12 +152,8 @@ Bridge::BridgeMasterPort::recvTiming(PacketPtr pkt)
|
|||
}
|
||||
|
||||
bool
|
||||
Bridge::BridgeSlavePort::recvTiming(PacketPtr pkt)
|
||||
Bridge::BridgeSlavePort::recvTimingReq(PacketPtr pkt)
|
||||
{
|
||||
// should only see requests on the slave side
|
||||
assert(pkt->isRequest());
|
||||
|
||||
|
||||
DPRINTF(BusBridge, "recvTiming: request %s addr 0x%x\n",
|
||||
pkt->cmdString(), pkt->getAddr());
|
||||
|
||||
|
@ -318,7 +311,7 @@ Bridge::BridgeMasterPort::trySend()
|
|||
if (!buf->expectResponse)
|
||||
pkt->senderState = NULL;
|
||||
|
||||
if (sendTiming(pkt)) {
|
||||
if (sendTimingReq(pkt)) {
|
||||
// send successful
|
||||
requestQueue.pop_front();
|
||||
// we no longer own packet, so it's not safe to look at it
|
||||
|
@ -365,7 +358,7 @@ Bridge::BridgeSlavePort::trySend()
|
|||
// no need to worry about the sender state since we are not
|
||||
// modifying it
|
||||
|
||||
if (sendTiming(pkt)) {
|
||||
if (sendTimingResp(pkt)) {
|
||||
DPRINTF(BusBridge, " successful\n");
|
||||
// send successful
|
||||
responseQueue.pop_front();
|
||||
|
|
|
@ -230,7 +230,7 @@ class Bridge : public MemObject
|
|||
|
||||
/** When receiving a timing request from the peer port,
|
||||
pass it to the bridge. */
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingReq(PacketPtr pkt);
|
||||
|
||||
/** When receiving a retry request from the peer port,
|
||||
pass it to the bridge. */
|
||||
|
@ -353,7 +353,7 @@ class Bridge : public MemObject
|
|||
|
||||
/** When receiving a timing request from the peer port,
|
||||
pass it to the bridge. */
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
|
||||
/** When receiving a retry request from the peer port,
|
||||
pass it to the bridge. */
|
||||
|
|
106
src/mem/bus.cc
106
src/mem/bus.cc
|
@ -198,37 +198,28 @@ Bus::isOccupied(PacketPtr pkt, Port* port)
|
|||
}
|
||||
|
||||
bool
|
||||
Bus::recvTiming(PacketPtr pkt)
|
||||
Bus::recvTimingReq(PacketPtr pkt)
|
||||
{
|
||||
// get the source id
|
||||
Packet::NodeID src_id = pkt->getSrc();
|
||||
|
||||
// determine the source port based on the id and direction
|
||||
Port *src_port = NULL;
|
||||
if (pkt->isRequest())
|
||||
src_port = slavePorts[src_id];
|
||||
else
|
||||
src_port = masterPorts[src_id];
|
||||
// determine the source port based on the id
|
||||
SlavePort *src_port = slavePorts[pkt->getSrc()];
|
||||
|
||||
// test if the bus should be considered occupied for the current
|
||||
// packet, and exclude express snoops from the check
|
||||
if (!pkt->isExpressSnoop() && isOccupied(pkt, src_port)) {
|
||||
DPRINTF(Bus, "recvTiming: src %s %s 0x%x BUSY\n",
|
||||
DPRINTF(Bus, "recvTimingReq: src %s %s 0x%x BUSY\n",
|
||||
src_port->name(), pkt->cmdString(), pkt->getAddr());
|
||||
return false;
|
||||
}
|
||||
|
||||
DPRINTF(Bus, "recvTiming: src %s %s 0x%x\n",
|
||||
DPRINTF(Bus, "recvTimingReq: src %s %s 0x%x\n",
|
||||
src_port->name(), pkt->cmdString(), pkt->getAddr());
|
||||
|
||||
Tick headerFinishTime = pkt->isExpressSnoop() ? 0 : calcPacketTiming(pkt);
|
||||
Tick packetFinishTime = pkt->isExpressSnoop() ? 0 : pkt->finishTime;
|
||||
|
||||
// decide what to do based on the direction
|
||||
if (pkt->isRequest()) {
|
||||
// the packet is a memory-mapped request and should be
|
||||
// broadcasted to our snoopers but the source
|
||||
forwardTiming(pkt, src_id);
|
||||
forwardTiming(pkt, pkt->getSrc());
|
||||
|
||||
// remember if we add an outstanding req so we can undo it if
|
||||
// necessary, if the packet needs a response, we should add it
|
||||
|
@ -248,7 +239,7 @@ Bus::recvTiming(PacketPtr pkt)
|
|||
|
||||
// since it is a normal request, determine the destination
|
||||
// based on the address and attempt to send the packet
|
||||
bool success = masterPorts[findPort(pkt->getAddr())]->sendTiming(pkt);
|
||||
bool success = masterPorts[findPort(pkt->getAddr())]->sendTimingReq(pkt);
|
||||
|
||||
if (!success) {
|
||||
// inhibited packets should never be forced to retry
|
||||
|
@ -259,7 +250,7 @@ Bus::recvTiming(PacketPtr pkt)
|
|||
if (add_outstanding)
|
||||
outstandingReq.erase(pkt->req);
|
||||
|
||||
DPRINTF(Bus, "recvTiming: src %s %s 0x%x RETRY\n",
|
||||
DPRINTF(Bus, "recvTimingReq: src %s %s 0x%x RETRY\n",
|
||||
src_port->name(), pkt->cmdString(), pkt->getAddr());
|
||||
|
||||
addToRetryList(src_port);
|
||||
|
@ -267,7 +258,32 @@ Bus::recvTiming(PacketPtr pkt)
|
|||
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
|
||||
succeededTiming(packetFinishTime);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
Bus::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
// determine the source port based on the id
|
||||
MasterPort *src_port = masterPorts[pkt->getSrc()];
|
||||
|
||||
// test if the bus should be considered occupied for the current
|
||||
// packet
|
||||
if (isOccupied(pkt, src_port)) {
|
||||
DPRINTF(Bus, "recvTimingResp: src %s %s 0x%x BUSY\n",
|
||||
src_port->name(), pkt->cmdString(), pkt->getAddr());
|
||||
return false;
|
||||
}
|
||||
|
||||
DPRINTF(Bus, "recvTimingResp: src %s %s 0x%x\n",
|
||||
src_port->name(), pkt->cmdString(), pkt->getAddr());
|
||||
|
||||
calcPacketTiming(pkt);
|
||||
Tick packetFinishTime = pkt->finishTime;
|
||||
|
||||
// the packet is a normal response to a request that we should
|
||||
// have seen passing through the bus
|
||||
assert(outstandingReq.find(pkt->req) != outstandingReq.end());
|
||||
|
@ -277,30 +293,25 @@ Bus::recvTiming(PacketPtr pkt)
|
|||
|
||||
// send the packet to the destination through one of our slave
|
||||
// ports, as determined by the destination field
|
||||
bool success M5_VAR_USED = slavePorts[pkt->getDest()]->sendTiming(pkt);
|
||||
bool success M5_VAR_USED = slavePorts[pkt->getDest()]->sendTimingResp(pkt);
|
||||
|
||||
// currently it is illegal to block responses... can lead to
|
||||
// deadlock
|
||||
assert(success);
|
||||
}
|
||||
|
||||
succeededTiming(packetFinishTime);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
Bus::recvTimingSnoop(PacketPtr pkt)
|
||||
void
|
||||
Bus::recvTimingSnoopReq(PacketPtr pkt)
|
||||
{
|
||||
// get the source id
|
||||
Packet::NodeID src_id = pkt->getSrc();
|
||||
DPRINTF(Bus, "recvTimingSnoopReq: src %s %s 0x%x\n",
|
||||
masterPorts[pkt->getSrc()]->name(), pkt->cmdString(),
|
||||
pkt->getAddr());
|
||||
|
||||
if (pkt->isRequest()) {
|
||||
DPRINTF(Bus, "recvTimingSnoop: src %d %s 0x%x\n",
|
||||
src_id, pkt->cmdString(), pkt->getAddr());
|
||||
|
||||
// the packet is an express snoop request and should be
|
||||
// broadcasted to our snoopers
|
||||
// we should only see express snoops from caches
|
||||
assert(pkt->isExpressSnoop());
|
||||
|
||||
// forward to all snoopers
|
||||
|
@ -311,18 +322,20 @@ Bus::recvTimingSnoop(PacketPtr pkt)
|
|||
// device responsible for the address range something is
|
||||
// wrong, hence there is nothing further to do as the packet
|
||||
// would be going back to where it came from
|
||||
assert(src_id == findPort(pkt->getAddr()));
|
||||
assert(pkt->getSrc() == findPort(pkt->getAddr()));
|
||||
|
||||
// this is an express snoop and is never forced to retry
|
||||
assert(!inRetry);
|
||||
}
|
||||
|
||||
return true;
|
||||
} else {
|
||||
bool
|
||||
Bus::recvTimingSnoopResp(PacketPtr pkt)
|
||||
{
|
||||
// determine the source port based on the id
|
||||
SlavePort* src_port = slavePorts[src_id];
|
||||
SlavePort* src_port = slavePorts[pkt->getSrc()];
|
||||
|
||||
if (isOccupied(pkt, src_port)) {
|
||||
DPRINTF(Bus, "recvTimingSnoop: src %s %s 0x%x BUSY\n",
|
||||
DPRINTF(Bus, "recvTimingSnoopResp: src %s %s 0x%x BUSY\n",
|
||||
src_port->name(), pkt->cmdString(), pkt->getAddr());
|
||||
return false;
|
||||
}
|
||||
|
@ -347,7 +360,7 @@ Bus::recvTimingSnoop(PacketPtr pkt)
|
|||
// this is a snoop response to a snoop request we
|
||||
// forwarded, e.g. coming from the L1 and going to the L2
|
||||
// this should be forwarded as a snoop response
|
||||
bool success M5_VAR_USED = masterPorts[dest]->sendTimingSnoop(pkt);
|
||||
bool success M5_VAR_USED = masterPorts[dest]->sendTimingSnoopResp(pkt);
|
||||
assert(success);
|
||||
} else {
|
||||
// we got a snoop response on one of our slave ports,
|
||||
|
@ -361,11 +374,11 @@ Bus::recvTimingSnoop(PacketPtr pkt)
|
|||
// request, hence it should never go back to where the
|
||||
// snoop response came from, but instead to where the
|
||||
// original request came from
|
||||
assert(src_id != dest);
|
||||
assert(pkt->getSrc() != dest);
|
||||
|
||||
// as a normal response, it should go back to a master
|
||||
// through one of our slave ports
|
||||
bool success M5_VAR_USED = slavePorts[dest]->sendTiming(pkt);
|
||||
bool success M5_VAR_USED = slavePorts[dest]->sendTimingResp(pkt);
|
||||
|
||||
// currently it is illegal to block responses... can lead
|
||||
// to deadlock
|
||||
|
@ -375,9 +388,9 @@ Bus::recvTimingSnoop(PacketPtr pkt)
|
|||
succeededTiming(packetFinishTime);
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
Bus::succeededTiming(Tick busy_time)
|
||||
{
|
||||
|
@ -405,8 +418,7 @@ Bus::forwardTiming(PacketPtr pkt, int exclude_slave_port_id)
|
|||
if (exclude_slave_port_id == Port::INVALID_PORT_ID ||
|
||||
p->getId() != exclude_slave_port_id) {
|
||||
// cache is not allowed to refuse snoop
|
||||
bool success M5_VAR_USED = p->sendTimingSnoop(pkt);
|
||||
assert(success);
|
||||
p->sendTimingSnoopReq(pkt);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -531,9 +543,6 @@ Bus::recvAtomic(PacketPtr pkt)
|
|||
slavePorts[pkt->getSrc()]->name(), pkt->getAddr(),
|
||||
pkt->cmdString());
|
||||
|
||||
// we should always see a request routed based on the address
|
||||
assert(pkt->isRequest());
|
||||
|
||||
// forward to all snoopers but the source
|
||||
std::pair<MemCmd, Tick> snoop_result = forwardAtomic(pkt, pkt->getSrc());
|
||||
MemCmd snoop_response_cmd = snoop_result.first;
|
||||
|
@ -565,9 +574,6 @@ Bus::recvAtomicSnoop(PacketPtr pkt)
|
|||
masterPorts[pkt->getSrc()]->name(), pkt->getAddr(),
|
||||
pkt->cmdString());
|
||||
|
||||
// we should always see a request routed based on the address
|
||||
assert(pkt->isRequest());
|
||||
|
||||
// forward to all snoopers
|
||||
std::pair<MemCmd, Tick> snoop_result =
|
||||
forwardAtomic(pkt, Port::INVALID_PORT_ID);
|
||||
|
@ -637,9 +643,6 @@ Bus::recvFunctional(PacketPtr pkt)
|
|||
pkt->cmdString());
|
||||
}
|
||||
|
||||
// we should always see a request routed based on the address
|
||||
assert(pkt->isRequest());
|
||||
|
||||
// forward to all snoopers but the source
|
||||
forwardFunctional(pkt, pkt->getSrc());
|
||||
|
||||
|
@ -663,9 +666,6 @@ Bus::recvFunctionalSnoop(PacketPtr pkt)
|
|||
pkt->cmdString());
|
||||
}
|
||||
|
||||
// we should always see a request routed based on the address
|
||||
assert(pkt->isRequest());
|
||||
|
||||
// forward to all snoopers
|
||||
forwardFunctional(pkt, Port::INVALID_PORT_ID);
|
||||
}
|
||||
|
|
|
@ -89,14 +89,14 @@ class Bus : public MemObject
|
|||
/**
|
||||
* When receiving a timing request, pass it to the bus.
|
||||
*/
|
||||
virtual bool recvTiming(PacketPtr pkt)
|
||||
{ pkt->setSrc(id); return bus->recvTiming(pkt); }
|
||||
virtual bool recvTimingReq(PacketPtr pkt)
|
||||
{ pkt->setSrc(id); return bus->recvTimingReq(pkt); }
|
||||
|
||||
/**
|
||||
* When receiving a timing snoop response, pass it to the bus.
|
||||
*/
|
||||
virtual bool recvTimingSnoop(PacketPtr pkt)
|
||||
{ pkt->setSrc(id); return bus->recvTimingSnoop(pkt); }
|
||||
virtual bool recvTimingSnoopResp(PacketPtr pkt)
|
||||
{ pkt->setSrc(id); return bus->recvTimingSnoopResp(pkt); }
|
||||
|
||||
/**
|
||||
* When receiving an atomic request, pass it to the bus.
|
||||
|
@ -163,14 +163,14 @@ class Bus : public MemObject
|
|||
/**
|
||||
* When receiving a timing response, pass it to the bus.
|
||||
*/
|
||||
virtual bool recvTiming(PacketPtr pkt)
|
||||
{ pkt->setSrc(id); return bus->recvTiming(pkt); }
|
||||
virtual bool recvTimingResp(PacketPtr pkt)
|
||||
{ pkt->setSrc(id); return bus->recvTimingResp(pkt); }
|
||||
|
||||
/**
|
||||
* When receiving a timing snoop request, pass it to the bus.
|
||||
*/
|
||||
virtual bool recvTimingSnoop(PacketPtr pkt)
|
||||
{ pkt->setSrc(id); return bus->recvTimingSnoop(pkt); }
|
||||
virtual void recvTimingSnoopReq(PacketPtr pkt)
|
||||
{ pkt->setSrc(id); return bus->recvTimingSnoopReq(pkt); }
|
||||
|
||||
/**
|
||||
* When receiving an atomic snoop request, pass it to the bus.
|
||||
|
@ -228,12 +228,20 @@ class Bus : public MemObject
|
|||
std::set<RequestPtr> outstandingReq;
|
||||
|
||||
/** Function called by the port when the bus is recieving a Timing
|
||||
transaction.*/
|
||||
bool recvTiming(PacketPtr pkt);
|
||||
request packet.*/
|
||||
bool recvTimingReq(PacketPtr pkt);
|
||||
|
||||
/** Function called by the port when the bus is recieving a Timing
|
||||
response packet.*/
|
||||
bool recvTimingResp(PacketPtr pkt);
|
||||
|
||||
/** Function called by the port when the bus is recieving a timing
|
||||
snoop transaction.*/
|
||||
bool recvTimingSnoop(PacketPtr pkt);
|
||||
snoop request.*/
|
||||
void recvTimingSnoopReq(PacketPtr pkt);
|
||||
|
||||
/** Function called by the port when the bus is recieving a timing
|
||||
snoop response.*/
|
||||
bool recvTimingSnoopResp(PacketPtr pkt);
|
||||
|
||||
/**
|
||||
* Forward a timing packet to our snoopers, potentially excluding
|
||||
|
|
4
src/mem/cache/base.hh
vendored
4
src/mem/cache/base.hh
vendored
|
@ -148,7 +148,7 @@ class BaseCache : public MemObject
|
|||
protected:
|
||||
|
||||
CacheMasterPort(const std::string &_name, BaseCache *_cache,
|
||||
PacketQueue &_queue) :
|
||||
MasterPacketQueue &_queue) :
|
||||
QueuedMasterPort(_name, _cache, _queue)
|
||||
{ }
|
||||
|
||||
|
@ -196,7 +196,7 @@ class BaseCache : public MemObject
|
|||
const std::string &_label);
|
||||
|
||||
/** A normal packet queue used to store responses. */
|
||||
PacketQueue queue;
|
||||
SlavePacketQueue queue;
|
||||
|
||||
bool blocked;
|
||||
|
||||
|
|
14
src/mem/cache/cache.hh
vendored
14
src/mem/cache/cache.hh
vendored
|
@ -90,9 +90,9 @@ class Cache : public BaseCache
|
|||
|
||||
protected:
|
||||
|
||||
virtual bool recvTimingSnoop(PacketPtr pkt);
|
||||
virtual bool recvTimingSnoopResp(PacketPtr pkt);
|
||||
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingReq(PacketPtr pkt);
|
||||
|
||||
virtual Tick recvAtomic(PacketPtr pkt);
|
||||
|
||||
|
@ -116,7 +116,7 @@ class Cache : public BaseCache
|
|||
* current MSHR status. This queue has a pointer to our specific
|
||||
* cache implementation and is used by the MemSidePort.
|
||||
*/
|
||||
class MemSidePacketQueue : public PacketQueue
|
||||
class MemSidePacketQueue : public MasterPacketQueue
|
||||
{
|
||||
|
||||
protected:
|
||||
|
@ -125,9 +125,9 @@ class Cache : public BaseCache
|
|||
|
||||
public:
|
||||
|
||||
MemSidePacketQueue(Cache<TagStore> &cache, Port &port,
|
||||
MemSidePacketQueue(Cache<TagStore> &cache, MasterPort &port,
|
||||
const std::string &label) :
|
||||
PacketQueue(cache, port, label), cache(cache) { }
|
||||
MasterPacketQueue(cache, port, label), cache(cache) { }
|
||||
|
||||
/**
|
||||
* Override the normal sendDeferredPacket and do not only
|
||||
|
@ -154,9 +154,9 @@ class Cache : public BaseCache
|
|||
|
||||
protected:
|
||||
|
||||
virtual bool recvTimingSnoop(PacketPtr pkt);
|
||||
virtual void recvTimingSnoopReq(PacketPtr pkt);
|
||||
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
|
||||
virtual Tick recvAtomicSnoop(PacketPtr pkt);
|
||||
|
||||
|
|
28
src/mem/cache/cache_impl.hh
vendored
28
src/mem/cache/cache_impl.hh
vendored
|
@ -417,7 +417,7 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
|
|||
Packet *snoopPkt = new Packet(pkt, true); // clear flags
|
||||
snoopPkt->setExpressSnoop();
|
||||
snoopPkt->assertMemInhibit();
|
||||
memSidePort->sendTiming(snoopPkt);
|
||||
memSidePort->sendTimingReq(snoopPkt);
|
||||
// main memory will delete snoopPkt
|
||||
}
|
||||
// since we're the official target but we aren't responding,
|
||||
|
@ -1181,7 +1181,7 @@ Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
|
|||
Packet snoopPkt(pkt, true); // clear flags
|
||||
snoopPkt.setExpressSnoop();
|
||||
snoopPkt.senderState = new ForwardResponseRecord(pkt, this);
|
||||
cpuSidePort->sendTimingSnoop(&snoopPkt);
|
||||
cpuSidePort->sendTimingSnoopReq(&snoopPkt);
|
||||
if (snoopPkt.memInhibitAsserted()) {
|
||||
// cache-to-cache response from some upper cache
|
||||
assert(!alreadyResponded);
|
||||
|
@ -1336,11 +1336,9 @@ Cache<TagStore>::snoopTiming(PacketPtr pkt)
|
|||
|
||||
template<class TagStore>
|
||||
bool
|
||||
Cache<TagStore>::CpuSidePort::recvTimingSnoop(PacketPtr pkt)
|
||||
Cache<TagStore>::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
|
||||
{
|
||||
// Express snoop responses from master to slave, e.g., from L1 to L2
|
||||
assert(pkt->isResponse());
|
||||
|
||||
cache->timingAccess(pkt);
|
||||
return true;
|
||||
}
|
||||
|
@ -1492,7 +1490,7 @@ Cache<TagStore>::getTimingPacket()
|
|||
PacketPtr snoop_pkt = new Packet(tgt_pkt, true);
|
||||
snoop_pkt->setExpressSnoop();
|
||||
snoop_pkt->senderState = mshr;
|
||||
cpuSidePort->sendTimingSnoop(snoop_pkt);
|
||||
cpuSidePort->sendTimingSnoopReq(snoop_pkt);
|
||||
|
||||
if (snoop_pkt->memInhibitAsserted()) {
|
||||
markInService(mshr, snoop_pkt);
|
||||
|
@ -1557,9 +1555,8 @@ Cache<TagStore>::CpuSidePort::getAddrRanges()
|
|||
|
||||
template<class TagStore>
|
||||
bool
|
||||
Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
|
||||
Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
// always let inhibited requests through even if blocked
|
||||
if (!pkt->memInhibitAsserted() && blocked) {
|
||||
DPRINTF(Cache,"Scheduling a retry while blocked\n");
|
||||
|
@ -1575,7 +1572,6 @@ template<class TagStore>
|
|||
Tick
|
||||
Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
// atomic request
|
||||
return cache->atomicAccess(pkt);
|
||||
}
|
||||
|
@ -1584,7 +1580,6 @@ template<class TagStore>
|
|||
void
|
||||
Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
// functional request
|
||||
cache->functionalAccess(pkt, true);
|
||||
}
|
||||
|
@ -1605,7 +1600,7 @@ CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
|
|||
|
||||
template<class TagStore>
|
||||
bool
|
||||
Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
|
||||
Cache<TagStore>::MemSidePort::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
// this needs to be fixed so that the cache updates the mshr and sends the
|
||||
// packet back out on the link, but it probably won't happen so until this
|
||||
|
@ -1613,27 +1608,23 @@ Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
|
|||
if (pkt->wasNacked())
|
||||
panic("Need to implement cache resending nacked packets!\n");
|
||||
|
||||
assert(pkt->isResponse());
|
||||
cache->handleResponse(pkt);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Express snooping requests to memside port
|
||||
template<class TagStore>
|
||||
bool
|
||||
Cache<TagStore>::MemSidePort::recvTimingSnoop(PacketPtr pkt)
|
||||
void
|
||||
Cache<TagStore>::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
|
||||
{
|
||||
// handle snooping requests
|
||||
assert(pkt->isRequest());
|
||||
cache->snoopTiming(pkt);
|
||||
return true;
|
||||
}
|
||||
|
||||
template<class TagStore>
|
||||
Tick
|
||||
Cache<TagStore>::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
// atomic snoop
|
||||
return cache->snoopAtomic(pkt);
|
||||
}
|
||||
|
@ -1642,7 +1633,6 @@ template<class TagStore>
|
|||
void
|
||||
Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
// functional snoop (note that in contrast to atomic we don't have
|
||||
// a specific functionalSnoop method, as they have the same
|
||||
// behaviour regardless)
|
||||
|
@ -1668,7 +1658,7 @@ Cache<TagStore>::MemSidePacketQueue::sendDeferredPacket()
|
|||
} else {
|
||||
MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
|
||||
|
||||
waitingOnRetry = !port.sendTiming(pkt);
|
||||
waitingOnRetry = !masterPort.sendTimingReq(pkt);
|
||||
|
||||
if (waitingOnRetry) {
|
||||
DPRINTF(CachePort, "now waiting on a retry\n");
|
||||
|
|
|
@ -82,12 +82,12 @@ class MessageMasterPort : public QueuedMasterPort
|
|||
virtual ~MessageMasterPort()
|
||||
{}
|
||||
|
||||
bool recvTiming(PacketPtr pkt) { recvResponse(pkt); return true; }
|
||||
bool recvTimingResp(PacketPtr pkt) { recvResponse(pkt); return true; }
|
||||
|
||||
protected:
|
||||
|
||||
/** A packet queue for outgoing packets. */
|
||||
PacketQueue queue;
|
||||
MasterPacketQueue queue;
|
||||
|
||||
// Accept and ignore responses.
|
||||
virtual Tick recvResponse(PacketPtr pkt)
|
||||
|
|
|
@ -46,9 +46,8 @@
|
|||
|
||||
using namespace std;
|
||||
|
||||
PacketQueue::PacketQueue(EventManager& _em, Port& _port,
|
||||
const std::string _label)
|
||||
: em(_em), label(_label), sendEvent(this), drainEvent(NULL), port(_port),
|
||||
PacketQueue::PacketQueue(EventManager& _em, const std::string& _label)
|
||||
: em(_em), sendEvent(this), drainEvent(NULL), label(_label),
|
||||
waitingOnRetry(false)
|
||||
{
|
||||
}
|
||||
|
@ -142,11 +141,10 @@ void PacketQueue::trySendTiming()
|
|||
DeferredPacket dp = transmitList.front();
|
||||
transmitList.pop_front();
|
||||
|
||||
// attempt to send the packet and remember the outcome
|
||||
if (!dp.sendAsSnoop)
|
||||
waitingOnRetry = !port.sendTiming(dp.pkt);
|
||||
else
|
||||
waitingOnRetry = !port.sendTimingSnoop(dp.pkt);
|
||||
// use the appropriate implementation of sendTiming based on the
|
||||
// type of port associated with the queue, and whether the packet
|
||||
// is to be sent as a snoop or not
|
||||
waitingOnRetry = !sendTiming(dp.pkt, dp.sendAsSnoop);
|
||||
|
||||
if (waitingOnRetry) {
|
||||
// put the packet back at the front of the list (packet should
|
||||
|
@ -206,3 +204,33 @@ PacketQueue::drain(Event *de)
|
|||
drainEvent = de;
|
||||
return 1;
|
||||
}
|
||||
|
||||
MasterPacketQueue::MasterPacketQueue(EventManager& _em, MasterPort& _masterPort,
|
||||
const std::string _label)
|
||||
: PacketQueue(_em, _label), masterPort(_masterPort)
|
||||
{
|
||||
}
|
||||
|
||||
bool
|
||||
MasterPacketQueue::sendTiming(PacketPtr pkt, bool send_as_snoop)
|
||||
{
|
||||
// attempt to send the packet and return according to the outcome
|
||||
if (!send_as_snoop)
|
||||
return masterPort.sendTimingReq(pkt);
|
||||
else
|
||||
return masterPort.sendTimingSnoopResp(pkt);
|
||||
}
|
||||
|
||||
SlavePacketQueue::SlavePacketQueue(EventManager& _em, SlavePort& _slavePort,
|
||||
const std::string _label)
|
||||
: PacketQueue(_em, _label), slavePort(_slavePort)
|
||||
{
|
||||
}
|
||||
|
||||
bool
|
||||
SlavePacketQueue::sendTiming(PacketPtr pkt, bool send_as_snoop)
|
||||
{
|
||||
// we should never have queued snoop requests
|
||||
assert(!send_as_snoop);
|
||||
return slavePort.sendTimingResp(pkt);
|
||||
}
|
||||
|
|
|
@ -86,9 +86,6 @@ class PacketQueue
|
|||
/** The manager which is used for the event queue */
|
||||
EventManager& em;
|
||||
|
||||
/** Label to use for print request packets label stack. */
|
||||
const std::string label;
|
||||
|
||||
/** This function attempts to send deferred packets. Scheduled to
|
||||
* be called in the future via SendEvent. */
|
||||
void processSendEvent();
|
||||
|
@ -104,8 +101,8 @@ class PacketQueue
|
|||
|
||||
protected:
|
||||
|
||||
/** The port used to send the packets. */
|
||||
Port& port;
|
||||
/** Label to use for print request packets label stack. */
|
||||
const std::string label;
|
||||
|
||||
/** Remember whether we're awaiting a retry from the bus. */
|
||||
bool waitingOnRetry;
|
||||
|
@ -134,6 +131,11 @@ class PacketQueue
|
|||
*/
|
||||
void trySendTiming();
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
virtual bool sendTiming(PacketPtr pkt, bool send_as_snoop) = 0;
|
||||
|
||||
/**
|
||||
* Based on the transmit list, or the provided time, schedule a
|
||||
* send event if there are packets to send. If we are idle and
|
||||
|
@ -152,31 +154,28 @@ class PacketQueue
|
|||
*/
|
||||
virtual void recvRangeChange() { }
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
* Create a packet queue, linked to an event manager, a port used
|
||||
* to send the packets, and potentially give it a label that will
|
||||
* be used for functional print request packets.
|
||||
* Create a packet queue, linked to an event manager, and a label
|
||||
* that will be used for functional print request packets.
|
||||
*
|
||||
* @param _em Event manager used for scheduling this queue
|
||||
* @param _port Port used to send the packets
|
||||
* @param _label Label to push on the label stack for print request packets
|
||||
*/
|
||||
PacketQueue(EventManager& _em, Port& _port,
|
||||
const std::string _label = "PacketQueue");
|
||||
PacketQueue(EventManager& _em, const std::string& _label);
|
||||
|
||||
/**
|
||||
* Virtual desctructor since the class may be used as a base class.
|
||||
*/
|
||||
virtual ~PacketQueue();
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
* Provide a name to simplify debugging. Base it on the port.
|
||||
* Provide a name to simplify debugging.
|
||||
*
|
||||
* @return A complete name, appended to module and port
|
||||
*/
|
||||
const std::string name() const { return port.name() + "-queue"; }
|
||||
virtual const std::string name() const = 0;
|
||||
|
||||
/** Check the list of buffered packets against the supplied
|
||||
* functional request. */
|
||||
|
@ -217,4 +216,63 @@ class PacketQueue
|
|||
unsigned int drain(Event *de);
|
||||
};
|
||||
|
||||
class MasterPacketQueue : public PacketQueue
|
||||
{
|
||||
|
||||
protected:
|
||||
|
||||
MasterPort& masterPort;
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
* Create a master packet queue, linked to an event manager, a
|
||||
* master port, and a label that will be used for functional print
|
||||
* request packets.
|
||||
*
|
||||
* @param _em Event manager used for scheduling this queue
|
||||
* @param _masterPort Master port used to send the packets
|
||||
* @param _label Label to push on the label stack for print request packets
|
||||
*/
|
||||
MasterPacketQueue(EventManager& _em, MasterPort& _masterPort,
|
||||
const std::string _label = "MasterPacketQueue");
|
||||
|
||||
virtual ~MasterPacketQueue() { }
|
||||
|
||||
const std::string name() const
|
||||
{ return masterPort.name() + "-" + label; }
|
||||
|
||||
bool sendTiming(PacketPtr pkt, bool send_as_snoop);
|
||||
};
|
||||
|
||||
class SlavePacketQueue : public PacketQueue
|
||||
{
|
||||
|
||||
protected:
|
||||
|
||||
SlavePort& slavePort;
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
* Create a slave packet queue, linked to an event manager, a
|
||||
* slave port, and a label that will be used for functional print
|
||||
* request packets.
|
||||
*
|
||||
* @param _em Event manager used for scheduling this queue
|
||||
* @param _slavePort Slave port used to send the packets
|
||||
* @param _label Label to push on the label stack for print request packets
|
||||
*/
|
||||
SlavePacketQueue(EventManager& _em, SlavePort& _slavePort,
|
||||
const std::string _label = "SlavePacketQueue");
|
||||
|
||||
virtual ~SlavePacketQueue() { }
|
||||
|
||||
const std::string name() const
|
||||
{ return slavePort.name() + "-" + label; }
|
||||
|
||||
bool sendTiming(PacketPtr pkt, bool send_as_snoop);
|
||||
|
||||
};
|
||||
|
||||
#endif // __MEM_PACKET_QUEUE_HH__
|
||||
|
|
|
@ -107,15 +107,31 @@ MasterPort::peerBlockSize() const
|
|||
Tick
|
||||
MasterPort::sendAtomic(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
return _slavePort->recvAtomic(pkt);
|
||||
}
|
||||
|
||||
void
|
||||
MasterPort::sendFunctional(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
return _slavePort->recvFunctional(pkt);
|
||||
}
|
||||
|
||||
bool
|
||||
MasterPort::sendTimingReq(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
return _slavePort->recvTimingReq(pkt);
|
||||
}
|
||||
|
||||
bool
|
||||
MasterPort::sendTimingSnoopResp(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isResponse());
|
||||
return _slavePort->recvTimingSnoopResp(pkt);
|
||||
}
|
||||
|
||||
void
|
||||
MasterPort::printAddr(Addr a)
|
||||
{
|
||||
|
@ -171,11 +187,27 @@ SlavePort::isConnected() const
|
|||
Tick
|
||||
SlavePort::sendAtomicSnoop(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
return _masterPort->recvAtomicSnoop(pkt);
|
||||
}
|
||||
|
||||
void
|
||||
SlavePort::sendFunctionalSnoop(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
return _masterPort->recvFunctionalSnoop(pkt);
|
||||
}
|
||||
|
||||
bool
|
||||
SlavePort::sendTimingResp(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isResponse());
|
||||
return _masterPort->recvTimingResp(pkt);
|
||||
}
|
||||
|
||||
void
|
||||
SlavePort::sendTimingSnoopReq(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
_masterPort->recvTimingSnoopReq(pkt);
|
||||
}
|
||||
|
|
183
src/mem/port.hh
183
src/mem/port.hh
|
@ -73,8 +73,7 @@ class MemObject;
|
|||
* opposite role.
|
||||
*
|
||||
* Each port has a name and an owner, and enables three basic types of
|
||||
* accesses to the peer port: sendFunctional, sendAtomic and
|
||||
* sendTiming.
|
||||
* accesses to the peer port: functional, atomic and timing.
|
||||
*/
|
||||
class Port
|
||||
{
|
||||
|
@ -130,61 +129,18 @@ class Port
|
|||
|
||||
protected:
|
||||
|
||||
/** These functions are protected because they should only be
|
||||
* called by a peer port, never directly by any outside object. */
|
||||
|
||||
/**
|
||||
* Receive a timing request or response packet from the peer port.
|
||||
*/
|
||||
virtual bool recvTiming(PacketPtr pkt) = 0;
|
||||
|
||||
/**
|
||||
* Receive a timing snoop request or snoop response packet from
|
||||
* the peer port.
|
||||
*/
|
||||
virtual bool recvTimingSnoop(PacketPtr pkt)
|
||||
{
|
||||
panic("%s was not expecting a timing snoop\n", name());
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Called by a peer port if sendTiming or sendTimingSnoop was
|
||||
* unsuccesful, and had to wait.
|
||||
* Called by a peer port if sendTimingReq, sendTimingResp or
|
||||
* sendTimingSnoopResp was unsuccesful, and had to wait.
|
||||
*/
|
||||
virtual void recvRetry() = 0;
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
* Attempt to send a timing request or response packet to the peer
|
||||
* port by calling its receive function. If the send does not
|
||||
* succeed, as indicated by the return value, then the sender must
|
||||
* wait for a recvRetry at which point it can re-issue a
|
||||
* sendTiming.
|
||||
*
|
||||
* @param pkt Packet to send.
|
||||
*
|
||||
* @return If the send was succesful or not.
|
||||
*/
|
||||
bool sendTiming(PacketPtr pkt) { return peer->recvTiming(pkt); }
|
||||
|
||||
/**
|
||||
* Attempt to send a timing snoop request or snoop response packet
|
||||
* to the peer port by calling its receive function. If the send
|
||||
* does not succeed, as indicated by the return value, then the
|
||||
* sender must wait for a recvRetry at which point it can re-issue
|
||||
* a sendTimingSnoop.
|
||||
*
|
||||
* @param pkt Packet to send.
|
||||
*
|
||||
* @return If the send was succesful or not.
|
||||
*/
|
||||
bool sendTimingSnoop(PacketPtr pkt) { return peer->recvTimingSnoop(pkt); }
|
||||
|
||||
/**
|
||||
* Send a retry to a peer port that previously attempted a
|
||||
* sendTiming or sendTimingSnoop which was unsuccessful.
|
||||
* sendTimingReq, sendTimingResp or sendTimingSnoopResp which was
|
||||
* unsuccessful.
|
||||
*/
|
||||
void sendRetry() { return peer->recvRetry(); }
|
||||
|
||||
|
@ -202,6 +158,8 @@ class SlavePort;
|
|||
class MasterPort : public Port
|
||||
{
|
||||
|
||||
friend class SlavePort;
|
||||
|
||||
private:
|
||||
|
||||
SlavePort* _slavePort;
|
||||
|
@ -237,30 +195,28 @@ class MasterPort : public Port
|
|||
void sendFunctional(PacketPtr pkt);
|
||||
|
||||
/**
|
||||
* Receive an atomic snoop request packet from the slave port.
|
||||
* Attempt to send a timing request to the slave port by calling
|
||||
* its corresponding receive function. If the send does not
|
||||
* succeed, as indicated by the return value, then the sender must
|
||||
* wait for a recvRetry at which point it can re-issue a
|
||||
* sendTimingReq.
|
||||
*
|
||||
* @param pkt Packet to send.
|
||||
*
|
||||
* @return If the send was succesful or not.
|
||||
*/
|
||||
virtual Tick recvAtomicSnoop(PacketPtr pkt)
|
||||
{
|
||||
panic("%s was not expecting an atomic snoop\n", name());
|
||||
return 0;
|
||||
}
|
||||
bool sendTimingReq(PacketPtr pkt);
|
||||
|
||||
/**
|
||||
* Receive a functional snoop request packet from the slave port.
|
||||
* Attempt to send a timing snoop response packet to the slave
|
||||
* port by calling its corresponding receive function. If the send
|
||||
* does not succeed, as indicated by the return value, then the
|
||||
* sender must wait for a recvRetry at which point it can re-issue
|
||||
* a sendTimingSnoopResp.
|
||||
*
|
||||
* @param pkt Packet to send.
|
||||
*/
|
||||
virtual void recvFunctionalSnoop(PacketPtr pkt)
|
||||
{
|
||||
panic("%s was not expecting a functional snoop\n", name());
|
||||
}
|
||||
|
||||
/**
|
||||
* Called to receive an address range change from the peer slave
|
||||
* port. the default implementation ignored the change and does
|
||||
* nothing. Override this function in a derived class if the owner
|
||||
* needs to be aware of he laesddress ranges, e.g. in an
|
||||
* interconnect component like a bus.
|
||||
*/
|
||||
virtual void recvRangeChange() { }
|
||||
bool sendTimingSnoopResp(PacketPtr pkt);
|
||||
|
||||
/**
|
||||
* Determine if this master port is snooping or not. The default
|
||||
|
@ -288,6 +244,47 @@ class MasterPort : public Port
|
|||
* that address throughout the memory system. For debugging.
|
||||
*/
|
||||
void printAddr(Addr a);
|
||||
|
||||
protected:
|
||||
|
||||
/**
|
||||
* Receive an atomic snoop request packet from the slave port.
|
||||
*/
|
||||
virtual Tick recvAtomicSnoop(PacketPtr pkt)
|
||||
{
|
||||
panic("%s was not expecting an atomic snoop request\n", name());
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Receive a functional snoop request packet from the slave port.
|
||||
*/
|
||||
virtual void recvFunctionalSnoop(PacketPtr pkt)
|
||||
{
|
||||
panic("%s was not expecting a functional snoop request\n", name());
|
||||
}
|
||||
|
||||
/**
|
||||
* Receive a timing response from the slave port.
|
||||
*/
|
||||
virtual bool recvTimingResp(PacketPtr pkt) = 0;
|
||||
|
||||
/**
|
||||
* Receive a timing snoop request from the slave port.
|
||||
*/
|
||||
virtual void recvTimingSnoopReq(PacketPtr pkt)
|
||||
{
|
||||
panic("%s was not expecting a timing snoop request\n", name());
|
||||
}
|
||||
|
||||
/**
|
||||
* Called to receive an address range change from the peer slave
|
||||
* port. the default implementation ignored the change and does
|
||||
* nothing. Override this function in a derived class if the owner
|
||||
* needs to be aware of he laesddress ranges, e.g. in an
|
||||
* interconnect component like a bus.
|
||||
*/
|
||||
virtual void recvRangeChange() { }
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -299,6 +296,8 @@ class MasterPort : public Port
|
|||
class SlavePort : public Port
|
||||
{
|
||||
|
||||
friend class MasterPort;
|
||||
|
||||
private:
|
||||
|
||||
MasterPort* _masterPort;
|
||||
|
@ -334,14 +333,26 @@ class SlavePort : public Port
|
|||
void sendFunctionalSnoop(PacketPtr pkt);
|
||||
|
||||
/**
|
||||
* Receive an atomic request packet from the master port.
|
||||
* Attempt to send a timing response to the master port by calling
|
||||
* its corresponding receive function. If the send does not
|
||||
* succeed, as indicated by the return value, then the sender must
|
||||
* wait for a recvRetry at which point it can re-issue a
|
||||
* sendTimingResp.
|
||||
*
|
||||
* @param pkt Packet to send.
|
||||
*
|
||||
* @return If the send was succesful or not.
|
||||
*/
|
||||
virtual Tick recvAtomic(PacketPtr pkt) = 0;
|
||||
bool sendTimingResp(PacketPtr pkt);
|
||||
|
||||
/**
|
||||
* Receive a functional request packet from the master port.
|
||||
* Attempt to send a timing snoop request packet to the master port
|
||||
* by calling its corresponding receive function. Snoop requests
|
||||
* always succeed and hence no return value is needed.
|
||||
*
|
||||
* @param pkt Packet to send.
|
||||
*/
|
||||
virtual void recvFunctional(PacketPtr pkt) = 0;
|
||||
void sendTimingSnoopReq(PacketPtr pkt);
|
||||
|
||||
/**
|
||||
* Called by a peer port in order to determine the block size of
|
||||
|
@ -367,6 +378,32 @@ class SlavePort : public Port
|
|||
* @return a list of ranges responded to
|
||||
*/
|
||||
virtual AddrRangeList getAddrRanges() = 0;
|
||||
|
||||
protected:
|
||||
|
||||
/**
|
||||
* Receive an atomic request packet from the master port.
|
||||
*/
|
||||
virtual Tick recvAtomic(PacketPtr pkt) = 0;
|
||||
|
||||
/**
|
||||
* Receive a functional request packet from the master port.
|
||||
*/
|
||||
virtual void recvFunctional(PacketPtr pkt) = 0;
|
||||
|
||||
/**
|
||||
* Receive a timing request from the master port.
|
||||
*/
|
||||
virtual bool recvTimingReq(PacketPtr pkt) = 0;
|
||||
|
||||
/**
|
||||
* Receive a timing snoop response from the master port.
|
||||
*/
|
||||
virtual bool recvTimingSnoopResp(PacketPtr pkt)
|
||||
{
|
||||
panic("%s was not expecting a timing snoop response\n", name());
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
#endif //__MEM_PORT_HH__
|
||||
|
|
|
@ -62,7 +62,7 @@ class QueuedSlavePort : public SlavePort
|
|||
protected:
|
||||
|
||||
/** Packet queue used to store outgoing requests and responses. */
|
||||
PacketQueue &queue;
|
||||
SlavePacketQueue &queue;
|
||||
|
||||
/** This function is notification that the device should attempt to send a
|
||||
* packet again. */
|
||||
|
@ -78,7 +78,7 @@ class QueuedSlavePort : public SlavePort
|
|||
* QueuePort constructor.
|
||||
*/
|
||||
QueuedSlavePort(const std::string& name, MemObject* owner,
|
||||
PacketQueue &queue) :
|
||||
SlavePacketQueue &queue) :
|
||||
SlavePort(name, owner), queue(queue)
|
||||
{ }
|
||||
|
||||
|
@ -103,7 +103,7 @@ class QueuedMasterPort : public MasterPort
|
|||
protected:
|
||||
|
||||
/** Packet queue used to store outgoing requests and responses. */
|
||||
PacketQueue &queue;
|
||||
MasterPacketQueue &queue;
|
||||
|
||||
/** This function is notification that the device should attempt to send a
|
||||
* packet again. */
|
||||
|
@ -119,7 +119,7 @@ class QueuedMasterPort : public MasterPort
|
|||
* QueuePort constructor.
|
||||
*/
|
||||
QueuedMasterPort(const std::string& name, MemObject* owner,
|
||||
PacketQueue &queue) :
|
||||
MasterPacketQueue &queue) :
|
||||
MasterPort(name, owner), queue(queue)
|
||||
{ }
|
||||
|
||||
|
|
|
@ -141,14 +141,12 @@ RubyPort::M5Port::recvAtomic(PacketPtr pkt)
|
|||
|
||||
|
||||
bool
|
||||
RubyPort::PioPort::recvTiming(PacketPtr pkt)
|
||||
RubyPort::PioPort::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
// In FS mode, ruby memory will receive pio responses from devices
|
||||
// and it must forward these responses back to the particular CPU.
|
||||
DPRINTF(RubyPort, "Pio response for address %#x\n", pkt->getAddr());
|
||||
|
||||
assert(pkt->isResponse());
|
||||
|
||||
// First we must retrieve the request port from the sender State
|
||||
RubyPort::SenderState *senderState =
|
||||
safe_cast<RubyPort::SenderState *>(pkt->senderState);
|
||||
|
@ -159,24 +157,23 @@ RubyPort::PioPort::recvTiming(PacketPtr pkt)
|
|||
pkt->senderState = senderState->saved;
|
||||
delete senderState;
|
||||
|
||||
port->sendTiming(pkt);
|
||||
port->sendTimingResp(pkt);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
RubyPort::M5Port::recvTiming(PacketPtr pkt)
|
||||
RubyPort::M5Port::recvTimingReq(PacketPtr pkt)
|
||||
{
|
||||
DPRINTF(RubyPort,
|
||||
"Timing access caught for address %#x\n", pkt->getAddr());
|
||||
|
||||
//dsm: based on SimpleTimingPort::recvTiming(pkt);
|
||||
//dsm: based on SimpleTimingPort::recvTimingReq(pkt);
|
||||
|
||||
// The received packets should only be M5 requests, which should never
|
||||
// get nacked. There used to be code to hanldle nacks here, but
|
||||
// I'm pretty sure it didn't work correctly with the drain code,
|
||||
// so that would need to be fixed if we ever added it back.
|
||||
assert(pkt->isRequest());
|
||||
|
||||
if (pkt->memInhibitAsserted()) {
|
||||
warn("memInhibitAsserted???");
|
||||
|
|
|
@ -62,7 +62,7 @@ class RubyPort : public MemObject
|
|||
{
|
||||
private:
|
||||
|
||||
PacketQueue queue;
|
||||
SlavePacketQueue queue;
|
||||
RubyPort *ruby_port;
|
||||
RubySystem* ruby_system;
|
||||
bool _onRetryList;
|
||||
|
@ -83,7 +83,7 @@ class RubyPort : public MemObject
|
|||
{ _onRetryList = newVal; }
|
||||
|
||||
protected:
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingReq(PacketPtr pkt);
|
||||
virtual Tick recvAtomic(PacketPtr pkt);
|
||||
virtual void recvFunctional(PacketPtr pkt);
|
||||
virtual AddrRangeList getAddrRanges();
|
||||
|
@ -100,7 +100,7 @@ class RubyPort : public MemObject
|
|||
{
|
||||
private:
|
||||
|
||||
PacketQueue queue;
|
||||
MasterPacketQueue queue;
|
||||
|
||||
RubyPort *ruby_port;
|
||||
|
||||
|
@ -109,7 +109,7 @@ class RubyPort : public MemObject
|
|||
bool sendNextCycle(PacketPtr pkt);
|
||||
|
||||
protected:
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
};
|
||||
|
||||
friend class PioPort;
|
||||
|
|
|
@ -53,7 +53,6 @@ SimpleTimingPort::SimpleTimingPort(const std::string& _name,
|
|||
void
|
||||
SimpleTimingPort::recvFunctional(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
if (!queue.checkFunctional(pkt)) {
|
||||
// do an atomic access and throw away the returned latency
|
||||
recvAtomic(pkt);
|
||||
|
@ -61,11 +60,8 @@ SimpleTimingPort::recvFunctional(PacketPtr pkt)
|
|||
}
|
||||
|
||||
bool
|
||||
SimpleTimingPort::recvTiming(PacketPtr pkt)
|
||||
SimpleTimingPort::recvTimingReq(PacketPtr pkt)
|
||||
{
|
||||
// the port is a slave and should hence only get timing requests
|
||||
assert(pkt->isRequest());
|
||||
|
||||
if (pkt->memInhibitAsserted()) {
|
||||
// snooper will supply based on copy of packet
|
||||
// still target's responsibility to delete packet
|
||||
|
|
|
@ -54,7 +54,7 @@
|
|||
|
||||
/**
|
||||
* The simple timing port uses a queued port to implement
|
||||
* recvFunctional and recvTiming through recvAtomic. It is always a
|
||||
* recvFunctional and recvTimingReq through recvAtomic. It is always a
|
||||
* slave port.
|
||||
*/
|
||||
class SimpleTimingPort : public QueuedSlavePort
|
||||
|
@ -63,13 +63,13 @@ class SimpleTimingPort : public QueuedSlavePort
|
|||
protected:
|
||||
|
||||
/** The packet queue used to store outgoing responses. */
|
||||
PacketQueue queue;
|
||||
SlavePacketQueue queue;
|
||||
|
||||
/** Implemented using recvAtomic(). */
|
||||
void recvFunctional(PacketPtr pkt);
|
||||
|
||||
/** Implemented using recvAtomic(). */
|
||||
bool recvTiming(PacketPtr pkt);
|
||||
bool recvTimingReq(PacketPtr pkt);
|
||||
|
||||
virtual Tick recvAtomic(PacketPtr pkt) = 0;
|
||||
|
||||
|
@ -77,7 +77,7 @@ class SimpleTimingPort : public QueuedSlavePort
|
|||
|
||||
/**
|
||||
* Create a new SimpleTimingPort that relies on a packet queue to
|
||||
* hold responses, and implements recvTiming and recvFunctional
|
||||
* hold responses, and implements recvTimingReq and recvFunctional
|
||||
* through calls to recvAtomic. Once a request arrives, it is
|
||||
* passed to recvAtomic, and in the case of a timing access any
|
||||
* response is scheduled to be sent after the delay of the atomic
|
||||
|
|
|
@ -88,7 +88,7 @@ class System : public MemObject
|
|||
SystemPort(const std::string &_name, MemObject *_owner)
|
||||
: MasterPort(_name, _owner)
|
||||
{ }
|
||||
bool recvTiming(PacketPtr pkt)
|
||||
bool recvTimingResp(PacketPtr pkt)
|
||||
{ panic("SystemPort does not receive timing!\n"); return false; }
|
||||
void recvRetry()
|
||||
{ panic("SystemPort does not expect retry!\n"); }
|
||||
|
|
Loading…
Reference in a new issue