mem: Split port retry for all different packet classes
This patch fixes a long-standing issue with the port flow control. Before this patch the retry mechanism was shared between all different packet classes. As a result, a snoop response could get stuck behind a request waiting for a retry, even if the send/recv functions were split. This caused message-dependent deadlocks in stress-test scenarios. The patch splits the retry into one per packet (message) class. Thus, sendTimingReq has a corresponding recvReqRetry, sendTimingResp has recvRespRetry etc. Most of the changes to the code involve simply clarifying what type of request a specific object was accepting. The biggest change in functionality is in the cache downstream packet queue, facing the memory. This queue was shared by requests and snoop responses, and it is now split into two queues, each with their own flow control, but the same physical MasterPort. These changes fix the previously seen deadlocks.
This commit is contained in:
parent
6ebe8d863a
commit
f26a289295
67 changed files with 559 additions and 387 deletions
|
@ -133,13 +133,13 @@ Walker::recvTimingResp(PacketPtr pkt)
|
|||
}
|
||||
|
||||
void
|
||||
Walker::WalkerPort::recvRetry()
|
||||
Walker::WalkerPort::recvReqRetry()
|
||||
{
|
||||
walker->recvRetry();
|
||||
walker->recvReqRetry();
|
||||
}
|
||||
|
||||
void
|
||||
Walker::recvRetry()
|
||||
Walker::recvReqRetry()
|
||||
{
|
||||
std::list<WalkerState *>::iterator iter;
|
||||
for (iter = currStates.begin(); iter != currStates.end(); iter++) {
|
||||
|
|
|
@ -77,7 +77,7 @@ namespace X86ISA
|
|||
void recvTimingSnoopReq(PacketPtr pkt) { }
|
||||
Tick recvAtomicSnoop(PacketPtr pkt) { return 0; }
|
||||
void recvFunctionalSnoop(PacketPtr pkt) { }
|
||||
void recvRetry();
|
||||
void recvReqRetry();
|
||||
bool isSnooping() const { return true; }
|
||||
};
|
||||
|
||||
|
@ -190,7 +190,7 @@ namespace X86ISA
|
|||
|
||||
// Functions for dealing with packets.
|
||||
bool recvTimingResp(PacketPtr pkt);
|
||||
void recvRetry();
|
||||
void recvReqRetry();
|
||||
bool sendTiming(WalkerState * sendingState, PacketPtr pkt);
|
||||
|
||||
public:
|
||||
|
|
|
@ -561,9 +561,9 @@ class BaseKvmCPU : public BaseCPU
|
|||
return true;
|
||||
}
|
||||
|
||||
void recvRetry()
|
||||
void recvReqRetry()
|
||||
{
|
||||
panic("The KVM CPU doesn't expect recvRetry!\n");
|
||||
panic("The KVM CPU doesn't expect recvReqRetry!\n");
|
||||
}
|
||||
|
||||
};
|
||||
|
|
|
@ -417,7 +417,7 @@ Fetch1::recvTimingResp(PacketPtr response)
|
|||
}
|
||||
|
||||
void
|
||||
Fetch1::recvRetry()
|
||||
Fetch1::recvReqRetry()
|
||||
{
|
||||
DPRINTF(Fetch, "recvRetry\n");
|
||||
assert(icacheState == IcacheNeedsRetry);
|
||||
|
|
|
@ -77,7 +77,7 @@ class Fetch1 : public Named
|
|||
bool recvTimingResp(PacketPtr pkt)
|
||||
{ return fetch.recvTimingResp(pkt); }
|
||||
|
||||
void recvRetry() { fetch.recvRetry(); }
|
||||
void recvReqRetry() { fetch.recvReqRetry(); }
|
||||
};
|
||||
|
||||
/** Memory access queuing.
|
||||
|
@ -345,7 +345,7 @@ class Fetch1 : public Named
|
|||
|
||||
/** Memory interface */
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
virtual void recvRetry();
|
||||
virtual void recvReqRetry();
|
||||
|
||||
public:
|
||||
Fetch1(const std::string &name_,
|
||||
|
|
|
@ -1235,7 +1235,7 @@ LSQ::recvTimingResp(PacketPtr response)
|
|||
}
|
||||
|
||||
void
|
||||
LSQ::recvRetry()
|
||||
LSQ::recvReqRetry()
|
||||
{
|
||||
DPRINTF(MinorMem, "Received retry request\n");
|
||||
|
||||
|
|
|
@ -101,7 +101,7 @@ class LSQ : public Named
|
|||
bool recvTimingResp(PacketPtr pkt)
|
||||
{ return lsq.recvTimingResp(pkt); }
|
||||
|
||||
void recvRetry() { lsq.recvRetry(); }
|
||||
void recvReqRetry() { lsq.recvReqRetry(); }
|
||||
|
||||
void recvTimingSnoopReq(PacketPtr pkt)
|
||||
{ return lsq.recvTimingSnoopReq(pkt); }
|
||||
|
@ -712,7 +712,7 @@ class LSQ : public Named
|
|||
|
||||
/** Memory interface */
|
||||
bool recvTimingResp(PacketPtr pkt);
|
||||
void recvRetry();
|
||||
void recvReqRetry();
|
||||
void recvTimingSnoopReq(PacketPtr pkt);
|
||||
|
||||
/** Return the raw-bindable port */
|
||||
|
|
|
@ -101,9 +101,9 @@ FullO3CPU<Impl>::IcachePort::recvTimingResp(PacketPtr pkt)
|
|||
|
||||
template<class Impl>
|
||||
void
|
||||
FullO3CPU<Impl>::IcachePort::recvRetry()
|
||||
FullO3CPU<Impl>::IcachePort::recvReqRetry()
|
||||
{
|
||||
fetch->recvRetry();
|
||||
fetch->recvReqRetry();
|
||||
}
|
||||
|
||||
template <class Impl>
|
||||
|
@ -126,9 +126,9 @@ FullO3CPU<Impl>::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
|
|||
|
||||
template <class Impl>
|
||||
void
|
||||
FullO3CPU<Impl>::DcachePort::recvRetry()
|
||||
FullO3CPU<Impl>::DcachePort::recvReqRetry()
|
||||
{
|
||||
lsq->recvRetry();
|
||||
lsq->recvReqRetry();
|
||||
}
|
||||
|
||||
template <class Impl>
|
||||
|
|
|
@ -150,7 +150,7 @@ class FullO3CPU : public BaseO3CPU
|
|||
virtual void recvTimingSnoopReq(PacketPtr pkt) { }
|
||||
|
||||
/** Handles doing a retry of a failed fetch. */
|
||||
virtual void recvRetry();
|
||||
virtual void recvReqRetry();
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -185,7 +185,7 @@ class FullO3CPU : public BaseO3CPU
|
|||
}
|
||||
|
||||
/** Handles doing a retry of the previous send. */
|
||||
virtual void recvRetry();
|
||||
virtual void recvReqRetry();
|
||||
|
||||
/**
|
||||
* As this CPU requires snooping to maintain the load store queue
|
||||
|
|
|
@ -224,7 +224,7 @@ class DefaultFetch
|
|||
void startupStage();
|
||||
|
||||
/** Handles retrying the fetch access. */
|
||||
void recvRetry();
|
||||
void recvReqRetry();
|
||||
|
||||
/** Processes cache completion event. */
|
||||
void processCacheCompletion(PacketPtr pkt);
|
||||
|
|
|
@ -1407,7 +1407,7 @@ DefaultFetch<Impl>::fetch(bool &status_change)
|
|||
|
||||
template<class Impl>
|
||||
void
|
||||
DefaultFetch<Impl>::recvRetry()
|
||||
DefaultFetch<Impl>::recvReqRetry()
|
||||
{
|
||||
if (retryPkt != NULL) {
|
||||
assert(cacheBlocked);
|
||||
|
|
|
@ -286,7 +286,7 @@ class LSQ {
|
|||
/**
|
||||
* Retry the previous send that failed.
|
||||
*/
|
||||
void recvRetry();
|
||||
void recvReqRetry();
|
||||
|
||||
/**
|
||||
* Handles writing back and completing the load or store that has
|
||||
|
|
|
@ -330,7 +330,7 @@ LSQ<Impl>::violation()
|
|||
|
||||
template <class Impl>
|
||||
void
|
||||
LSQ<Impl>::recvRetry()
|
||||
LSQ<Impl>::recvReqRetry()
|
||||
{
|
||||
iewStage->cacheUnblocked();
|
||||
|
||||
|
|
|
@ -139,7 +139,7 @@ class AtomicSimpleCPU : public BaseSimpleCPU
|
|||
return true;
|
||||
}
|
||||
|
||||
void recvRetry()
|
||||
void recvReqRetry()
|
||||
{
|
||||
panic("Atomic CPU doesn't expect recvRetry!\n");
|
||||
}
|
||||
|
|
|
@ -729,7 +729,7 @@ TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
|
|||
}
|
||||
|
||||
void
|
||||
TimingSimpleCPU::IcachePort::recvRetry()
|
||||
TimingSimpleCPU::IcachePort::recvReqRetry()
|
||||
{
|
||||
// we shouldn't get a retry unless we have a packet that we're
|
||||
// waiting to transmit
|
||||
|
@ -846,8 +846,8 @@ TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
|
|||
// In the case of a split transaction and a cache that is
|
||||
// faster than a CPU we could get two responses in the
|
||||
// same tick, delay the second one
|
||||
if (!retryEvent.scheduled())
|
||||
cpu->schedule(retryEvent, cpu->clockEdge(Cycles(1)));
|
||||
if (!retryRespEvent.scheduled())
|
||||
cpu->schedule(retryRespEvent, cpu->clockEdge(Cycles(1)));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
@ -859,7 +859,7 @@ TimingSimpleCPU::DcachePort::DTickEvent::process()
|
|||
}
|
||||
|
||||
void
|
||||
TimingSimpleCPU::DcachePort::recvRetry()
|
||||
TimingSimpleCPU::DcachePort::recvReqRetry()
|
||||
{
|
||||
// we shouldn't get a retry unless we have a packet that we're
|
||||
// waiting to transmit
|
||||
|
|
|
@ -157,7 +157,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
|
|||
public:
|
||||
|
||||
TimingCPUPort(const std::string& _name, TimingSimpleCPU* _cpu)
|
||||
: MasterPort(_name, _cpu), cpu(_cpu), retryEvent(this)
|
||||
: MasterPort(_name, _cpu), cpu(_cpu), retryRespEvent(this)
|
||||
{ }
|
||||
|
||||
protected:
|
||||
|
@ -179,7 +179,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
|
|||
void schedule(PacketPtr _pkt, Tick t);
|
||||
};
|
||||
|
||||
EventWrapper<MasterPort, &MasterPort::sendRetry> retryEvent;
|
||||
EventWrapper<MasterPort, &MasterPort::sendRetryResp> retryRespEvent;
|
||||
};
|
||||
|
||||
class IcachePort : public TimingCPUPort
|
||||
|
@ -195,7 +195,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
|
|||
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
|
||||
virtual void recvRetry();
|
||||
virtual void recvReqRetry();
|
||||
|
||||
struct ITickEvent : public TickEvent
|
||||
{
|
||||
|
@ -232,7 +232,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
|
|||
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
|
||||
virtual void recvRetry();
|
||||
virtual void recvReqRetry();
|
||||
|
||||
virtual bool isSnooping() const {
|
||||
return true;
|
||||
|
|
|
@ -60,7 +60,7 @@ class RubyDirectedTester : public MemObject
|
|||
|
||||
protected:
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
virtual void recvRetry()
|
||||
virtual void recvReqRetry()
|
||||
{ panic("%s does not expect a retry\n", name()); }
|
||||
};
|
||||
|
||||
|
|
|
@ -63,7 +63,7 @@ MemTest::CpuPort::recvTimingResp(PacketPtr pkt)
|
|||
}
|
||||
|
||||
void
|
||||
MemTest::CpuPort::recvRetry()
|
||||
MemTest::CpuPort::recvReqRetry()
|
||||
{
|
||||
memtest.recvRetry();
|
||||
}
|
||||
|
|
|
@ -114,7 +114,7 @@ class MemTest : public MemObject
|
|||
|
||||
Tick recvAtomicSnoop(PacketPtr pkt) { return 0; }
|
||||
|
||||
void recvRetry();
|
||||
void recvReqRetry();
|
||||
};
|
||||
|
||||
CpuPort port;
|
||||
|
|
|
@ -59,7 +59,7 @@ NetworkTest::CpuPort::recvTimingResp(PacketPtr pkt)
|
|||
}
|
||||
|
||||
void
|
||||
NetworkTest::CpuPort::recvRetry()
|
||||
NetworkTest::CpuPort::recvReqRetry()
|
||||
{
|
||||
networktest->doRetry();
|
||||
}
|
||||
|
|
|
@ -91,7 +91,7 @@ class NetworkTest : public MemObject
|
|||
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
|
||||
virtual void recvRetry();
|
||||
virtual void recvReqRetry();
|
||||
};
|
||||
|
||||
CpuPort cachePort;
|
||||
|
|
|
@ -75,7 +75,7 @@ class RubyTester : public MemObject
|
|||
|
||||
protected:
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
virtual void recvRetry()
|
||||
virtual void recvReqRetry()
|
||||
{ panic("%s does not expect a retry\n", name()); }
|
||||
};
|
||||
|
||||
|
|
|
@ -476,7 +476,7 @@ TrafficGen::enterState(uint32_t newState)
|
|||
}
|
||||
|
||||
void
|
||||
TrafficGen::recvRetry()
|
||||
TrafficGen::recvReqRetry()
|
||||
{
|
||||
assert(retryPkt != NULL);
|
||||
|
||||
|
|
|
@ -91,7 +91,7 @@ class TrafficGen : public MemObject
|
|||
* Receive a retry from the neighbouring port and attempt to
|
||||
* resend the waiting packet.
|
||||
*/
|
||||
void recvRetry();
|
||||
void recvReqRetry();
|
||||
|
||||
/** Struct to represent a probabilistic transition during parsing. */
|
||||
struct Transition {
|
||||
|
@ -148,7 +148,7 @@ class TrafficGen : public MemObject
|
|||
|
||||
protected:
|
||||
|
||||
void recvRetry() { trafficGen.recvRetry(); }
|
||||
void recvReqRetry() { trafficGen.recvReqRetry(); }
|
||||
|
||||
bool recvTimingResp(PacketPtr pkt);
|
||||
|
||||
|
|
|
@ -146,7 +146,7 @@ DmaPort::drain(DrainManager *dm)
|
|||
}
|
||||
|
||||
void
|
||||
DmaPort::recvRetry()
|
||||
DmaPort::recvReqRetry()
|
||||
{
|
||||
assert(transmitList.size());
|
||||
trySendTimingReq();
|
||||
|
|
|
@ -134,7 +134,7 @@ class DmaPort : public MasterPort
|
|||
protected:
|
||||
|
||||
bool recvTimingResp(PacketPtr pkt);
|
||||
void recvRetry() ;
|
||||
void recvReqRetry() ;
|
||||
|
||||
void queueDma(PacketPtr pkt);
|
||||
|
||||
|
|
|
@ -190,15 +190,15 @@ AddrMapper::isSnooping() const
|
|||
}
|
||||
|
||||
void
|
||||
AddrMapper::recvRetryMaster()
|
||||
AddrMapper::recvReqRetry()
|
||||
{
|
||||
slavePort.sendRetry();
|
||||
slavePort.sendRetryReq();
|
||||
}
|
||||
|
||||
void
|
||||
AddrMapper::recvRetrySlave()
|
||||
AddrMapper::recvRespRetry()
|
||||
{
|
||||
masterPort.sendRetry();
|
||||
masterPort.sendRetryResp();
|
||||
}
|
||||
|
||||
void
|
||||
|
|
|
@ -143,9 +143,9 @@ class AddrMapper : public MemObject
|
|||
return mapper.isSnooping();
|
||||
}
|
||||
|
||||
void recvRetry()
|
||||
void recvReqRetry()
|
||||
{
|
||||
mapper.recvRetryMaster();
|
||||
mapper.recvReqRetry();
|
||||
}
|
||||
|
||||
private:
|
||||
|
@ -193,9 +193,9 @@ class AddrMapper : public MemObject
|
|||
return mapper.getAddrRanges();
|
||||
}
|
||||
|
||||
void recvRetry()
|
||||
void recvRespRetry()
|
||||
{
|
||||
mapper.recvRetrySlave();
|
||||
mapper.recvRespRetry();
|
||||
}
|
||||
|
||||
private:
|
||||
|
@ -227,9 +227,9 @@ class AddrMapper : public MemObject
|
|||
|
||||
bool isSnooping() const;
|
||||
|
||||
void recvRetryMaster();
|
||||
void recvReqRetry();
|
||||
|
||||
void recvRetrySlave();
|
||||
void recvRespRetry();
|
||||
|
||||
void recvRangeChange();
|
||||
};
|
||||
|
|
|
@ -200,7 +200,7 @@ Bridge::BridgeSlavePort::retryStalledReq()
|
|||
if (retryReq) {
|
||||
DPRINTF(Bridge, "Request waiting for retry, now retrying\n");
|
||||
retryReq = false;
|
||||
sendRetry();
|
||||
sendRetryReq();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -309,7 +309,7 @@ Bridge::BridgeSlavePort::trySendTiming()
|
|||
if (!masterPort.reqQueueFull() && retryReq) {
|
||||
DPRINTF(Bridge, "Request waiting for retry, now retrying\n");
|
||||
retryReq = false;
|
||||
sendRetry();
|
||||
sendRetryReq();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -318,13 +318,13 @@ Bridge::BridgeSlavePort::trySendTiming()
|
|||
}
|
||||
|
||||
void
|
||||
Bridge::BridgeMasterPort::recvRetry()
|
||||
Bridge::BridgeMasterPort::recvReqRetry()
|
||||
{
|
||||
trySendTiming();
|
||||
}
|
||||
|
||||
void
|
||||
Bridge::BridgeSlavePort::recvRetry()
|
||||
Bridge::BridgeSlavePort::recvRespRetry()
|
||||
{
|
||||
trySendTiming();
|
||||
}
|
||||
|
|
|
@ -193,7 +193,7 @@ class Bridge : public MemObject
|
|||
|
||||
/** When receiving a retry request from the peer port,
|
||||
pass it to the bridge. */
|
||||
void recvRetry();
|
||||
void recvRespRetry();
|
||||
|
||||
/** When receiving a Atomic requestfrom the peer port,
|
||||
pass it to the bridge. */
|
||||
|
@ -301,7 +301,7 @@ class Bridge : public MemObject
|
|||
|
||||
/** When receiving a retry request from the peer port,
|
||||
pass it to the bridge. */
|
||||
void recvRetry();
|
||||
void recvReqRetry();
|
||||
};
|
||||
|
||||
/** Slave port of the bridge. */
|
||||
|
|
2
src/mem/cache/base.cc
vendored
2
src/mem/cache/base.cc
vendored
|
@ -122,7 +122,7 @@ BaseCache::CacheSlavePort::processSendRetry()
|
|||
|
||||
// reset the flag and call retry
|
||||
mustSendRetry = false;
|
||||
sendRetry();
|
||||
sendRetryReq();
|
||||
}
|
||||
|
||||
void
|
||||
|
|
12
src/mem/cache/base.hh
vendored
12
src/mem/cache/base.hh
vendored
|
@ -125,20 +125,20 @@ class BaseCache : public MemObject
|
|||
|
||||
/**
|
||||
* Schedule a send of a request packet (from the MSHR). Note
|
||||
* that we could already have a retry or a transmit list of
|
||||
* responses outstanding.
|
||||
* that we could already have a retry outstanding.
|
||||
*/
|
||||
void requestBus(RequestCause cause, Tick time)
|
||||
{
|
||||
DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause);
|
||||
queue.schedSendEvent(time);
|
||||
reqQueue.schedSendEvent(time);
|
||||
}
|
||||
|
||||
protected:
|
||||
|
||||
CacheMasterPort(const std::string &_name, BaseCache *_cache,
|
||||
MasterPacketQueue &_queue) :
|
||||
QueuedMasterPort(_name, _cache, _queue)
|
||||
ReqPacketQueue &_reqQueue,
|
||||
SnoopRespPacketQueue &_snoopRespQueue) :
|
||||
QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
|
||||
{ }
|
||||
|
||||
/**
|
||||
|
@ -176,7 +176,7 @@ class BaseCache : public MemObject
|
|||
const std::string &_label);
|
||||
|
||||
/** A normal packet queue used to store responses. */
|
||||
SlavePacketQueue queue;
|
||||
RespPacketQueue queue;
|
||||
|
||||
bool blocked;
|
||||
|
||||
|
|
15
src/mem/cache/cache.hh
vendored
15
src/mem/cache/cache.hh
vendored
|
@ -114,18 +114,21 @@ class Cache : public BaseCache
|
|||
* current MSHR status. This queue has a pointer to our specific
|
||||
* cache implementation and is used by the MemSidePort.
|
||||
*/
|
||||
class MemSidePacketQueue : public MasterPacketQueue
|
||||
class CacheReqPacketQueue : public ReqPacketQueue
|
||||
{
|
||||
|
||||
protected:
|
||||
|
||||
Cache<TagStore> &cache;
|
||||
SnoopRespPacketQueue &snoopRespQueue;
|
||||
|
||||
public:
|
||||
|
||||
MemSidePacketQueue(Cache<TagStore> &cache, MasterPort &port,
|
||||
const std::string &label) :
|
||||
MasterPacketQueue(cache, port, label), cache(cache) { }
|
||||
CacheReqPacketQueue(Cache<TagStore> &cache, MasterPort &port,
|
||||
SnoopRespPacketQueue &snoop_resp_queue,
|
||||
const std::string &label) :
|
||||
ReqPacketQueue(cache, port, label), cache(cache),
|
||||
snoopRespQueue(snoop_resp_queue) { }
|
||||
|
||||
/**
|
||||
* Override the normal sendDeferredPacket and do not only
|
||||
|
@ -145,7 +148,9 @@ class Cache : public BaseCache
|
|||
private:
|
||||
|
||||
/** The cache-specific queue. */
|
||||
MemSidePacketQueue _queue;
|
||||
CacheReqPacketQueue _reqQueue;
|
||||
|
||||
SnoopRespPacketQueue _snoopRespQueue;
|
||||
|
||||
// a pointer to our specific cache implementation
|
||||
Cache<TagStore> *cache;
|
||||
|
|
116
src/mem/cache/cache_impl.hh
vendored
116
src/mem/cache/cache_impl.hh
vendored
|
@ -2183,61 +2183,84 @@ Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
|
|||
|
||||
template<class TagStore>
|
||||
void
|
||||
Cache<TagStore>::MemSidePacketQueue::sendDeferredPacket()
|
||||
Cache<TagStore>::CacheReqPacketQueue::sendDeferredPacket()
|
||||
{
|
||||
// if we have a response packet waiting we have to start with that
|
||||
if (deferredPacketReady()) {
|
||||
// use the normal approach from the timing port
|
||||
trySendTiming();
|
||||
// sanity check
|
||||
assert(!waitingOnRetry);
|
||||
|
||||
// there should never be any deferred request packets in the
|
||||
// queue, instead we resly on the cache to provide the packets
|
||||
// from the MSHR queue or write queue
|
||||
assert(deferredPacketReadyTime() == MaxTick);
|
||||
|
||||
// check for request packets (requests & writebacks)
|
||||
PacketPtr pkt = cache.getTimingPacket();
|
||||
if (pkt == NULL) {
|
||||
// can happen if e.g. we attempt a writeback and fail, but
|
||||
// before the retry, the writeback is eliminated because
|
||||
// we snoop another cache's ReadEx.
|
||||
} else {
|
||||
// check for request packets (requests & writebacks)
|
||||
PacketPtr pkt = cache.getTimingPacket();
|
||||
if (pkt == NULL) {
|
||||
// can happen if e.g. we attempt a writeback and fail, but
|
||||
// before the retry, the writeback is eliminated because
|
||||
// we snoop another cache's ReadEx.
|
||||
waitingOnRetry = false;
|
||||
} else {
|
||||
MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
|
||||
MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
|
||||
// in most cases getTimingPacket allocates a new packet, and
|
||||
// we must delete it unless it is successfully sent
|
||||
bool delete_pkt = !mshr->isForwardNoResponse();
|
||||
|
||||
waitingOnRetry = !masterPort.sendTimingReq(pkt);
|
||||
// let our snoop responses go first if there are responses to
|
||||
// the same addresses we are about to writeback, note that
|
||||
// this creates a dependency between requests and snoop
|
||||
// responses, but that should not be a problem since there is
|
||||
// a chain already and the key is that the snoop responses can
|
||||
// sink unconditionally
|
||||
if (snoopRespQueue.hasAddr(pkt->getAddr())) {
|
||||
DPRINTF(CachePort, "Waiting for snoop response to be sent\n");
|
||||
Tick when = snoopRespQueue.deferredPacketReadyTime();
|
||||
schedSendEvent(when);
|
||||
|
||||
if (waitingOnRetry) {
|
||||
DPRINTF(CachePort, "now waiting on a retry\n");
|
||||
if (!mshr->isForwardNoResponse()) {
|
||||
// we are awaiting a retry, but we
|
||||
// delete the packet and will be creating a new packet
|
||||
// when we get the opportunity
|
||||
delete pkt;
|
||||
}
|
||||
// note that we have now masked any requestBus and
|
||||
// schedSendEvent (we will wait for a retry before
|
||||
// doing anything), and this is so even if we do not
|
||||
// care about this packet and might override it before
|
||||
// it gets retried
|
||||
} else {
|
||||
// As part of the call to sendTimingReq the packet is
|
||||
// forwarded to all neighbouring caches (and any
|
||||
// caches above them) as a snoop. The packet is also
|
||||
// sent to any potential cache below as the
|
||||
// interconnect is not allowed to buffer the
|
||||
// packet. Thus at this point we know if any of the
|
||||
// neighbouring, or the downstream cache is
|
||||
// responding, and if so, if it is with a dirty line
|
||||
// or not.
|
||||
bool pending_dirty_resp = !pkt->sharedAsserted() &&
|
||||
pkt->memInhibitAsserted();
|
||||
if (delete_pkt)
|
||||
delete pkt;
|
||||
|
||||
cache.markInService(mshr, pending_dirty_resp);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
waitingOnRetry = !masterPort.sendTimingReq(pkt);
|
||||
|
||||
if (waitingOnRetry) {
|
||||
DPRINTF(CachePort, "now waiting on a retry\n");
|
||||
if (delete_pkt) {
|
||||
// we are awaiting a retry, but we
|
||||
// delete the packet and will be creating a new packet
|
||||
// when we get the opportunity
|
||||
delete pkt;
|
||||
}
|
||||
// note that we have now masked any requestBus and
|
||||
// schedSendEvent (we will wait for a retry before
|
||||
// doing anything), and this is so even if we do not
|
||||
// care about this packet and might override it before
|
||||
// it gets retried
|
||||
} else {
|
||||
// As part of the call to sendTimingReq the packet is
|
||||
// forwarded to all neighbouring caches (and any
|
||||
// caches above them) as a snoop. The packet is also
|
||||
// sent to any potential cache below as the
|
||||
// interconnect is not allowed to buffer the
|
||||
// packet. Thus at this point we know if any of the
|
||||
// neighbouring, or the downstream cache is
|
||||
// responding, and if so, if it is with a dirty line
|
||||
// or not.
|
||||
bool pending_dirty_resp = !pkt->sharedAsserted() &&
|
||||
pkt->memInhibitAsserted();
|
||||
|
||||
cache.markInService(mshr, pending_dirty_resp);
|
||||
}
|
||||
}
|
||||
|
||||
// if we succeeded and are not waiting for a retry, schedule the
|
||||
// next send, not only looking at the response transmit list, but
|
||||
// also considering when the next MSHR is ready
|
||||
// next send considering when the next MSHR is ready, note that
|
||||
// snoop responses have their own packet queue and thus schedule
|
||||
// their own events
|
||||
if (!waitingOnRetry) {
|
||||
scheduleSend(cache.nextMSHRReadyTime());
|
||||
schedSendEvent(cache.nextMSHRReadyTime());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2245,8 +2268,9 @@ template<class TagStore>
|
|||
Cache<TagStore>::
|
||||
MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
|
||||
const std::string &_label)
|
||||
: BaseCache::CacheMasterPort(_name, _cache, _queue),
|
||||
_queue(*_cache, *this, _label), cache(_cache)
|
||||
: BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
|
||||
_reqQueue(*_cache, *this, _snoopRespQueue, _label),
|
||||
_snoopRespQueue(*_cache, *this, _label), cache(_cache)
|
||||
{
|
||||
}
|
||||
|
||||
|
|
|
@ -66,8 +66,8 @@ CoherentXBar::CoherentXBar(const CoherentXBarParams *p)
|
|||
masterPorts.push_back(bp);
|
||||
reqLayers.push_back(new ReqLayer(*bp, *this,
|
||||
csprintf(".reqLayer%d", i)));
|
||||
snoopLayers.push_back(new SnoopLayer(*bp, *this,
|
||||
csprintf(".snoopLayer%d", i)));
|
||||
snoopLayers.push_back(new SnoopRespLayer(*bp, *this,
|
||||
csprintf(".snoopLayer%d", i)));
|
||||
}
|
||||
|
||||
// see if we have a default slave device connected and if so add
|
||||
|
@ -80,9 +80,9 @@ CoherentXBar::CoherentXBar(const CoherentXBarParams *p)
|
|||
masterPorts.push_back(bp);
|
||||
reqLayers.push_back(new ReqLayer(*bp, *this, csprintf(".reqLayer%d",
|
||||
defaultPortID)));
|
||||
snoopLayers.push_back(new SnoopLayer(*bp, *this,
|
||||
csprintf(".snoopLayer%d",
|
||||
defaultPortID)));
|
||||
snoopLayers.push_back(new SnoopRespLayer(*bp, *this,
|
||||
csprintf(".snoopLayer%d",
|
||||
defaultPortID)));
|
||||
}
|
||||
|
||||
// create the slave ports, once again starting at zero
|
||||
|
@ -528,7 +528,7 @@ CoherentXBar::forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id,
|
|||
}
|
||||
|
||||
void
|
||||
CoherentXBar::recvRetry(PortID master_port_id)
|
||||
CoherentXBar::recvReqRetry(PortID master_port_id)
|
||||
{
|
||||
// responses and snoop responses never block on forwarding them,
|
||||
// so the retry will always be coming from a port to which we
|
||||
|
|
|
@ -75,12 +75,9 @@ class CoherentXBar : public BaseXBar
|
|||
* Declare the layers of this crossbar, one vector for requests,
|
||||
* one for responses, and one for snoop responses
|
||||
*/
|
||||
typedef Layer<SlavePort,MasterPort> ReqLayer;
|
||||
typedef Layer<MasterPort,SlavePort> RespLayer;
|
||||
typedef Layer<SlavePort,MasterPort> SnoopLayer;
|
||||
std::vector<ReqLayer*> reqLayers;
|
||||
std::vector<RespLayer*> respLayers;
|
||||
std::vector<SnoopLayer*> snoopLayers;
|
||||
std::vector<SnoopRespLayer*> snoopLayers;
|
||||
|
||||
/**
|
||||
* Declaration of the coherent crossbar slave port type, one will
|
||||
|
@ -131,7 +128,7 @@ class CoherentXBar : public BaseXBar
|
|||
/**
|
||||
* When receiving a retry, pass it to the crossbar.
|
||||
*/
|
||||
virtual void recvRetry()
|
||||
virtual void recvRespRetry()
|
||||
{ panic("Crossbar slave ports should never retry.\n"); }
|
||||
|
||||
/**
|
||||
|
@ -202,8 +199,8 @@ class CoherentXBar : public BaseXBar
|
|||
|
||||
/** When reciving a retry from the peer port (at id),
|
||||
pass it to the crossbar. */
|
||||
virtual void recvRetry()
|
||||
{ xbar.recvRetry(id); }
|
||||
virtual void recvReqRetry()
|
||||
{ xbar.recvReqRetry(id); }
|
||||
|
||||
};
|
||||
|
||||
|
@ -233,14 +230,15 @@ class CoherentXBar : public BaseXBar
|
|||
* Override the sending of retries and pass them on through
|
||||
* the mirrored slave port.
|
||||
*/
|
||||
void sendRetry() {
|
||||
slavePort.sendRetry();
|
||||
void sendRetryResp() {
|
||||
// forward it as a snoop response retry
|
||||
slavePort.sendRetrySnoopResp();
|
||||
}
|
||||
|
||||
/**
|
||||
* Provided as necessary.
|
||||
*/
|
||||
void recvRetry() { panic("SnoopRespPort should never see retry\n"); }
|
||||
void recvReqRetry() { panic("SnoopRespPort should never see retry\n"); }
|
||||
|
||||
/**
|
||||
* Provided as necessary.
|
||||
|
@ -292,7 +290,7 @@ class CoherentXBar : public BaseXBar
|
|||
|
||||
/** Timing function called by port when it is once again able to process
|
||||
* requests. */
|
||||
void recvRetry(PortID master_port_id);
|
||||
void recvReqRetry(PortID master_port_id);
|
||||
|
||||
/**
|
||||
* Forward a timing packet to our snoopers, potentially excluding
|
||||
|
|
|
@ -429,15 +429,15 @@ CommMonitor::getAddrRanges() const
|
|||
}
|
||||
|
||||
void
|
||||
CommMonitor::recvRetryMaster()
|
||||
CommMonitor::recvReqRetry()
|
||||
{
|
||||
slavePort.sendRetry();
|
||||
slavePort.sendRetryReq();
|
||||
}
|
||||
|
||||
void
|
||||
CommMonitor::recvRetrySlave()
|
||||
CommMonitor::recvRespRetry()
|
||||
{
|
||||
masterPort.sendRetry();
|
||||
masterPort.sendRetryResp();
|
||||
}
|
||||
|
||||
void
|
||||
|
|
|
@ -173,9 +173,9 @@ class CommMonitor : public MemObject
|
|||
return mon.isSnooping();
|
||||
}
|
||||
|
||||
void recvRetry()
|
||||
void recvReqRetry()
|
||||
{
|
||||
mon.recvRetryMaster();
|
||||
mon.recvReqRetry();
|
||||
}
|
||||
|
||||
private:
|
||||
|
@ -229,9 +229,9 @@ class CommMonitor : public MemObject
|
|||
return mon.getAddrRanges();
|
||||
}
|
||||
|
||||
void recvRetry()
|
||||
void recvRespRetry()
|
||||
{
|
||||
mon.recvRetrySlave();
|
||||
mon.recvRespRetry();
|
||||
}
|
||||
|
||||
private:
|
||||
|
@ -263,9 +263,9 @@ class CommMonitor : public MemObject
|
|||
|
||||
bool isSnooping() const;
|
||||
|
||||
void recvRetryMaster();
|
||||
void recvReqRetry();
|
||||
|
||||
void recvRetrySlave();
|
||||
void recvRespRetry();
|
||||
|
||||
void recvRangeChange();
|
||||
|
||||
|
|
|
@ -748,7 +748,7 @@ DRAMCtrl::processRespondEvent()
|
|||
// so if there is a read that was forced to wait, retry now
|
||||
if (retryRdReq) {
|
||||
retryRdReq = false;
|
||||
port.sendRetry();
|
||||
port.sendRetryReq();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1441,7 +1441,7 @@ DRAMCtrl::processNextReqEvent()
|
|||
// the next request processing
|
||||
if (retryWrReq && writeQueue.size() < writeBufferSize) {
|
||||
retryWrReq = false;
|
||||
port.sendRetry();
|
||||
port.sendRetryReq();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -95,7 +95,7 @@ class DRAMCtrl : public AbstractMemory
|
|||
class MemoryPort : public QueuedSlavePort
|
||||
{
|
||||
|
||||
SlavePacketQueue queue;
|
||||
RespPacketQueue queue;
|
||||
DRAMCtrl& memory;
|
||||
|
||||
public:
|
||||
|
|
|
@ -147,7 +147,7 @@ DRAMSim2::tick()
|
|||
// state and send a retry if conditions have changed
|
||||
if (retryReq && nbrOutstanding() < wrapper.queueSize()) {
|
||||
retryReq = false;
|
||||
port.sendRetry();
|
||||
port.sendRetryReq();
|
||||
}
|
||||
|
||||
schedule(tickEvent, curTick() + wrapper.clockPeriod() * SimClock::Int::ns);
|
||||
|
@ -244,7 +244,7 @@ DRAMSim2::recvTimingReq(PacketPtr pkt)
|
|||
}
|
||||
|
||||
void
|
||||
DRAMSim2::recvRetry()
|
||||
DRAMSim2::recvRespRetry()
|
||||
{
|
||||
DPRINTF(DRAMSim2, "Retrying\n");
|
||||
|
||||
|
@ -402,9 +402,9 @@ DRAMSim2::MemoryPort::recvTimingReq(PacketPtr pkt)
|
|||
}
|
||||
|
||||
void
|
||||
DRAMSim2::MemoryPort::recvRetry()
|
||||
DRAMSim2::MemoryPort::recvRespRetry()
|
||||
{
|
||||
memory.recvRetry();
|
||||
memory.recvRespRetry();
|
||||
}
|
||||
|
||||
DRAMSim2*
|
||||
|
|
|
@ -80,7 +80,7 @@ class DRAMSim2 : public AbstractMemory
|
|||
|
||||
bool recvTimingReq(PacketPtr pkt);
|
||||
|
||||
void recvRetry();
|
||||
void recvRespRetry();
|
||||
|
||||
AddrRangeList getAddrRanges() const;
|
||||
|
||||
|
@ -208,7 +208,7 @@ class DRAMSim2 : public AbstractMemory
|
|||
Tick recvAtomic(PacketPtr pkt);
|
||||
void recvFunctional(PacketPtr pkt);
|
||||
bool recvTimingReq(PacketPtr pkt);
|
||||
void recvRetry();
|
||||
void recvRespRetry();
|
||||
|
||||
};
|
||||
|
||||
|
|
|
@ -80,7 +80,7 @@ class StubSlavePort : public ExternalSlave::Port
|
|||
void recvFunctional(PacketPtr packet);
|
||||
bool recvTimingReq(PacketPtr packet);
|
||||
bool recvTimingSnoopResp(PacketPtr packet);
|
||||
void recvRetry();
|
||||
void recvRespRetry();
|
||||
void recvFunctionalSnoop(PacketPtr packet);
|
||||
};
|
||||
|
||||
|
@ -131,7 +131,7 @@ StubSlavePort::ResponseEvent::process()
|
|||
owner.responsePacket = NULL;
|
||||
|
||||
if (owner.mustRetry)
|
||||
owner.sendRetry();
|
||||
owner.sendRetryReq();
|
||||
owner.mustRetry = false;
|
||||
}
|
||||
}
|
||||
|
@ -161,7 +161,7 @@ StubSlavePort::recvTimingSnoopResp(PacketPtr packet)
|
|||
}
|
||||
|
||||
void
|
||||
StubSlavePort::recvRetry()
|
||||
StubSlavePort::recvRespRetry()
|
||||
{
|
||||
assert(responsePacket);
|
||||
/* Stub handles only one response at a time so responseEvent should never
|
||||
|
|
|
@ -356,15 +356,15 @@ MemCheckerMonitor::getAddrRanges() const
|
|||
}
|
||||
|
||||
void
|
||||
MemCheckerMonitor::recvRetryMaster()
|
||||
MemCheckerMonitor::recvReqRetry()
|
||||
{
|
||||
slavePort.sendRetry();
|
||||
slavePort.sendRetryReq();
|
||||
}
|
||||
|
||||
void
|
||||
MemCheckerMonitor::recvRetrySlave()
|
||||
MemCheckerMonitor::recvRespRetry()
|
||||
{
|
||||
masterPort.sendRetry();
|
||||
masterPort.sendRetryResp();
|
||||
}
|
||||
|
||||
void
|
||||
|
|
|
@ -136,9 +136,9 @@ class MemCheckerMonitor : public MemObject
|
|||
return mon.isSnooping();
|
||||
}
|
||||
|
||||
void recvRetry()
|
||||
void recvReqRetry()
|
||||
{
|
||||
mon.recvRetryMaster();
|
||||
mon.recvReqRetry();
|
||||
}
|
||||
|
||||
private:
|
||||
|
@ -192,9 +192,9 @@ class MemCheckerMonitor : public MemObject
|
|||
return mon.getAddrRanges();
|
||||
}
|
||||
|
||||
void recvRetry()
|
||||
void recvRespRetry()
|
||||
{
|
||||
mon.recvRetrySlave();
|
||||
mon.recvRespRetry();
|
||||
}
|
||||
|
||||
private:
|
||||
|
@ -226,9 +226,9 @@ class MemCheckerMonitor : public MemObject
|
|||
|
||||
bool isSnooping() const;
|
||||
|
||||
void recvRetryMaster();
|
||||
void recvReqRetry();
|
||||
|
||||
void recvRetrySlave();
|
||||
void recvRespRetry();
|
||||
|
||||
void recvRangeChange();
|
||||
|
||||
|
|
|
@ -76,7 +76,8 @@ class MessageMasterPort : public QueuedMasterPort
|
|||
public:
|
||||
|
||||
MessageMasterPort(const std::string &name, MemObject *owner) :
|
||||
QueuedMasterPort(name, owner, queue), queue(*owner, *this)
|
||||
QueuedMasterPort(name, owner, reqQueue, snoopRespQueue),
|
||||
reqQueue(*owner, *this), snoopRespQueue(*owner, *this)
|
||||
{}
|
||||
|
||||
virtual ~MessageMasterPort()
|
||||
|
@ -87,7 +88,8 @@ class MessageMasterPort : public QueuedMasterPort
|
|||
protected:
|
||||
|
||||
/** A packet queue for outgoing packets. */
|
||||
MasterPacketQueue queue;
|
||||
ReqPacketQueue reqQueue;
|
||||
SnoopRespPacketQueue snoopRespQueue;
|
||||
|
||||
// Accept and ignore responses.
|
||||
virtual Tick recvResponse(PacketPtr pkt)
|
||||
|
|
|
@ -224,7 +224,7 @@ NoncoherentXBar::recvTimingResp(PacketPtr pkt, PortID master_port_id)
|
|||
}
|
||||
|
||||
void
|
||||
NoncoherentXBar::recvRetry(PortID master_port_id)
|
||||
NoncoherentXBar::recvReqRetry(PortID master_port_id)
|
||||
{
|
||||
// responses never block on forwarding them, so the retry will
|
||||
// always be coming from a port to which we tried to forward a
|
||||
|
|
|
@ -76,8 +76,6 @@ class NoncoherentXBar : public BaseXBar
|
|||
* Declare the layers of this crossbar, one vector for requests
|
||||
* and one for responses.
|
||||
*/
|
||||
typedef Layer<SlavePort,MasterPort> ReqLayer;
|
||||
typedef Layer<MasterPort,SlavePort> RespLayer;
|
||||
std::vector<ReqLayer*> reqLayers;
|
||||
std::vector<RespLayer*> respLayers;
|
||||
|
||||
|
@ -123,7 +121,7 @@ class NoncoherentXBar : public BaseXBar
|
|||
/**
|
||||
* When receiving a retry, pass it to the crossbar.
|
||||
*/
|
||||
virtual void recvRetry()
|
||||
virtual void recvRespRetry()
|
||||
{ panic("Crossbar slave ports should never retry.\n"); }
|
||||
|
||||
/**
|
||||
|
@ -168,8 +166,8 @@ class NoncoherentXBar : public BaseXBar
|
|||
|
||||
/** When reciving a retry from the peer port (at id),
|
||||
pass it to the crossbar. */
|
||||
virtual void recvRetry()
|
||||
{ xbar.recvRetry(id); }
|
||||
virtual void recvReqRetry()
|
||||
{ xbar.recvReqRetry(id); }
|
||||
|
||||
};
|
||||
|
||||
|
@ -183,7 +181,7 @@ class NoncoherentXBar : public BaseXBar
|
|||
|
||||
/** Timing function called by port when it is once again able to process
|
||||
* requests. */
|
||||
void recvRetry(PortID master_port_id);
|
||||
void recvReqRetry(PortID master_port_id);
|
||||
|
||||
/** Function called by the port when the crossbar is recieving a Atomic
|
||||
transaction.*/
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2012 ARM Limited
|
||||
* Copyright (c) 2012,2015 ARM Limited
|
||||
* All rights reserved.
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -63,9 +63,22 @@ PacketQueue::retry()
|
|||
{
|
||||
DPRINTF(PacketQueue, "Queue %s received retry\n", name());
|
||||
assert(waitingOnRetry);
|
||||
waitingOnRetry = false;
|
||||
sendDeferredPacket();
|
||||
}
|
||||
|
||||
bool
|
||||
PacketQueue::hasAddr(Addr addr) const
|
||||
{
|
||||
// caller is responsible for ensuring that all packets have the
|
||||
// same alignment
|
||||
for (const auto& p : transmitList) {
|
||||
if (p.pkt->getAddr() == addr)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
PacketQueue::checkFunctional(PacketPtr pkt)
|
||||
{
|
||||
|
@ -87,27 +100,11 @@ PacketQueue::checkFunctional(PacketPtr pkt)
|
|||
}
|
||||
|
||||
void
|
||||
PacketQueue::schedSendEvent(Tick when)
|
||||
{
|
||||
// if we are waiting on a retry, do not schedule a send event, and
|
||||
// instead rely on retry being called
|
||||
if (waitingOnRetry) {
|
||||
assert(!sendEvent.scheduled());
|
||||
return;
|
||||
}
|
||||
|
||||
if (!sendEvent.scheduled()) {
|
||||
em.schedule(&sendEvent, when);
|
||||
} else if (sendEvent.when() > when) {
|
||||
em.reschedule(&sendEvent, when);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
PacketQueue::schedSendTiming(PacketPtr pkt, Tick when, bool send_as_snoop)
|
||||
PacketQueue::schedSendTiming(PacketPtr pkt, Tick when)
|
||||
{
|
||||
DPRINTF(PacketQueue, "%s for %s address %x size %d\n", __func__,
|
||||
pkt->cmdString(), pkt->getAddr(), pkt->getSize());
|
||||
|
||||
// we can still send a packet before the end of this tick
|
||||
assert(when >= curTick());
|
||||
|
||||
|
@ -127,14 +124,22 @@ PacketQueue::schedSendTiming(PacketPtr pkt, Tick when, bool send_as_snoop)
|
|||
// note that currently we ignore a potentially outstanding retry
|
||||
// and could in theory put a new packet at the head of the
|
||||
// transmit list before retrying the existing packet
|
||||
transmitList.push_front(DeferredPacket(when, pkt, send_as_snoop));
|
||||
transmitList.push_front(DeferredPacket(when, pkt));
|
||||
schedSendEvent(when);
|
||||
return;
|
||||
}
|
||||
|
||||
// we should either have an outstanding retry, or a send event
|
||||
// scheduled, but there is an unfortunate corner case where the
|
||||
// x86 page-table walker and timing CPU send out a new request as
|
||||
// part of the receiving of a response (called by
|
||||
// PacketQueue::sendDeferredPacket), in which we end up calling
|
||||
// ourselves again before we had a chance to update waitingOnRetry
|
||||
// assert(waitingOnRetry || sendEvent.scheduled());
|
||||
|
||||
// list is non-empty and this belongs at the end
|
||||
if (when >= transmitList.back().tick) {
|
||||
transmitList.push_back(DeferredPacket(when, pkt, send_as_snoop));
|
||||
transmitList.push_back(DeferredPacket(when, pkt));
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -143,46 +148,35 @@ PacketQueue::schedSendTiming(PacketPtr pkt, Tick when, bool send_as_snoop)
|
|||
++i; // already checked for insertion at front
|
||||
while (i != transmitList.end() && when >= i->tick)
|
||||
++i;
|
||||
transmitList.insert(i, DeferredPacket(when, pkt, send_as_snoop));
|
||||
}
|
||||
|
||||
void PacketQueue::trySendTiming()
|
||||
{
|
||||
assert(deferredPacketReady());
|
||||
|
||||
DeferredPacket dp = transmitList.front();
|
||||
|
||||
// use the appropriate implementation of sendTiming based on the
|
||||
// type of port associated with the queue, and whether the packet
|
||||
// is to be sent as a snoop or not
|
||||
waitingOnRetry = !sendTiming(dp.pkt, dp.sendAsSnoop);
|
||||
|
||||
if (!waitingOnRetry) {
|
||||
// take the packet off the list
|
||||
transmitList.pop_front();
|
||||
}
|
||||
transmitList.insert(i, DeferredPacket(when, pkt));
|
||||
}
|
||||
|
||||
void
|
||||
PacketQueue::scheduleSend(Tick time)
|
||||
PacketQueue::schedSendEvent(Tick when)
|
||||
{
|
||||
// the next ready time is either determined by the next deferred packet,
|
||||
// or in the cache through the MSHR ready time
|
||||
Tick nextReady = std::max(std::min(deferredPacketReadyTime(), time),
|
||||
curTick() + 1);
|
||||
// if we are waiting on a retry just hold off
|
||||
if (waitingOnRetry) {
|
||||
DPRINTF(PacketQueue, "Not scheduling send as waiting for retry\n");
|
||||
assert(!sendEvent.scheduled());
|
||||
return;
|
||||
}
|
||||
|
||||
if (when != MaxTick) {
|
||||
// we cannot go back in time, and to be consistent we stick to
|
||||
// one tick in the future
|
||||
when = std::max(when, curTick() + 1);
|
||||
// @todo Revisit the +1
|
||||
|
||||
if (nextReady != MaxTick) {
|
||||
// if the sendTiming caused someone else to call our
|
||||
// recvTiming we could already have an event scheduled, check
|
||||
if (!sendEvent.scheduled()) {
|
||||
em.schedule(&sendEvent, nextReady);
|
||||
} else if (nextReady < sendEvent.when()) {
|
||||
em.schedule(&sendEvent, when);
|
||||
} else if (when < sendEvent.when()) {
|
||||
// if the new time is earlier than when the event
|
||||
// currently is scheduled, move it forward
|
||||
em.reschedule(&sendEvent, nextReady);
|
||||
em.reschedule(&sendEvent, when);
|
||||
}
|
||||
} else {
|
||||
// no more to send, so if we're draining, we may be done
|
||||
// we get a MaxTick when there is no more to send, so if we're
|
||||
// draining, we may be done at this point
|
||||
if (drainManager && transmitList.empty() && !sendEvent.scheduled()) {
|
||||
DPRINTF(Drain, "PacketQueue done draining,"
|
||||
"processing drain event\n");
|
||||
|
@ -195,14 +189,30 @@ PacketQueue::scheduleSend(Tick time)
|
|||
void
|
||||
PacketQueue::sendDeferredPacket()
|
||||
{
|
||||
// try to send what is on the list, this will set waitingOnRetry
|
||||
// accordingly
|
||||
trySendTiming();
|
||||
// sanity checks
|
||||
assert(!waitingOnRetry);
|
||||
assert(deferredPacketReady());
|
||||
|
||||
DeferredPacket dp = transmitList.front();
|
||||
|
||||
// take the packet of the list before sending it, as sending of
|
||||
// the packet in some cases causes a new packet to be enqueued
|
||||
// (most notaly when responding to the timing CPU, leading to a
|
||||
// new request hitting in the L1 icache, leading to a new
|
||||
// response)
|
||||
transmitList.pop_front();
|
||||
|
||||
// use the appropriate implementation of sendTiming based on the
|
||||
// type of queue
|
||||
waitingOnRetry = !sendTiming(dp.pkt);
|
||||
|
||||
// if we succeeded and are not waiting for a retry, schedule the
|
||||
// next send
|
||||
if (!waitingOnRetry) {
|
||||
scheduleSend();
|
||||
schedSendEvent(deferredPacketReadyTime());
|
||||
} else {
|
||||
// put the packet back at the front of the list
|
||||
transmitList.push_front(dp);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -223,32 +233,39 @@ PacketQueue::drain(DrainManager *dm)
|
|||
return 1;
|
||||
}
|
||||
|
||||
MasterPacketQueue::MasterPacketQueue(EventManager& _em, MasterPort& _masterPort,
|
||||
const std::string _label)
|
||||
ReqPacketQueue::ReqPacketQueue(EventManager& _em, MasterPort& _masterPort,
|
||||
const std::string _label)
|
||||
: PacketQueue(_em, _label), masterPort(_masterPort)
|
||||
{
|
||||
}
|
||||
|
||||
bool
|
||||
MasterPacketQueue::sendTiming(PacketPtr pkt, bool send_as_snoop)
|
||||
ReqPacketQueue::sendTiming(PacketPtr pkt)
|
||||
{
|
||||
// attempt to send the packet and return according to the outcome
|
||||
if (!send_as_snoop)
|
||||
return masterPort.sendTimingReq(pkt);
|
||||
else
|
||||
return masterPort.sendTimingSnoopResp(pkt);
|
||||
return masterPort.sendTimingReq(pkt);
|
||||
}
|
||||
|
||||
SlavePacketQueue::SlavePacketQueue(EventManager& _em, SlavePort& _slavePort,
|
||||
const std::string _label)
|
||||
SnoopRespPacketQueue::SnoopRespPacketQueue(EventManager& _em,
|
||||
MasterPort& _masterPort,
|
||||
const std::string _label)
|
||||
: PacketQueue(_em, _label), masterPort(_masterPort)
|
||||
{
|
||||
}
|
||||
|
||||
bool
|
||||
SnoopRespPacketQueue::sendTiming(PacketPtr pkt)
|
||||
{
|
||||
return masterPort.sendTimingSnoopResp(pkt);
|
||||
}
|
||||
|
||||
RespPacketQueue::RespPacketQueue(EventManager& _em, SlavePort& _slavePort,
|
||||
const std::string _label)
|
||||
: PacketQueue(_em, _label), slavePort(_slavePort)
|
||||
{
|
||||
}
|
||||
|
||||
bool
|
||||
SlavePacketQueue::sendTiming(PacketPtr pkt, bool send_as_snoop)
|
||||
RespPacketQueue::sendTiming(PacketPtr pkt)
|
||||
{
|
||||
// we should never have queued snoop requests
|
||||
assert(!send_as_snoop);
|
||||
return slavePort.sendTimingResp(pkt);
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2012 ARM Limited
|
||||
* Copyright (c) 2012,2015 ARM Limited
|
||||
* All rights reserved.
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -49,8 +49,7 @@
|
|||
* Declaration of a simple PacketQueue that is associated with
|
||||
* a port on which it attempts to send packets according to the time
|
||||
* stamp given to them at insertion. The packet queue is responsible
|
||||
* for the flow control of the port, but relies on the module
|
||||
* notifying the queue when a transfer ends.
|
||||
* for the flow control of the port.
|
||||
*/
|
||||
|
||||
#include <list>
|
||||
|
@ -71,28 +70,23 @@ class PacketQueue : public Drainable
|
|||
public:
|
||||
Tick tick; ///< The tick when the packet is ready to transmit
|
||||
PacketPtr pkt; ///< Pointer to the packet to transmit
|
||||
bool sendAsSnoop; ///< Should it be sent as a snoop or not
|
||||
DeferredPacket(Tick t, PacketPtr p, bool send_as_snoop)
|
||||
: tick(t), pkt(p), sendAsSnoop(send_as_snoop)
|
||||
DeferredPacket(Tick t, PacketPtr p)
|
||||
: tick(t), pkt(p)
|
||||
{}
|
||||
};
|
||||
|
||||
typedef std::list<DeferredPacket> DeferredPacketList;
|
||||
|
||||
/** A list of outgoing timing response packets that haven't been
|
||||
* serviced yet. */
|
||||
/** A list of outgoing packets. */
|
||||
DeferredPacketList transmitList;
|
||||
|
||||
/** The manager which is used for the event queue */
|
||||
EventManager& em;
|
||||
|
||||
/** This function attempts to send deferred packets. Scheduled to
|
||||
* be called in the future via SendEvent. */
|
||||
/** Used to schedule sending of deferred packets. */
|
||||
void processSendEvent();
|
||||
|
||||
/**
|
||||
* Event used to call processSendEvent.
|
||||
**/
|
||||
/** Event used to call processSendEvent. */
|
||||
EventWrapper<PacketQueue, &PacketQueue::processSendEvent> sendEvent;
|
||||
|
||||
/** If we need to drain, keep the drain manager around until we're done
|
||||
|
@ -104,55 +98,28 @@ class PacketQueue : public Drainable
|
|||
/** Label to use for print request packets label stack. */
|
||||
const std::string label;
|
||||
|
||||
/** Remember whether we're awaiting a retry from the bus. */
|
||||
/** Remember whether we're awaiting a retry. */
|
||||
bool waitingOnRetry;
|
||||
|
||||
/** Check whether we have a packet ready to go on the transmit list. */
|
||||
bool deferredPacketReady() const
|
||||
{ return !transmitList.empty() && transmitList.front().tick <= curTick(); }
|
||||
|
||||
Tick deferredPacketReadyTime() const
|
||||
{ return transmitList.empty() ? MaxTick : transmitList.front().tick; }
|
||||
|
||||
/**
|
||||
* Attempt to send the packet at the head of the transmit
|
||||
* list. Caller must guarantee that the list is non-empty and that
|
||||
* the head packet is scheduled for curTick() (or earlier). Note
|
||||
* that a subclass of the PacketQueue can override this method and
|
||||
* thus change the behaviour (as done by the cache).
|
||||
* Attempt to send a packet. Note that a subclass of the
|
||||
* PacketQueue can override this method and thus change the
|
||||
* behaviour (as done by the cache for the request queue). The
|
||||
* default implementation sends the head of the transmit list. The
|
||||
* caller must guarantee that the list is non-empty and that the
|
||||
* head packet is scheduled for curTick() (or earlier).
|
||||
*/
|
||||
virtual void sendDeferredPacket();
|
||||
|
||||
/**
|
||||
* Attempt to send the packet at the front of the transmit list,
|
||||
* and set waitingOnRetry accordingly. The packet is temporarily
|
||||
* taken off the list, but put back at the front if not
|
||||
* successfully sent.
|
||||
* Send a packet using the appropriate method for the specific
|
||||
* subclass (reuest, response or snoop response).
|
||||
*/
|
||||
void trySendTiming();
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
virtual bool sendTiming(PacketPtr pkt, bool send_as_snoop) = 0;
|
||||
|
||||
/**
|
||||
* Based on the transmit list, or the provided time, schedule a
|
||||
* send event if there are packets to send. If we are idle and
|
||||
* asked to drain then do so.
|
||||
*
|
||||
* @param time an alternative time for the next send event
|
||||
*/
|
||||
void scheduleSend(Tick time = MaxTick);
|
||||
|
||||
/**
|
||||
* Simple ports are generally used as slave ports (i.e. the
|
||||
* respond to requests) and thus do not expect to receive any
|
||||
* range changes (as the neighbouring port has a master role and
|
||||
* do not have any address ranges. A subclass can override the
|
||||
* default behaviuor if needed.
|
||||
*/
|
||||
virtual void recvRangeChange() { }
|
||||
virtual bool sendTiming(PacketPtr pkt) = 0;
|
||||
|
||||
/**
|
||||
* Create a packet queue, linked to an event manager, and a label
|
||||
|
@ -177,40 +144,56 @@ class PacketQueue : public Drainable
|
|||
*/
|
||||
virtual const std::string name() const = 0;
|
||||
|
||||
/**
|
||||
* Get the size of the queue.
|
||||
*/
|
||||
size_t size() const { return transmitList.size(); }
|
||||
|
||||
/**
|
||||
* Get the next packet ready time.
|
||||
*/
|
||||
Tick deferredPacketReadyTime() const
|
||||
{ return transmitList.empty() ? MaxTick : transmitList.front().tick; }
|
||||
|
||||
/**
|
||||
* Check if a packets address exists in the queue.
|
||||
*/
|
||||
bool hasAddr(Addr addr) const;
|
||||
|
||||
/** Check the list of buffered packets against the supplied
|
||||
* functional request. */
|
||||
bool checkFunctional(PacketPtr pkt);
|
||||
|
||||
/**
|
||||
* Schedule a send even if not already waiting for a retry. If the
|
||||
* requested time is before an already scheduled send event it
|
||||
* will be rescheduled.
|
||||
* Schedule a send event if we are not already waiting for a
|
||||
* retry. If the requested time is before an already scheduled
|
||||
* send event, the event will be rescheduled. If MaxTick is
|
||||
* passed, no event is scheduled. Instead, if we are idle and
|
||||
* asked to drain then check and signal drained.
|
||||
*
|
||||
* @param when
|
||||
* @param when time to schedule an event
|
||||
*/
|
||||
void schedSendEvent(Tick when);
|
||||
|
||||
/**
|
||||
* Add a packet to the transmit list, and ensure that a
|
||||
* processSendEvent is called in the future.
|
||||
* Add a packet to the transmit list, and schedule a send event.
|
||||
*
|
||||
* @param pkt Packet to send
|
||||
* @param when Absolute time (in ticks) to send packet
|
||||
* @param send_as_snoop Send the packet as a snoop or not
|
||||
*/
|
||||
void schedSendTiming(PacketPtr pkt, Tick when, bool send_as_snoop = false);
|
||||
void schedSendTiming(PacketPtr pkt, Tick when);
|
||||
|
||||
/**
|
||||
* Used by a port to notify the queue that a retry was received
|
||||
* and that the queue can proceed and retry sending the packet
|
||||
* that caused the wait.
|
||||
* Retry sending a packet from the queue. Note that this is not
|
||||
* necessarily the same packet if something has been added with an
|
||||
* earlier time stamp.
|
||||
*/
|
||||
void retry();
|
||||
|
||||
unsigned int drain(DrainManager *dm);
|
||||
};
|
||||
|
||||
class MasterPacketQueue : public PacketQueue
|
||||
class ReqPacketQueue : public PacketQueue
|
||||
{
|
||||
|
||||
protected:
|
||||
|
@ -220,7 +203,7 @@ class MasterPacketQueue : public PacketQueue
|
|||
public:
|
||||
|
||||
/**
|
||||
* Create a master packet queue, linked to an event manager, a
|
||||
* Create a request packet queue, linked to an event manager, a
|
||||
* master port, and a label that will be used for functional print
|
||||
* request packets.
|
||||
*
|
||||
|
@ -228,18 +211,49 @@ class MasterPacketQueue : public PacketQueue
|
|||
* @param _masterPort Master port used to send the packets
|
||||
* @param _label Label to push on the label stack for print request packets
|
||||
*/
|
||||
MasterPacketQueue(EventManager& _em, MasterPort& _masterPort,
|
||||
const std::string _label = "MasterPacketQueue");
|
||||
ReqPacketQueue(EventManager& _em, MasterPort& _masterPort,
|
||||
const std::string _label = "ReqPacketQueue");
|
||||
|
||||
virtual ~MasterPacketQueue() { }
|
||||
virtual ~ReqPacketQueue() { }
|
||||
|
||||
const std::string name() const
|
||||
{ return masterPort.name() + "-" + label; }
|
||||
|
||||
bool sendTiming(PacketPtr pkt, bool send_as_snoop);
|
||||
bool sendTiming(PacketPtr pkt);
|
||||
|
||||
};
|
||||
|
||||
class SlavePacketQueue : public PacketQueue
|
||||
class SnoopRespPacketQueue : public PacketQueue
|
||||
{
|
||||
|
||||
protected:
|
||||
|
||||
MasterPort& masterPort;
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
* Create a snoop response packet queue, linked to an event
|
||||
* manager, a master port, and a label that will be used for
|
||||
* functional print request packets.
|
||||
*
|
||||
* @param _em Event manager used for scheduling this queue
|
||||
* @param _masterPort Master port used to send the packets
|
||||
* @param _label Label to push on the label stack for print request packets
|
||||
*/
|
||||
SnoopRespPacketQueue(EventManager& _em, MasterPort& _masterPort,
|
||||
const std::string _label = "SnoopRespPacketQueue");
|
||||
|
||||
virtual ~SnoopRespPacketQueue() { }
|
||||
|
||||
const std::string name() const
|
||||
{ return masterPort.name() + "-" + label; }
|
||||
|
||||
bool sendTiming(PacketPtr pkt);
|
||||
|
||||
};
|
||||
|
||||
class RespPacketQueue : public PacketQueue
|
||||
{
|
||||
|
||||
protected:
|
||||
|
@ -249,7 +263,7 @@ class SlavePacketQueue : public PacketQueue
|
|||
public:
|
||||
|
||||
/**
|
||||
* Create a slave packet queue, linked to an event manager, a
|
||||
* Create a response packet queue, linked to an event manager, a
|
||||
* slave port, and a label that will be used for functional print
|
||||
* request packets.
|
||||
*
|
||||
|
@ -257,15 +271,15 @@ class SlavePacketQueue : public PacketQueue
|
|||
* @param _slavePort Slave port used to send the packets
|
||||
* @param _label Label to push on the label stack for print request packets
|
||||
*/
|
||||
SlavePacketQueue(EventManager& _em, SlavePort& _slavePort,
|
||||
const std::string _label = "SlavePacketQueue");
|
||||
RespPacketQueue(EventManager& _em, SlavePort& _slavePort,
|
||||
const std::string _label = "RespPacketQueue");
|
||||
|
||||
virtual ~SlavePacketQueue() { }
|
||||
virtual ~RespPacketQueue() { }
|
||||
|
||||
const std::string name() const
|
||||
{ return slavePort.name() + "-" + label; }
|
||||
|
||||
bool sendTiming(PacketPtr pkt, bool send_as_snoop);
|
||||
bool sendTiming(PacketPtr pkt);
|
||||
|
||||
};
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2012 ARM Limited
|
||||
* Copyright (c) 2012,2015 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -190,9 +190,9 @@ MasterPort::sendTimingSnoopResp(PacketPtr pkt)
|
|||
}
|
||||
|
||||
void
|
||||
MasterPort::sendRetry()
|
||||
MasterPort::sendRetryResp()
|
||||
{
|
||||
_slavePort->recvRetry();
|
||||
_slavePort->recvRespRetry();
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -261,7 +261,13 @@ SlavePort::sendTimingSnoopReq(PacketPtr pkt)
|
|||
}
|
||||
|
||||
void
|
||||
SlavePort::sendRetry()
|
||||
SlavePort::sendRetryReq()
|
||||
{
|
||||
_masterPort->recvRetry();
|
||||
_masterPort->recvReqRetry();
|
||||
}
|
||||
|
||||
void
|
||||
SlavePort::sendRetrySnoopResp()
|
||||
{
|
||||
_masterPort->recvRetrySnoopResp();
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2011-2012 ARM Limited
|
||||
* Copyright (c) 2011-2012,2015 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -214,7 +214,7 @@ class MasterPort : public BaseMasterPort
|
|||
* Attempt to send a timing request to the slave port by calling
|
||||
* its corresponding receive function. If the send does not
|
||||
* succeed, as indicated by the return value, then the sender must
|
||||
* wait for a recvRetry at which point it can re-issue a
|
||||
* wait for a recvReqRetry at which point it can re-issue a
|
||||
* sendTimingReq.
|
||||
*
|
||||
* @param pkt Packet to send.
|
||||
|
@ -227,8 +227,8 @@ class MasterPort : public BaseMasterPort
|
|||
* Attempt to send a timing snoop response packet to the slave
|
||||
* port by calling its corresponding receive function. If the send
|
||||
* does not succeed, as indicated by the return value, then the
|
||||
* sender must wait for a recvRetry at which point it can re-issue
|
||||
* a sendTimingSnoopResp.
|
||||
* sender must wait for a recvRetrySnoop at which point it can
|
||||
* re-issue a sendTimingSnoopResp.
|
||||
*
|
||||
* @param pkt Packet to send.
|
||||
*/
|
||||
|
@ -236,9 +236,11 @@ class MasterPort : public BaseMasterPort
|
|||
|
||||
/**
|
||||
* Send a retry to the slave port that previously attempted a
|
||||
* sendTimingResp to this master port and failed.
|
||||
* sendTimingResp to this master port and failed. Note that this
|
||||
* is virtual so that the "fake" snoop response port in the
|
||||
* coherent crossbar can override the behaviour.
|
||||
*/
|
||||
virtual void sendRetry();
|
||||
virtual void sendRetryResp();
|
||||
|
||||
/**
|
||||
* Determine if this master port is snooping or not. The default
|
||||
|
@ -294,12 +296,21 @@ class MasterPort : public BaseMasterPort
|
|||
}
|
||||
|
||||
/**
|
||||
* Called by the slave port if sendTimingReq or
|
||||
* sendTimingSnoopResp was called on this master port (causing
|
||||
* recvTimingReq and recvTimingSnoopResp to be called on the
|
||||
* slave port) and was unsuccesful.
|
||||
* Called by the slave port if sendTimingReq was called on this
|
||||
* master port (causing recvTimingReq to be called on the slave
|
||||
* port) and was unsuccesful.
|
||||
*/
|
||||
virtual void recvRetry() = 0;
|
||||
virtual void recvReqRetry() = 0;
|
||||
|
||||
/**
|
||||
* Called by the slave port if sendTimingSnoopResp was called on this
|
||||
* master port (causing recvTimingSnoopResp to be called on the slave
|
||||
* port) and was unsuccesful.
|
||||
*/
|
||||
virtual void recvRetrySnoopResp()
|
||||
{
|
||||
panic("%s was not expecting a snoop retry\n", name());
|
||||
}
|
||||
|
||||
/**
|
||||
* Called to receive an address range change from the peer slave
|
||||
|
@ -356,7 +367,7 @@ class SlavePort : public BaseSlavePort
|
|||
* Attempt to send a timing response to the master port by calling
|
||||
* its corresponding receive function. If the send does not
|
||||
* succeed, as indicated by the return value, then the sender must
|
||||
* wait for a recvRetry at which point it can re-issue a
|
||||
* wait for a recvRespRetry at which point it can re-issue a
|
||||
* sendTimingResp.
|
||||
*
|
||||
* @param pkt Packet to send.
|
||||
|
@ -376,10 +387,15 @@ class SlavePort : public BaseSlavePort
|
|||
|
||||
/**
|
||||
* Send a retry to the master port that previously attempted a
|
||||
* sendTimingReq or sendTimingSnoopResp to this slave port and
|
||||
* failed.
|
||||
* sendTimingReq to this slave port and failed.
|
||||
*/
|
||||
void sendRetry();
|
||||
void sendRetryReq();
|
||||
|
||||
/**
|
||||
* Send a retry to the master port that previously attempted a
|
||||
* sendTimingSnoopResp to this slave port and failed.
|
||||
*/
|
||||
void sendRetrySnoopResp();
|
||||
|
||||
/**
|
||||
* Find out if the peer master port is snooping or not.
|
||||
|
@ -448,7 +464,7 @@ class SlavePort : public BaseSlavePort
|
|||
* slave port (causing recvTimingResp to be called on the master
|
||||
* port) and was unsuccesful.
|
||||
*/
|
||||
virtual void recvRetry() = 0;
|
||||
virtual void recvRespRetry() = 0;
|
||||
|
||||
};
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2012 ARM Limited
|
||||
* Copyright (c) 2012,2015 ARM Limited
|
||||
* All rights reserved.
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -61,12 +61,10 @@ class QueuedSlavePort : public SlavePort
|
|||
|
||||
protected:
|
||||
|
||||
/** Packet queue used to store outgoing requests and responses. */
|
||||
SlavePacketQueue &queue;
|
||||
/** Packet queue used to store outgoing responses. */
|
||||
RespPacketQueue &respQueue;
|
||||
|
||||
/** This function is notification that the device should attempt to send a
|
||||
* packet again. */
|
||||
virtual void recvRetry() { queue.retry(); }
|
||||
void recvRespRetry() { respQueue.retry(); }
|
||||
|
||||
public:
|
||||
|
||||
|
@ -78,8 +76,8 @@ class QueuedSlavePort : public SlavePort
|
|||
* QueuePort constructor.
|
||||
*/
|
||||
QueuedSlavePort(const std::string& name, MemObject* owner,
|
||||
SlavePacketQueue &queue, PortID id = InvalidPortID) :
|
||||
SlavePort(name, owner, id), queue(queue)
|
||||
RespPacketQueue &resp_queue, PortID id = InvalidPortID) :
|
||||
SlavePort(name, owner, id), respQueue(resp_queue)
|
||||
{ }
|
||||
|
||||
virtual ~QueuedSlavePort() { }
|
||||
|
@ -91,39 +89,53 @@ class QueuedSlavePort : public SlavePort
|
|||
* @param when Absolute time (in ticks) to send packet
|
||||
*/
|
||||
void schedTimingResp(PacketPtr pkt, Tick when)
|
||||
{ queue.schedSendTiming(pkt, when); }
|
||||
{ respQueue.schedSendTiming(pkt, when); }
|
||||
|
||||
/** Check the list of buffered packets against the supplied
|
||||
* functional request. */
|
||||
bool checkFunctional(PacketPtr pkt) { return queue.checkFunctional(pkt); }
|
||||
bool checkFunctional(PacketPtr pkt)
|
||||
{ return respQueue.checkFunctional(pkt); }
|
||||
|
||||
unsigned int drain(DrainManager *dm) { return queue.drain(dm); }
|
||||
unsigned int drain(DrainManager *dm) { return respQueue.drain(dm); }
|
||||
};
|
||||
|
||||
/**
|
||||
* The QueuedMasterPort combines two queues, a request queue and a
|
||||
* snoop response queue, that both share the same port. The flow
|
||||
* control for requests and snoop responses are completely
|
||||
* independent, and so each queue manages its own flow control
|
||||
* (retries).
|
||||
*/
|
||||
class QueuedMasterPort : public MasterPort
|
||||
{
|
||||
|
||||
protected:
|
||||
|
||||
/** Packet queue used to store outgoing requests and responses. */
|
||||
MasterPacketQueue &queue;
|
||||
/** Packet queue used to store outgoing requests. */
|
||||
ReqPacketQueue &reqQueue;
|
||||
|
||||
/** This function is notification that the device should attempt to send a
|
||||
* packet again. */
|
||||
virtual void recvRetry() { queue.retry(); }
|
||||
/** Packet queue used to store outgoing snoop responses. */
|
||||
SnoopRespPacketQueue &snoopRespQueue;
|
||||
|
||||
void recvReqRetry() { reqQueue.retry(); }
|
||||
|
||||
void recvRetrySnoopResp() { snoopRespQueue.retry(); }
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
* Create a QueuedPort with a given name, owner, and a supplied
|
||||
* implementation of a packet queue. The external definition of
|
||||
* the queue enables e.g. the cache to implement a specific queue
|
||||
* implementation of two packet queues. The external definition of
|
||||
* the queues enables e.g. the cache to implement a specific queue
|
||||
* behaviuor in a subclass, and provide the latter to the
|
||||
* QueuePort constructor.
|
||||
*/
|
||||
QueuedMasterPort(const std::string& name, MemObject* owner,
|
||||
MasterPacketQueue &queue, PortID id = InvalidPortID) :
|
||||
MasterPort(name, owner, id), queue(queue)
|
||||
ReqPacketQueue &req_queue,
|
||||
SnoopRespPacketQueue &snoop_resp_queue,
|
||||
PortID id = InvalidPortID) :
|
||||
MasterPort(name, owner, id), reqQueue(req_queue),
|
||||
snoopRespQueue(snoop_resp_queue)
|
||||
{ }
|
||||
|
||||
virtual ~QueuedMasterPort() { }
|
||||
|
@ -135,7 +147,7 @@ class QueuedMasterPort : public MasterPort
|
|||
* @param when Absolute time (in ticks) to send packet
|
||||
*/
|
||||
void schedTimingReq(PacketPtr pkt, Tick when)
|
||||
{ queue.schedSendTiming(pkt, when); }
|
||||
{ reqQueue.schedSendTiming(pkt, when); }
|
||||
|
||||
/**
|
||||
* Schedule the sending of a timing snoop response.
|
||||
|
@ -144,13 +156,18 @@ class QueuedMasterPort : public MasterPort
|
|||
* @param when Absolute time (in ticks) to send packet
|
||||
*/
|
||||
void schedTimingSnoopResp(PacketPtr pkt, Tick when)
|
||||
{ queue.schedSendTiming(pkt, when, true); }
|
||||
{ snoopRespQueue.schedSendTiming(pkt, when); }
|
||||
|
||||
/** Check the list of buffered packets against the supplied
|
||||
* functional request. */
|
||||
bool checkFunctional(PacketPtr pkt) { return queue.checkFunctional(pkt); }
|
||||
bool checkFunctional(PacketPtr pkt)
|
||||
{
|
||||
return reqQueue.checkFunctional(pkt) ||
|
||||
snoopRespQueue.checkFunctional(pkt);
|
||||
}
|
||||
|
||||
unsigned int drain(DrainManager *dm) { return queue.drain(dm); }
|
||||
unsigned int drain(DrainManager *dm)
|
||||
{ return reqQueue.drain(dm) + snoopRespQueue.drain(dm); }
|
||||
};
|
||||
|
||||
#endif // __MEM_QPORT_HH__
|
||||
|
|
|
@ -327,7 +327,9 @@ AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt)
|
|||
AbstractController::MemoryPort::MemoryPort(const std::string &_name,
|
||||
AbstractController *_controller,
|
||||
const std::string &_label)
|
||||
: QueuedMasterPort(_name, _controller, _queue),
|
||||
_queue(*_controller, *this, _label), controller(_controller)
|
||||
: QueuedMasterPort(_name, _controller, reqQueue, snoopRespQueue),
|
||||
reqQueue(*_controller, *this, _label),
|
||||
snoopRespQueue(*_controller, *this, _label),
|
||||
controller(_controller)
|
||||
{
|
||||
}
|
||||
|
|
|
@ -181,8 +181,9 @@ class AbstractController : public MemObject, public Consumer
|
|||
class MemoryPort : public QueuedMasterPort
|
||||
{
|
||||
private:
|
||||
// Packet queue used to store outgoing requests and responses.
|
||||
MasterPacketQueue _queue;
|
||||
// Packet queues used to store outgoing requests and snoop responses.
|
||||
ReqPacketQueue reqQueue;
|
||||
SnoopRespPacketQueue snoopRespQueue;
|
||||
|
||||
// Controller that operates this port.
|
||||
AbstractController *controller;
|
||||
|
|
|
@ -109,7 +109,7 @@ class RubyMemoryControl : public AbstractMemory, public Consumer
|
|||
// flow control for the responses being sent back
|
||||
class MemoryPort : public QueuedSlavePort
|
||||
{
|
||||
SlavePacketQueue queue;
|
||||
RespPacketQueue queue;
|
||||
RubyMemoryControl& memory;
|
||||
|
||||
public:
|
||||
|
|
|
@ -138,7 +138,7 @@ DMASequencer::ruby_hit_callback(PacketPtr pkt)
|
|||
retry = false;
|
||||
DPRINTF(RubyDma,"Sequencer may now be free. SendRetry to port %s\n",
|
||||
slave_port.name());
|
||||
slave_port.sendRetry();
|
||||
slave_port.sendRetryReq();
|
||||
}
|
||||
|
||||
testDrainComplete();
|
||||
|
|
|
@ -66,7 +66,7 @@ class DMASequencer : public MemObject
|
|||
class MemSlavePort : public QueuedSlavePort
|
||||
{
|
||||
private:
|
||||
SlavePacketQueue queue;
|
||||
RespPacketQueue queue;
|
||||
RubySystem* ruby_system;
|
||||
bool access_backing_store;
|
||||
|
||||
|
|
|
@ -136,7 +136,8 @@ RubyPort::getSlavePort(const std::string &if_name, PortID idx)
|
|||
|
||||
RubyPort::PioMasterPort::PioMasterPort(const std::string &_name,
|
||||
RubyPort *_port)
|
||||
: QueuedMasterPort(_name, _port, queue), queue(*_port, *this)
|
||||
: QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
|
||||
reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
|
||||
{
|
||||
DPRINTF(RubyPort, "Created master pioport on sequencer %s\n", _name);
|
||||
}
|
||||
|
@ -150,7 +151,8 @@ RubyPort::PioSlavePort::PioSlavePort(const std::string &_name,
|
|||
|
||||
RubyPort::MemMasterPort::MemMasterPort(const std::string &_name,
|
||||
RubyPort *_port)
|
||||
: QueuedMasterPort(_name, _port, queue), queue(*_port, *this)
|
||||
: QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
|
||||
reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
|
||||
{
|
||||
DPRINTF(RubyPort, "Created master memport on ruby sequencer %s\n", _name);
|
||||
}
|
||||
|
@ -374,7 +376,7 @@ RubyPort::ruby_hit_callback(PacketPtr pkt)
|
|||
DPRINTF(RubyPort,
|
||||
"Sequencer may now be free. SendRetry to port %s\n",
|
||||
(*i)->name());
|
||||
(*i)->sendRetry();
|
||||
(*i)->sendRetryReq();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -60,7 +60,8 @@ class RubyPort : public MemObject
|
|||
class MemMasterPort : public QueuedMasterPort
|
||||
{
|
||||
private:
|
||||
MasterPacketQueue queue;
|
||||
ReqPacketQueue reqQueue;
|
||||
SnoopRespPacketQueue snoopRespQueue;
|
||||
|
||||
public:
|
||||
MemMasterPort(const std::string &_name, RubyPort *_port);
|
||||
|
@ -73,7 +74,7 @@ class RubyPort : public MemObject
|
|||
class MemSlavePort : public QueuedSlavePort
|
||||
{
|
||||
private:
|
||||
SlavePacketQueue queue;
|
||||
RespPacketQueue queue;
|
||||
RubySystem* ruby_system;
|
||||
bool access_backing_store;
|
||||
|
||||
|
@ -101,7 +102,8 @@ class RubyPort : public MemObject
|
|||
class PioMasterPort : public QueuedMasterPort
|
||||
{
|
||||
private:
|
||||
MasterPacketQueue queue;
|
||||
ReqPacketQueue reqQueue;
|
||||
SnoopRespPacketQueue snoopRespQueue;
|
||||
|
||||
public:
|
||||
PioMasterPort(const std::string &_name, RubyPort *_port);
|
||||
|
@ -114,7 +116,7 @@ class RubyPort : public MemObject
|
|||
class PioSlavePort : public QueuedSlavePort
|
||||
{
|
||||
private:
|
||||
SlavePacketQueue queue;
|
||||
RespPacketQueue queue;
|
||||
|
||||
public:
|
||||
PioSlavePort(const std::string &_name, RubyPort *_port);
|
||||
|
|
|
@ -178,7 +178,7 @@ SimpleMemory::release()
|
|||
isBusy = false;
|
||||
if (retryReq) {
|
||||
retryReq = false;
|
||||
port.sendRetry();
|
||||
port.sendRetryReq();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -216,7 +216,7 @@ SimpleMemory::getLatency() const
|
|||
}
|
||||
|
||||
void
|
||||
SimpleMemory::recvRetry()
|
||||
SimpleMemory::recvRespRetry()
|
||||
{
|
||||
assert(retryResp);
|
||||
|
||||
|
@ -284,9 +284,9 @@ SimpleMemory::MemoryPort::recvTimingReq(PacketPtr pkt)
|
|||
}
|
||||
|
||||
void
|
||||
SimpleMemory::MemoryPort::recvRetry()
|
||||
SimpleMemory::MemoryPort::recvRespRetry()
|
||||
{
|
||||
memory.recvRetry();
|
||||
memory.recvRespRetry();
|
||||
}
|
||||
|
||||
SimpleMemory*
|
||||
|
|
|
@ -101,7 +101,7 @@ class SimpleMemory : public AbstractMemory
|
|||
|
||||
bool recvTimingReq(PacketPtr pkt);
|
||||
|
||||
void recvRetry();
|
||||
void recvRespRetry();
|
||||
|
||||
AddrRangeList getAddrRanges() const;
|
||||
|
||||
|
@ -205,7 +205,7 @@ class SimpleMemory : public AbstractMemory
|
|||
|
||||
bool recvTimingReq(PacketPtr pkt);
|
||||
|
||||
void recvRetry();
|
||||
void recvRespRetry();
|
||||
|
||||
};
|
||||
|
||||
|
|
|
@ -53,7 +53,7 @@ SimpleTimingPort::SimpleTimingPort(const std::string& _name,
|
|||
void
|
||||
SimpleTimingPort::recvFunctional(PacketPtr pkt)
|
||||
{
|
||||
if (!queue.checkFunctional(pkt)) {
|
||||
if (!respQueue.checkFunctional(pkt)) {
|
||||
// do an atomic access and throw away the returned latency
|
||||
recvAtomic(pkt);
|
||||
}
|
||||
|
|
|
@ -68,7 +68,7 @@ class SimpleTimingPort : public QueuedSlavePort
|
|||
* name used in the QueuedSlavePort. Access is provided through
|
||||
* the queue reference in the base class.
|
||||
*/
|
||||
SlavePacketQueue queueImpl;
|
||||
RespPacketQueue queueImpl;
|
||||
|
||||
protected:
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2011-2014 ARM Limited
|
||||
* Copyright (c) 2011-2015 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -271,7 +271,7 @@ BaseXBar::Layer<SrcType,DstType>::retryWaiting()
|
|||
|
||||
// tell the port to retry, which in some cases ends up calling the
|
||||
// layer again
|
||||
retryingPort->sendRetry();
|
||||
sendRetry(retryingPort);
|
||||
|
||||
// If the layer is still in the retry state, sendTiming wasn't
|
||||
// called in zero time (e.g. the cache does this), burn a cycle
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2011-2014 ARM Limited
|
||||
* Copyright (c) 2011-2015 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -174,6 +174,16 @@ class BaseXBar : public MemObject
|
|||
*/
|
||||
void regStats();
|
||||
|
||||
protected:
|
||||
|
||||
/**
|
||||
* Sending the actual retry, in a manner specific to the
|
||||
* individual layers. Note that for a MasterPort, there is
|
||||
* both a RequestLayer and a SnoopResponseLayer using the same
|
||||
* port, but using different functions for the flow control.
|
||||
*/
|
||||
virtual void sendRetry(SrcType* retry_port) = 0;
|
||||
|
||||
private:
|
||||
|
||||
/** The destination port this layer converges at. */
|
||||
|
@ -241,6 +251,64 @@ class BaseXBar : public MemObject
|
|||
|
||||
};
|
||||
|
||||
class ReqLayer : public Layer<SlavePort,MasterPort>
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* Create a request layer and give it a name.
|
||||
*
|
||||
* @param _port destination port the layer converges at
|
||||
* @param _xbar the crossbar this layer belongs to
|
||||
* @param _name the layer's name
|
||||
*/
|
||||
ReqLayer(MasterPort& _port, BaseXBar& _xbar, const std::string& _name) :
|
||||
Layer(_port, _xbar, _name) {}
|
||||
|
||||
protected:
|
||||
|
||||
void sendRetry(SlavePort* retry_port)
|
||||
{ retry_port->sendRetryReq(); }
|
||||
};
|
||||
|
||||
class RespLayer : public Layer<MasterPort,SlavePort>
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* Create a response layer and give it a name.
|
||||
*
|
||||
* @param _port destination port the layer converges at
|
||||
* @param _xbar the crossbar this layer belongs to
|
||||
* @param _name the layer's name
|
||||
*/
|
||||
RespLayer(SlavePort& _port, BaseXBar& _xbar, const std::string& _name) :
|
||||
Layer(_port, _xbar, _name) {}
|
||||
|
||||
protected:
|
||||
|
||||
void sendRetry(MasterPort* retry_port)
|
||||
{ retry_port->sendRetryResp(); }
|
||||
};
|
||||
|
||||
class SnoopRespLayer : public Layer<SlavePort,MasterPort>
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* Create a snoop response layer and give it a name.
|
||||
*
|
||||
* @param _port destination port the layer converges at
|
||||
* @param _xbar the crossbar this layer belongs to
|
||||
* @param _name the layer's name
|
||||
*/
|
||||
SnoopRespLayer(MasterPort& _port, BaseXBar& _xbar,
|
||||
const std::string& _name) :
|
||||
Layer(_port, _xbar, _name) {}
|
||||
|
||||
protected:
|
||||
|
||||
void sendRetry(SlavePort* retry_port)
|
||||
{ retry_port->sendRetrySnoopResp(); }
|
||||
};
|
||||
|
||||
/** cycles of overhead per transaction */
|
||||
const Cycles headerCycles;
|
||||
/** the width of the xbar in bytes */
|
||||
|
|
|
@ -99,7 +99,7 @@ class System : public MemObject
|
|||
{ }
|
||||
bool recvTimingResp(PacketPtr pkt)
|
||||
{ panic("SystemPort does not receive timing!\n"); return false; }
|
||||
void recvRetry()
|
||||
void recvReqRetry()
|
||||
{ panic("SystemPort does not expect retry!\n"); }
|
||||
};
|
||||
|
||||
|
|
Loading…
Reference in a new issue