Port: Extend the QueuedPort interface and use where appropriate

This patch extends the queued port interfaces with methods for
scheduling the transmission of a timing request/response. The methods
are named similarly to the corresponding sendTiming(Snoop)Req/Resp,
replacing the "send" with "sched". As the queues are currently
unbounded, the methods always succeed and hence do not return a value.

This functionality was previously provided in the subclasses by
calling PacketQueue::schedSendTiming with the appropriate
parameters. With this change, there is no need to introduce these
extra methods in the subclasses, and the use of the queued interface
is more uniform and explicit.
This commit is contained in:
Andreas Hansson 2012-08-22 11:39:56 -04:00
parent 70e99e0b91
commit e317d8b9ff
8 changed files with 42 additions and 49 deletions

View file

@ -51,7 +51,7 @@ X86ISA::IntDev::IntMasterPort::sendMessage(ApicList apics,
for (apicIt = apics.begin(); apicIt != apics.end(); apicIt++) { for (apicIt = apics.begin(); apicIt != apics.end(); apicIt++) {
PacketPtr pkt = buildIntRequest(*apicIt, message); PacketPtr pkt = buildIntRequest(*apicIt, message);
if (timing) { if (timing) {
queue.schedSendTiming(pkt, curTick() + latency); schedTimingReq(pkt, curTick() + latency);
// The target handles cleaning up the packet in timing mode. // The target handles cleaning up the packet in timing mode.
} else { } else {
// ignore the latency involved in the atomic transaction // ignore the latency involved in the atomic transaction

22
src/mem/cache/base.hh vendored
View file

@ -134,17 +134,6 @@ class BaseCache : public MemObject
queue.schedSendEvent(time); queue.schedSendEvent(time);
} }
/**
* Schedule the transmissions of a response packet at a given
* point in time.
*
* @param pkt response packet
* @param when time to send the response
*/
void respond(PacketPtr pkt, Tick time) {
queue.schedSendTiming(pkt, time, true);
}
protected: protected:
CacheMasterPort(const std::string &_name, BaseCache *_cache, CacheMasterPort(const std::string &_name, BaseCache *_cache,
@ -179,17 +168,6 @@ class BaseCache : public MemObject
/** Return to normal operation and accept new requests. */ /** Return to normal operation and accept new requests. */
void clearBlocked(); void clearBlocked();
/**
* Schedule the transmissions of a response packet at a given
* point in time.
*
* @param pkt response packet
* @param when time to send the response
*/
void respond(PacketPtr pkt, Tick time) {
queue.schedSendTiming(pkt, time);
}
protected: protected:
CacheSlavePort(const std::string &_name, BaseCache *_cache, CacheSlavePort(const std::string &_name, BaseCache *_cache,

View file

@ -407,7 +407,7 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
rec->restore(pkt, this); rec->restore(pkt, this);
delete rec; delete rec;
memSidePort->respond(pkt, time); memSidePort->schedTimingSnoopResp(pkt, time);
return true; return true;
} }
@ -500,7 +500,7 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
if (needsResponse) { if (needsResponse) {
pkt->makeTimingResponse(); pkt->makeTimingResponse();
cpuSidePort->respond(pkt, curTick()+lat); cpuSidePort->schedTimingResp(pkt, curTick()+lat);
} else { } else {
/// @todo nominally we should just delete the packet here, /// @todo nominally we should just delete the packet here,
/// however, until 4-phase stuff we can't because sending /// however, until 4-phase stuff we can't because sending
@ -933,7 +933,7 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
// isInvalidate() set otherwise. // isInvalidate() set otherwise.
target->pkt->cmd = MemCmd::ReadRespWithInvalidate; target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
} }
cpuSidePort->respond(target->pkt, completion_time); cpuSidePort->schedTimingResp(target->pkt, completion_time);
break; break;
case MSHR::Target::FromPrefetcher: case MSHR::Target::FromPrefetcher:
@ -1166,7 +1166,7 @@ doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
// invalidate it. // invalidate it.
pkt->cmd = MemCmd::ReadRespWithInvalidate; pkt->cmd = MemCmd::ReadRespWithInvalidate;
} }
memSidePort->respond(pkt, curTick() + hitLatency); memSidePort->schedTimingSnoopResp(pkt, curTick() + hitLatency);
} }
template<class TagStore> template<class TagStore>

View file

@ -109,6 +109,9 @@ PacketQueue::schedSendTiming(PacketPtr pkt, Tick when, bool send_as_snoop)
// we can still send a packet before the end of this tick // we can still send a packet before the end of this tick
assert(when >= curTick()); assert(when >= curTick());
// express snoops should never be queued
assert(!pkt->isExpressSnoop());
// nothing on the list, or earlier than current front element, // nothing on the list, or earlier than current front element,
// schedule an event // schedule an event
if (transmitList.empty() || when < transmitList.front().tick) { if (transmitList.empty() || when < transmitList.front().tick) {

View file

@ -84,6 +84,15 @@ class QueuedSlavePort : public SlavePort
virtual ~QueuedSlavePort() { } virtual ~QueuedSlavePort() { }
/**
* Schedule the sending of a timing response.
*
* @param pkt Packet to send
* @param when Absolute time (in ticks) to send packet
*/
void schedTimingResp(PacketPtr pkt, Tick when)
{ queue.schedSendTiming(pkt, when); }
/** Check the list of buffered packets against the supplied /** Check the list of buffered packets against the supplied
* functional request. */ * functional request. */
bool checkFunctional(PacketPtr pkt) { return queue.checkFunctional(pkt); } bool checkFunctional(PacketPtr pkt) { return queue.checkFunctional(pkt); }
@ -125,6 +134,24 @@ class QueuedMasterPort : public MasterPort
virtual ~QueuedMasterPort() { } virtual ~QueuedMasterPort() { }
/**
* Schedule the sending of a timing request.
*
* @param pkt Packet to send
* @param when Absolute time (in ticks) to send packet
*/
void schedTimingReq(PacketPtr pkt, Tick when)
{ queue.schedSendTiming(pkt, when); }
/**
* Schedule the sending of a timing snoop response.
*
* @param pkt Packet to send
* @param when Absolute time (in ticks) to send packet
*/
void schedTimingSnoopResp(PacketPtr pkt, Tick when)
{ queue.schedSendTiming(pkt, when, true); }
/** Check the list of buffered packets against the supplied /** Check the list of buffered packets against the supplied
* functional request. */ * functional request. */
bool checkFunctional(PacketPtr pkt) { return queue.checkFunctional(pkt); } bool checkFunctional(PacketPtr pkt) { return queue.checkFunctional(pkt); }

View file

@ -196,7 +196,10 @@ RubyPort::M5Port::recvTimingReq(PacketPtr pkt)
"Request for address 0x%#x is assumed to be a pio request\n", "Request for address 0x%#x is assumed to be a pio request\n",
pkt->getAddr()); pkt->getAddr());
return ruby_port->pio_port.sendNextCycle(pkt); // send next cycle
ruby_port->pio_port.schedTimingReq(pkt, curTick() +
g_eventQueue_ptr->getClock());
return true;
} }
assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <= assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <=
@ -647,30 +650,14 @@ RubyPort::M5Port::hitCallback(PacketPtr pkt)
// turn packet around to go back to requester if response expected // turn packet around to go back to requester if response expected
if (needsResponse) { if (needsResponse) {
DPRINTF(RubyPort, "Sending packet back over port\n"); DPRINTF(RubyPort, "Sending packet back over port\n");
sendNextCycle(pkt); // send next cycle
schedTimingResp(pkt, curTick() + g_eventQueue_ptr->getClock());
} else { } else {
delete pkt; delete pkt;
} }
DPRINTF(RubyPort, "Hit callback done!\n"); DPRINTF(RubyPort, "Hit callback done!\n");
} }
bool
RubyPort::M5Port::sendNextCycle(PacketPtr pkt, bool send_as_snoop)
{
//minimum latency, must be > 0
queue.schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()),
send_as_snoop);
return true;
}
bool
RubyPort::PioPort::sendNextCycle(PacketPtr pkt)
{
//minimum latency, must be > 0
queue.schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
return true;
}
AddrRangeList AddrRangeList
RubyPort::M5Port::getAddrRanges() const RubyPort::M5Port::getAddrRanges() const
{ {

View file

@ -71,7 +71,6 @@ class RubyPort : public MemObject
public: public:
M5Port(const std::string &_name, RubyPort *_port, M5Port(const std::string &_name, RubyPort *_port,
RubySystem*_system, bool _access_phys_mem); RubySystem*_system, bool _access_phys_mem);
bool sendNextCycle(PacketPtr pkt, bool send_as_snoop = false);
void hitCallback(PacketPtr pkt); void hitCallback(PacketPtr pkt);
void evictionCallback(const Address& address); void evictionCallback(const Address& address);
unsigned deviceBlockSize() const; unsigned deviceBlockSize() const;
@ -106,7 +105,6 @@ class RubyPort : public MemObject
public: public:
PioPort(const std::string &_name, RubyPort *_port); PioPort(const std::string &_name, RubyPort *_port);
bool sendNextCycle(PacketPtr pkt);
protected: protected:
virtual bool recvTimingResp(PacketPtr pkt); virtual bool recvTimingResp(PacketPtr pkt);

View file

@ -82,7 +82,7 @@ SimpleTimingPort::recvTimingReq(PacketPtr pkt)
// recvAtomic() should already have turned packet into // recvAtomic() should already have turned packet into
// atomic response // atomic response
assert(pkt->isResponse()); assert(pkt->isResponse());
queue.schedSendTiming(pkt, curTick() + latency); schedTimingResp(pkt, curTick() + latency);
} else { } else {
/// @todo nominally we should just delete the packet here. /// @todo nominally we should just delete the packet here.
/// Until 4-phase stuff we can't because the sending /// Until 4-phase stuff we can't because the sending