Port: Extend the QueuedPort interface and use where appropriate

This patch extends the queued port interfaces with methods for
scheduling the transmission of a timing request/response. The methods
are named similarly to the corresponding sendTiming(Snoop)Req/Resp,
replacing the "send" with "sched". As the queues are currently
unbounded, the methods always succeed and hence do not return a value.
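
As a rough illustration of the pattern (a standalone sketch with simplified
stand-in types, not the actual gem5 classes), the new methods simply forward
to the port's packet queue:

    // Standalone sketch: simplified stand-ins for Packet, Tick and
    // PacketQueue; only the shape of the queued-port wrappers is meant
    // to match gem5.
    #include <cstdint>
    #include <list>
    #include <utility>

    using Tick = uint64_t;
    struct Packet { };
    using PacketPtr = Packet *;

    struct PacketQueue {
        // (packet, deadline) pairs kept in submission order
        std::list<std::pair<PacketPtr, Tick>> transmitList;

        // stand-in for PacketQueue::schedSendTiming
        void schedSendTiming(PacketPtr pkt, Tick when, bool send_as_snoop = false)
        {
            (void)send_as_snoop; // the snoop flag is ignored in this sketch
            transmitList.emplace_back(pkt, when);
        }
    };

    class QueuedMasterPort {
      protected:
        PacketQueue &queue;
      public:
        explicit QueuedMasterPort(PacketQueue &q) : queue(q) { }

        // mirrors sendTimingReq, but schedules rather than sends; the
        // unbounded queue always accepts, so nothing is returned
        void schedTimingReq(PacketPtr pkt, Tick when)
        { queue.schedSendTiming(pkt, when); }

        // mirrors sendTimingSnoopResp
        void schedTimingSnoopResp(PacketPtr pkt, Tick when)
        { queue.schedSendTiming(pkt, when, true); }
    };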

This functionality was previously provided in the subclasses by
calling PacketQueue::schedSendTiming with the appropriate
parameters. With this change, there is no need to introduce these
extra methods in the subclasses, and the use of the queued interface
is more uniform and explicit.
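
For instance, the cache's slave port previously defined its own respond()
wrapper and callers went through it; with this change callers use
schedTimingResp() inherited from QueuedSlavePort (illustrative before/after,
condensed from the diff below):

    // before: per-subclass wrapper around the queue
    void respond(PacketPtr pkt, Tick time) {
        queue.schedSendTiming(pkt, time);
    }
    ...
    cpuSidePort->respond(pkt, curTick()+lat);

    // after: method inherited from QueuedSlavePort
    cpuSidePort->schedTimingResp(pkt, curTick()+lat);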
Andreas Hansson 2012-08-22 11:39:56 -04:00
parent 70e99e0b91
commit e317d8b9ff
8 changed files with 42 additions and 49 deletions


@@ -51,7 +51,7 @@ X86ISA::IntDev::IntMasterPort::sendMessage(ApicList apics,
for (apicIt = apics.begin(); apicIt != apics.end(); apicIt++) {
PacketPtr pkt = buildIntRequest(*apicIt, message);
if (timing) {
- queue.schedSendTiming(pkt, curTick() + latency);
+ schedTimingReq(pkt, curTick() + latency);
// The target handles cleaning up the packet in timing mode.
} else {
// ignore the latency involved in the atomic transaction

src/mem/cache/base.hh

@@ -134,17 +134,6 @@ class BaseCache : public MemObject
queue.schedSendEvent(time);
}
- /**
- * Schedule the transmissions of a response packet at a given
- * point in time.
- *
- * @param pkt response packet
- * @param when time to send the response
- */
- void respond(PacketPtr pkt, Tick time) {
- queue.schedSendTiming(pkt, time, true);
- }
protected:
CacheMasterPort(const std::string &_name, BaseCache *_cache,
@@ -179,17 +168,6 @@ class BaseCache : public MemObject
/** Return to normal operation and accept new requests. */
void clearBlocked();
- /**
- * Schedule the transmissions of a response packet at a given
- * point in time.
- *
- * @param pkt response packet
- * @param when time to send the response
- */
- void respond(PacketPtr pkt, Tick time) {
- queue.schedSendTiming(pkt, time);
- }
protected:
CacheSlavePort(const std::string &_name, BaseCache *_cache,


@@ -407,7 +407,7 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
rec->restore(pkt, this);
delete rec;
- memSidePort->respond(pkt, time);
+ memSidePort->schedTimingSnoopResp(pkt, time);
return true;
}
@@ -500,7 +500,7 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
if (needsResponse) {
pkt->makeTimingResponse();
- cpuSidePort->respond(pkt, curTick()+lat);
+ cpuSidePort->schedTimingResp(pkt, curTick()+lat);
} else {
/// @todo nominally we should just delete the packet here,
/// however, until 4-phase stuff we can't because sending
@@ -933,7 +933,7 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
// isInvalidate() set otherwise.
target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
}
- cpuSidePort->respond(target->pkt, completion_time);
+ cpuSidePort->schedTimingResp(target->pkt, completion_time);
break;
case MSHR::Target::FromPrefetcher:
@@ -1166,7 +1166,7 @@ doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
// invalidate it.
pkt->cmd = MemCmd::ReadRespWithInvalidate;
}
- memSidePort->respond(pkt, curTick() + hitLatency);
+ memSidePort->schedTimingSnoopResp(pkt, curTick() + hitLatency);
}
template<class TagStore>


@@ -109,6 +109,9 @@ PacketQueue::schedSendTiming(PacketPtr pkt, Tick when, bool send_as_snoop)
// we can still send a packet before the end of this tick
assert(when >= curTick());
+ // express snoops should never be queued
+ assert(!pkt->isExpressSnoop());
// nothing on the list, or earlier than current front element,
// schedule an event
if (transmitList.empty() || when < transmitList.front().tick) {


@@ -84,6 +84,15 @@ class QueuedSlavePort : public SlavePort
virtual ~QueuedSlavePort() { }
+ /**
+ * Schedule the sending of a timing response.
+ *
+ * @param pkt Packet to send
+ * @param when Absolute time (in ticks) to send packet
+ */
+ void schedTimingResp(PacketPtr pkt, Tick when)
+ { queue.schedSendTiming(pkt, when); }
/** Check the list of buffered packets against the supplied
* functional request. */
bool checkFunctional(PacketPtr pkt) { return queue.checkFunctional(pkt); }
@@ -125,6 +134,24 @@ class QueuedMasterPort : public MasterPort
virtual ~QueuedMasterPort() { }
+ /**
+ * Schedule the sending of a timing request.
+ *
+ * @param pkt Packet to send
+ * @param when Absolute time (in ticks) to send packet
+ */
+ void schedTimingReq(PacketPtr pkt, Tick when)
+ { queue.schedSendTiming(pkt, when); }
+ /**
+ * Schedule the sending of a timing snoop response.
+ *
+ * @param pkt Packet to send
+ * @param when Absolute time (in ticks) to send packet
+ */
+ void schedTimingSnoopResp(PacketPtr pkt, Tick when)
+ { queue.schedSendTiming(pkt, when, true); }
/** Check the list of buffered packets against the supplied
* functional request. */
bool checkFunctional(PacketPtr pkt) { return queue.checkFunctional(pkt); }


@@ -196,7 +196,10 @@ RubyPort::M5Port::recvTimingReq(PacketPtr pkt)
"Request for address 0x%#x is assumed to be a pio request\n",
pkt->getAddr());
- return ruby_port->pio_port.sendNextCycle(pkt);
+ // send next cycle
+ ruby_port->pio_port.schedTimingReq(pkt, curTick() +
+ g_eventQueue_ptr->getClock());
+ return true;
}
assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <=
@@ -647,30 +650,14 @@ RubyPort::M5Port::hitCallback(PacketPtr pkt)
// turn packet around to go back to requester if response expected
if (needsResponse) {
DPRINTF(RubyPort, "Sending packet back over port\n");
- sendNextCycle(pkt);
+ // send next cycle
+ schedTimingResp(pkt, curTick() + g_eventQueue_ptr->getClock());
} else {
delete pkt;
}
DPRINTF(RubyPort, "Hit callback done!\n");
}
- bool
- RubyPort::M5Port::sendNextCycle(PacketPtr pkt, bool send_as_snoop)
- {
- //minimum latency, must be > 0
- queue.schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()),
- send_as_snoop);
- return true;
- }
- bool
- RubyPort::PioPort::sendNextCycle(PacketPtr pkt)
- {
- //minimum latency, must be > 0
- queue.schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
- return true;
- }
AddrRangeList
RubyPort::M5Port::getAddrRanges() const
{


@@ -71,7 +71,6 @@ class RubyPort : public MemObject
public:
M5Port(const std::string &_name, RubyPort *_port,
RubySystem*_system, bool _access_phys_mem);
- bool sendNextCycle(PacketPtr pkt, bool send_as_snoop = false);
void hitCallback(PacketPtr pkt);
void evictionCallback(const Address& address);
unsigned deviceBlockSize() const;
@@ -106,7 +105,6 @@ class RubyPort : public MemObject
public:
PioPort(const std::string &_name, RubyPort *_port);
- bool sendNextCycle(PacketPtr pkt);
protected:
virtual bool recvTimingResp(PacketPtr pkt);


@@ -82,7 +82,7 @@ SimpleTimingPort::recvTimingReq(PacketPtr pkt)
// recvAtomic() should already have turned packet into
// atomic response
assert(pkt->isResponse());
- queue.schedSendTiming(pkt, curTick() + latency);
+ schedTimingResp(pkt, curTick() + latency);
} else {
/// @todo nominally we should just delete the packet here.
/// Until 4-phase stuff we can't because the sending