Merge zizzer.eecs.umich.edu:/z/m5/Bitkeeper/newmem

into  zizzer.eecs.umich.edu:/z/stever/bk/newmem-py

--HG--
extra : convert_revision : 36640569d33c4410320b8444bb572f408bf5edde
Steve Reinhardt 2006-05-30 19:45:54 -04:00
commit f0c05de9f9
11 changed files with 148 additions and 118 deletions

View file

@@ -106,11 +106,10 @@ AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
     panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
 }
 
-Packet *
+void
 AtomicSimpleCPU::CpuPort::recvRetry()
 {
     panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
-    return NULL;
 }

View file

@@ -98,7 +98,7 @@ class AtomicSimpleCPU : public BaseSimpleCPU
 
         virtual void recvStatusChange(Status status);
 
-        virtual Packet *recvRetry();
+        virtual void recvRetry();
 
         virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                             AddrRangeList &snoop)

View file

@@ -419,17 +419,18 @@ TimingSimpleCPU::IcachePort::recvTiming(Packet *pkt)
     return true;
 }
 
-Packet *
+void
 TimingSimpleCPU::IcachePort::recvRetry()
 {
     // we shouldn't get a retry unless we have a packet that we're
     // waiting to transmit
     assert(cpu->ifetch_pkt != NULL);
     assert(cpu->_status == IcacheRetry);
-    cpu->_status = IcacheWaitResponse;
     Packet *tmp = cpu->ifetch_pkt;
-    cpu->ifetch_pkt = NULL;
-    return tmp;
+    if (sendTiming(tmp)) {
+        cpu->_status = IcacheWaitResponse;
+        cpu->ifetch_pkt = NULL;
+    }
 }
 
 void
@@ -459,17 +460,18 @@ TimingSimpleCPU::DcachePort::recvTiming(Packet *pkt)
     return true;
 }
 
-Packet *
+void
 TimingSimpleCPU::DcachePort::recvRetry()
 {
     // we shouldn't get a retry unless we have a packet that we're
     // waiting to transmit
     assert(cpu->dcache_pkt != NULL);
     assert(cpu->_status == DcacheRetry);
-    cpu->_status = DcacheWaitResponse;
     Packet *tmp = cpu->dcache_pkt;
-    cpu->dcache_pkt = NULL;
-    return tmp;
+    if (sendTiming(tmp)) {
+        cpu->_status = DcacheWaitResponse;
+        cpu->dcache_pkt = NULL;
+    }
 }

View file

@@ -100,7 +100,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
 
         virtual bool recvTiming(Packet *pkt);
 
-        virtual Packet *recvRetry();
+        virtual void recvRetry();
     };
 
     class DcachePort : public CpuPort
@@ -115,7 +115,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
 
         virtual bool recvTiming(Packet *pkt);
 
-        virtual Packet *recvRetry();
+        virtual void recvRetry();
     };
 
     IcachePort icachePort;

View file

@@ -26,6 +26,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include "base/trace.hh"
 #include "dev/io_device.hh"
 #include "sim/builder.hh"
@@ -55,12 +56,13 @@ PioPort::getDeviceAddressRanges(AddrRangeList &resp, AddrRangeList &snoop)
 }
 
-Packet *
+void
 PioPort::recvRetry()
 {
     Packet* pkt = transmitList.front();
-    transmitList.pop_front();
-    return pkt;
+    if (Port::sendTiming(pkt)) {
+        transmitList.pop_front();
+    }
 }
@@ -74,6 +76,7 @@ PioPort::SendEvent::process()
 }
 
+
 bool
 PioPort::recvTiming(Packet *pkt)
 {
@@ -116,16 +119,20 @@ DmaPort::recvTiming(Packet *pkt)
 {
     if (pkt->senderState) {
         DmaReqState *state;
+        DPRINTF(DMA, "Received response Packet %#x with senderState: %#x\n",
+                pkt, pkt->senderState);
         state = dynamic_cast<DmaReqState*>(pkt->senderState);
-        state->completionEvent->schedule(pkt->time - pkt->req->getTime());
+        assert(state);
+        state->completionEvent->process();
         delete pkt->req;
         delete pkt;
     } else {
+        DPRINTF(DMA, "Received response Packet %#x with no senderState\n", pkt);
         delete pkt->req;
         delete pkt;
     }
-    return Packet::Success;
+    return true;
 }
 
 DmaDevice::DmaDevice(Params *p)
@@ -133,20 +140,19 @@ DmaDevice::DmaDevice(Params *p)
 { }
 
-void
-DmaPort::SendEvent::process()
-{
-    if (port->Port::sendTiming(packet))
-        return;
-
-    port->transmitList.push_back(packet);
-}
-
-Packet *
+void
 DmaPort::recvRetry()
 {
     Packet* pkt = transmitList.front();
-    transmitList.pop_front();
-    return pkt;
+    DPRINTF(DMA, "Retry on Packet %#x with senderState: %#x\n",
+            pkt, pkt->senderState);
+    if (sendTiming(pkt)) {
+        DPRINTF(DMA, "-- Done\n");
+        transmitList.pop_front();
+        pendingCount--;
+        assert(pendingCount >= 0);
+    } else {
+        DPRINTF(DMA, "-- Failed, queued\n");
+    }
 }
@@ -192,13 +198,22 @@ DmaPort::sendDma(Packet *pkt)
     // switching actually work
     /* MemState state = device->platform->system->memState;
-    if (state == Timing) {
-        if (!sendTiming(pkt))
-            transmitList.push_back(&packet);
-    } else if (state == Atomic) {*/
+    if (state == Timing) { */
+    DPRINTF(DMA, "Attempting to send Packet %#x with senderState: %#x\n",
+            pkt, pkt->senderState);
+    if (!sendTiming(pkt)) {
+        transmitList.push_back(pkt);
+        DPRINTF(DMA, "-- Failed: queued\n");
+    } else {
+        DPRINTF(DMA, "-- Done\n");
+        pendingCount--;
+        assert(pendingCount >= 0);
+    }
+    /* } else if (state == Atomic) {
     sendAtomic(pkt);
     if (pkt->senderState) {
         DmaReqState *state = dynamic_cast<DmaReqState*>(pkt->senderState);
         assert(state);
         state->completionEvent->schedule(curTick + (pkt->time - pkt->req->getTime()) +1);
     }
     pendingCount--;
@@ -206,7 +221,7 @@ DmaPort::sendDma(Packet *pkt)
     delete pkt->req;
     delete pkt;
 
-/*    } else if (state == Functional) {
+    } else if (state == Functional) {
         sendFunctional(pkt);
         // Is this correct???
         completionEvent->schedule(pkt->req->responseTime - pkt->req->requestTime);

View file

@@ -105,8 +105,9 @@ class PioPort : public Port
     void sendTiming(Packet *pkt, Tick time)
     { new PioPort::SendEvent(this, pkt, time); }
 
-    /** This function pops the last element off the transmit list and sends it.*/
-    virtual Packet *recvRetry();
+    /** This function is notification that the device should attempt to send a
+     * packet again. */
+    virtual void recvRetry();
 
   public:
     PioPort(PioDevice *dev, Platform *p);
@@ -146,28 +147,11 @@ class DmaPort : public Port
     virtual void recvStatusChange(Status status)
     { ; }
 
-    virtual Packet *recvRetry() ;
+    virtual void recvRetry() ;
 
     virtual void getDeviceAddressRanges(AddrRangeList &resp, AddrRangeList &snoop)
     { resp.clear(); snoop.clear(); }
 
-    class SendEvent : public Event
-    {
-        DmaPort *port;
-        Packet *packet;
-
-        SendEvent(PioPort *p, Packet *pkt, Tick t)
-            : Event(&mainEventQueue), packet(pkt)
-        { schedule(curTick + t); }
-
-        virtual void process();
-
-        virtual const char *description()
-        { return "Future scheduled sendTiming event"; }
-
-        friend class DmaPort;
-    };
-
     void sendDma(Packet *pkt);
 
   public:
@@ -178,8 +162,6 @@ class DmaPort : public Port
 
     bool dmaPending() { return pendingCount > 0; }
 
-    friend class DmaPort::SendEvent;
-
 };
 
 /**

View file

@@ -90,19 +90,6 @@ Bridge::BridgePort::recvTiming(Packet *pkt)
     DPRINTF(BusBridge, "recvTiming: src %d dest %d addr 0x%x\n",
             pkt->getSrc(), pkt->getDest(), pkt->getAddr());
 
-    if (pkt->isResponse()) {
-        // This is a response for a request we forwarded earlier.  The
-        // corresponding PacketBuffer should be stored in the packet's
-        // senderState field.
-        PacketBuffer *buf = dynamic_cast<PacketBuffer*>(pkt->senderState);
-        assert(buf != NULL);
-        // set up new packet dest & senderState based on values saved
-        // from original request
-        buf->fixResponse(pkt);
-        DPRINTF(BusBridge, "  is response, new dest %d\n", pkt->getDest());
-        delete buf;
-    }
-
     return otherPort->queueForSendTiming(pkt);
 }
@@ -113,8 +100,25 @@ Bridge::BridgePort::queueForSendTiming(Packet *pkt)
     if (queueFull())
         return false;
 
+    if (pkt->isResponse()) {
+        // This is a response for a request we forwarded earlier.  The
+        // corresponding PacketBuffer should be stored in the packet's
+        // senderState field.
+        PacketBuffer *buf = dynamic_cast<PacketBuffer*>(pkt->senderState);
+        assert(buf != NULL);
+        // set up new packet dest & senderState based on values saved
+        // from original request
+        buf->fixResponse(pkt);
+        DPRINTF(BusBridge, "restoring sender state: %#X, from packet buffer: %#X\n",
+                pkt->senderState, buf);
+        DPRINTF(BusBridge, "  is response, new dest %d\n", pkt->getDest());
+        delete buf;
+    }
+
     Tick readyTime = curTick + delay;
     PacketBuffer *buf = new PacketBuffer(pkt, readyTime);
+    DPRINTF(BusBridge, "old sender state: %#X, new sender state: %#X\n",
+            buf->origSenderState, buf);
 
     // If we're about to put this packet at the head of the queue, we
     // need to schedule an event to do the transmit.  Otherwise there
@@ -126,43 +130,16 @@ Bridge::BridgePort::queueForSendTiming(Packet *pkt)
     sendQueue.push_back(buf);
 
-    // Did we just become blocked?  If yes, let other side know.
-    if (queueFull())
-        otherPort->sendStatusChange(Port::Blocked);
-
     return true;
 }
 
-void
-Bridge::BridgePort::finishSend(PacketBuffer *buf)
-{
-    if (buf->expectResponse) {
-        // Must wait for response.  We just need to count outstanding
-        // responses (in case we want to cap them); PacketBuffer
-        // pointer will be recovered on response.
-        ++outstandingResponses;
-        DPRINTF(BusBridge, "  successful: awaiting response (%d)\n",
-                outstandingResponses);
-    } else {
-        // no response expected... deallocate packet buffer now.
-        DPRINTF(BusBridge, "  successful: no response expected\n");
-        delete buf;
-    }
-
-    // If there are more packets to send, schedule event to try again.
-    if (!sendQueue.empty()) {
-        buf = sendQueue.front();
-        sendEvent.schedule(std::max(buf->ready, curTick + 1));
-    }
-}
-
 void
 Bridge::BridgePort::trySend()
 {
     assert(!sendQueue.empty());
 
+    bool was_full = queueFull();
+
     PacketBuffer *buf = sendQueue.front();
     assert(buf->ready <= curTick);
@@ -176,20 +153,41 @@ Bridge::BridgePort::trySend()
         // send successful
         sendQueue.pop_front();
         buf->pkt = NULL; // we no longer own packet, so it's not safe to look at it
-        finishSend(buf);
+
+        if (buf->expectResponse) {
+            // Must wait for response.  We just need to count outstanding
+            // responses (in case we want to cap them); PacketBuffer
+            // pointer will be recovered on response.
+            ++outstandingResponses;
+            DPRINTF(BusBridge, "  successful: awaiting response (%d)\n",
+                    outstandingResponses);
+        } else {
+            // no response expected... deallocate packet buffer now.
+            DPRINTF(BusBridge, "  successful: no response expected\n");
+            delete buf;
+        }
+
+        // If there are more packets to send, schedule event to try again.
+        if (!sendQueue.empty()) {
+            buf = sendQueue.front();
+            sendEvent.schedule(std::max(buf->ready, curTick + 1));
+        }
+
+        // Let things start sending again
+        if (was_full) {
+            DPRINTF(BusBridge, "Queue was full, sending retry\n");
+            otherPort->sendRetry();
+        }
+
     } else {
         DPRINTF(BusBridge, "  unsuccessful\n");
     }
 }
 
-Packet *
+void
 Bridge::BridgePort::recvRetry()
 {
-    PacketBuffer *buf = sendQueue.front();
-    Packet *pkt = buf->pkt;
-    finishSend(buf);
-    return pkt;
+    trySend();
 }
 
 /** Function called by the port when the bus is receiving a Atomic
@@ -223,9 +221,6 @@ Bridge::BridgePort::recvFunctional(Packet *pkt)
 void
 Bridge::BridgePort::recvStatusChange(Port::Status status)
 {
-    if (status == Port::Blocked || status == Port::Unblocked)
-        return;
-
     otherPort->sendStatusChange(status);
 }

View file

@@ -38,7 +38,6 @@
 #include <inttypes.h>
 #include <queue>
 
 #include "mem/mem_object.hh"
 #include "mem/packet.hh"
 #include "mem/port.hh"
@@ -77,7 +76,8 @@ class Bridge : public MemObject
               origSenderState(_pkt->senderState), origSrc(_pkt->getSrc()),
               expectResponse(_pkt->needsResponse())
         {
-            pkt->senderState = this;
+            if (!pkt->isResponse())
+                pkt->senderState = this;
         }
 
         void fixResponse(Packet *pkt)
@@ -146,7 +146,7 @@ class Bridge : public MemObject
 
         /** When receiving a retry request from the peer port,
             pass it to the bridge. */
-        virtual Packet* recvRetry();
+        virtual void recvRetry();
 
         /** When receiving a Atomic requestfrom the peer port,
             pass it to the bridge. */

View file

@@ -72,9 +72,35 @@ Bus::recvTiming(Packet *pkt)
         assert(dest != pkt->getSrc()); // catch infinite loops
         port = interfaces[dest];
     }
-    return port->sendTiming(pkt);
+    if (port->sendTiming(pkt)) {
+        // packet was successfully sent, just return true.
+        return true;
+    }
+
+    // packet not successfully sent
+    retryList.push_back(interfaces[pkt->getSrc()]);
+    return false;
+}
+
+void
+Bus::recvRetry(int id)
+{
+    // Go through all the elements on the list calling sendRetry on each
+    // This is not very efficient at all but it works. Ultimately we should end
+    // up with something that is more intelligent.
+    int initialSize = retryList.size();
+    int i;
+    Port *p;
+
+    for (i = 0; i < initialSize; i++) {
+        assert(retryList.size() > 0);
+        p = retryList.front();
+        retryList.pop_front();
+        p->sendRetry();
+    }
+}
 
 Port *
 Bus::findPort(Addr addr, int id)
 {

View file

@@ -67,6 +67,10 @@ class Bus : public MemObject
         transaction.*/
     void recvFunctional(Packet *pkt);
 
+    /** Timing function called by port when it is once again able to process
+     * requests. */
+    void recvRetry(int id);
+
     /** Function called by the port when the bus is recieving a status change.*/
     void recvStatusChange(Port::Status status, int id);
@@ -126,6 +130,11 @@ class Bus : public MemObject
         virtual void recvStatusChange(Status status)
         { bus->recvStatusChange(status, id); }
 
+        /** When reciving a retry from the peer port (at id),
+            pass it to the bus. */
+        virtual void recvRetry()
+        { bus->recvRetry(id); }
+
         // This should return all the 'owned' addresses that are
         // downstream from this bus, yes?  That is, the union of all
         // the 'owned' address ranges of all the other interfaces on
@@ -143,6 +152,10 @@ class Bus : public MemObject
         connected to this bus.*/
     std::vector<Port*> interfaces;
 
+    /** An array of pointers to ports that retry should be called on because the
+     * original send failed for whatever reason.*/
+    std::list<Port*> retryList;
+
   public:
 
     /** A function used to return the port associated with this bus object. */
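
The retryList bookkeeping above is easier to see in isolation. Below is a minimal compilable sketch (not from the repository): a failed sendTiming pushes the sender onto retryList, and a later retry drains the list with the same simple loop as Bus::recvRetry(int id). SketchBus and SketchPort are hypothetical stand-ins for m5's Bus and Port classes, reduced to just this mechanism.

// bus_retry_sketch.cc -- hypothetical illustration only, not commit code.
#include <cassert>
#include <list>
#include <vector>

struct SketchPort
{
    int id;
    bool busy;      // destination rejects timing sends while busy
    bool retried;   // records that this port received a sendRetry()

    bool sendTiming() { return !busy; }
    void sendRetry() { retried = true; }
};

struct SketchBus
{
    std::vector<SketchPort *> interfaces;
    std::list<SketchPort *> retryList;   // ports owed a retry

    bool recvTiming(int src, int dest)
    {
        if (interfaces[dest]->sendTiming())
            return true;
        // failed: remember the sender so we can wake it up later
        retryList.push_back(interfaces[src]);
        return false;
    }

    void recvRetry()
    {
        // Drain the list, calling sendRetry on each waiting port --
        // the same admittedly inefficient loop as Bus::recvRetry(int id).
        int initialSize = retryList.size();
        for (int i = 0; i < initialSize; i++) {
            assert(!retryList.empty());
            SketchPort *p = retryList.front();
            retryList.pop_front();
            p->sendRetry();
        }
    }
};

int main()
{
    SketchPort a = {0, false, false}, b = {1, true, false};
    SketchBus bus;
    bus.interfaces.push_back(&a);
    bus.interfaces.push_back(&b);

    assert(!bus.recvTiming(0, 1));   // b is busy; a lands on the retry list
    b.busy = false;
    bus.recvRetry();                 // b frees up; a gets its retry callback
    assert(a.retried);
    return 0;
}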

View file

@@ -92,11 +92,9 @@ class Port
 
     virtual ~Port() {};
 
     // mey be better to use subclasses & RTTI?
-    /** Holds the ports status.  Keeps track if it is blocked, or has
-        calculated a range change. */
+    /** Holds the ports status.  Currently just that a range recomputation needs
+     * to be done. */
     enum Status {
-        Blocked,
-        Unblocked,
         RangeChange
     };
@@ -140,7 +138,7 @@ class Port
         wait.  This shouldn't be valid for response paths (IO Devices).
         so it is set to panic if it isn't already defined.
     */
-    virtual Packet *recvRetry() { panic("??"); }
+    virtual void recvRetry() { panic("??"); }
 
     /** Called by a peer port in order to determine the block size of the
         device connected to this port.  It sometimes doesn't make sense for
@@ -165,7 +163,7 @@ class Port
         port receive function.
        @return This function returns if the send was succesful in it's
        recieve. If it was a failure, then the port will wait for a recvRetry
-       at which point it can issue a successful sendTiming.  This is used in
+       at which point it can possibly issue a successful sendTiming.  This is used in
        case a cache has a higher priority request come in while waiting for
        the bus to arbitrate.
    */
@@ -194,7 +192,7 @@ class Port
 
     /** When a timing access doesn't return a success, some time later the
         Retry will be sent.
    */
-    Packet *sendRetry() { return peer->recvRetry(); }
+    void sendRetry() { return peer->recvRetry(); }
 
     /** Called by the associated device if it wishes to find out the blocksize
        of the device on attached to the peer port.
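
For reference, here is a minimal, self-contained sketch (not part of this commit) of the port-side retry protocol the change introduces: recvRetry() now returns void, and the owner of the queued packet re-issues sendTiming() itself, dequeuing only if the send succeeds. SimplePort and the reduced Packet below are hypothetical stand-ins for m5's Port and Packet classes.

// port_retry_sketch.cc -- hypothetical illustration only, not commit code.
#include <cassert>
#include <cstdio>
#include <deque>

struct Packet { int id; };

struct SimplePort
{
    std::deque<Packet *> transmitList;  // packets waiting to be sent
    bool peerBusy;                      // models the peer rejecting sends

    SimplePort() : peerBusy(false) {}

    bool sendTiming(Packet *pkt)
    {
        if (peerBusy)
            return false;  // peer will issue a retry callback later
        std::printf("delivered packet %d\n", pkt->id);
        return true;
    }

    void send(Packet *pkt)
    {
        if (!sendTiming(pkt))
            transmitList.push_back(pkt);  // queue it and wait for a retry
    }

    // New-style retry callback: returns void; we re-send the queued packet
    // ourselves and pop it only if the send actually succeeded.  (The old
    // interface instead returned the Packet* for the peer to deliver.)
    void recvRetry()
    {
        assert(!transmitList.empty());
        Packet *pkt = transmitList.front();
        if (sendTiming(pkt))
            transmitList.pop_front();  // success: packet is no longer ours
        // on failure the packet simply stays queued for the next retry
    }
};

int main()
{
    SimplePort port;
    Packet p = { 1 };

    port.peerBusy = true;
    port.send(&p);  // send fails and the packet is queued
    assert(port.transmitList.size() == 1);

    port.peerBusy = false;
    port.recvRetry();  // peer signals readiness; the queued packet goes out
    assert(port.transmitList.empty());
    return 0;
}

This mirrors what PioPort::recvRetry(), DmaPort::recvRetry(), and the TimingSimpleCPU cache ports now do in the diffs above: on a failed resend the packet stays put, instead of being handed back to the peer through the return value.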