add ugliness to fix DMAs
src/dev/io_device.cc: extra printing and assertions src/mem/bridge.hh: deal with packets only satisfying part of a request by making many requests src/mem/cache/cache_impl.hh: make the cache try to satisfy a functional request from the cache above it before checking itself --HG-- extra : convert_revision : 1df52ab61d7967e14cc377c560495430a6af266a
This commit is contained in:
parent
af26532bbd
commit
ea4e6f2e3d
3 changed files with 31 additions and 11 deletions
|
@ -218,6 +218,9 @@ DmaPort::dmaAction(Packet::Command cmd, Addr addr, int size, Event *event,
|
|||
|
||||
DmaReqState *reqState = new DmaReqState(event, this, size);
|
||||
|
||||
|
||||
DPRINTF(DMA, "Starting DMA for addr: %#x size: %d sched: %d\n", addr, size,
|
||||
event->scheduled());
|
||||
for (ChunkGenerator gen(addr, size, peerBlockSize());
|
||||
!gen.done(); gen.next()) {
|
||||
Request *req = new Request(gen.addr(), gen.size(), 0);
|
||||
|
@ -231,6 +234,8 @@ DmaPort::dmaAction(Packet::Command cmd, Addr addr, int size, Event *event,
|
|||
|
||||
assert(pendingCount >= 0);
|
||||
pendingCount++;
|
||||
DPRINTF(DMA, "--Queuing DMA for addr: %#x size: %d\n", gen.addr(),
|
||||
gen.size());
|
||||
queueDma(pkt);
|
||||
}
|
||||
|
||||
|
@ -281,19 +286,28 @@ DmaPort::sendDma()
|
|||
|
||||
if (transmitList.size() && backoffTime && !inRetry &&
|
||||
!backoffEvent.scheduled()) {
|
||||
DPRINTF(DMA, "-- Scheduling backoff timer for %d\n",
|
||||
backoffTime+curTick);
|
||||
backoffEvent.schedule(backoffTime+curTick);
|
||||
}
|
||||
} else if (state == System::Atomic) {
|
||||
transmitList.pop_front();
|
||||
|
||||
Tick lat;
|
||||
DPRINTF(DMA, "--Sending DMA for addr: %#x size: %d\n",
|
||||
pkt->req->getPaddr(), pkt->req->getSize());
|
||||
lat = sendAtomic(pkt);
|
||||
assert(pkt->senderState);
|
||||
DmaReqState *state = dynamic_cast<DmaReqState*>(pkt->senderState);
|
||||
assert(state);
|
||||
|
||||
state->numBytes += pkt->req->getSize();
|
||||
|
||||
DPRINTF(DMA, "--Received response for DMA for addr: %#x size: %d nb: %d, tot: %d sched %d\n",
|
||||
pkt->req->getPaddr(), pkt->req->getSize(), state->numBytes,
|
||||
state->totBytes, state->completionEvent->scheduled());
|
||||
|
||||
if (state->totBytes == state->numBytes) {
|
||||
assert(!state->completionEvent->scheduled());
|
||||
state->completionEvent->schedule(curTick + lat);
|
||||
delete state;
|
||||
delete pkt->req;
|
||||
|
|
|
@ -108,18 +108,24 @@ class Bridge : public MemObject
|
|||
assert(!partialWriteFixed);
|
||||
assert(expectResponse);
|
||||
|
||||
int pbs = port->peerBlockSize();
|
||||
Addr pbs = port->peerBlockSize();
|
||||
Addr blockAddr = pkt->getAddr() & ~(pbs-1);
|
||||
partialWriteFixed = true;
|
||||
PacketDataPtr data;
|
||||
|
||||
data = new uint8_t[pbs];
|
||||
PacketPtr funcPkt = new Packet(pkt->req, MemCmd::ReadReq,
|
||||
Packet::Broadcast, pbs);
|
||||
|
||||
funcPkt->dataStatic(data);
|
||||
port->sendFunctional(funcPkt);
|
||||
assert(funcPkt->result == Packet::Success);
|
||||
RequestPtr funcReq = new Request(blockAddr, 4, 0);
|
||||
PacketPtr funcPkt = new Packet(funcReq, MemCmd::ReadReq,
|
||||
Packet::Broadcast);
|
||||
for (int x = 0; x < pbs; x+=4) {
|
||||
funcReq->setPhys(blockAddr + x, 4, 0);
|
||||
funcPkt->reinitFromRequest();
|
||||
funcPkt->dataStatic(data + x);
|
||||
port->sendFunctional(funcPkt);
|
||||
assert(funcPkt->result == Packet::Success);
|
||||
}
|
||||
delete funcPkt;
|
||||
delete funcReq;
|
||||
|
||||
oldPkt = pkt;
|
||||
memcpy(data + oldPkt->getOffset(pbs), pkt->getPtr<uint8_t>(),
|
||||
|
|
6
src/mem/cache/cache_impl.hh
vendored
6
src/mem/cache/cache_impl.hh
vendored
|
@ -1290,9 +1290,9 @@ template<class TagStore, class Coherence>
|
|||
void
|
||||
Cache<TagStore,Coherence>::MemSidePort::recvFunctional(PacketPtr pkt)
|
||||
{
|
||||
if (checkFunctional(pkt)) {
|
||||
myCache()->probe(pkt, false, cache->cpuSidePort);
|
||||
}
|
||||
myCache()->probe(pkt, false, cache->cpuSidePort);
|
||||
if (pkt->result != Packet::Success)
|
||||
checkFunctional(pkt);
|
||||
}
|
||||
|
||||
|
||||
|
|
Loading…
Reference in a new issue