mem: add local var in Cache::recvTimingResp()

The main loop in recvTimingResp() references target->pkt throughout.
Introduce a local variable tgt_pkt so that the affected lines stay
within the line-length limit.
This commit is contained in:
Steve Reinhardt 2015-02-11 10:48:52 -08:00
parent ee0b52404c
commit 89bb03a1a6

View file

@ -1136,6 +1136,7 @@ Cache<TagStore>::recvTimingResp(PacketPtr pkt)
while (mshr->hasTargets()) { while (mshr->hasTargets()) {
MSHR::Target *target = mshr->getTarget(); MSHR::Target *target = mshr->getTarget();
Packet *tgt_pkt = target->pkt;
switch (target->source) { switch (target->source) {
case MSHR::Target::FromCPU: case MSHR::Target::FromCPU:
@ -1145,12 +1146,12 @@ Cache<TagStore>::recvTimingResp(PacketPtr pkt)
completion_time = pkt->headerDelay; completion_time = pkt->headerDelay;
// Software prefetch handling for cache closest to core // Software prefetch handling for cache closest to core
if (target->pkt->cmd.isSWPrefetch() && isTopLevel) { if (tgt_pkt->cmd.isSWPrefetch() && isTopLevel) {
// a software prefetch would have already been ack'd immediately // a software prefetch would have already been ack'd immediately
// with dummy data so the core would be able to retire it. // with dummy data so the core would be able to retire it.
// this request completes right here, so we deallocate it. // this request completes right here, so we deallocate it.
delete target->pkt->req; delete tgt_pkt->req;
delete target->pkt; delete tgt_pkt;
break; // skip response break; // skip response
} }
@ -1160,24 +1161,24 @@ Cache<TagStore>::recvTimingResp(PacketPtr pkt)
// cannot actually be determined until examining the stored MSHR // cannot actually be determined until examining the stored MSHR
// state. We "catch up" with that logic here, which is duplicated // state. We "catch up" with that logic here, which is duplicated
// from above. // from above.
if (target->pkt->isWriteInvalidate() && isTopLevel) { if (tgt_pkt->isWriteInvalidate() && isTopLevel) {
assert(!is_error); assert(!is_error);
// NB: we use the original packet here and not the response! // NB: we use the original packet here and not the response!
mshr->handleFill(target->pkt, blk); mshr->handleFill(tgt_pkt, blk);
blk = handleFill(target->pkt, blk, writebacks); blk = handleFill(tgt_pkt, blk, writebacks);
assert(blk != NULL); assert(blk != NULL);
is_fill = true; is_fill = true;
} }
if (is_fill) { if (is_fill) {
satisfyCpuSideRequest(target->pkt, blk, satisfyCpuSideRequest(tgt_pkt, blk,
true, mshr->hasPostDowngrade()); true, mshr->hasPostDowngrade());
// How many bytes past the first request is this one // How many bytes past the first request is this one
int transfer_offset = int transfer_offset =
target->pkt->getOffset(blkSize) - initial_offset; tgt_pkt->getOffset(blkSize) - initial_offset;
if (transfer_offset < 0) { if (transfer_offset < 0) {
transfer_offset += blkSize; transfer_offset += blkSize;
} }
@ -1189,22 +1190,22 @@ Cache<TagStore>::recvTimingResp(PacketPtr pkt)
completion_time += clockEdge(responseLatency) + completion_time += clockEdge(responseLatency) +
(transfer_offset ? pkt->payloadDelay : 0); (transfer_offset ? pkt->payloadDelay : 0);
assert(!target->pkt->req->isUncacheable()); assert(!tgt_pkt->req->isUncacheable());
assert(target->pkt->req->masterId() < system->maxMasters()); assert(tgt_pkt->req->masterId() < system->maxMasters());
missLatency[target->pkt->cmdToIndex()][target->pkt->req->masterId()] += missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
completion_time - target->recvTime; completion_time - target->recvTime;
} else if (pkt->cmd == MemCmd::UpgradeFailResp) { } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
// failed StoreCond upgrade // failed StoreCond upgrade
assert(target->pkt->cmd == MemCmd::StoreCondReq || assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
target->pkt->cmd == MemCmd::StoreCondFailReq || tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
target->pkt->cmd == MemCmd::SCUpgradeFailReq); tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
// responseLatency is the latency of the return path // responseLatency is the latency of the return path
// from lower level caches/memory to an upper level cache or // from lower level caches/memory to an upper level cache or
// the core. // the core.
completion_time += clockEdge(responseLatency) + completion_time += clockEdge(responseLatency) +
pkt->payloadDelay; pkt->payloadDelay;
target->pkt->req->setExtraData(0); tgt_pkt->req->setExtraData(0);
} else { } else {
// not a cache fill, just forwarding response // not a cache fill, just forwarding response
// responseLatency is the latency of the return path // responseLatency is the latency of the return path
@ -1212,34 +1213,33 @@ Cache<TagStore>::recvTimingResp(PacketPtr pkt)
completion_time += clockEdge(responseLatency) + completion_time += clockEdge(responseLatency) +
pkt->payloadDelay; pkt->payloadDelay;
if (pkt->isRead() && !is_error) { if (pkt->isRead() && !is_error) {
target->pkt->setData(pkt->getConstPtr<uint8_t>()); tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
} }
} }
target->pkt->makeTimingResponse(); tgt_pkt->makeTimingResponse();
// if this packet is an error copy that to the new packet // if this packet is an error copy that to the new packet
if (is_error) if (is_error)
target->pkt->copyError(pkt); tgt_pkt->copyError(pkt);
if (target->pkt->cmd == MemCmd::ReadResp && if (tgt_pkt->cmd == MemCmd::ReadResp &&
(pkt->isInvalidate() || mshr->hasPostInvalidate())) { (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
// If intermediate cache got ReadRespWithInvalidate, // If intermediate cache got ReadRespWithInvalidate,
// propagate that. Response should not have // propagate that. Response should not have
// isInvalidate() set otherwise. // isInvalidate() set otherwise.
target->pkt->cmd = MemCmd::ReadRespWithInvalidate; tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
DPRINTF(Cache, "%s updated cmd to %s for addr %#llx\n", DPRINTF(Cache, "%s updated cmd to %s for addr %#llx\n",
__func__, target->pkt->cmdString(), __func__, tgt_pkt->cmdString(), tgt_pkt->getAddr());
target->pkt->getAddr());
} }
// Reset the bus additional time as it is now accounted for // Reset the bus additional time as it is now accounted for
target->pkt->headerDelay = target->pkt->payloadDelay = 0; tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
cpuSidePort->schedTimingResp(target->pkt, completion_time); cpuSidePort->schedTimingResp(tgt_pkt, completion_time);
break; break;
case MSHR::Target::FromPrefetcher: case MSHR::Target::FromPrefetcher:
assert(target->pkt->cmd == MemCmd::HardPFReq); assert(tgt_pkt->cmd == MemCmd::HardPFReq);
if (blk) if (blk)
blk->status |= BlkHWPrefetched; blk->status |= BlkHWPrefetched;
delete target->pkt->req; delete tgt_pkt->req;
delete target->pkt; delete tgt_pkt;
break; break;
case MSHR::Target::FromSnoop: case MSHR::Target::FromSnoop:
@ -1248,8 +1248,7 @@ Cache<TagStore>::recvTimingResp(PacketPtr pkt)
// response to snoop request // response to snoop request
DPRINTF(Cache, "processing deferred snoop...\n"); DPRINTF(Cache, "processing deferred snoop...\n");
assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate())); assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
handleSnoop(target->pkt, blk, true, true, handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
mshr->hasPostInvalidate());
break; break;
default: default: