mem: Unify delayed packet deletion

This patch unifies how we deal with delayed packet deletion, where the
receiving slave is responsible for deleting the packet, but the
sending agent (e.g. a cache) is still relying on the pointer until the
call to sendTimingReq completes. Previously we used a mix of a
deletion vector and a construct using unique_ptr. With this patch we
ensure all slaves use the latter approach.
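
For readers unfamiliar with the idiom, here is a minimal sketch of the pattern
the patch converges on. The Slave class and request handling are hypothetical;
only the pendingDelete member and reset() call mirror the patch itself. The key
property is that unique_ptr::reset(pkt) frees the previously held packet (if
any) and then keeps pkt alive until the next call, which is exactly the
"delete on a subsequent request" behaviour the old deletion vector emulated by
hand:

    #include <memory>

    // Hypothetical stand-ins for gem5's Packet/PacketPtr.
    struct Packet { int id; };
    typedef Packet *PacketPtr;

    class Slave
    {
        // Holds at most one packet whose deletion has been delayed.
        std::unique_ptr<Packet> pendingDelete;

      public:
        bool recvTimingReq(PacketPtr pkt)
        {
            // ... consume the request without sending a response ...

            // The sender still dereferences pkt after we return, so we
            // must not delete it here. reset() deletes the packet held
            // from the previous call and takes ownership of this one,
            // keeping it valid until the next reset() or destruction.
            pendingDelete.reset(pkt);
            return true;
        }
    };

A single slot suffices because by the time the next request arrives, the
previous sender's call to sendTimingReq has long since returned; the
unique_ptr also frees the last held packet automatically when the slave is
destroyed, which the raw-pointer vector did not.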
Author: Andreas Hansson, 2015-11-06 03:26:21 -05:00
parent 2cb5467e85
commit ac1368df50
12 changed files with 43 additions and 88 deletions

src/mem/cache/cache.cc

@@ -542,15 +542,6 @@ bool
 Cache::recvTimingReq(PacketPtr pkt)
 {
     DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print());
 
-    //@todo Add back in MemDebug Calls
-    // MemDebug::cacheAccess(pkt);
-
-    /// @todo temporary hack to deal with memory corruption issue until
-    /// 4-phase transactions are complete
-    for (int x = 0; x < pendingDelete.size(); x++)
-        delete pendingDelete[x];
-    pendingDelete.clear();
-
     assert(pkt->isRequest());
@@ -602,10 +593,9 @@ Cache::recvTimingReq(PacketPtr pkt)
             // main memory will delete the packet
         }
 
-        /// @todo nominally we should just delete the packet here,
-        /// however, until 4-phase stuff we can't because sending
-        /// cache is still relying on it.
-        pendingDelete.push_back(pkt);
+        // queue for deletion, as the sending cache is still relying
+        // on the packet
+        pendingDelete.reset(pkt);
 
         // no need to take any action in this particular cache as the
         // caches along the path to memory are allowed to keep lines
@@ -678,12 +668,11 @@ Cache::recvTimingReq(PacketPtr pkt)
             // by access(), that calls accessBlock() function.
             cpuSidePort->schedTimingResp(pkt, request_time);
         } else {
-            /// @todo nominally we should just delete the packet here,
-            /// however, until 4-phase stuff we can't because sending cache is
-            /// still relying on it. If the block is found in access(),
-            /// CleanEvict and Writeback messages will be deleted here as
-            /// well.
-            pendingDelete.push_back(pkt);
+            // queue the packet for deletion, as the sending cache is
+            // still relying on it; if the block is found in access(),
+            // CleanEvict and Writeback messages will be deleted
+            // here as well
+            pendingDelete.reset(pkt);
         }
     } else {
         // miss
@@ -754,7 +743,7 @@ Cache::recvTimingReq(PacketPtr pkt)
         // CleanEvicts corresponding to blocks which have outstanding
         // requests in MSHRs can be deleted here.
         if (pkt->cmd == MemCmd::CleanEvict) {
-            pendingDelete.push_back(pkt);
+            pendingDelete.reset(pkt);
         } else {
             DPRINTF(Cache, "%s coalescing MSHR for %s addr %#llx size %d\n",
                     __func__, pkt->cmdString(), pkt->getAddr(),
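
To make the ownership contract these call sites preserve concrete, here is a
hedged sender-side sketch. Sender, trySend, and retryPkt are illustrative
names, and the stub types are simplified; only sendTimingReq corresponds to
the real gem5 port interface. It shows why the receiver may not delete the
packet before the call returns: on a rejected request the sender keeps using
the pointer.

    // Illustrative stubs; gem5's real MasterPort/Packet are richer.
    struct Packet { };
    typedef Packet *PacketPtr;
    struct MasterPort { bool sendTimingReq(PacketPtr pkt); };

    class Sender
    {
        MasterPort *port;
        PacketPtr retryPkt = nullptr; // request waiting for a retry

      public:
        explicit Sender(MasterPort *p) : port(p) { }

        void trySend(PacketPtr pkt)
        {
            if (!port->sendTimingReq(pkt)) {
                // Rejected: we keep ownership and will resend later,
                // so pkt must still be a live object here. A receiver
                // that deleted pkt inside recvTimingReq would leave us
                // with a dangling pointer at this very line.
                retryPkt = pkt;
            }
            // Accepted: the receiver (eventually) deletes pkt; we only
            // relied on the pointer while sendTimingReq was on the stack.
        }
    };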

src/mem/cache/cache.hh

@@ -195,11 +195,10 @@ class Cache : public BaseCache
     const bool prefetchOnAccess;
 
     /**
-     * @todo this is a temporary workaround until the 4-phase code is committed.
-     * upstream caches need this packet until true is returned, so hold it for
-     * deletion until a subsequent call
+     * Upstream caches need this packet until true is returned, so
+     * hold it for deletion until a subsequent call
      */
-    std::vector<PacketPtr> pendingDelete;
+    std::unique_ptr<Packet> pendingDelete;
 
     /**
      * Does all the processing necessary to perform the provided request.

src/mem/coherent_xbar.cc

@@ -140,12 +140,6 @@ CoherentXBar::init()
 bool
 CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
 {
-    // @todo temporary hack to deal with memory corruption issue until
-    // 4-phase transactions are complete
-    for (int x = 0; x < pendingDelete.size(); x++)
-        delete pendingDelete[x];
-    pendingDelete.clear();
-
     // determine the source port based on the id
     SlavePort *src_port = slavePorts[slave_port_id];
@@ -223,7 +217,10 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
     // update the layer state and schedule an idle event
     reqLayers[master_port_id]->succeededTiming(packetFinishTime);
 
-    pendingDelete.push_back(pkt);
+    // queue the packet for deletion
+    pendingDelete.reset(pkt);
 
     return true;
 }

src/mem/coherent_xbar.hh

@@ -274,11 +274,10 @@ class CoherentXBar : public BaseXBar
     const Cycles snoopResponseLatency;
 
     /**
-     * @todo this is a temporary workaround until the 4-phase code is committed.
-     * upstream caches need this packet until true is returned, so hold it for
-     * deletion until a subsequent call
+     * Upstream caches need this packet until true is returned, so
+     * hold it for deletion until a subsequent call
      */
-    std::vector<PacketPtr> pendingDelete;
+    std::unique_ptr<Packet> pendingDelete;
 
     /** Function called by the port when the crossbar is recieving a Timing
         request packet.*/

src/mem/dram_ctrl.cc

@@ -586,12 +586,6 @@ DRAMCtrl::printQs() const {
 bool
 DRAMCtrl::recvTimingReq(PacketPtr pkt)
 {
-    /// @todo temporary hack to deal with memory corruption issues until
-    /// 4-phase transactions are complete
-    for (int x = 0; x < pendingDelete.size(); x++)
-        delete pendingDelete[x];
-    pendingDelete.clear();
-
     // This is where we enter from the outside world
     DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
             pkt->cmdString(), pkt->getAddr(), pkt->getSize());
@@ -600,7 +594,7 @@ DRAMCtrl::recvTimingReq(PacketPtr pkt)
     if (pkt->memInhibitAsserted() ||
         pkt->cmd == MemCmd::CleanEvict) {
         DPRINTF(DRAM, "Inhibited packet or clean evict -- Dropping it now\n");
-        pendingDelete.push_back(pkt);
+        pendingDelete.reset(pkt);
         return true;
     }
@@ -872,7 +866,7 @@ DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
     } else {
         // @todo the packet is going to be deleted, and the DRAMPacket
         // is still having a pointer to it
-        pendingDelete.push_back(pkt);
+        pendingDelete.reset(pkt);
     }
 
     DPRINTF(DRAM, "Done\n");

src/mem/dram_ctrl.hh

@@ -836,11 +836,11 @@ class DRAMCtrl : public AbstractMemory
     // timestamp offset
     uint64_t timeStampOffset;
 
-    /** @todo this is a temporary workaround until the 4-phase code is
-     * committed. upstream caches needs this packet until true is returned, so
-     * hold onto it for deletion until a subsequent call
+    /**
+     * Upstream caches need this packet until true is returned, so
+     * hold it for deletion until a subsequent call
      */
-    std::vector<PacketPtr> pendingDelete;
+    std::unique_ptr<Packet> pendingDelete;
 
     /**
      * This function increments the energy when called. If stats are

src/mem/dramsim2.cc

@@ -178,16 +178,10 @@ DRAMSim2::recvTimingReq(PacketPtr pkt)
     // we should never see a new request while in retry
     assert(!retryReq);
 
-    // @todo temporary hack to deal with memory corruption issues until
-    // 4-phase transactions are complete
-    for (int x = 0; x < pendingDelete.size(); x++)
-        delete pendingDelete[x];
-    pendingDelete.clear();
-
     if (pkt->memInhibitAsserted()) {
         // snooper will supply based on copy of packet
         // still target's responsibility to delete packet
-        pendingDelete.push_back(pkt);
+        pendingDelete.reset(pkt);
         return true;
     }
@@ -281,9 +275,8 @@ DRAMSim2::accessAndRespond(PacketPtr pkt)
         if (!retryResp && !sendResponseEvent.scheduled())
             schedule(sendResponseEvent, time);
     } else {
-        // @todo the packet is going to be deleted, and the DRAMPacket
-        // is still having a pointer to it
-        pendingDelete.push_back(pkt);
+        // queue the packet for deletion
+        pendingDelete.reset(pkt);
     }
 }

src/mem/dramsim2.hh

@@ -160,11 +160,11 @@ class DRAMSim2 : public AbstractMemory
      */
     EventWrapper<DRAMSim2, &DRAMSim2::tick> tickEvent;
 
-    /** @todo this is a temporary workaround until the 4-phase code is
-     * committed. upstream caches needs this packet until true is returned, so
-     * hold onto it for deletion until a subsequent call
+    /**
+     * Upstream caches need this packet until true is returned, so
+     * hold it for deletion until a subsequent call
      */
-    std::vector<PacketPtr> pendingDelete;
+    std::unique_ptr<Packet> pendingDelete;
 
   public:

src/mem/simple_mem.cc

@@ -97,16 +97,10 @@ SimpleMemory::recvFunctional(PacketPtr pkt)
 bool
 SimpleMemory::recvTimingReq(PacketPtr pkt)
 {
-    /// @todo temporary hack to deal with memory corruption issues until
-    /// 4-phase transactions are complete
-    for (int x = 0; x < pendingDelete.size(); x++)
-        delete pendingDelete[x];
-    pendingDelete.clear();
-
     if (pkt->memInhibitAsserted()) {
         // snooper will supply based on copy of packet
         // still target's responsibility to delete packet
-        pendingDelete.push_back(pkt);
+        pendingDelete.reset(pkt);
         return true;
     }
@@ -165,7 +159,7 @@ SimpleMemory::recvTimingReq(PacketPtr pkt)
         if (!retryResp && !dequeueEvent.scheduled())
             schedule(dequeueEvent, packetQueue.back().tick);
     } else {
-        pendingDelete.push_back(pkt);
+        pendingDelete.reset(pkt);
     }
 
     return true;

src/mem/simple_mem.hh

@@ -175,11 +175,11 @@ class SimpleMemory : public AbstractMemory
      */
     Tick getLatency() const;
 
-    /** @todo this is a temporary workaround until the 4-phase code is
-     * committed. upstream caches needs this packet until true is returned, so
-     * hold onto it for deletion until a subsequent call
+    /**
+     * Upstream caches need this packet until true is returned, so
+     * hold it for deletion until a subsequent call
      */
-    std::vector<PacketPtr> pendingDelete;
+    std::unique_ptr<Packet> pendingDelete;
 
   public:

src/mem/tport.cc

@@ -62,12 +62,6 @@ SimpleTimingPort::recvFunctional(PacketPtr pkt)
 bool
 SimpleTimingPort::recvTimingReq(PacketPtr pkt)
 {
-    /// @todo temporary hack to deal with memory corruption issue until
-    /// 4-phase transactions are complete. Remove me later
-    for (int x = 0; x < pendingDelete.size(); x++)
-        delete pendingDelete[x];
-    pendingDelete.clear();
-
     // the SimpleTimingPort should not be used anywhere where there is
     // a need to deal with inhibited packets
     if (pkt->memInhibitAsserted())
@@ -82,10 +76,8 @@ SimpleTimingPort::recvTimingReq(PacketPtr pkt)
         assert(pkt->isResponse());
         schedTimingResp(pkt, curTick() + latency);
     } else {
-        /// @todo nominally we should just delete the packet here.
-        /// Until 4-phase stuff we can't because the sending
-        /// cache is still relying on it
-        pendingDelete.push_back(pkt);
+        // queue the packet for deletion
+        pendingDelete.reset(pkt);
     }
 
     return true;

src/mem/tport.hh

@@ -81,12 +81,10 @@ class SimpleTimingPort : public QueuedSlavePort
     virtual Tick recvAtomic(PacketPtr pkt) = 0;
 
     /**
-     * @todo this is a temporary workaround until the 4-phase code is committed.
-     * upstream caches need this packet until true is returned, so hold it for
-     * deletion until a subsequent call
+     * Upstream caches need this packet until true is returned, so
+     * hold it for deletion until a subsequent call
      */
-    std::vector<PacketPtr> pendingDelete;
+    std::unique_ptr<Packet> pendingDelete;
 
   public: