mem: Unify delayed packet deletion
This patch unifies how we deal with delayed packet deletion, where the receiving slave is responsible for deleting the packet, but the sending agent (e.g. a cache) still relies on the pointer until its call to sendTimingReq completes. Previously we used a mix of a deletion vector and a unique_ptr-based construct. With this patch, all slaves use the latter approach.
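As an illustration of the idiom the patch settles on, here is a minimal sketch (Packet and SlaveDevice below are simplified stand-ins, not the actual gem5 classes): a std::unique_ptr member parks the packet from the current call, and the reset() in a subsequent call frees it, by which point the sender has long since stopped using it.

```cpp
#include <memory>

struct Packet { /* payload elided */ };
typedef Packet* PacketPtr;

class SlaveDevice
{
    // Parks the packet from the previous recvTimingReq call. The
    // upstream sender may still read the packet until its call to
    // sendTimingReq returns, so we must not delete it immediately.
    std::unique_ptr<Packet> pendingDelete;

  public:
    bool recvTimingReq(PacketPtr pkt)
    {
        // ... consume the packet's contents here ...

        // reset() frees whatever packet the previous call parked and
        // takes ownership of the current one; this single line replaces
        // the old pendingDelete vector plus explicit delete loop.
        pendingDelete.reset(pkt);
        return true;
    }
};
```

The last parked packet is freed when the unique_ptr member is destroyed along with the device.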
Parent: 2cb5467e85
Commit: ac1368df50

12 changed files with 43 additions and 88 deletions

src/mem/cache/cache.cc (29 changes)
@@ -542,15 +542,6 @@ bool
 Cache::recvTimingReq(PacketPtr pkt)
 {
     DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print());
     //@todo Add back in MemDebug Calls
     // MemDebug::cacheAccess(pkt);

-    /// @todo temporary hack to deal with memory corruption issue until
-    /// 4-phase transactions are complete
-    for (int x = 0; x < pendingDelete.size(); x++)
-        delete pendingDelete[x];
-    pendingDelete.clear();
-
     assert(pkt->isRequest());
@@ -602,10 +593,9 @@ Cache::recvTimingReq(PacketPtr pkt)
             // main memory will delete the packet
         }
-        /// @todo nominally we should just delete the packet here,
-        /// however, until 4-phase stuff we can't because sending
-        /// cache is still relying on it.
-        pendingDelete.push_back(pkt);
+        // queue for deletion, as the sending cache is still relying
+        // on the packet
+        pendingDelete.reset(pkt);

         // no need to take any action in this particular cache as the
         // caches along the path to memory are allowed to keep lines
@@ -678,12 +668,11 @@ Cache::recvTimingReq(PacketPtr pkt)
             // by access(), that calls accessBlock() function.
             cpuSidePort->schedTimingResp(pkt, request_time);
         } else {
-            /// @todo nominally we should just delete the packet here,
-            /// however, until 4-phase stuff we can't because sending cache is
-            /// still relying on it. If the block is found in access(),
-            /// CleanEvict and Writeback messages will be deleted here as
-            /// well.
-            pendingDelete.push_back(pkt);
+            // queue the packet for deletion, as the sending cache is
+            // still relying on it; if the block is found in access(),
+            // CleanEvict and Writeback messages will be deleted
+            // here as well
+            pendingDelete.reset(pkt);
         }
     } else {
         // miss
@@ -754,7 +743,7 @@ Cache::recvTimingReq(PacketPtr pkt)
         // CleanEvicts corresponding to blocks which have outstanding
         // requests in MSHRs can be deleted here.
         if (pkt->cmd == MemCmd::CleanEvict) {
-            pendingDelete.push_back(pkt);
+            pendingDelete.reset(pkt);
         } else {
             DPRINTF(Cache, "%s coalescing MSHR for %s addr %#llx size %d\n",
                     __func__, pkt->cmdString(), pkt->getAddr(),
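
For readers less familiar with the idiom, here is a self-contained sketch of why a single reset() call can replace the old delete loop (illustrative code only, not part of the patch): each reset() frees the previously parked object before taking ownership of the new one, and the destructor frees the last one.

```cpp
#include <cstdio>
#include <memory>

struct Packet
{
    int id;
    explicit Packet(int i) : id(i) {}
    ~Packet() { std::printf("deleting packet %d\n", id); }
};

int main()
{
    std::unique_ptr<Packet> pendingDelete;
    pendingDelete.reset(new Packet(1)); // nothing freed yet
    pendingDelete.reset(new Packet(2)); // frees packet 1
    return 0;                           // frees packet 2 at scope exit
}
```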

src/mem/cache/cache.hh (7 changes)
@@ -195,11 +195,10 @@ class Cache : public BaseCache
     const bool prefetchOnAccess;

     /**
-     * @todo this is a temporary workaround until the 4-phase code is committed.
-     * upstream caches need this packet until true is returned, so hold it for
-     * deletion until a subsequent call
+     * Upstream caches need this packet until true is returned, so
+     * hold it for deletion until a subsequent call
      */
-    std::vector<PacketPtr> pendingDelete;
+    std::unique_ptr<Packet> pendingDelete;

     /**
      * Does all the processing necessary to perform the provided request.

src/mem/coherent_xbar.cc

@@ -140,12 +140,6 @@ CoherentXBar::init()
 bool
 CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
 {
-    // @todo temporary hack to deal with memory corruption issue until
-    // 4-phase transactions are complete
-    for (int x = 0; x < pendingDelete.size(); x++)
-        delete pendingDelete[x];
-    pendingDelete.clear();
-
     // determine the source port based on the id
     SlavePort *src_port = slavePorts[slave_port_id];
@@ -223,7 +217,10 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)

     // update the layer state and schedule an idle event
     reqLayers[master_port_id]->succeededTiming(packetFinishTime);
-    pendingDelete.push_back(pkt);
+
+    // queue the packet for deletion
+    pendingDelete.reset(pkt);

     return true;
 }

src/mem/coherent_xbar.hh

@@ -274,11 +274,10 @@ class CoherentXBar : public BaseXBar
     const Cycles snoopResponseLatency;

     /**
-     * @todo this is a temporary workaround until the 4-phase code is committed.
-     * upstream caches need this packet until true is returned, so hold it for
-     * deletion until a subsequent call
+     * Upstream caches need this packet until true is returned, so
+     * hold it for deletion until a subsequent call
      */
-    std::vector<PacketPtr> pendingDelete;
+    std::unique_ptr<Packet> pendingDelete;

     /** Function called by the port when the crossbar is recieving a Timing
         request packet.*/

src/mem/dram_ctrl.cc

@@ -586,12 +586,6 @@ DRAMCtrl::printQs() const {
 bool
 DRAMCtrl::recvTimingReq(PacketPtr pkt)
 {
-    /// @todo temporary hack to deal with memory corruption issues until
-    /// 4-phase transactions are complete
-    for (int x = 0; x < pendingDelete.size(); x++)
-        delete pendingDelete[x];
-    pendingDelete.clear();
-
     // This is where we enter from the outside world
     DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
             pkt->cmdString(), pkt->getAddr(), pkt->getSize());
@@ -600,7 +594,7 @@ DRAMCtrl::recvTimingReq(PacketPtr pkt)
     if (pkt->memInhibitAsserted() ||
         pkt->cmd == MemCmd::CleanEvict) {
         DPRINTF(DRAM, "Inhibited packet or clean evict -- Dropping it now\n");
-        pendingDelete.push_back(pkt);
+        pendingDelete.reset(pkt);
         return true;
     }
@@ -872,7 +866,7 @@ DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
     } else {
         // @todo the packet is going to be deleted, and the DRAMPacket
         // is still having a pointer to it
-        pendingDelete.push_back(pkt);
+        pendingDelete.reset(pkt);
     }

     DPRINTF(DRAM, "Done\n");

src/mem/dram_ctrl.hh

@@ -836,11 +836,11 @@ class DRAMCtrl : public AbstractMemory
     // timestamp offset
     uint64_t timeStampOffset;

-    /** @todo this is a temporary workaround until the 4-phase code is
-     * committed. upstream caches needs this packet until true is returned, so
-     * hold onto it for deletion until a subsequent call
+    /**
+     * Upstream caches need this packet until true is returned, so
+     * hold it for deletion until a subsequent call
      */
-    std::vector<PacketPtr> pendingDelete;
+    std::unique_ptr<Packet> pendingDelete;

     /**
      * This function increments the energy when called. If stats are

src/mem/dramsim2.cc

@@ -178,16 +178,10 @@ DRAMSim2::recvTimingReq(PacketPtr pkt)
     // we should never see a new request while in retry
     assert(!retryReq);

-    // @todo temporary hack to deal with memory corruption issues until
-    // 4-phase transactions are complete
-    for (int x = 0; x < pendingDelete.size(); x++)
-        delete pendingDelete[x];
-    pendingDelete.clear();
-
     if (pkt->memInhibitAsserted()) {
         // snooper will supply based on copy of packet
         // still target's responsibility to delete packet
-        pendingDelete.push_back(pkt);
+        pendingDelete.reset(pkt);
         return true;
     }
@@ -281,9 +275,8 @@ DRAMSim2::accessAndRespond(PacketPtr pkt)
         if (!retryResp && !sendResponseEvent.scheduled())
             schedule(sendResponseEvent, time);
     } else {
-        // @todo the packet is going to be deleted, and the DRAMPacket
-        // is still having a pointer to it
-        pendingDelete.push_back(pkt);
+        // queue the packet for deletion
+        pendingDelete.reset(pkt);
     }
 }

src/mem/dramsim2.hh

@@ -160,11 +160,11 @@ class DRAMSim2 : public AbstractMemory
      */
    EventWrapper<DRAMSim2, &DRAMSim2::tick> tickEvent;

-    /** @todo this is a temporary workaround until the 4-phase code is
-     * committed. upstream caches needs this packet until true is returned, so
-     * hold onto it for deletion until a subsequent call
+    /**
+     * Upstream caches need this packet until true is returned, so
+     * hold it for deletion until a subsequent call
      */
-    std::vector<PacketPtr> pendingDelete;
+    std::unique_ptr<Packet> pendingDelete;

   public:

src/mem/simple_mem.cc

@@ -97,16 +97,10 @@ SimpleMemory::recvFunctional(PacketPtr pkt)
 bool
 SimpleMemory::recvTimingReq(PacketPtr pkt)
 {
-    /// @todo temporary hack to deal with memory corruption issues until
-    /// 4-phase transactions are complete
-    for (int x = 0; x < pendingDelete.size(); x++)
-        delete pendingDelete[x];
-    pendingDelete.clear();
-
     if (pkt->memInhibitAsserted()) {
         // snooper will supply based on copy of packet
         // still target's responsibility to delete packet
-        pendingDelete.push_back(pkt);
+        pendingDelete.reset(pkt);
         return true;
     }
@@ -165,7 +159,7 @@ SimpleMemory::recvTimingReq(PacketPtr pkt)
         if (!retryResp && !dequeueEvent.scheduled())
             schedule(dequeueEvent, packetQueue.back().tick);
     } else {
-        pendingDelete.push_back(pkt);
+        pendingDelete.reset(pkt);
     }

     return true;

src/mem/simple_mem.hh

@@ -175,11 +175,11 @@ class SimpleMemory : public AbstractMemory
      */
    Tick getLatency() const;

-    /** @todo this is a temporary workaround until the 4-phase code is
-     * committed. upstream caches needs this packet until true is returned, so
-     * hold onto it for deletion until a subsequent call
+    /**
+     * Upstream caches need this packet until true is returned, so
+     * hold it for deletion until a subsequent call
      */
-    std::vector<PacketPtr> pendingDelete;
+    std::unique_ptr<Packet> pendingDelete;

   public:

src/mem/tport.cc

@@ -62,12 +62,6 @@ SimpleTimingPort::recvFunctional(PacketPtr pkt)
 bool
 SimpleTimingPort::recvTimingReq(PacketPtr pkt)
 {
-    /// @todo temporary hack to deal with memory corruption issue until
-    /// 4-phase transactions are complete. Remove me later
-    for (int x = 0; x < pendingDelete.size(); x++)
-        delete pendingDelete[x];
-    pendingDelete.clear();
-
     // the SimpleTimingPort should not be used anywhere where there is
     // a need to deal with inhibited packets
     if (pkt->memInhibitAsserted())
@@ -82,10 +76,8 @@ SimpleTimingPort::recvTimingReq(PacketPtr pkt)
         assert(pkt->isResponse());
         schedTimingResp(pkt, curTick() + latency);
     } else {
-        /// @todo nominally we should just delete the packet here.
-        /// Until 4-phase stuff we can't because the sending
-        /// cache is still relying on it
-        pendingDelete.push_back(pkt);
+        // queue the packet for deletion
+        pendingDelete.reset(pkt);
     }

     return true;

src/mem/tport.hh

@@ -81,12 +81,10 @@ class SimpleTimingPort : public QueuedSlavePort
     virtual Tick recvAtomic(PacketPtr pkt) = 0;

     /**
-     * @todo this is a temporary workaround until the 4-phase code is committed.
-     * upstream caches need this packet until true is returned, so hold it for
-     * deletion until a subsequent call
+     * Upstream caches need this packet until true is returned, so
+     * hold it for deletion until a subsequent call
      */
-    std::vector<PacketPtr> pendingDelete;
-
+    std::unique_ptr<Packet> pendingDelete;

   public: