More fixes for functional accesses. This now causes the writeback memory leak to crash all configs; working on that now.

src/mem/cache/base_cache.cc:
    Keep a list of the responders so we can search them on functional accesses.

src/mem/cache/base_cache.hh:
    Properly put things on a list for responses so we can search the list.
    Also, be sure to check the outgoing port's lists on a functional access
    (factor some common code out there).

src/mem/cache/cache_impl.hh:
    Properly return when the first read hits on a functional access.
    Make sure to check the other port's list of packets before forwarding it out.

--HG--
extra : convert_revision : 1d21cb55ff29c15716617efc48441329707c088a
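The checkFunctional() factoring described above boils down to: before forwarding a functional (debug) access to the next level, scan the packets the port is still holding, both the responses being drained and the responses queued on the new transmitList, patch the probe from any overlapping packet, and stop as soon as the probe is satisfied. A simplified standalone sketch of that pattern, for a read probe only (the Pkt/Port types and the fixUp() helper are hypothetical stand-ins for gem5's PacketPtr, CachePort, and fixPacket()):

    #include <algorithm>
    #include <cstdint>
    #include <cstring>
    #include <list>
    #include <utility>
    #include <vector>

    // Hypothetical stand-in for a gem5 packet: an address range plus payload.
    struct Pkt {
        uint64_t addr;
        std::vector<uint8_t> data;
        bool satisfied = false;       // read probe fully serviced from a buffer
    };

    // Patch a read probe from a buffered (newer) packet if the two overlap.
    // Returns true while the probe still needs to look further ("not done").
    bool fixUp(Pkt &probe, const Pkt &buffered)
    {
        uint64_t lo = std::max(probe.addr, buffered.addr);
        uint64_t hi = std::min(probe.addr + (uint64_t)probe.data.size(),
                               buffered.addr + (uint64_t)buffered.data.size());
        if (lo < hi) {
            std::memcpy(&probe.data[lo - probe.addr],
                        &buffered.data[lo - buffered.addr], hi - lo);
            if (lo == probe.addr && hi == probe.addr + probe.data.size())
                probe.satisfied = true;   // buffer covered the whole request
        }
        return !probe.satisfied;
    }

    struct Port {
        std::list<Pkt> drainList;                          // responses being drained
        std::list<std::pair<uint64_t, Pkt>> transmitList;  // responses not yet ready

        // Factored-out check used by both recvFunctional() and
        // checkAndSendFunctional(): stop as soon as the probe is satisfied.
        bool checkFunctional(Pkt &probe)
        {
            bool notDone = true;
            for (auto i = drainList.begin(); i != drainList.end() && notDone; ++i)
                notDone = fixUp(probe, *i);
            for (auto j = transmitList.begin(); j != transmitList.end() && notDone; ++j)
                notDone = fixUp(probe, j->second);
            return notDone;   // caller forwards the probe only if this is true
        }
    };

recvFunctional() and checkAndSendFunctional() then become thin wrappers that only forward the probe further out when checkFunctional() reports it is still not done.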
parent 9a6e896d3b
commit f876bc2bf0

3 changed files with 187 additions and 42 deletions

src/mem/cache/base_cache.cc (87 changes)
@@ -102,21 +102,51 @@ BaseCache::CachePort::recvAtomic(PacketPtr pkt)
     return cache->doAtomicAccess(pkt, isCpuSide);
 }

-void
-BaseCache::CachePort::recvFunctional(PacketPtr pkt)
+bool
+BaseCache::CachePort::checkFunctional(PacketPtr pkt)
 {
     //Check storage here first
     list<PacketPtr>::iterator i = drainList.begin();
-    list<PacketPtr>::iterator end = drainList.end();
-    for (; i != end; ++i) {
+    list<PacketPtr>::iterator iend = drainList.end();
+    bool notDone = true;
+    while (i != iend && notDone) {
         PacketPtr target = *i;
         // If the target contains data, and it overlaps the
         // probed request, need to update data
         if (target->intersect(pkt)) {
-            fixPacket(pkt, target);
+            notDone = fixPacket(pkt, target);
         }
+        i++;
     }
-    cache->doFunctionalAccess(pkt, isCpuSide);
+    //Also check the response not yet ready to be on the list
+    std::list<std::pair<Tick,PacketPtr> >::iterator j = transmitList.begin();
+    std::list<std::pair<Tick,PacketPtr> >::iterator jend = transmitList.end();
+
+    while (j != jend && notDone) {
+        PacketPtr target = j->second;
+        // If the target contains data, and it overlaps the
+        // probed request, need to update data
+        if (target->intersect(pkt))
+            notDone = fixPacket(pkt, target);
+        j++;
+    }
+    return notDone;
+}
+
+void
+BaseCache::CachePort::recvFunctional(PacketPtr pkt)
+{
+    bool notDone = checkFunctional(pkt);
+    if (notDone)
+        cache->doFunctionalAccess(pkt, isCpuSide);
+}
+
+void
+BaseCache::CachePort::checkAndSendFunctional(PacketPtr pkt)
+{
+    bool notDone = checkFunctional(pkt);
+    if (notDone)
+        sendFunctional(pkt);
 }

 void
@@ -135,7 +165,7 @@ BaseCache::CachePort::recvRetry()
         isCpuSide && cache->doSlaveRequest()) {

         DPRINTF(CachePort, "%s has more responses/requests\n", name());
-        BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
+        BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
         reqCpu->schedule(curTick + 1);
     }
     waitingOnRetry = false;
@@ -176,7 +206,7 @@ BaseCache::CachePort::recvRetry()
         {
             DPRINTF(CachePort, "%s has more requests\n", name());
             //Still more to issue, rerequest in 1 cycle
-            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
+            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
             reqCpu->schedule(curTick + 1);
         }
     }
@@ -194,7 +224,7 @@ BaseCache::CachePort::recvRetry()
         {
             DPRINTF(CachePort, "%s has more requests\n", name());
             //Still more to issue, rerequest in 1 cycle
-            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
+            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
             reqCpu->schedule(curTick + 1);
         }
     }
@@ -226,23 +256,19 @@ BaseCache::CachePort::clearBlocked()
     }
 }

-BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort)
-    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort)
+BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort, bool _newResponse)
+    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort),
+      newResponse(_newResponse)
 {
-    this->setFlags(AutoDelete);
+    if (!newResponse)
+        this->setFlags(AutoDelete);
     pkt = NULL;
 }

-BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort, PacketPtr _pkt)
-    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort), pkt(_pkt)
-{
-    this->setFlags(AutoDelete);
-}
-
 void
 BaseCache::CacheEvent::process()
 {
-    if (!pkt)
+    if (!newResponse)
     {
         if (cachePort->waitingOnRetry) return;
         //We have some responses to drain first
@@ -322,8 +348,16 @@ BaseCache::CacheEvent::process()
         }
         return;
     }
-    //Response
-    //Know the packet to send
+    //Else it's a response
+    assert(cachePort->transmitList.size());
+    assert(cachePort->transmitList.front().first <= curTick);
+    pkt = cachePort->transmitList.front().second;
+    cachePort->transmitList.pop_front();
+    if (!cachePort->transmitList.empty()) {
+        Tick time = cachePort->transmitList.front().first;
+        schedule(time <= curTick ? curTick+1 : time);
+    }
+
     if (pkt->flags & NACKED_LINE)
         pkt->result = Packet::Nacked;
     else
@@ -343,7 +377,7 @@ BaseCache::CacheEvent::process()
     }

     // Check if we're done draining once this list is empty
-    if (cachePort->drainList.empty())
+    if (cachePort->drainList.empty() && cachePort->transmitList.empty())
         cachePort->cache->checkDrain();
 }

@@ -358,8 +392,10 @@ BaseCache::getPort(const std::string &if_name, int idx)
 {
     if (if_name == "")
     {
-        if(cpuSidePort == NULL)
+        if(cpuSidePort == NULL) {
             cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
+            sendEvent = new CacheEvent(cpuSidePort, true);
+        }
         return cpuSidePort;
     }
     else if (if_name == "functional")
@@ -368,8 +404,10 @@ BaseCache::getPort(const std::string &if_name, int idx)
     }
     else if (if_name == "cpu_side")
     {
-        if(cpuSidePort == NULL)
+        if(cpuSidePort == NULL) {
             cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
+            sendEvent = new CacheEvent(cpuSidePort, true);
+        }
         return cpuSidePort;
     }
     else if (if_name == "mem_side")
@@ -377,6 +415,7 @@ BaseCache::getPort(const std::string &if_name, int idx)
         if (memSidePort != NULL)
             panic("Already have a mem side for this cache\n");
         memSidePort = new CachePort(name() + "-mem_side_port", this, false);
+        memSendEvent = new CacheEvent(memSidePort, true);
         return memSidePort;
     }
     else panic("Port name %s unrecognized\n", if_name);
src/mem/cache/base_cache.hh (121 changes)
@@ -105,7 +105,11 @@ class BaseCache : public MemObject

         void clearBlocked();

-        bool canDrain() { return drainList.empty(); }
+        bool checkFunctional(PacketPtr pkt);
+
+        void checkAndSendFunctional(PacketPtr pkt);
+
+        bool canDrain() { return drainList.empty() && transmitList.empty(); }

         bool blocked;

@@ -117,15 +121,16 @@ class BaseCache : public MemObject

         std::list<PacketPtr> drainList;

+        std::list<std::pair<Tick,PacketPtr> > transmitList;
     };

     struct CacheEvent : public Event
     {
         CachePort *cachePort;
         PacketPtr pkt;
+        bool newResponse;

-        CacheEvent(CachePort *_cachePort);
-        CacheEvent(CachePort *_cachePort, PacketPtr _pkt);
+        CacheEvent(CachePort *_cachePort, bool response);
         void process();
         const char *description();
     };
@@ -133,6 +138,9 @@ class BaseCache : public MemObject
   public: //Made public so coherence can get at it.
     CachePort *cpuSidePort;

+    CacheEvent *sendEvent;
+    CacheEvent *memSendEvent;
+
   protected:
     CachePort *memSidePort;

@@ -353,6 +361,12 @@ class BaseCache : public MemObject
         snoopRangesSent = false;
     }

+    ~BaseCache()
+    {
+        delete sendEvent;
+        delete memSendEvent;
+    }
+
     virtual void init();

     /**
@@ -467,7 +481,8 @@ class BaseCache : public MemObject
     {
         if (!doMasterRequest() && !memSidePort->waitingOnRetry)
         {
-            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(memSidePort);
+            BaseCache::CacheEvent * reqCpu =
+                new BaseCache::CacheEvent(memSidePort, false);
             reqCpu->schedule(time);
         }
         uint8_t flag = 1<<cause;
@@ -503,7 +518,8 @@ class BaseCache : public MemObject
     {
         if (!doSlaveRequest() && !cpuSidePort->waitingOnRetry)
         {
-            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(cpuSidePort);
+            BaseCache::CacheEvent * reqCpu =
+                new BaseCache::CacheEvent(cpuSidePort, false);
             reqCpu->schedule(time);
         }
         uint8_t flag = 1<<cause;
@@ -528,9 +544,38 @@ class BaseCache : public MemObject
     */
     void respond(PacketPtr pkt, Tick time)
     {
+        assert(time >= curTick);
         if (pkt->needsResponse()) {
-            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
+            /* CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
             reqCpu->schedule(time);
+            */
+            if (cpuSidePort->transmitList.empty()) {
+                assert(!sendEvent->scheduled());
+                sendEvent->schedule(time);
+                cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
+                                                    (time,pkt));
+                return;
+            }
+
+            // something is on the list and this belongs at the end
+            if (time >= cpuSidePort->transmitList.back().first) {
+                cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
+                                                    (time,pkt));
+                return;
+            }
+            // Something is on the list and this belongs somewhere else
+            std::list<std::pair<Tick,PacketPtr> >::iterator i =
+                cpuSidePort->transmitList.begin();
+            std::list<std::pair<Tick,PacketPtr> >::iterator end =
+                cpuSidePort->transmitList.end();
+            bool done = false;
+
+            while (i != end && !done) {
+                if (time < i->first)
+                    cpuSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>
+                                                     (time,pkt));
+                i++;
+            }
         }
         else {
             if (pkt->cmd != Packet::UpgradeReq)
@@ -548,12 +593,42 @@ class BaseCache : public MemObject
     */
     void respondToMiss(PacketPtr pkt, Tick time)
     {
+        assert(time >= curTick);
         if (!pkt->req->isUncacheable()) {
-            missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] += time - pkt->time;
+            missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
+                time - pkt->time;
         }
         if (pkt->needsResponse()) {
-            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
+            /* CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
             reqCpu->schedule(time);
+            */
+            if (cpuSidePort->transmitList.empty()) {
+                assert(!sendEvent->scheduled());
+                sendEvent->schedule(time);
+                cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
+                                                    (time,pkt));
+                return;
+            }
+
+            // something is on the list and this belongs at the end
+            if (time >= cpuSidePort->transmitList.back().first) {
+                cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
+                                                    (time,pkt));
+                return;
+            }
+            // Something is on the list and this belongs somewhere else
+            std::list<std::pair<Tick,PacketPtr> >::iterator i =
+                cpuSidePort->transmitList.begin();
+            std::list<std::pair<Tick,PacketPtr> >::iterator end =
+                cpuSidePort->transmitList.end();
+            bool done = false;
+
+            while (i != end && !done) {
+                if (time < i->first)
+                    cpuSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>
+                                                     (time,pkt));
+                i++;
+            }
         }
         else {
             if (pkt->cmd != Packet::UpgradeReq)
@@ -570,9 +645,37 @@ class BaseCache : public MemObject
     */
     void respondToSnoop(PacketPtr pkt, Tick time)
     {
+        assert(time >= curTick);
         assert (pkt->needsResponse());
-        CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
+        /* CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
         reqMem->schedule(time);
+        */
+        if (memSidePort->transmitList.empty()) {
+            assert(!memSendEvent->scheduled());
+            memSendEvent->schedule(time);
+            memSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
+                                                (time,pkt));
+            return;
+        }
+
+        // something is on the list and this belongs at the end
+        if (time >= memSidePort->transmitList.back().first) {
+            memSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
+                                                (time,pkt));
+            return;
+        }
+        // Something is on the list and this belongs somewhere else
+        std::list<std::pair<Tick,PacketPtr> >::iterator i =
+            memSidePort->transmitList.begin();
+        std::list<std::pair<Tick,PacketPtr> >::iterator end =
+            memSidePort->transmitList.end();
+        bool done = false;
+
+        while (i != end && !done) {
+            if (time < i->first)
+                memSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>(time,pkt));
+            i++;
+        }
     }

     /**
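The respond(), respondToMiss(), and respondToSnoop() bodies above all queue the response on the port's transmitList in ready-time order and let a single pre-allocated CacheEvent drain the front entry. A minimal standalone sketch of the intended ordered insert (Tick, PacketPtr, and queueForTransmit() are stand-ins here, not the cache's actual interface):

    #include <cstdint>
    #include <list>
    #include <utility>

    using Tick = uint64_t;
    struct Packet;                 // opaque stand-in for the real packet type
    using PacketPtr = Packet *;
    using TransmitList = std::list<std::pair<Tick, PacketPtr>>;

    // Keep the list sorted by the tick at which each response may be sent;
    // the drain event always pops the front and reschedules for the next entry.
    void queueForTransmit(TransmitList &transmitList, Tick time, PacketPtr pkt)
    {
        // Empty list, or later than everything already queued: append.
        if (transmitList.empty() || time >= transmitList.back().first) {
            transmitList.emplace_back(time, pkt);
            return;
        }
        // Otherwise insert in front of the first entry scheduled after 'time'.
        for (auto i = transmitList.begin(); i != transmitList.end(); ++i) {
            if (time < i->first) {
                transmitList.insert(i, std::make_pair(time, pkt));
                return;
            }
        }
    }

In the cache itself, the empty-list case additionally schedules the port's sendEvent (or memSendEvent) at the response time so the drain actually starts.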
src/mem/cache/cache_impl.hh (21 changes)
@@ -536,7 +536,7 @@ Cache<TagStore,Buffering,Coherence>::probe(PacketPtr &pkt, bool update,

     if (!update && (pkt->isWrite() || (otherSidePort == cpuSidePort))) {
         // Still need to change data in all locations.
-        otherSidePort->sendFunctional(pkt);
+        otherSidePort->checkAndSendFunctional(pkt);
         if (pkt->isRead() && pkt->result == Packet::Success)
             return 0;
     }
@@ -560,30 +560,33 @@ Cache<TagStore,Buffering,Coherence>::probe(PacketPtr &pkt, bool update,
     missQueue->findWrites(blk_addr, writes);

     if (!update) {
+        bool notDone = !(pkt->flags & SATISFIED); //Hit in cache (was a block)
         // Check for data in MSHR and writebuffer.
         if (mshr) {
             MSHR::TargetList *targets = mshr->getTargetList();
             MSHR::TargetList::iterator i = targets->begin();
             MSHR::TargetList::iterator end = targets->end();
-            for (; i != end; ++i) {
+            for (; i != end && notDone; ++i) {
                 PacketPtr target = *i;
                 // If the target contains data, and it overlaps the
                 // probed request, need to update data
                 if (target->intersect(pkt)) {
-                    fixPacket(pkt, target);
+                    DPRINTF(Cache, "Functional %s access to blk_addr %x intersects a MSHR\n",
+                            blk_addr);
+                    notDone = fixPacket(pkt, target);
                 }
             }
         }
-        for (int i = 0; i < writes.size(); ++i) {
+        for (int i = 0; i < writes.size() && notDone; ++i) {
             PacketPtr write = writes[i]->pkt;
             if (write->intersect(pkt)) {
-                fixPacket(pkt, write);
+                DPRINTF(Cache, "Functional %s access to blk_addr %x intersects a writeback\n",
+                        pkt->cmdString(), blk_addr);
+                notDone = fixPacket(pkt, write);
             }
         }
-        if (pkt->isRead()
-            && pkt->result != Packet::Success
-            && otherSidePort == memSidePort) {
-            otherSidePort->sendFunctional(pkt);
+        if (notDone && otherSidePort == memSidePort) {
+            otherSidePort->checkAndSendFunctional(pkt);
             assert(pkt->result == Packet::Success);
         }
         return 0;