Fixes for uni-coherence in timing mode for FS.
Still a bug in atomic uni-coherence in FS.

src/cpu/o3/fetch_impl.hh:
src/cpu/o3/lsq_impl.hh:
src/cpu/simple/atomic.cc:
src/cpu/simple/timing.cc:
    Make CPU models handle coherence requests
src/mem/cache/base_cache.cc:
    Properly signal coherence CSHRs
src/mem/cache/coherence/uni_coherence.cc:
    Only deallocate once

--HG--
extra : convert_revision : c4533de421c371c5532ee505e3ecd451511f5c99
parent 4fff6d4603
commit 9c582c7e14

6 changed files with 46 additions and 24 deletions
src/cpu/o3/fetch_impl.hh

@@ -80,7 +80,10 @@ template<class Impl>
 bool
 DefaultFetch<Impl>::IcachePort::recvTiming(Packet *pkt)
 {
-    fetch->processCacheCompletion(pkt);
+    if (pkt->isResponse()) {
+        fetch->processCacheCompletion(pkt);
+    }
+    //else Snooped a coherence request, just return
     return true;
 }
 
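Note: every CPU-side recvTiming change in this commit follows the same shape: process the packet only if it is a response, and accept (but otherwise ignore) snooped coherence requests. A minimal standalone sketch of that dispatch, using stand-in types rather than the real m5 Packet and port classes:

#include <cstdio>

// Stand-ins for illustration only; not the real m5 classes.
struct Packet {
    bool resp;
    bool isResponse() const { return resp; }
};

struct PortSketch {
    // New contract: a response completes an outstanding access;
    // anything else is a snooped coherence request, which the CPU
    // accepts by returning true without touching its own state.
    bool recvTiming(Packet *pkt) {
        if (pkt->isResponse())
            std::printf("complete outstanding access\n");
        // else: snooped a coherence request, just return
        return true;
    }
};

int main() {
    Packet resp{true}, snoop{false};
    PortSketch port;
    port.recvTiming(&resp);   // handled
    port.recvTiming(&snoop);  // ignored, still accepted
    return 0;
}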
src/cpu/o3/lsq_impl.hh

@@ -63,7 +63,14 @@ template <class Impl>
 bool
 LSQ<Impl>::DcachePort::recvTiming(PacketPtr pkt)
 {
-    lsq->thread[pkt->req->getThreadNum()].completeDataAccess(pkt);
+    if (pkt->isResponse()) {
+        lsq->thread[pkt->req->getThreadNum()].completeDataAccess(pkt);
+    }
+    else {
+        //else it is a coherence request, maybe you need to do something
+        warn("Received a coherence request (Invalidate?); O3CPU doesn't"
+             " update the LSQ for these\n");
+    }
     return true;
 }
 
src/cpu/simple/atomic.cc

@@ -101,7 +101,7 @@ AtomicSimpleCPU::CpuPort::recvTiming(Packet *pkt)
 Tick
 AtomicSimpleCPU::CpuPort::recvAtomic(Packet *pkt)
 {
-    panic("AtomicSimpleCPU doesn't expect recvAtomic callback!");
+    //Snooping a coherence request, just return
     return curTick;
 }
 
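Note: recvAtomic on the atomic CPU's port no longer panics on a snoop; it returns curTick, so the snoop is absorbed with no simulated delay. (Per the commit message, atomic-mode uni-coherence is still not fully correct.) A toy illustration of that zero-latency convention, assuming the return value is read as a completion time:

#include <cassert>

using Tick = unsigned long long;
Tick curTick = 1000;  // global simulated time, as in m5 of this era

// Absorb a snooped coherence request instantly: completing at
// curTick means the snoop consumed no simulated time.
Tick recvAtomicSnoop() {
    return curTick;
}

int main() {
    assert(recvAtomicSnoop() == curTick);  // zero-latency snoop
    return 0;
}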
src/cpu/simple/timing.cc

@@ -528,6 +528,7 @@ TimingSimpleCPU::IcachePort::ITickEvent::process()
 bool
 TimingSimpleCPU::IcachePort::recvTiming(Packet *pkt)
 {
+    if (pkt->isResponse()) {
     // delay processing of returned data until next CPU clock edge
     Tick time = pkt->req->getTime();
     while (time < curTick)
@@ -539,6 +540,11 @@ TimingSimpleCPU::IcachePort::recvTiming(Packet *pkt)
     tickEvent.schedule(pkt, time);
 
     return true;
+    }
+    else {
+        //Snooping a Coherence Request, do nothing
+        return true;
+    }
 }
 
 void
@@ -600,6 +606,7 @@ TimingSimpleCPU::completeDrain()
 bool
 TimingSimpleCPU::DcachePort::recvTiming(Packet *pkt)
 {
+    if (pkt->isResponse()) {
     // delay processing of returned data until next CPU clock edge
     Tick time = pkt->req->getTime();
     while (time < curTick)
@@ -611,6 +618,11 @@ TimingSimpleCPU::DcachePort::recvTiming(Packet *pkt)
     tickEvent.schedule(pkt, time);
 
     return true;
+    }
+    else {
+        //Snooping a coherence req, do nothing
+        return true;
+    }
 }
 
 void
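Note: both timing ports keep the pre-existing next-clock-edge delay for responses: time starts at the request's timestamp and is stepped forward a cycle at a time until it reaches curTick. The loop body is cut off in the hunks above, so the per-cycle increment below is an assumption; this standalone sketch only shows the rounding the loop performs:

#include <cassert>

using Tick = unsigned long long;

// Step `time` forward in whole cycles until it is not in the past,
// mirroring `while (time < curTick) time += <one cycle>;`.
Tick nextEdge(Tick time, Tick now, Tick cycle) {
    while (time < now)
        time += cycle;
    return time;
}

int main() {
    // A response stamped at tick 950, a 500-tick clock, current time
    // 1200: processing is deferred to tick 1450.
    assert(nextEdge(950, 1200, 500) == 1450);
    return 0;
}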
src/mem/cache/base_cache.cc

@@ -331,7 +331,7 @@ BaseCache::CacheEvent::process()
     pkt = cachePort->cache->getCoherencePacket();
     MSHR* cshr = (MSHR*) pkt->senderState;
     bool success = cachePort->sendTiming(pkt);
-    cachePort->cache->sendResult(pkt, cshr, success);
+    cachePort->cache->sendCoherenceResult(pkt, cshr, success);
     cachePort->waitingOnRetry = !success;
     if (cachePort->waitingOnRetry)
         DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
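Note: the coherence send path previously reported its outcome through sendResult, which services the miss (MSHR) side; here the packet's senderState is a CSHR, so the result must go through sendCoherenceResult for the coherence protocol to retire it ("Properly signal coherence CSHRs"). A toy sketch of why the two callbacks must stay separate, using stand-in classes rather than the real BaseCache interface:

#include <cstdio>

// Stand-ins for illustration; not the real BaseCache API.
struct Queue {
    int outstanding = 0;
    void retire() { --outstanding; }
};

struct CacheSketch {
    Queue mshrs;  // misses            -> sendResult()
    Queue cshrs;  // coherence buffers -> sendCoherenceResult()

    void sendResult(bool success)          { if (success) mshrs.retire(); }
    void sendCoherenceResult(bool success) { if (success) cshrs.retire(); }
};

int main() {
    CacheSketch cache;
    cache.cshrs.outstanding = 1;
    // A successfully sent coherence packet must retire its CSHR;
    // reporting it through sendResult() would leak the CSHR forever.
    cache.sendCoherenceResult(true);
    std::printf("outstanding CSHRs: %d\n", cache.cshrs.outstanding);
    return 0;
}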
src/mem/cache/coherence/uni_coherence.cc

@@ -53,11 +53,11 @@ UniCoherence::sendResult(Packet * &pkt, MSHR* cshr, bool success)
     if (success)
     {
         bool unblock = cshrs.isFull();
-        cshrs.markInService(cshr);
+//        cshrs.markInService(cshr);
+        cshrs.deallocate(cshr);
         if (!cshrs.havePending()) {
             cache->clearSlaveRequest(Request_Coherence);
         }
-        cshrs.deallocate(cshr);
         if (unblock) {
             //since CSHRs are always used as buffers, should always get rid of one
             assert(!cshrs.isFull());
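Note: this is the "Only deallocate once" fix. If markInService also releases a CSHR (plausible for requests that expect no response, though that code is not shown in this diff), the explicit deallocate below it freed the same entry twice; the fix drops markInService and keeps a single deallocate, moved above the havePending check so clearSlaveRequest sees the queue's true state. A generic sketch of the hazard with a hypothetical slot pool, not m5 code:

#include <cassert>
#include <vector>

// Hypothetical fixed-size slot pool standing in for the CSHR queue.
struct SlotPool {
    std::vector<bool> inUse;
    explicit SlotPool(int n) : inUse(n, false) {}

    int allocate() {
        for (int i = 0; i < (int)inUse.size(); ++i)
            if (!inUse[i]) { inUse[i] = true; return i; }
        return -1;  // full
    }
    void deallocate(int i) {
        assert(inUse[i] && "double deallocate");  // the bug being fixed
        inUse[i] = false;
    }
    bool havePending() const {
        for (bool b : inUse)
            if (b) return true;
        return false;
    }
};

int main() {
    SlotPool cshrs(2);
    int c = cshrs.allocate();
    cshrs.deallocate(c);           // exactly once, before the pending check
    assert(!cshrs.havePending());  // the freed slot is no longer pending
    // cshrs.deallocate(c);        // a second call would trip the assert
    return 0;
}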