Fix for DMAs in FS caches.

Fix CSHRs for flow control. Fix for bus bridges reusing packets (clean the
flags up). Now both timing and atomic caches with MOESI in UP fail at the
same point.

src/dev/io_device.hh:
    DMAs should send WriteInvalidates.
src/mem/bridge.cc:
    When reusing a packet, clean the flags in the packet that were set by the bus.
src/mem/cache/base_cache.cc:
src/mem/cache/base_cache.hh:
src/mem/cache/cache.hh:
src/mem/cache/cache_impl.hh:
src/mem/cache/coherence/simple_coherence.hh:
src/mem/cache/coherence/uni_coherence.cc:
src/mem/cache/coherence/uni_coherence.hh:
    Fix CSHRs for flow control.
src/mem/packet.hh:
    Add a WriteInvalidateResp, since the DMA expects responses to its writes.

--HG--
extra : convert_revision : 59fd6658bcc0d076f4b143169caca946472a86cd
This commit is contained in:
parent
eddbb6801f
commit
a17afb1649
10 changed files with 99 additions and 46 deletions

src/dev/io_device.hh
@@ -256,7 +256,7 @@ class DmaDevice : public PioDevice
virtual ~DmaDevice();
void dmaWrite(Addr addr, int size, Event *event, uint8_t *data)
{ dmaPort->dmaAction(Packet::WriteReq, addr, size, event, data) ; }
{ dmaPort->dmaAction(Packet::WriteInvalidateReq, addr, size, event, data) ; }
void dmaRead(Addr addr, int size, Event *event, uint8_t *data = NULL)
{ dmaPort->dmaAction(Packet::ReadReq, addr, size, event, data); }
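
The dmaWrite change above swaps WriteReq for WriteInvalidateReq, so a full-line DMA write can simply invalidate cached copies instead of merging with them, and (per the packet.hh change below) the device now gets a response it can complete on. The following is a minimal standalone sketch of that request/response pairing; ToyDmaPort, ToyDmaDevice, and the Cmd values are made-up stand-ins, not the real gem5 DmaPort/Packet classes.

    #include <cstdint>
    #include <functional>
    #include <iostream>

    // Stand-in command type; the real gem5 Packet::Command carries attribute bits.
    enum class Cmd { ReadReq, WriteReq, WriteInvalidateReq, WriteInvalidateResp };

    // Hypothetical stand-in for a DMA port: it "sends" a request and later
    // delivers a response, at which point the completion callback runs.
    struct ToyDmaPort {
        void dmaAction(Cmd cmd, uint64_t addr, int size,
                       std::function<void()> completion, const uint8_t *data) {
            std::cout << "issue cmd " << static_cast<int>(cmd)
                      << " addr 0x" << std::hex << addr << std::dec
                      << " size " << size << "\n";
            // Commands that need a response get a completion; only then may
            // the DMA engine retire the transfer (this mirrors why a
            // WriteInvalidateResp has to exist at all).
            if (cmd == Cmd::WriteInvalidateReq || cmd == Cmd::ReadReq)
                completion();   // pretend the response arrived
        }
    };

    struct ToyDmaDevice {
        ToyDmaPort port;
        void dmaWrite(uint64_t addr, int size, std::function<void()> done,
                      uint8_t *data) {
            // Full-line DMA writes invalidate cached copies rather than
            // read-modify-write them.
            port.dmaAction(Cmd::WriteInvalidateReq, addr, size, done, data);
        }
    };

    int main() {
        ToyDmaDevice dev;
        uint8_t buf[64] = {};
        dev.dmaWrite(0x1000, sizeof(buf),
                     [] { std::cout << "DMA write complete\n"; }, buf);
    }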

src/mem/bridge.cc
@@ -153,6 +153,7 @@ Bridge::BridgePort::trySend()
DPRINTF(BusBridge, "trySend: origSrc %d dest %d addr 0x%x\n",
buf->origSrc, pkt->getDest(), pkt->getAddr());
pkt->flags &= ~SNOOP_COMMIT; //CLear it if it was set
if (sendTiming(pkt)) {
// send successful
sendQueue.pop_front();
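
The bridge reuses the same packet object when it forwards it onto the other bus, so any flags the first bus set on it (here SNOOP_COMMIT) must be cleared before the resend. Below is a minimal standalone sketch of that pattern; the flag value, ToyPacket, and the helper functions are invented for illustration and are not the real gem5 bridge code.

    #include <cassert>
    #include <cstdint>

    // Hypothetical flag value; the real SNOOP_COMMIT lives in gem5's packet code.
    static const uint32_t SNOOP_COMMIT = 1 << 0;

    struct ToyPacket {
        uint32_t flags = 0;
    };

    // A bus "sends" a packet and marks it as having gone through snoop commit.
    void busSend(ToyPacket &pkt) {
        pkt.flags |= SNOOP_COMMIT;
    }

    // A bridge that forwards the *same* packet object must scrub flags the
    // previous bus set, otherwise the next bus sees stale state.
    void bridgeTrySend(ToyPacket &pkt) {
        pkt.flags &= ~SNOOP_COMMIT;   // clear it if it was set
        busSend(pkt);                 // safe to hand to the next bus now
    }

    int main() {
        ToyPacket pkt;
        busSend(pkt);                 // first bus sets SNOOP_COMMIT
        bridgeTrySend(pkt);           // bridge scrubs it before reusing the packet
        assert(pkt.flags & SNOOP_COMMIT);  // set again only by the second send
    }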

src/mem/cache/base_cache.cc (57 changed lines)
@ -44,7 +44,6 @@ BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
|
|||
: Port(_name), cache(_cache), isCpuSide(_isCpuSide)
|
||||
{
|
||||
blocked = false;
|
||||
cshrRetry = NULL;
|
||||
waitingOnRetry = false;
|
||||
//Start ports at null if more than one is created we should panic
|
||||
//cpuSidePort = NULL;
|
||||
|

@@ -195,20 +194,20 @@ BaseCache::CachePort::recvRetry()
}
else
{
assert(cshrRetry);
assert(cache->doSlaveRequest());
//pkt = cache->getCoherencePacket();
//We save the packet, no reordering on CSHRS
pkt = cshrRetry;
pkt = cache->getCoherencePacket();
MSHR* cshr = (MSHR*)pkt->senderState;
bool success = sendTiming(pkt);
cache->sendCoherenceResult(pkt, cshr, success);
waitingOnRetry = !success;
if (success)
if (success && cache->doSlaveRequest())
{
if (cache->doSlaveRequest()) {
//Still more to issue, rerequest in 1 cycle
BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
reqCpu->schedule(curTick + 1);
}
cshrRetry = NULL;
DPRINTF(CachePort, "%s has more requests\n", name());
//Still more to issue, rerequest in 1 cycle
BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
reqCpu->schedule(curTick + 1);
}
}
if (waitingOnRetry) DPRINTF(CachePort, "%s STILL Waiting on retry\n", name());

@@ -294,10 +293,12 @@ BaseCache::CacheEvent::process()
pkt->getAddr(), success ? "succesful" : "unsuccesful");
cachePort->cache->sendResult(pkt, mshr, success);
cachePort->waitingOnRetry = !success;
if (cachePort->waitingOnRetry) DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
if (cachePort->waitingOnRetry)
DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
if (success && cachePort->cache->doMasterRequest())
{
DPRINTF(CachePort, "%s still more MSHR requests to send\n", cachePort->name());
DPRINTF(CachePort, "%s still more MSHR requests to send\n",
cachePort->name());
//Still more to issue, rerequest in 1 cycle
pkt = NULL;
this->schedule(curTick+1);

@@ -306,27 +307,21 @@ BaseCache::CacheEvent::process()
else
{
//CSHR
if (!cachePort->cshrRetry) {
assert(cachePort->cache->doSlaveRequest());
pkt = cachePort->cache->getCoherencePacket();
}
else {
pkt = cachePort->cshrRetry;
}
assert(cachePort->cache->doSlaveRequest());
pkt = cachePort->cache->getCoherencePacket();
MSHR* cshr = (MSHR*) pkt->senderState;
bool success = cachePort->sendTiming(pkt);
if (!success) {
//Need to send on a retry
cachePort->cshrRetry = pkt;
cachePort->waitingOnRetry = true;
}
else
cachePort->cache->sendResult(pkt, cshr, success);
cachePort->waitingOnRetry = !success;
if (cachePort->waitingOnRetry)
DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
if (success && cachePort->cache->doSlaveRequest())
{
cachePort->cshrRetry = NULL;
if (cachePort->cache->doSlaveRequest()) {
//Still more to issue, rerequest in 1 cycle
pkt = NULL;
this->schedule(curTick+1);
}
DPRINTF(CachePort, "%s still more CSHR requests to send\n",
cachePort->name());
//Still more to issue, rerequest in 1 cycle
pkt = NULL;
this->schedule(curTick+1);
}
}
return;
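
The recvRetry and CacheEvent::process changes above converge on one flow-control pattern: never retire a buffered coherence request until the send actually succeeds, and report the outcome back so the buffer can decide. Below is a minimal standalone sketch of that pattern under that assumption; ToyCshrQueue, ToyPort, and the alternating-reject "bus" are invented stand-ins, not the gem5 classes.

    #include <deque>
    #include <iostream>
    #include <string>

    // Toy stand-in for the CSHR queue: requests stay buffered until the port
    // reports a successful send.
    struct ToyCshrQueue {
        std::deque<std::string> pending;
        std::string peek() const { return pending.front(); }   // do not pop yet
        void sendResult(bool success) {
            if (success)
                pending.pop_front();    // retire only on success
        }
        bool havePending() const { return !pending.empty(); }
    };

    // Toy port: the "bus" rejects every other send, like a busy timing bus.
    struct ToyPort {
        ToyCshrQueue &q;
        bool waitingOnRetry = false;
        int sends = 0;

        bool sendTiming(const std::string &) { return (sends++ % 2) == 1; }

        void trySend() {
            const std::string pkt = q.peek();   // re-fetch, never a stale copy
            bool success = sendTiming(pkt);
            q.sendResult(success);              // queue retires it only if sent
            waitingOnRetry = !success;
            if (success && q.havePending())
                trySend();                      // "reschedule" for more work
        }

        void recvRetry() {                      // bus says: try again now
            if (waitingOnRetry)
                trySend();
        }
    };

    int main() {
        ToyCshrQueue q{{"inv A", "inv B"}};
        ToyPort port{q};
        port.trySend();                  // rejected; request stays buffered
        while (port.waitingOnRetry)
            port.recvRetry();            // bus eventually grants retries
        std::cout << "pending left: " << q.pending.size() << "\n";   // prints 0
    }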

src/mem/cache/base_cache.hh (14 changed lines)
@@ -116,7 +116,6 @@ class BaseCache : public MemObject
std::list<Packet *> drainList;
Packet *cshrRetry;
};
struct CacheEvent : public Event

@@ -188,6 +187,12 @@ class BaseCache : public MemObject
fatal("No implementation");
}
virtual void sendCoherenceResult(Packet* &pkt, MSHR* mshr, bool success)
{
fatal("No implementation");
}
/**
* Bit vector of the blocking reasons for the access path.
* @sa #BlockedCause

@@ -489,10 +494,13 @@ class BaseCache : public MemObject
*/
void setSlaveRequest(RequestCause cause, Tick time)
{
if (!doSlaveRequest() && !cpuSidePort->waitingOnRetry)
{
BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(cpuSidePort);
reqCpu->schedule(time);
}
uint8_t flag = 1<<cause;
slaveRequests |= flag;
assert("Implement\n" && 0);
// si->pktuest(time);
}
/**
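
The guard shown in setSlaveRequest above only schedules a new send event when no slave request is already pending and the CPU-side port is not waiting on a retry; in both other cases a send is already guaranteed to happen (by the pending event or by the retry), so a second event would double-send. Below is a minimal standalone sketch of that guard; ToyCache and the event list are invented stand-ins for the real gem5 cache and event scheduler.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Toy event list standing in for the gem5 event queue.
    std::vector<uint64_t> scheduledEvents;

    struct ToyCache {
        uint8_t slaveRequests = 0;        // bit vector of pending request causes
        bool waitingOnRetry = false;      // port is stalled on a bus retry

        bool doSlaveRequest() const { return slaveRequests != 0; }

        void setSlaveRequest(int cause, uint64_t time) {
            // Schedule a send event only if nothing is pending yet and the
            // port is not already stalled on a retry.
            if (!doSlaveRequest() && !waitingOnRetry)
                scheduledEvents.push_back(time);
            slaveRequests |= uint8_t(1 << cause);
        }
    };

    int main() {
        ToyCache cache;
        cache.setSlaveRequest(0, 100);   // schedules one event
        cache.setSlaveRequest(1, 101);   // already pending: no second event
        std::cout << "events scheduled: " << scheduledEvents.size() << "\n"; // 1
    }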

src/mem/cache/cache.hh (7 changed lines)
@@ -178,6 +178,13 @@ class Cache : public BaseCache
*/
virtual void sendResult(Packet * &pkt, MSHR* mshr, bool success);
/**
* Was the CSHR request was sent successfully?
* @param pkt The request.
* @param success True if the request was sent successfully.
*/
virtual void sendCoherenceResult(Packet * &pkt, MSHR* cshr, bool success);
/**
* Handles a response (cache line fill/write ack) from the bus.
* @param pkt The request being responded to.

src/mem/cache/cache_impl.hh (10 changed lines)
@@ -304,6 +304,7 @@ Cache<TagStore,Buffering,Coherence>::handleResponse(Packet * &pkt)
{
BlkType *blk = NULL;
if (pkt->senderState) {
((MSHR*)pkt->senderState)->pkt = pkt;
if (pkt->result == Packet::Nacked) {
//pkt->reinitFromRequest();
warn("NACKs from devices not connected to the same bus not implemented\n");

@@ -379,6 +380,15 @@ Cache<TagStore,Buffering,Coherence>::getCoherencePacket()
return coherence->getPacket();
}
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::sendCoherenceResult(Packet* &pkt,
MSHR *cshr,
bool success)
{
coherence->sendResult(pkt, cshr, success);
}
template<class TagStore, class Buffering, class Coherence>
void

src/mem/cache/coherence/simple_coherence.hh (12 changed lines)
@@ -94,6 +94,18 @@ class SimpleCoherence
return NULL;
}
/**
* Was the CSHR request was sent successfully?
* @param pkt The request.
* @param success True if the request was sent successfully.
*/
void sendResult(Packet * &pkt, MSHR* cshr, bool success)
{
//Don't do coherence
return;
}
/**
* Return the proper state given the current state and the bus response.
* @param pkt The bus response.

src/mem/cache/coherence/uni_coherence.cc (30 changed lines)
@@ -43,20 +43,30 @@ UniCoherence::UniCoherence()
Packet *
UniCoherence::getPacket()
{
bool unblock = cshrs.isFull();
Packet* pkt = cshrs.getReq();
cshrs.markInService((MSHR*)pkt->senderState);
if (!cshrs.havePending()) {
cache->clearSlaveRequest(Request_Coherence);
}
if (unblock) {
//since CSHRs are always used as buffers, should always get rid of one
assert(!cshrs.isFull());
cache->clearBlocked(Blocked_Coherence);
}
return pkt;
}
void
UniCoherence::sendResult(Packet * &pkt, MSHR* cshr, bool success)
{
if (success)
{
bool unblock = cshrs.isFull();
cshrs.markInService(cshr);
if (!cshrs.havePending()) {
cache->clearSlaveRequest(Request_Coherence);
}
cshrs.deallocate(cshr);
if (unblock) {
//since CSHRs are always used as buffers, should always get rid of one
assert(!cshrs.isFull());
cache->clearBlocked(Blocked_Coherence);
}
}
}
/**
* @todo add support for returning slave requests, not doing them here.
*/
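
The key move in UniCoherence above is that getPacket no longer retires the CSHR entry on its own; retirement moves into sendResult and happens only on a successful send, which is also the moment a previously full buffer can unblock the cache. Below is a minimal standalone sketch of that bookkeeping; ToyCshrBuffer and its capacity are invented stand-ins, not the real CSHR structure.

    #include <cassert>
    #include <cstddef>
    #include <deque>
    #include <iostream>
    #include <string>

    // Toy fixed-capacity CSHR buffer. When it fills up, the owning cache blocks
    // new snoops; a slot is freed only once a forwarded invalidate actually
    // goes out on the bus.
    struct ToyCshrBuffer {
        static constexpr size_t capacity = 2;
        std::deque<std::string> entries;
        bool cacheBlocked = false;

        bool isFull() const { return entries.size() == capacity; }

        void allocate(const std::string &req) {
            entries.push_back(req);
            if (isFull())
                cacheBlocked = true;     // stop accepting snoops for now
        }

        // Hand out the oldest request without retiring it (mirrors getPacket).
        const std::string &getPacket() const { return entries.front(); }

        // Retire the entry only if the send succeeded (mirrors sendResult);
        // if the buffer had been full, this is what unblocks the cache.
        void sendResult(bool success) {
            if (!success)
                return;                  // keep the entry; a retry will resend it
            bool wasFull = isFull();
            entries.pop_front();
            if (wasFull) {
                assert(!isFull());
                cacheBlocked = false;
            }
        }
    };

    int main() {
        ToyCshrBuffer cshrs;
        cshrs.allocate("invalidate 0x100");
        cshrs.allocate("invalidate 0x140");
        std::cout << "blocked: " << cshrs.cacheBlocked << "\n";   // 1 (full)

        cshrs.getPacket();
        cshrs.sendResult(false);                                  // send failed
        std::cout << "blocked: " << cshrs.cacheBlocked << "\n";   // still 1

        cshrs.sendResult(true);                                   // retry worked
        std::cout << "blocked: " << cshrs.cacheBlocked << "\n";   // 0
    }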

src/mem/cache/coherence/uni_coherence.hh (8 changed lines)
@@ -108,12 +108,20 @@ class UniCoherence
else
return BlkValid | BlkWritable;
}
/**
* Return outstanding invalidate to forward.
* @return The next invalidate to forward to lower levels of cache.
*/
Packet * getPacket();
/**
* Was the CSHR request was sent successfully?
* @param pkt The request.
* @param success True if the request was sent successfully.
*/
void sendResult(Packet * &pkt, MSHR* cshr, bool success);
/**
* Handle snooped bus requests.
* @param pkt The snooped bus request.

src/mem/packet.hh
@@ -202,7 +202,9 @@ class Packet
HardPFResp = IsRead | IsResponse | IsHWPrefetch
| NeedsResponse | HasData,
InvalidateReq = IsInvalidate | IsRequest,
WriteInvalidateReq = IsWrite | IsInvalidate | IsRequest | HasData,
WriteInvalidateReq = IsWrite | IsInvalidate | IsRequest
| HasData | NeedsResponse,
WriteInvalidateResp = IsWrite | IsInvalidate | IsRequest | NeedsResponse,
UpgradeReq = IsInvalidate | IsRequest | IsUpgrade,
ReadExReq = IsRead | IsInvalidate | IsRequest | NeedsResponse,
ReadExResp = IsRead | IsInvalidate | IsResponse
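
Packet commands in this scheme are just compositions of attribute bits, so marking WriteInvalidateReq with NeedsResponse and defining a matching WriteInvalidateResp is enough for the DMA's completion path to work. Below is a minimal standalone sketch of that composition style; the bit values and enum names are stand-ins for illustration, not the real packet.hh constants.

    #include <cstdio>

    // Stand-in attribute bits; the real ones live in gem5's Packet class.
    enum CmdAttr : unsigned {
        IsRead        = 1 << 0,
        IsWrite       = 1 << 1,
        IsInvalidate  = 1 << 2,
        IsRequest     = 1 << 3,
        IsResponse    = 1 << 4,
        NeedsResponse = 1 << 5,
        HasData       = 1 << 6,
    };

    // Commands are built by OR-ing attribute bits together.
    enum Command : unsigned {
        WriteReq            = IsWrite | IsRequest | HasData,
        WriteInvalidateReq  = IsWrite | IsInvalidate | IsRequest
                              | HasData | NeedsResponse,
        WriteInvalidateResp = IsWrite | IsInvalidate | IsResponse,
    };

    // A sender can then ask generic questions of any command.
    bool needsResponse(Command cmd) { return cmd & NeedsResponse; }

    int main() {
        // The DMA engine only gets a completion for commands that carry
        // NeedsResponse, which is why WriteInvalidateReq now has it.
        std::printf("WriteReq needs response: %d\n", needsResponse(WriteReq));
        std::printf("WriteInvalidateReq needs response: %d\n",
                    needsResponse(WriteInvalidateReq));
    }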