First set of changes for reorganized cache coherence support.

Compiles but doesn't work... committing just so I can merge
(stupid bk!).

src/mem/bridge.cc:
    Get rid of SNOOP_COMMIT.
src/mem/bus.cc:
src/mem/packet.hh:
    Get rid of SNOOP_COMMIT & two-pass snoop.
    First bits of EXPRESS_SNOOP support.
src/mem/cache/base_cache.cc:
src/mem/cache/base_cache.hh:
src/mem/cache/cache.hh:
src/mem/cache/cache_impl.hh:
src/mem/cache/miss/blocking_buffer.cc:
src/mem/cache/miss/miss_queue.cc:
src/mem/cache/prefetch/base_prefetcher.cc:
    Big reorg of ports and port-related functions & events.
src/mem/cache/cache.cc:
src/mem/cache/cache_builder.cc:
src/mem/cache/coherence/SConscript:
    Get rid of UniCoherence object.

--HG--
extra : convert_revision : 7672434fa3115c9b1c94686f497e57e90413b7c3
Steve Reinhardt 2007-05-18 22:35:04 -07:00
parent 224ae7813d
commit 792d5b9e5e
15 changed files with 435 additions and 918 deletions

src/mem/bridge.cc

@ -112,10 +112,6 @@ Bridge::BridgePort::reqQueueFull()
bool
Bridge::BridgePort::recvTiming(PacketPtr pkt)
{
if (!(pkt->flags & SNOOP_COMMIT))
return true;
DPRINTF(BusBridge, "recvTiming: src %d dest %d addr 0x%x\n",
pkt->getSrc(), pkt->getDest(), pkt->getAddr());
@ -255,8 +251,6 @@ Bridge::BridgePort::trySend()
PacketPtr pkt = buf->pkt;
pkt->flags &= ~SNOOP_COMMIT; //CLear it if it was set
if (pkt->cmd == MemCmd::WriteInvalidateReq && fixPartialWrite &&
pkt->result != Packet::Nacked && pkt->getOffset(pbs) &&
pkt->getSize() != pbs) {

src/mem/bus.cc

@ -182,8 +182,10 @@ Bus::recvTiming(PacketPtr pkt)
// If the bus is busy, or other devices are in line ahead of the current
// one, put this device on the retry list.
if (tickNextIdle > curTick ||
(retryList.size() && (!inRetry || pktPort != retryList.front()))) {
if (!(pkt->flags & EXPRESS_SNOOP) &&
tickNextIdle > curTick ||
(retryList.size() && (!inRetry || pktPort != retryList.front())))
{
addToRetryList(pktPort);
DPRINTF(Bus, "recvTiming: Bus is busy, returning false\n");
return false;
@ -195,31 +197,18 @@ Bus::recvTiming(PacketPtr pkt)
// access has been handled twice.
if (dest == Packet::Broadcast) {
port = findPort(pkt->getAddr(), pkt->getSrc());
pkt->flags &= ~SNOOP_COMMIT;
if (timingSnoop(pkt, port ? port : interfaces[pkt->getSrc()])) {
bool success;
timingSnoop(pkt, port ? port : interfaces[pkt->getSrc()]);
pkt->flags |= SNOOP_COMMIT;
success = timingSnoop(pkt, port ? port : interfaces[pkt->getSrc()]);
assert(success);
if (pkt->flags & SATISFIED) {
//Cache-Cache transfer occuring
if (inRetry) {
retryList.front()->onRetryList(false);
retryList.pop_front();
inRetry = false;
}
occupyBus(pkt);
DPRINTF(Bus, "recvTiming: Packet sucessfully sent\n");
return true;
if (pkt->flags & SATISFIED) {
//Cache-Cache transfer occuring
if (inRetry) {
retryList.front()->onRetryList(false);
retryList.pop_front();
inRetry = false;
}
} else {
//Snoop didn't succeed
DPRINTF(Bus, "Adding1 a retry to RETRY list %d\n",
pktPort->getId());
addToRetryList(pktPort);
return false;
occupyBus(pkt);
DPRINTF(Bus, "recvTiming: Packet sucessfully sent\n");
return true;
}
} else {
assert(dest >= 0 && dest < maxId);
@ -426,7 +415,6 @@ Bus::recvAtomic(PacketPtr pkt)
DPRINTF(Bus, "recvAtomic: packet src %d dest %d addr 0x%x cmd %s\n",
pkt->getSrc(), pkt->getDest(), pkt->getAddr(), pkt->cmdString());
assert(pkt->getDest() == Packet::Broadcast);
pkt->flags |= SNOOP_COMMIT;
// Assume one bus cycle in order to get through. This may have
// some clock skew issues yet again...
@ -451,7 +439,6 @@ Bus::recvFunctional(PacketPtr pkt)
DPRINTF(Bus, "recvFunctional: packet src %d dest %d addr 0x%x cmd %s\n",
pkt->getSrc(), pkt->getDest(), pkt->getAddr(), pkt->cmdString());
assert(pkt->getDest() == Packet::Broadcast);
pkt->flags |= SNOOP_COMMIT;
Port* port = findPort(pkt->getAddr(), pkt->getSrc());
functionalSnoop(pkt, port ? port : interfaces[pkt->getSrc()]);
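
A rough stand-alone illustration of the intent behind the bus.cc change above: the old two-pass protocol, in which timingSnoop() ran once without SNOOP_COMMIT as a trial and then again with the flag set, becomes a single snoop pass, and a packet carrying EXPRESS_SNOOP is meant to get past the normal bus-busy check. ExampleBus and ExamplePacket are simplified stand-ins for this sketch, not the real gem5 classes.

#include <cstdint>

struct ExamplePacket {
    enum : uint64_t { EXPRESS_SNOOP = 1 << 7 };   // bit position as in the packet.hh hunk below
    uint64_t flags = 0;
};

struct ExampleBus {
    uint64_t tickNextIdle = 0;
    uint64_t curTick = 0;

    void timingSnoop(ExamplePacket &pkt) { /* deliver the snoop to every snooping port */ }

    bool recvTiming(ExamplePacket &pkt) {
        // Ordinary packets wait while the bus is busy; an express snoop is
        // forwarded immediately instead of being put on the retry list.
        if (!(pkt.flags & ExamplePacket::EXPRESS_SNOOP) && tickNextIdle > curTick)
            return false;

        // Single snoop pass; no separate SNOOP_COMMIT "commit" phase.
        timingSnoop(pkt);
        return true;
    }
};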

src/mem/cache/base_cache.cc

@ -40,29 +40,38 @@
using namespace std;
BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
bool _isCpuSide)
: Port(_name, _cache), cache(_cache), isCpuSide(_isCpuSide)
BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache)
: Port(_name, _cache), cache(_cache), otherPort(NULL)
{
blocked = false;
waitingOnRetry = false;
//Start ports at null if more than one is created we should panic
//cpuSidePort = NULL;
//memSidePort = NULL;
}
BaseCache::BaseCache(const std::string &name, Params &params)
: MemObject(name),
blocked(0), blockedSnoop(0),
blkSize(params.blkSize),
missCount(params.maxMisses), drainEvent(NULL)
{
}
void
BaseCache::CachePort::recvStatusChange(Port::Status status)
{
cache->recvStatusChange(status, isCpuSide);
if (status == Port::RangeChange) {
otherPort->sendStatusChange(Port::RangeChange);
}
}
void
BaseCache::CachePort::getDeviceAddressRanges(AddrRangeList &resp,
AddrRangeList &snoop)
{
cache->getAddressRanges(resp, snoop, isCpuSide);
AddrRangeList dummy;
otherPort->getPeerAddressRanges(resp, dummy);
}
int
@ -115,92 +124,99 @@ BaseCache::CachePort::checkAndSendFunctional(PacketPtr pkt)
sendFunctional(pkt);
}
void
BaseCache::CachePort::recvRetry()
{
PacketPtr pkt;
assert(waitingOnRetry);
if (!drainList.empty()) {
DPRINTF(CachePort, "%s attempting to send a retry for response (%i waiting)\n"
, name(), drainList.size());
//We have some responses to drain first
pkt = drainList.front();
drainList.pop_front();
if (sendTiming(pkt)) {
DPRINTF(CachePort, "%s sucessful in sending a retry for"
"response (%i still waiting)\n", name(), drainList.size());
if (!drainList.empty() ||
!isCpuSide && cache->doMasterRequest() ||
isCpuSide && cache->doSlaveRequest()) {
DPRINTF(CachePort, "%s has more responses/requests\n", name());
new BaseCache::RequestEvent(this, curTick + 1);
}
waitingOnRetry = false;
}
else {
drainList.push_front(pkt);
}
// Check if we're done draining once this list is empty
if (drainList.empty())
cache->checkDrain();
}
else if (!isCpuSide)
{
DPRINTF(CachePort, "%s attempting to send a retry for MSHR\n", name());
if (!cache->doMasterRequest()) {
//This can happen if I am the owner of a block and see an upgrade
//while the block was in my WB Buffers. I just remove the
//wb and de-assert the masterRequest
waitingOnRetry = false;
void
BaseCache::CachePort::respond(PacketPtr pkt, Tick time)
{
assert(time >= curTick);
if (pkt->needsResponse()) {
if (transmitList.empty()) {
assert(!responseEvent->scheduled());
responseEvent->schedule(time);
transmitList.push_back(std::pair<Tick,PacketPtr>(time,pkt));
return;
}
pkt = cache->getPacket();
MSHR* mshr = (MSHR*) pkt->senderState;
//Copy the packet, it may be modified/destroyed elsewhere
PacketPtr copyPkt = new Packet(*pkt);
copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
mshr->pkt = copyPkt;
bool success = sendTiming(pkt);
DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
pkt->getAddr(), success ? "succesful" : "unsuccesful");
waitingOnRetry = !success;
if (waitingOnRetry) {
DPRINTF(CachePort, "%s now waiting on a retry\n", name());
// something is on the list and this belongs at the end
if (time >= transmitList.back().first) {
transmitList.push_back(std::pair<Tick,PacketPtr>(time,pkt));
return;
}
// Something is on the list and this belongs somewhere else
std::list<std::pair<Tick,PacketPtr> >::iterator i =
transmitList.begin();
std::list<std::pair<Tick,PacketPtr> >::iterator end =
transmitList.end();
bool done = false;
cache->sendResult(pkt, mshr, success);
if (success && cache->doMasterRequest())
{
DPRINTF(CachePort, "%s has more requests\n", name());
//Still more to issue, rerequest in 1 cycle
new BaseCache::RequestEvent(this, curTick + 1);
while (i != end && !done) {
if (time < i->first) {
if (i == transmitList.begin()) {
//Inserting at begining, reschedule
responseEvent->reschedule(time);
}
transmitList.insert(i,std::pair<Tick,PacketPtr>(time,pkt));
done = true;
}
i++;
}
}
else
{
assert(cache->doSlaveRequest());
//pkt = cache->getCoherencePacket();
//We save the packet, no reordering on CSHRS
pkt = cache->getCoherencePacket();
MSHR* cshr = (MSHR*)pkt->senderState;
bool success = sendTiming(pkt);
cache->sendCoherenceResult(pkt, cshr, success);
waitingOnRetry = !success;
if (success && cache->doSlaveRequest())
else {
assert(0);
// this code was on the cpuSidePort only... do we still need it?
if (pkt->cmd != MemCmd::UpgradeReq)
{
DPRINTF(CachePort, "%s has more requests\n", name());
//Still more to issue, rerequest in 1 cycle
new BaseCache::RequestEvent(this, curTick + 1);
delete pkt->req;
delete pkt;
}
}
if (waitingOnRetry) DPRINTF(CachePort, "%s STILL Waiting on retry\n", name());
else DPRINTF(CachePort, "%s no longer waiting on retry\n", name());
return;
}
bool
BaseCache::CachePort::drainResponse()
{
DPRINTF(CachePort,
"%s attempting to send a retry for response (%i waiting)\n",
name(), drainList.size());
//We have some responses to drain first
PacketPtr pkt = drainList.front();
if (sendTiming(pkt)) {
drainList.pop_front();
DPRINTF(CachePort, "%s sucessful in sending a retry for"
"response (%i still waiting)\n", name(), drainList.size());
if (!drainList.empty() || isBusRequested()) {
DPRINTF(CachePort, "%s has more responses/requests\n", name());
return false;
}
} else {
waitingOnRetry = true;
DPRINTF(CachePort, "%s now waiting on a retry\n", name());
}
return true;
}
bool
BaseCache::CachePort::recvRetryCommon()
{
assert(waitingOnRetry);
waitingOnRetry = false;
if (!drainList.empty()) {
if (!drainResponse()) {
// more responses to drain... re-request bus
scheduleRequestEvent(curTick + 1);
}
// Check if we're done draining once this list is empty
if (drainList.empty()) {
cache->checkDrain();
}
return true;
}
return false;
}
void
BaseCache::CachePort::setBlocked()
{
@ -225,143 +241,6 @@ BaseCache::CachePort::clearBlocked()
}
}
BaseCache::RequestEvent::RequestEvent(CachePort *_cachePort, Tick when)
: Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort)
{
this->setFlags(AutoDelete);
schedule(when);
}
void
BaseCache::RequestEvent::process()
{
if (cachePort->waitingOnRetry) return;
//We have some responses to drain first
if (!cachePort->drainList.empty()) {
DPRINTF(CachePort, "%s trying to drain a response\n", cachePort->name());
if (cachePort->sendTiming(cachePort->drainList.front())) {
DPRINTF(CachePort, "%s drains a response succesfully\n", cachePort->name());
cachePort->drainList.pop_front();
if (!cachePort->drainList.empty() ||
!cachePort->isCpuSide && cachePort->cache->doMasterRequest() ||
cachePort->isCpuSide && cachePort->cache->doSlaveRequest()) {
DPRINTF(CachePort, "%s still has outstanding bus reqs\n", cachePort->name());
this->schedule(curTick + 1);
}
}
else {
cachePort->waitingOnRetry = true;
DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
}
}
else if (!cachePort->isCpuSide)
{ //MSHR
DPRINTF(CachePort, "%s trying to send a MSHR request\n", cachePort->name());
if (!cachePort->cache->doMasterRequest()) {
//This can happen if I am the owner of a block and see an upgrade
//while the block was in my WB Buffers. I just remove the
//wb and de-assert the masterRequest
return;
}
PacketPtr pkt = cachePort->cache->getPacket();
MSHR* mshr = (MSHR*) pkt->senderState;
//Copy the packet, it may be modified/destroyed elsewhere
PacketPtr copyPkt = new Packet(*pkt);
copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
mshr->pkt = copyPkt;
bool success = cachePort->sendTiming(pkt);
DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
pkt->getAddr(), success ? "succesful" : "unsuccesful");
cachePort->waitingOnRetry = !success;
if (cachePort->waitingOnRetry) {
DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
}
cachePort->cache->sendResult(pkt, mshr, success);
if (success && cachePort->cache->doMasterRequest())
{
DPRINTF(CachePort, "%s still more MSHR requests to send\n",
cachePort->name());
//Still more to issue, rerequest in 1 cycle
this->schedule(curTick+1);
}
}
else
{
//CSHR
assert(cachePort->cache->doSlaveRequest());
PacketPtr pkt = cachePort->cache->getCoherencePacket();
MSHR* cshr = (MSHR*) pkt->senderState;
bool success = cachePort->sendTiming(pkt);
cachePort->cache->sendCoherenceResult(pkt, cshr, success);
cachePort->waitingOnRetry = !success;
if (cachePort->waitingOnRetry)
DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
if (success && cachePort->cache->doSlaveRequest())
{
DPRINTF(CachePort, "%s still more CSHR requests to send\n",
cachePort->name());
//Still more to issue, rerequest in 1 cycle
this->schedule(curTick+1);
}
}
}
const char *
BaseCache::RequestEvent::description()
{
return "Cache request event";
}
BaseCache::ResponseEvent::ResponseEvent(CachePort *_cachePort)
: Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort)
{
}
void
BaseCache::ResponseEvent::process()
{
assert(cachePort->transmitList.size());
assert(cachePort->transmitList.front().first <= curTick);
PacketPtr pkt = cachePort->transmitList.front().second;
cachePort->transmitList.pop_front();
if (!cachePort->transmitList.empty()) {
Tick time = cachePort->transmitList.front().first;
schedule(time <= curTick ? curTick+1 : time);
}
if (pkt->flags & NACKED_LINE)
pkt->result = Packet::Nacked;
else
pkt->result = Packet::Success;
pkt->makeTimingResponse();
DPRINTF(CachePort, "%s attempting to send a response\n", cachePort->name());
if (!cachePort->drainList.empty() || cachePort->waitingOnRetry) {
//Already have a list, just append
cachePort->drainList.push_back(pkt);
DPRINTF(CachePort, "%s appending response onto drain list\n", cachePort->name());
}
else if (!cachePort->sendTiming(pkt)) {
//It failed, save it to list of drain events
DPRINTF(CachePort, "%s now waiting for a retry\n", cachePort->name());
cachePort->drainList.push_back(pkt);
cachePort->waitingOnRetry = true;
}
// Check if we're done draining once this list is empty
if (cachePort->drainList.empty() && cachePort->transmitList.empty())
cachePort->cache->checkDrain();
}
const char *
BaseCache::ResponseEvent::description()
{
return "Cache response event";
}
void
BaseCache::init()
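
The new CachePort::respond() shown above queues outgoing responses on a per-port transmitList kept ordered by tick, and a response event drains the head entry when its time arrives. Below is a minimal stand-alone sketch of just that queueing pattern; ResponseQueue, Pkt and the schedule hooks are assumed placeholders for the real port and event machinery, not gem5 code.

#include <cstdint>
#include <list>
#include <utility>

typedef uint64_t Tick;
struct Pkt;                       // stand-in for the packet type

struct ResponseQueue {
    std::list<std::pair<Tick, Pkt *> > transmitList;

    void scheduleAt(Tick t)   { /* responseEvent->schedule(t) in the real port */ }
    void rescheduleAt(Tick t) { /* responseEvent->reschedule(t) */ }

    void respond(Pkt *pkt, Tick time) {
        if (transmitList.empty()) {
            scheduleAt(time);                          // first entry: start the event
            transmitList.push_back(std::make_pair(time, pkt));
            return;
        }
        if (time >= transmitList.back().first) {       // common case: append at the tail
            transmitList.push_back(std::make_pair(time, pkt));
            return;
        }
        for (std::list<std::pair<Tick, Pkt *> >::iterator i = transmitList.begin();
             i != transmitList.end(); ++i) {
            if (time < i->first) {                     // found the insertion point
                if (i == transmitList.begin())
                    rescheduleAt(time);                // new head: move the event earlier
                transmitList.insert(i, std::make_pair(time, pkt));
                return;
            }
        }
    }
};

When the event fires, the head packet is popped and either sent or parked on the drain list until a retry arrives, as the processResponseEvent() bodies in cache_impl.hh below show.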

src/mem/cache/base_cache.hh

@ -26,6 +26,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
* Steve Reinhardt
* Ron Dreslinski
*/
/**
@ -83,7 +85,10 @@ class BaseCache : public MemObject
BaseCache *cache;
protected:
CachePort(const std::string &_name, BaseCache *_cache, bool _isCpuSide);
Event *responseEvent;
CachePort(const std::string &_name, BaseCache *_cache);
virtual void recvStatusChange(Status status);
virtual void getDeviceAddressRanges(AddrRangeList &resp,
@ -91,9 +96,11 @@ class BaseCache : public MemObject
virtual int deviceBlockSize();
virtual void recvRetry();
bool recvRetryCommon();
public:
void setOtherPort(CachePort *_otherPort) { otherPort = _otherPort; }
void setBlocked();
void clearBlocked();
@ -104,65 +111,52 @@ class BaseCache : public MemObject
bool canDrain() { return drainList.empty() && transmitList.empty(); }
bool drainResponse();
CachePort *otherPort;
bool blocked;
bool mustSendRetry;
bool isCpuSide;
bool waitingOnRetry;
/**
* Bit vector for the outstanding requests for the master interface.
*/
uint8_t requestCauses;
std::list<PacketPtr> drainList;
std::list<std::pair<Tick,PacketPtr> > transmitList;
};
struct RequestEvent : public Event
{
CachePort *cachePort;
bool isBusRequested() { return requestCauses != 0; }
RequestEvent(CachePort *_cachePort, Tick when);
void process();
const char *description();
};
// These need to be virtual since the Event objects depend on
// cache template parameters.
virtual void scheduleRequestEvent(Tick t) = 0;
struct ResponseEvent : public Event
{
CachePort *cachePort;
void requestBus(RequestCause cause, Tick time)
{
if (!isBusRequested() && !waitingOnRetry) {
scheduleRequestEvent(time);
}
requestCauses |= (1 << cause);
}
ResponseEvent(CachePort *_cachePort);
void process();
const char *description();
void deassertBusRequest(RequestCause cause)
{
requestCauses &= ~(1 << cause);
}
void respond(PacketPtr pkt, Tick time);
};
public: //Made public so coherence can get at it.
CachePort *cpuSidePort;
CachePort *memSidePort;
ResponseEvent *sendEvent;
ResponseEvent *memSendEvent;
private:
void recvStatusChange(Port::Status status, bool isCpuSide)
{
if (status == Port::RangeChange){
if (!isCpuSide) {
cpuSidePort->sendStatusChange(Port::RangeChange);
}
else {
memSidePort->sendStatusChange(Port::RangeChange);
}
}
}
virtual PacketPtr getPacket() = 0;
virtual PacketPtr getCoherencePacket() = 0;
virtual void sendResult(PacketPtr &pkt, MSHR* mshr, bool success) = 0;
virtual void sendCoherenceResult(PacketPtr &pkt, MSHR* mshr, bool success) = 0;
/**
* Bit vector of the blocking reasons for the access path.
* @sa #BlockedCause
@ -175,16 +169,6 @@ class BaseCache : public MemObject
*/
uint8_t blockedSnoop;
/**
* Bit vector for the outstanding requests for the master interface.
*/
uint8_t masterRequests;
/**
* Bit vector for the outstanding requests for the slave interface.
*/
uint8_t slaveRequests;
protected:
/** Stores time the cache blocked for statistics. */
@ -309,20 +293,10 @@ class BaseCache : public MemObject
* of this cache.
* @param params The parameter object for this BaseCache.
*/
BaseCache(const std::string &name, Params &params)
: MemObject(name), blocked(0), blockedSnoop(0), masterRequests(0),
slaveRequests(0), blkSize(params.blkSize),
missCount(params.maxMisses), drainEvent(NULL)
{
//Start ports at null if more than one is created we should panic
cpuSidePort = NULL;
memSidePort = NULL;
}
BaseCache(const std::string &name, Params &params);
~BaseCache()
{
delete sendEvent;
delete memSendEvent;
}
virtual void init();
@ -422,12 +396,12 @@ class BaseCache : public MemObject
}
/**
* True if the master bus should be requested.
* True if the memory-side bus should be requested.
* @return True if there are outstanding requests for the master bus.
*/
bool doMasterRequest()
bool isMemSideBusRequested()
{
return masterRequests != 0;
return memSidePort->isBusRequested();
}
/**
@ -435,59 +409,18 @@ class BaseCache : public MemObject
* @param cause The reason for the request.
* @param time The time to make the request.
*/
void setMasterRequest(RequestCause cause, Tick time)
void requestMemSideBus(RequestCause cause, Tick time)
{
if (!doMasterRequest() && !memSidePort->waitingOnRetry)
{
new RequestEvent(memSidePort, time);
}
uint8_t flag = 1<<cause;
masterRequests |= flag;
memSidePort->requestBus(cause, time);
}
/**
* Clear the master bus request for the given cause.
* @param cause The request reason to clear.
*/
void clearMasterRequest(RequestCause cause)
void deassertMemSideBusRequest(RequestCause cause)
{
uint8_t flag = 1<<cause;
masterRequests &= ~flag;
checkDrain();
}
/**
* Return true if the slave bus should be requested.
* @return True if there are outstanding requests for the slave bus.
*/
bool doSlaveRequest()
{
return slaveRequests != 0;
}
/**
* Request the slave bus for the given reason and time.
* @param cause The reason for the request.
* @param time The time to make the request.
*/
void setSlaveRequest(RequestCause cause, Tick time)
{
if (!doSlaveRequest() && !cpuSidePort->waitingOnRetry)
{
new RequestEvent(cpuSidePort, time);
}
uint8_t flag = 1<<cause;
slaveRequests |= flag;
}
/**
* Clear the slave bus request for the given reason.
* @param cause The request reason to clear.
*/
void clearSlaveRequest(RequestCause cause)
{
uint8_t flag = 1<<cause;
slaveRequests &= ~flag;
memSidePort->deassertBusRequest(cause);
checkDrain();
}
@ -498,111 +431,7 @@ class BaseCache : public MemObject
*/
void respond(PacketPtr pkt, Tick time)
{
assert(time >= curTick);
if (pkt->needsResponse()) {
/* CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
reqCpu->schedule(time);
*/
if (cpuSidePort->transmitList.empty()) {
assert(!sendEvent->scheduled());
sendEvent->schedule(time);
cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
(time,pkt));
return;
}
// something is on the list and this belongs at the end
if (time >= cpuSidePort->transmitList.back().first) {
cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
(time,pkt));
return;
}
// Something is on the list and this belongs somewhere else
std::list<std::pair<Tick,PacketPtr> >::iterator i =
cpuSidePort->transmitList.begin();
std::list<std::pair<Tick,PacketPtr> >::iterator end =
cpuSidePort->transmitList.end();
bool done = false;
while (i != end && !done) {
if (time < i->first) {
if (i == cpuSidePort->transmitList.begin()) {
//Inserting at begining, reschedule
sendEvent->reschedule(time);
}
cpuSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>
(time,pkt));
done = true;
}
i++;
}
}
else {
if (pkt->cmd != MemCmd::UpgradeReq)
{
delete pkt->req;
delete pkt;
}
}
}
/**
* Send a reponse to the slave interface and calculate miss latency.
* @param pkt The request to respond to.
* @param time The time the response is ready.
*/
void respondToMiss(PacketPtr pkt, Tick time)
{
assert(time >= curTick);
if (!pkt->req->isUncacheable()) {
missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
time - pkt->time;
}
if (pkt->needsResponse()) {
/* CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
reqCpu->schedule(time);
*/
if (cpuSidePort->transmitList.empty()) {
assert(!sendEvent->scheduled());
sendEvent->schedule(time);
cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
(time,pkt));
return;
}
// something is on the list and this belongs at the end
if (time >= cpuSidePort->transmitList.back().first) {
cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
(time,pkt));
return;
}
// Something is on the list and this belongs somewhere else
std::list<std::pair<Tick,PacketPtr> >::iterator i =
cpuSidePort->transmitList.begin();
std::list<std::pair<Tick,PacketPtr> >::iterator end =
cpuSidePort->transmitList.end();
bool done = false;
while (i != end && !done) {
if (time < i->first) {
if (i == cpuSidePort->transmitList.begin()) {
//Inserting at begining, reschedule
sendEvent->reschedule(time);
}
cpuSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>
(time,pkt));
done = true;
}
i++;
}
}
else {
if (pkt->cmd != MemCmd::UpgradeReq)
{
delete pkt->req;
delete pkt;
}
}
cpuSidePort->respond(pkt, time);
}
/**
@ -611,65 +440,7 @@ class BaseCache : public MemObject
*/
void respondToSnoop(PacketPtr pkt, Tick time)
{
assert(time >= curTick);
assert (pkt->needsResponse());
/* CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
reqMem->schedule(time);
*/
if (memSidePort->transmitList.empty()) {
assert(!memSendEvent->scheduled());
memSendEvent->schedule(time);
memSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
(time,pkt));
return;
}
// something is on the list and this belongs at the end
if (time >= memSidePort->transmitList.back().first) {
memSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
(time,pkt));
return;
}
// Something is on the list and this belongs somewhere else
std::list<std::pair<Tick,PacketPtr> >::iterator i =
memSidePort->transmitList.begin();
std::list<std::pair<Tick,PacketPtr> >::iterator end =
memSidePort->transmitList.end();
bool done = false;
while (i != end && !done) {
if (time < i->first) {
if (i == memSidePort->transmitList.begin()) {
//Inserting at begining, reschedule
memSendEvent->reschedule(time);
}
memSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>(time,pkt));
done = true;
}
i++;
}
}
/**
* Notification from master interface that a address range changed. Nothing
* to do for a cache.
*/
void rangeChange() {}
void getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop, bool isCpuSide)
{
if (isCpuSide)
{
AddrRangeList dummy;
memSidePort->getPeerAddressRanges(resp, dummy);
}
else
{
//This is where snoops get updated
AddrRangeList dummy;
cpuSidePort->getPeerAddressRanges(dummy, snoop);
return;
}
memSidePort->respond(pkt, time);
}
virtual unsigned int drain(Event *de);
@ -686,7 +457,7 @@ class BaseCache : public MemObject
bool canDrain()
{
if (doMasterRequest() || doSlaveRequest()) {
if (isMemSideBusRequested()) {
return false;
} else if (memSidePort && !memSidePort->canDrain()) {
return false;
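
In the header above, the per-cache masterRequests/slaveRequests bit vectors are replaced by a requestCauses bit vector inside each port, wrapped by requestBus()/deassertBusRequest(), with requestMemSideBus()/deassertMemSideBusRequest() as the cache-level entry points that the miss queue, write buffer and prefetcher now call (see the later hunks). A condensed sketch of that bookkeeping follows; the RequestCause values and the SketchPort type are assumptions for illustration only.

#include <cstdint>

// The cause names appear in the patch; their numeric values here are assumed.
enum RequestCause { Request_MSHR, Request_WB, Request_Coherence, Request_PF };

struct SketchPort {
    uint8_t requestCauses;         // one bit per outstanding request cause
    bool waitingOnRetry;

    SketchPort() : requestCauses(0), waitingOnRetry(false) {}

    bool isBusRequested() const { return requestCauses != 0; }

    void requestBus(RequestCause cause, uint64_t time) {
        // Only schedule a new request event for the first outstanding cause.
        if (!isBusRequested() && !waitingOnRetry)
            scheduleRequestEvent(time);
        requestCauses |= (1 << cause);
    }

    void deassertBusRequest(RequestCause cause) {
        requestCauses &= ~(1 << cause);
    }

    void scheduleRequestEvent(uint64_t when) { /* would create a RequestEvent */ }
};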

src/mem/cache/cache.cc

@ -61,7 +61,6 @@
#include "mem/cache/miss/miss_queue.hh"
#include "mem/cache/miss/blocking_buffer.hh"
#include "mem/cache/coherence/uni_coherence.hh"
#include "mem/cache/coherence/simple_coherence.hh"
#include "mem/cache/cache_impl.hh"
@ -72,27 +71,22 @@
#if defined(USE_CACHE_FALRU)
template class Cache<FALRU, SimpleCoherence>;
template class Cache<FALRU, UniCoherence>;
#endif
#if defined(USE_CACHE_IIC)
template class Cache<IIC, SimpleCoherence>;
template class Cache<IIC, UniCoherence>;
#endif
#if defined(USE_CACHE_LRU)
template class Cache<LRU, SimpleCoherence>;
template class Cache<LRU, UniCoherence>;
#endif
#if defined(USE_CACHE_SPLIT)
template class Cache<Split, SimpleCoherence>;
template class Cache<Split, UniCoherence>;
#endif
#if defined(USE_CACHE_SPLIT_LIFO)
template class Cache<SplitLIFO, SimpleCoherence>;
template class Cache<SplitLIFO, UniCoherence>;
#endif
#endif //DOXYGEN_SHOULD_SKIP_THIS

src/mem/cache/cache.hh

@ -28,6 +28,7 @@
* Authors: Erik Hallnor
* Dave Greene
* Steve Reinhardt
* Ron Dreslinski
*/
/**
@ -46,6 +47,8 @@
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/miss_buffer.hh"
#include "sim/eventq.hh"
//Forward decleration
class MSHR;
class BasePrefetcher;
@ -83,11 +86,26 @@ class Cache : public BaseCache
return static_cast<Cache<TagStore,Coherence> *>(cache);
}
void processRequestEvent();
void processResponseEvent();
virtual bool recvTiming(PacketPtr pkt);
virtual void recvRetry();
virtual Tick recvAtomic(PacketPtr pkt);
virtual void recvFunctional(PacketPtr pkt);
typedef EventWrapper<CpuSidePort, &CpuSidePort::processResponseEvent>
ResponseEvent;
typedef EventWrapper<CpuSidePort, &CpuSidePort::processRequestEvent>
RequestEvent;
virtual void scheduleRequestEvent(Tick t) {
new RequestEvent(this, t);
}
};
class MemSidePort : public CachePort
@ -103,11 +121,26 @@ class Cache : public BaseCache
return static_cast<Cache<TagStore,Coherence> *>(cache);
}
void processRequestEvent();
void processResponseEvent();
virtual bool recvTiming(PacketPtr pkt);
virtual void recvRetry();
virtual Tick recvAtomic(PacketPtr pkt);
virtual void recvFunctional(PacketPtr pkt);
typedef EventWrapper<MemSidePort, &MemSidePort::processResponseEvent>
ResponseEvent;
typedef EventWrapper<MemSidePort, &MemSidePort::processRequestEvent>
RequestEvent;
virtual void scheduleRequestEvent(Tick t) {
new RequestEvent(this, t);
}
};
/** Tag and data Storage */
@ -339,8 +372,6 @@ class Cache : public BaseCache
virtual Port *getPort(const std::string &if_name, int idx = -1);
virtual void deletePortRefs(Port *p);
virtual void recvStatusChange(Port::Status status, bool isCpuSide);
void regStats();
/**
@ -354,21 +385,14 @@ class Cache : public BaseCache
* Selects a request to send on the bus.
* @return The memory request to service.
*/
virtual PacketPtr getPacket();
PacketPtr getPacket();
/**
* Was the request was sent successfully?
* @param pkt The request.
* @param success True if the request was sent successfully.
*/
virtual void sendResult(PacketPtr &pkt, MSHR* mshr, bool success);
/**
* Was the CSHR request was sent successfully?
* @param pkt The request.
* @param success True if the request was sent successfully.
*/
virtual void sendCoherenceResult(PacketPtr &pkt, MSHR* cshr, bool success);
void sendResult(PacketPtr &pkt, MSHR* mshr, bool success);
/**
* Handles a response (cache line fill/write ack) from the bus.
@ -376,12 +400,6 @@ class Cache : public BaseCache
*/
void handleResponse(PacketPtr &pkt);
/**
* Selects a coherence message to forward to lower levels of the hierarchy.
* @return The coherence message to forward.
*/
virtual PacketPtr getCoherencePacket();
/**
* Snoops bus transactions to maintain coherence.
* @param pkt The current bus transaction.

src/mem/cache/cache_builder.cc

@ -75,7 +75,6 @@
#include "mem/cache/miss/blocking_buffer.hh"
// Coherence Templates
#include "mem/cache/coherence/uni_coherence.hh"
#include "mem/cache/coherence/simple_coherence.hh"
//Prefetcher Headers
@ -302,13 +301,8 @@ END_INIT_SIM_OBJECT_PARAMS(BaseCache)
} while (0)
#define BUILD_COHERENCE(b) do { \
if (protocol == NULL) { \
UniCoherence *coh = new UniCoherence(); \
BUILD_CACHES(UniCoherence); \
} else { \
SimpleCoherence *coh = new SimpleCoherence(protocol); \
BUILD_CACHES(SimpleCoherence); \
} \
SimpleCoherence *coh = new SimpleCoherence(protocol); \
BUILD_CACHES(SimpleCoherence); \
} while (0)
#if defined(USE_TAGGED)

src/mem/cache/cache_impl.hh

@ -28,6 +28,8 @@
* Authors: Erik Hallnor
* Dave Greene
* Nathan Binkert
* Steve Reinhardt
* Ron Dreslinski
*/
/**
@ -57,18 +59,8 @@
bool SIGNAL_NACK_HACK;
template<class TagStore, class Coherence>
void
Cache<TagStore,Coherence>::
recvStatusChange(Port::Status status, bool isCpuSide)
{
}
template<class TagStore, class Coherence>
Cache<TagStore,Coherence>::
Cache(const std::string &_name,
Cache<TagStore,Coherence>::Params &params)
Cache<TagStore,Coherence>::Cache(const std::string &_name,
Cache<TagStore,Coherence>::Params &params)
: BaseCache(_name, params.baseParams),
prefetchAccess(params.prefetchAccess),
tags(params.tags), missQueue(params.missQueue),
@ -84,6 +76,11 @@ Cache(const std::string &_name,
adaptiveCompression(params.adaptiveCompression),
writebackCompressed(params.writebackCompressed)
{
cpuSidePort = new CpuSidePort(_name + "-cpu_side_port", this);
memSidePort = new MemSidePort(_name + "-mem_side_port", this);
cpuSidePort->setOtherPort(memSidePort);
memSidePort->setOtherPort(cpuSidePort);
tags->setCache(this);
missQueue->setCache(this);
missQueue->setPrefetcher(prefetcher);
@ -406,7 +403,11 @@ Cache<TagStore,Coherence>::handleFill(BlkType *blk, MSHR * mshr,
// mshr->pkt = pkt;
break;
}
respondToMiss(target, completion_time);
if (!target->req->isUncacheable()) {
missLatency[target->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
completion_time - target->time;
}
respond(target, completion_time);
mshr->popTarget();
}
@ -688,7 +689,7 @@ Cache<TagStore,Coherence>::getPacket()
}
}
assert(!doMasterRequest() || missQueue->havePending());
assert(!isMemSideBusRequested() || missQueue->havePending());
assert(!pkt || pkt->time <= curTick);
SIGNAL_NACK_HACK = false;
return pkt;
@ -727,7 +728,6 @@ Cache<TagStore,Coherence>::sendResult(PacketPtr &pkt, MSHR* mshr,
pkt->flags &= ~NACKED_LINE;
SIGNAL_NACK_HACK = false;
pkt->flags &= ~SATISFIED;
pkt->flags &= ~SNOOP_COMMIT;
//Rmove copy from mshr
delete mshr->pkt;
@ -783,22 +783,6 @@ Cache<TagStore,Coherence>::handleResponse(PacketPtr &pkt)
}
}
template<class TagStore, class Coherence>
PacketPtr
Cache<TagStore,Coherence>::getCoherencePacket()
{
return coherence->getPacket();
}
template<class TagStore, class Coherence>
void
Cache<TagStore,Coherence>::sendCoherenceResult(PacketPtr &pkt,
MSHR *cshr,
bool success)
{
coherence->sendResult(pkt, cshr, success);
}
template<class TagStore, class Coherence>
void
@ -1146,27 +1130,15 @@ template<class TagStore, class Coherence>
Port *
Cache<TagStore,Coherence>::getPort(const std::string &if_name, int idx)
{
if (if_name == "" || if_name == "cpu_side")
{
if (cpuSidePort == NULL) {
cpuSidePort = new CpuSidePort(name() + "-cpu_side_port", this);
sendEvent = new ResponseEvent(cpuSidePort);
}
if (if_name == "" || if_name == "cpu_side") {
return cpuSidePort;
}
else if (if_name == "functional")
{
return new CpuSidePort(name() + "-cpu_side_funcport", this);
}
else if (if_name == "mem_side")
{
if (memSidePort != NULL)
panic("Already have a mem side for this cache\n");
memSidePort = new MemSidePort(name() + "-mem_side_port", this);
memSendEvent = new ResponseEvent(memSidePort);
} else if (if_name == "mem_side") {
return memSidePort;
} else if (if_name == "functional") {
return new CpuSidePort(name() + "-cpu_side_funcport", this);
} else {
panic("Port name %s unrecognized\n", if_name);
}
else panic("Port name %s unrecognized\n", if_name);
}
template<class TagStore, class Coherence>
@ -1213,6 +1185,68 @@ Cache<TagStore,Coherence>::CpuSidePort::recvTiming(PacketPtr pkt)
return true;
}
template<class TagStore, class Coherence>
void
Cache<TagStore,Coherence>::CpuSidePort::recvRetry()
{
recvRetryCommon();
}
template<class TagStore, class Coherence>
void
Cache<TagStore,Coherence>::CpuSidePort::processRequestEvent()
{
if (waitingOnRetry)
return;
//We have some responses to drain first
if (!drainList.empty()) {
if (!drainResponse()) {
// more responses to drain... re-request bus
scheduleRequestEvent(curTick + 1);
}
}
}
template<class TagStore, class Coherence>
void
Cache<TagStore,Coherence>::CpuSidePort::processResponseEvent()
{
assert(transmitList.size());
assert(transmitList.front().first <= curTick);
PacketPtr pkt = transmitList.front().second;
transmitList.pop_front();
if (!transmitList.empty()) {
Tick time = transmitList.front().first;
responseEvent->schedule(time <= curTick ? curTick+1 : time);
}
if (pkt->flags & NACKED_LINE)
pkt->result = Packet::Nacked;
else
pkt->result = Packet::Success;
pkt->makeTimingResponse();
DPRINTF(CachePort, "%s attempting to send a response\n", name());
if (!drainList.empty() || waitingOnRetry) {
//Already have a list, just append
drainList.push_back(pkt);
DPRINTF(CachePort, "%s appending response onto drain list\n", name());
}
else if (!sendTiming(pkt)) {
//It failed, save it to list of drain events
DPRINTF(CachePort, "%s now waiting for a retry\n", name());
drainList.push_back(pkt);
waitingOnRetry = true;
}
// Check if we're done draining once this list is empty
if (drainList.empty() && transmitList.empty())
myCache()->checkDrain();
}
template<class TagStore, class Coherence>
Tick
Cache<TagStore,Coherence>::CpuSidePort::recvAtomic(PacketPtr pkt)
@ -1249,23 +1283,149 @@ Cache<TagStore,Coherence>::MemSidePort::recvTiming(PacketPtr pkt)
if (pkt->result == Packet::Nacked)
panic("Need to implement cache resending nacked packets!\n");
if (pkt->isRequest() && blocked)
{
if (pkt->isRequest() && blocked) {
DPRINTF(Cache,"Scheduling a retry while blocked\n");
mustSendRetry = true;
return false;
}
if (pkt->isResponse())
if (pkt->isResponse()) {
myCache()->handleResponse(pkt);
else {
//Check if we should do the snoop
if (pkt->flags & SNOOP_COMMIT)
myCache()->snoop(pkt);
} else {
myCache()->snoop(pkt);
}
return true;
}
template<class TagStore, class Coherence>
void
Cache<TagStore,Coherence>::MemSidePort::recvRetry()
{
if (recvRetryCommon()) {
return;
}
DPRINTF(CachePort, "%s attempting to send a retry for MSHR\n", name());
if (!cache->isMemSideBusRequested()) {
//This can happen if I am the owner of a block and see an upgrade
//while the block was in my WB Buffers. I just remove the
//wb and de-assert the masterRequest
waitingOnRetry = false;
return;
}
PacketPtr pkt = myCache()->getPacket();
MSHR* mshr = (MSHR*) pkt->senderState;
//Copy the packet, it may be modified/destroyed elsewhere
PacketPtr copyPkt = new Packet(*pkt);
copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
mshr->pkt = copyPkt;
bool success = sendTiming(pkt);
DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
pkt->getAddr(), success ? "succesful" : "unsuccesful");
waitingOnRetry = !success;
if (waitingOnRetry) {
DPRINTF(CachePort, "%s now waiting on a retry\n", name());
}
myCache()->sendResult(pkt, mshr, success);
if (success && cache->isMemSideBusRequested())
{
DPRINTF(CachePort, "%s has more requests\n", name());
//Still more to issue, rerequest in 1 cycle
new RequestEvent(this, curTick + 1);
}
}
template<class TagStore, class Coherence>
void
Cache<TagStore,Coherence>::MemSidePort::processRequestEvent()
{
if (waitingOnRetry)
return;
//We have some responses to drain first
if (!drainList.empty()) {
if (!drainResponse()) {
// more responses to drain... re-request bus
scheduleRequestEvent(curTick + 1);
}
return;
}
DPRINTF(CachePort, "%s trying to send a MSHR request\n", name());
if (!isBusRequested()) {
//This can happen if I am the owner of a block and see an upgrade
//while the block was in my WB Buffers. I just remove the
//wb and de-assert the masterRequest
return;
}
PacketPtr pkt = myCache()->getPacket();
MSHR* mshr = (MSHR*) pkt->senderState;
//Copy the packet, it may be modified/destroyed elsewhere
PacketPtr copyPkt = new Packet(*pkt);
copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
mshr->pkt = copyPkt;
bool success = sendTiming(pkt);
DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
pkt->getAddr(), success ? "succesful" : "unsuccesful");
waitingOnRetry = !success;
if (waitingOnRetry) {
DPRINTF(CachePort, "%s now waiting on a retry\n", name());
}
myCache()->sendResult(pkt, mshr, success);
if (success && isBusRequested())
{
DPRINTF(CachePort, "%s still more MSHR requests to send\n", name());
//Still more to issue, rerequest in 1 cycle
scheduleRequestEvent(curTick+1);
}
}
template<class TagStore, class Coherence>
void
Cache<TagStore,Coherence>::MemSidePort::processResponseEvent()
{
assert(transmitList.size());
assert(transmitList.front().first <= curTick);
PacketPtr pkt = transmitList.front().second;
transmitList.pop_front();
if (!transmitList.empty()) {
Tick time = transmitList.front().first;
responseEvent->schedule(time <= curTick ? curTick+1 : time);
}
if (pkt->flags & NACKED_LINE)
pkt->result = Packet::Nacked;
else
pkt->result = Packet::Success;
pkt->makeTimingResponse();
DPRINTF(CachePort, "%s attempting to send a response\n", name());
if (!drainList.empty() || waitingOnRetry) {
//Already have a list, just append
drainList.push_back(pkt);
DPRINTF(CachePort, "%s appending response onto drain list\n", name());
}
else if (!sendTiming(pkt)) {
//It failed, save it to list of drain events
DPRINTF(CachePort, "%s now waiting for a retry\n", name());
drainList.push_back(pkt);
waitingOnRetry = true;
}
// Check if we're done draining once this list is empty
if (drainList.empty() && transmitList.empty())
myCache()->checkDrain();
}
template<class TagStore, class Coherence>
Tick
Cache<TagStore,Coherence>::MemSidePort::recvAtomic(PacketPtr pkt)
@ -1292,15 +1452,17 @@ template<class TagStore, class Coherence>
Cache<TagStore,Coherence>::
CpuSidePort::CpuSidePort(const std::string &_name,
Cache<TagStore,Coherence> *_cache)
: BaseCache::CachePort(_name, _cache, true)
: BaseCache::CachePort(_name, _cache)
{
responseEvent = new ResponseEvent(this);
}
template<class TagStore, class Coherence>
Cache<TagStore,Coherence>::
MemSidePort::MemSidePort(const std::string &_name,
Cache<TagStore,Coherence> *_cache)
: BaseCache::CachePort(_name, _cache, false)
: BaseCache::CachePort(_name, _cache)
{
responseEvent = new ResponseEvent(this);
}
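
The reorganized ports split retry handling: clearing waitingOnRetry and draining any queued responses is shared in CachePort::recvRetryCommon(), and only the memory-side port then goes on to reissue an MSHR request, as the recvRetry() definitions above show. A bare-bones sketch of that split, using simplified stand-in types rather than the real classes:

#include <list>

struct SketchPacket;               // stand-in for the packet type

struct SketchCachePort {
    bool waitingOnRetry;
    std::list<SketchPacket *> drainList;

    SketchCachePort() : waitingOnRetry(false) {}

    // Returns true if the retry was consumed by draining queued responses.
    bool recvRetryCommon() {
        waitingOnRetry = false;
        if (!drainList.empty()) {
            /* drainResponse(); possibly re-request the bus */
            return true;
        }
        return false;
    }
};

struct SketchMemSidePort : SketchCachePort {
    void recvRetry() {
        if (recvRetryCommon())
            return;                // draining responses consumed the retry
        /* otherwise: pull the next packet from the MSHR queue and sendTiming() it */
    }
};

struct SketchCpuSidePort : SketchCachePort {
    void recvRetry() { recvRetryCommon(); }   // cpu side has no MSHR path of its own
};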

src/mem/cache/coherence/SConscript

@ -31,5 +31,4 @@
Import('*')
Source('coherence_protocol.cc')
Source('uni_coherence.cc')

src/mem/cache/coherence/uni_coherence.cc

@ -1,135 +0,0 @@
/*
* Copyright (c) 2003-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
*/
#include "mem/cache/coherence/uni_coherence.hh"
#include "mem/cache/base_cache.hh"
#include "base/trace.hh"
using namespace std;
UniCoherence::UniCoherence()
: cshrs(50)
{
}
PacketPtr
UniCoherence::getPacket()
{
PacketPtr pkt = cshrs.getReq();
return pkt;
}
void
UniCoherence::sendResult(PacketPtr &pkt, MSHR* cshr, bool success)
{
if (success)
{
bool unblock = cshrs.isFull();
// cshrs.markInService(cshr);
delete pkt->req;
cshrs.deallocate(cshr);
if (!cshrs.havePending()) {
cache->clearSlaveRequest(Request_Coherence);
}
if (unblock) {
//since CSHRs are always used as buffers, should always get rid of one
assert(!cshrs.isFull());
cache->clearBlocked(Blocked_Coherence);
}
}
}
/**
* @todo add support for returning slave requests, not doing them here.
*/
bool
UniCoherence::handleBusRequest(PacketPtr &pkt, CacheBlk *blk, MSHR *mshr,
CacheBlk::State &new_state)
{
new_state = 0;
if (pkt->isInvalidate()) {
DPRINTF(Cache, "snoop inval on blk %x (blk ptr %x)\n",
pkt->getAddr(), blk);
}
else if (blk) {
new_state = blk->status;
if (pkt->isRead()) {
DPRINTF(Cache, "Uni-coherence snoops a read that hit in itself"
". Should satisfy the packet\n");
return true; //Satisfy Reads if we can
}
}
return false;
}
bool
UniCoherence::propogateInvalidate(PacketPtr pkt, bool isTiming)
{
if (pkt->isInvalidate()) {
/* Temp Fix for now, forward all invalidates up as functional accesses */
if (isTiming) {
// Forward to other caches
Request* req = new Request(pkt->req->getPaddr(), pkt->getSize(), 0);
PacketPtr tmp = new Packet(req, MemCmd::InvalidateReq, -1);
cshrs.allocate(tmp);
cache->setSlaveRequest(Request_Coherence, curTick);
if (cshrs.isFull())
cache->setBlockedForSnoop(Blocked_Coherence);
}
else {
PacketPtr tmp = new Packet(pkt->req, MemCmd::InvalidateReq, -1);
cache->cpuSidePort->sendAtomic(tmp);
delete tmp;
}
/**/
/* PacketPtr tmp = new Packet(pkt->req, MemCmd::InvalidateReq, -1);
cache->cpuSidePort->sendFunctional(tmp);
delete tmp;
*/
}
if (pkt->isRead()) {
/*For now we will see if someone above us has the data by
doing a functional access on reads. Fix this later */
PacketPtr tmp = new Packet(pkt->req, MemCmd::ReadReq, -1);
tmp->allocate();
cache->cpuSidePort->sendFunctional(tmp);
bool hit = (tmp->result == Packet::Success);
if (hit) {
memcpy(pkt->getPtr<uint8_t>(), tmp->getPtr<uint8_t>(),
pkt->getSize());
DPRINTF(Cache, "Uni-coherence snoops a read that hit in L1\n");
}
delete tmp;
return hit;
}
return false;
}

src/mem/cache/coherence/uni_coherence.hh

@ -1,146 +0,0 @@
/*
* Copyright (c) 2003-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
*/
#ifndef __UNI_COHERENCE_HH__
#define __UNI_COHERENCE_HH__
#include "base/trace.hh"
#include "base/misc.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/mshr_queue.hh"
#include "mem/packet.hh"
class BaseCache;
class UniCoherence
{
protected:
/** Buffers to hold forwarded invalidates. */
MSHRQueue cshrs;
/** Pointer to the parent cache. */
BaseCache *cache;
public:
/**
* Construct and initialize this coherence policy.
*/
UniCoherence();
/**
* Set the pointer to the parent cache.
* @param _cache The parent cache.
*/
void setCache(BaseCache *_cache)
{
cache = _cache;
}
/**
* Register statistics.
* @param name The name to prepend to stat descriptions.
*/
void regStats(const std::string &name)
{
}
/**
* Return Read.
* @param cmd The request's command.
* @param state The current state of the cache block.
* @return The proper bus command, as determined by the protocol.
* @todo Make changes so writebacks don't get here.
*/
MemCmd getBusCmd(MemCmd cmd, CacheBlk::State state)
{
if (cmd == MemCmd::HardPFReq && state)
warn("Trying to issue a prefetch to a block we already have\n");
if (cmd == MemCmd::Writeback)
return MemCmd::Writeback;
return MemCmd::ReadReq;
}
/**
* Just return readable and writeable.
* @param pkt The bus response.
* @param current The current block state.
* @return The new state.
*/
CacheBlk::State getNewState(PacketPtr &pkt, CacheBlk::State current)
{
if (pkt->senderState) //Blocking Buffers don't get mshrs
{
if (((MSHR *)(pkt->senderState))->originalCmd == MemCmd::HardPFReq) {
DPRINTF(HWPrefetch, "Marking a hardware prefetch as such in the state\n");
return BlkHWPrefetched | BlkValid | BlkWritable;
}
else {
return BlkValid | BlkWritable;
}
}
//@todo What about prefetching with blocking buffers
else
return BlkValid | BlkWritable;
}
/**
* Return outstanding invalidate to forward.
* @return The next invalidate to forward to lower levels of cache.
*/
PacketPtr getPacket();
/**
* Was the CSHR request was sent successfully?
* @param pkt The request.
* @param success True if the request was sent successfully.
*/
void sendResult(PacketPtr &pkt, MSHR* cshr, bool success);
/**
* Handle snooped bus requests.
* @param pkt The snooped bus request.
* @param blk The cache block corresponding to the request, if any.
* @param mshr The MSHR corresponding to the request, if any.
* @param new_state The new coherence state of the block.
* @return True if the request should be satisfied locally.
*/
bool handleBusRequest(PacketPtr &pkt, CacheBlk *blk, MSHR *mshr,
CacheBlk::State &new_state);
/**
* Return true if this coherence policy can handle fast cache writes.
*/
bool allowFastWrites() { return true; }
bool hasProtocol() { return false; }
bool propogateInvalidate(PacketPtr pkt, bool isTiming);
};
#endif //__UNI_COHERENCE_HH__

src/mem/cache/miss/blocking_buffer.cc

@ -64,7 +64,7 @@ BlockingBuffer::handleMiss(PacketPtr &pkt, int blk_size, Tick time)
std::memcpy(wb.pkt->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(), blk_size);
cache->setBlocked(Blocked_NoWBBuffers);
cache->setMasterRequest(Request_WB, time);
cache->requestMemSideBus(Request_WB, time);
return;
}
@ -77,7 +77,7 @@ BlockingBuffer::handleMiss(PacketPtr &pkt, int blk_size, Tick time)
miss.pkt->flags |= CACHE_LINE_FILL;
}
cache->setBlocked(Blocked_NoMSHRs);
cache->setMasterRequest(Request_MSHR, time);
cache->requestMemSideBus(Request_MSHR, time);
}
PacketPtr
@ -111,7 +111,7 @@ BlockingBuffer::markInService(PacketPtr &pkt, MSHR* mshr)
// Forwarding a write/ writeback, don't need to change
// the command
assert(mshr == &wb);
cache->clearMasterRequest(Request_WB);
cache->deassertMemSideBusRequest(Request_WB);
if (!pkt->needsResponse()) {
assert(wb.getNumTargets() == 0);
wb.deallocate();
@ -121,7 +121,7 @@ BlockingBuffer::markInService(PacketPtr &pkt, MSHR* mshr)
}
} else {
assert(mshr == &miss);
cache->clearMasterRequest(Request_MSHR);
cache->deassertMemSideBusRequest(Request_MSHR);
if (!pkt->needsResponse()) {
assert(miss.getNumTargets() == 0);
miss.deallocate();
@ -178,7 +178,7 @@ BlockingBuffer::squash(int threadNum)
if (!miss.inService) {
miss.deallocate();
cache->clearBlocked(Blocked_NoMSHRs);
cache->clearMasterRequest(Request_MSHR);
cache->deassertMemSideBusRequest(Request_MSHR);
}
}
}
@ -203,7 +203,7 @@ BlockingBuffer::doWriteback(Addr addr,
writebacks[0/*pkt->req->getThreadNum()*/]++;
wb.allocateAsBuffer(pkt);
cache->setMasterRequest(Request_WB, curTick);
cache->requestMemSideBus(Request_WB, curTick);
cache->setBlocked(Blocked_NoWBBuffers);
}
@ -221,7 +221,7 @@ BlockingBuffer::doWriteback(PacketPtr &pkt)
std::memcpy(wb.pkt->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(), pkt->getSize());
cache->setBlocked(Blocked_NoWBBuffers);
cache->setMasterRequest(Request_WB, curTick);
cache->requestMemSideBus(Request_WB, curTick);
}

src/mem/cache/miss/miss_queue.cc

@ -348,7 +348,7 @@ MissQueue::allocateMiss(PacketPtr &pkt, int size, Tick time)
}
if (pkt->cmd != MemCmd::HardPFReq) {
//If we need to request the bus (not on HW prefetch), do so
cache->setMasterRequest(Request_MSHR, time);
cache->requestMemSideBus(Request_MSHR, time);
}
return mshr;
}
@ -376,7 +376,7 @@ MissQueue::allocateWrite(PacketPtr &pkt, int size, Tick time)
cache->setBlocked(Blocked_NoWBBuffers);
}
cache->setMasterRequest(Request_WB, time);
cache->requestMemSideBus(Request_WB, time);
return mshr;
}
@ -450,7 +450,7 @@ MissQueue::fetchBlock(Addr addr, int blk_size, Tick time,
if (mq.isFull()) {
cache->setBlocked(Blocked_NoMSHRs);
}
cache->setMasterRequest(Request_MSHR, time);
cache->requestMemSideBus(Request_MSHR, time);
return mshr;
}
@ -534,7 +534,7 @@ MissQueue::markInService(PacketPtr &pkt, MSHR* mshr)
unblock = wb.isFull();
wb.markInService(mshr);
if (!wb.havePending()){
cache->clearMasterRequest(Request_WB);
cache->deassertMemSideBusRequest(Request_WB);
}
if (unblock) {
// Do we really unblock?
@ -545,7 +545,7 @@ MissQueue::markInService(PacketPtr &pkt, MSHR* mshr)
unblock = mq.isFull();
mq.markInService(mshr);
if (!mq.havePending()){
cache->clearMasterRequest(Request_MSHR);
cache->deassertMemSideBusRequest(Request_MSHR);
}
if (mshr->originalCmd == MemCmd::HardPFReq) {
DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
@ -553,7 +553,7 @@ MissQueue::markInService(PacketPtr &pkt, MSHR* mshr)
//Also clear pending if need be
if (!prefetcher->havePending())
{
cache->clearMasterRequest(Request_PF);
cache->deassertMemSideBusRequest(Request_PF);
}
}
if (unblock) {
@ -602,7 +602,7 @@ MissQueue::handleResponse(PacketPtr &pkt, Tick time)
mshr->pkt->req = mshr->getTarget()->req;
mq.markPending(mshr, cmd);
mshr->order = order++;
cache->setMasterRequest(Request_MSHR, time);
cache->requestMemSideBus(Request_MSHR, time);
}
else {
unblock = mq.isFull();
@ -683,7 +683,7 @@ MissQueue::squash(int threadNum)
}
mq.squash(threadNum);
if (!mq.havePending()) {
cache->clearMasterRequest(Request_MSHR);
cache->deassertMemSideBusRequest(Request_MSHR);
}
if (unblock && !mq.isFull()) {
cache->clearBlocked(cause);

src/mem/cache/prefetch/base_prefetcher.cc

@ -141,7 +141,7 @@ BasePrefetcher::getPacket()
keepTrying = cache->inCache(pkt->getAddr());
}
if (pf.empty()) {
cache->clearMasterRequest(Request_PF);
cache->deassertMemSideBusRequest(Request_PF);
if (keepTrying) return NULL; //None left, all were in cache
}
} while (keepTrying);
@ -165,7 +165,7 @@ BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time)
pfRemovedMSHR++;
pf.erase(iter);
if (pf.empty())
cache->clearMasterRequest(Request_PF);
cache->deassertMemSideBusRequest(Request_PF);
}
//Remove anything in queue with delay older than time
@ -182,7 +182,7 @@ BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time)
iter--;
}
if (pf.empty())
cache->clearMasterRequest(Request_PF);
cache->deassertMemSideBusRequest(Request_PF);
}
@ -244,7 +244,7 @@ BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time)
prefetch->flags |= CACHE_LINE_FILL;
//Make sure to request the bus, with proper delay
cache->setMasterRequest(Request_PF, prefetch->time);
cache->requestMemSideBus(Request_PF, prefetch->time);
//Increment through the list
addr++;

src/mem/packet.hh

@ -61,8 +61,8 @@ typedef std::list<PacketPtr> PacketList;
#define CACHE_LINE_FILL (1 << 3)
#define COMPRESSED (1 << 4)
#define NO_ALLOCATE (1 << 5)
#define SNOOP_COMMIT (1 << 6)
#define EXPRESS_SNOOP (1 << 7)
class MemCmd
{