First cut at LL/SC support in caches (atomic mode only).
configs/example/fs.py: Add MOESI protocol to caches (uni coherence not quite working w/FS yet).
parent 75ecd3be60
commit bba3dfb0d3

4 changed files with 85 additions and 10 deletions
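Background for readers new to this change: an LL/SC (load-locked/store-conditional) pair implements an atomic read-modify-write without a bus lock; the store-conditional succeeds only if no other write has touched the block since the matching load-locked, so the cache must track which contexts hold an outstanding load lock on each block.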
configs/example/fs.py

@@ -72,6 +72,7 @@ class MyCache(BaseCache):
     latency = 1
     mshrs = 10
     tgts_per_mshr = 5
+    protocol = CoherenceProtocol(protocol='moesi')

 # client system CPU is always simple... note this is an assignment of
 # a class, not an instance.
src/mem/cache/cache_blk.hh (83 changes)
@@ -35,8 +35,11 @@
 #ifndef __CACHE_BLK_HH__
 #define __CACHE_BLK_HH__

+#include <list>
+
 #include "sim/root.hh"        // for Tick
 #include "arch/isa_traits.hh" // for Addr
+#include "mem/request.hh"

 /**
  * Cache block status bit assignments
@@ -96,6 +99,35 @@ class CacheBlk
     /** Number of references to this block since it was brought in. */
     int refCount;

+  protected:
+    /**
+     * Represents that the indicated thread context has a "lock" on
+     * the block, in the LL/SC sense.
+     */
+    class Lock {
+      public:
+        int cpuNum;       // locking CPU
+        int threadNum;    // locking thread ID within CPU
+
+        // check for matching execution context
+        bool matchesContext(Request *req)
+        {
+            return (cpuNum == req->getCpuNum() &&
+                    threadNum == req->getThreadNum());
+        }
+
+        Lock(Request *req)
+            : cpuNum(req->getCpuNum()), threadNum(req->getThreadNum())
+        {
+        }
+    };
+
+    /** List of thread contexts that have performed a load-locked (LL)
+     * on the block since the last store. */
+    std::list<Lock> lockList;
+
+  public:

     CacheBlk()
         : asid(-1), tag(0), data(0), size(0), status(0), whenReady(0),
           set(-1), refCount(0)
@@ -175,7 +207,58 @@ class CacheBlk
         return (status & BlkHWPrefetched) != 0;
     }

+    /**
+     * Track the fact that a load-locked was issued to the block.  If
+     * multiple LLs get issued from the same context we could have
+     * redundant records on the list, but that's OK, as they'll all
+     * get blown away at the next store.
+     */
+    void trackLoadLocked(Request *req)
+    {
+        assert(req->isLocked());
+        lockList.push_front(Lock(req));
+    }
+
+    /**
+     * Clear the list of valid load locks.  Should be called whenever
+     * block is written to or invalidated.
+     */
+    void clearLoadLocks() { lockList.clear(); }
+
+    /**
+     * Handle interaction of load-locked operations and stores.
+     * @return True if write should proceed, false otherwise.  Returns
+     * false only in the case of a failed store conditional.
+     */
+    bool checkWrite(Request *req)
+    {
+        if (req->isLocked()) {
+            // it's a store conditional... have to check for matching
+            // load locked.
+            bool success = false;
+
+            for (std::list<Lock>::iterator i = lockList.begin();
+                 i != lockList.end(); ++i)
+            {
+                if (i->matchesContext(req)) {
+                    // it's a store conditional, and as far as the memory
+                    // system can tell, the requesting context's lock is
+                    // still valid.
+                    success = true;
+                    break;
+                }
+            }
+
+            req->setScResult(success ? 1 : 0);
+            clearLoadLocks();
+            return success;
+        } else {
+            // for *all* stores (conditional or otherwise) we have to
+            // clear the list of load-locks as they're all invalid now.
+            clearLoadLocks();
+            return true;
+        }
+    }
 };

 #endif //__CACHE_BLK_HH__
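The lock-list mechanics are easiest to see in isolation. Below is a minimal, self-contained sketch (C++14 or later); SimpleRequest and SimpleBlk are illustrative stand-ins invented here, not gem5's Request and CacheBlk. It exercises the same logic as the diff above: an SC succeeds only if the requesting context still holds a lock on the block, and any store clears all outstanding locks.

#include <cassert>
#include <iostream>
#include <list>

// Stand-in for gem5's Request: just enough state to identify the
// locking context and carry the store-conditional result.
struct SimpleRequest {
    int cpuNum;
    int threadNum;
    bool locked;        // set for both LL and SC accesses
    int scResult = -1;  // 1 = SC succeeded, 0 = SC failed

    bool isLocked() const { return locked; }
};

// Stripped-down version of the CacheBlk lock list from the diff above.
struct SimpleBlk {
    struct Lock {
        int cpuNum, threadNum;
        Lock(const SimpleRequest &req)
            : cpuNum(req.cpuNum), threadNum(req.threadNum) {}
        bool matchesContext(const SimpleRequest &req) const {
            return cpuNum == req.cpuNum && threadNum == req.threadNum;
        }
    };
    std::list<Lock> lockList;

    void trackLoadLocked(const SimpleRequest &req) {
        assert(req.isLocked());
        lockList.push_front(Lock(req));
    }
    void clearLoadLocks() { lockList.clear(); }

    bool checkWrite(SimpleRequest &req) {
        if (req.isLocked()) {
            // SC: succeeds only if this context's lock is still on the list.
            bool success = false;
            for (const Lock &l : lockList)
                if (l.matchesContext(req)) { success = true; break; }
            req.scResult = success ? 1 : 0;
            clearLoadLocks();  // any store invalidates all outstanding locks
            return success;
        }
        clearLoadLocks();      // ordinary store: always proceeds
        return true;
    }
};

int main() {
    SimpleBlk blk;
    SimpleRequest ll{0, 0, true};   // LL from CPU 0, thread 0
    SimpleRequest sc{0, 0, true};   // SC from the same context
    SimpleRequest st{1, 0, false};  // ordinary store from CPU 1

    // Uncontended case: LL then SC from the same context succeeds.
    blk.trackLoadLocked(ll);
    assert(blk.checkWrite(sc) && sc.scResult == 1);

    // Contended case: an intervening store clears the lock list,
    // so the subsequent SC fails.
    blk.trackLoadLocked(ll);
    blk.checkWrite(st);
    SimpleRequest sc2{0, 0, true};
    assert(!blk.checkWrite(sc2) && sc2.scResult == 0);

    std::cout << "LL/SC semantics behave as expected\n";
    return 0;
}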
src/mem/cache/cache_impl.hh (10 changes)
@@ -86,11 +86,6 @@ doAtomicAccess(Packet *pkt, bool isCpuSide)
 {
     if (isCpuSide)
     {
-        //Temporary solution to LL/SC
-        if (pkt->isWrite() && (pkt->req->isLocked())) {
-            pkt->req->setScResult(1);
-        }
-
         probe(pkt, true, NULL);
         //TEMP ALWAYS SUCCESS FOR NOW
         pkt->result = Packet::Success;

@@ -116,11 +111,6 @@ doFunctionalAccess(Packet *pkt, bool isCpuSide)
         //TEMP USE CPU?THREAD 0 0
         pkt->req->setThreadContext(0,0);

-        //Temporary solution to LL/SC
-        if (pkt->isWrite() && (pkt->req->isLocked())) {
-            assert("Can't handle LL/SC on functional path\n");
-        }
-
         probe(pkt, false, memSidePort);
         //TEMP ALWAYS SUCCESSFUL FOR NOW
         pkt->result = Packet::Success;
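Note what the two deletions accomplish: the atomic path previously reported SC success unconditionally via setScResult(1), and the functional path merely flagged LL/SC as unhandled. With those hacks gone, the SC outcome must come from the block's lock list instead. A minimal sketch of that write-path gating follows (C++14 or later); Req, Blk, and doWrite are hypothetical stand-ins for illustration, not the actual gem5 access path.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Assumed stand-ins: a request that may be an SC, and a block that
// knows whether this context's lock is still live.
struct Req {
    bool locked;
    int scResult = -1;
    bool isLocked() const { return locked; }
    void setScResult(int r) { scResult = r; }
};

struct Blk {
    bool lockHeld;            // simplification of the per-context lock list
    uint8_t data[64] = {};

    bool checkWrite(Req *req) {
        if (!req->isLocked()) {
            lockHeld = false; // ordinary store invalidates the lock
            return true;
        }
        req->setScResult(lockHeld ? 1 : 0);
        lockHeld = false;     // the SC consumes the lock either way
        return req->scResult == 1;
    }
};

// The write is performed only if checkWrite() approves it; a failed SC
// still completes from the CPU's perspective, carrying scResult = 0.
void doWrite(Blk *blk, Req *req, const uint8_t *src, size_t n) {
    if (blk->checkWrite(req))
        std::memcpy(blk->data, src, n);
}

int main() {
    Blk blk{true};            // lock held by this context
    uint8_t v = 42;
    Req sc{true};
    doWrite(&blk, &sc, &v, 1);
    assert(sc.scResult == 1 && blk.data[0] == 42);

    Req sc2{true};            // lock no longer held, so the SC fails
    doWrite(&blk, &sc2, &v, 1);
    assert(sc2.scResult == 0 && blk.data[0] == 42);
    return 0;
}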
src/mem/cache/tags/lru.cc (1 change)
@@ -246,6 +246,7 @@ LRU::invalidateBlk(Addr addr)
     if (blk) {
         blk->status = 0;
         blk->isTouched = false;
+        blk->clearLoadLocks();
         tagsInUse--;
     }
 }
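The one-line addition above matters for correctness: if the block is invalidated between a context's LL and its SC (for example, taken away by another cache under the coherence protocol), any stale Lock records must not survive; otherwise a later SC could spuriously succeed even though the location may have changed while the block was away.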