ruby: handle llsc accesses through CacheEntry, not CacheMemory

The sequencer handles LLSC accesses by calling functions provided by CacheMemory.
This is unnecessary once the required CacheEntry object is available: the lock
state can be read and updated directly on the entry, which avoids some of the
calls to findTagInSet().
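
As a quick illustration of the access pattern this change adopts, here is a
minimal sketch (not the actual Sequencer code: the free-standing helper
functions and the include paths are assumptions for illustration, while
CacheMemory::lookup(), AbstractCacheEntry::isLocked() and clearLocked() are the
interfaces shown in the diffs below):

#include "mem/ruby/slicc_interface/AbstractCacheEntry.hh"
#include "mem/ruby/structures/CacheMemory.hh"

// Before: each locking call goes through CacheMemory, and each call
// internally recomputes the cache set and re-runs findTagInSet(address).
void scCompleteOld(CacheMemory *dataCache, Addr address, int version)
{
    if (!dataCache->isLocked(address, version)) {   // tag lookup #1
        // ... report SC failure to the CPU ...
    }
    dataCache->clearLocked(address);                // tag lookup #2
}

// After: look the entry up once, then operate on the entry directly.
void scCompleteNew(CacheMemory *dataCache, Addr address, int version)
{
    AbstractCacheEntry *e = dataCache->lookup(address);  // single tag lookup
    if (!e)
        return;
    if (!e->isLocked(version)) {
        // ... report SC failure to the CPU ...
    }
    e->clearLocked();
}
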
Author: Nilay Vaish
Date: 2015-08-14 19:28:42 -05:00
Parent: d383a08f16
Commit: 1a3e8a3370
5 changed files with 52 additions and 19 deletions

src/mem/ruby/slicc_interface/AbstractCacheEntry.cc

@@ -28,6 +28,9 @@
 #include "mem/ruby/slicc_interface/AbstractCacheEntry.hh"
 
+#include "base/trace.hh"
+#include "debug/RubyCache.hh"
+
 AbstractCacheEntry::AbstractCacheEntry()
 {
     m_Permission = AccessPermission_NotPresent;
@@ -48,3 +51,25 @@ AbstractCacheEntry::changePermission(AccessPermission new_perm)
         m_locked = -1;
     }
 }
+
+void
+AbstractCacheEntry::setLocked(int context)
+{
+    DPRINTF(RubyCache, "Setting Lock for addr: %x to %d\n", m_Address, context);
+    m_locked = context;
+}
+
+void
+AbstractCacheEntry::clearLocked()
+{
+    DPRINTF(RubyCache, "Clear Lock for addr: %x\n", m_Address);
+    m_locked = -1;
+}
+
+bool
+AbstractCacheEntry::isLocked(int context) const
+{
+    DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
+            m_Address, m_locked, context);
+    return m_locked == context;
+}

src/mem/ruby/slicc_interface/AbstractCacheEntry.hh

@@ -56,6 +56,11 @@ class AbstractCacheEntry : public AbstractEntry
     virtual DataBlock& getDataBlk()
     { panic("getDataBlk() not implemented!"); }
 
+    // Functions for locking and unlocking the cache entry. These are required
+    // for supporting atomic memory accesses.
+    void setLocked(int context);
+    void clearLocked();
+    bool isLocked(int context) const;
+
     Addr m_Address; // Address of this block, required by CacheMemory
     int m_locked;   // Holds info whether the address is locked,

src/mem/ruby/structures/CacheMemory.cc

@@ -413,7 +413,7 @@ CacheMemory::setLocked(Addr address, int context)
     int64 cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
     assert(loc != -1);
-    m_cache[cacheSet][loc]->m_locked = context;
+    m_cache[cacheSet][loc]->setLocked(context);
 }
 
 void
@@ -424,7 +424,7 @@ CacheMemory::clearLocked(Addr address)
     int64 cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
     assert(loc != -1);
-    m_cache[cacheSet][loc]->m_locked = -1;
+    m_cache[cacheSet][loc]->clearLocked();
 }
 
 bool
@@ -436,7 +436,7 @@ CacheMemory::isLocked(Addr address, int context)
     assert(loc != -1);
     DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
             address, m_cache[cacheSet][loc]->m_locked, context);
-    return m_cache[cacheSet][loc]->m_locked == context;
+    return m_cache[cacheSet][loc]->isLocked(context);
 }
 
 void

src/mem/ruby/structures/CacheMemory.hh

@@ -107,6 +107,11 @@ class CacheMemory : public SimObject
     // Set this address to most recently used
     void setMRU(Addr address);
 
+    // Functions for locking and unlocking cache lines corresponding to the
+    // provided address. These are required for supporting atomic memory
+    // accesses. These are to be used when only the address of the cache entry
+    // is available. In case the entry itself is available, use the functions
+    // provided by the AbstractCacheEntry class.
     void setLocked (Addr addr, int context);
     void clearLocked (Addr addr);
     bool isLocked (Addr addr, int context);

src/mem/ruby/system/Sequencer.cc

@@ -317,28 +317,27 @@ Sequencer::removeRequest(SequencerRequest* srequest)
 void
 Sequencer::invalidateSC(Addr address)
 {
-    RequestTable::iterator i = m_writeRequestTable.find(address);
-    if (i != m_writeRequestTable.end()) {
-        SequencerRequest* request = i->second;
-        // The controller has lost the coherence permissions, hence the lock
-        // on the cache line maintained by the cache should be cleared.
-        if (request->m_type == RubyRequestType_Store_Conditional) {
-            m_dataCache_ptr->clearLocked(address);
-        }
+    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
+    // The controller has lost the coherence permissions, hence the lock
+    // on the cache line maintained by the cache should be cleared.
+    if (e && e->isLocked(m_version)) {
+        e->clearLocked();
     }
 }
 
 bool
 Sequencer::handleLlsc(Addr address, SequencerRequest* request)
 {
-    //
+    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
+    if (!e)
+        return true;
+
     // The success flag indicates whether the LLSC operation was successful.
     // LL ops will always succeed, but SC may fail if the cache line is no
     // longer locked.
-    //
     bool success = true;
     if (request->m_type == RubyRequestType_Store_Conditional) {
-        if (!m_dataCache_ptr->isLocked(address, m_version)) {
+        if (!e->isLocked(m_version)) {
             //
             // For failed SC requests, indicate the failure to the cpu by
             // setting the extra data to zero.
@@ -355,19 +354,18 @@ Sequencer::handleLlsc(Addr address, SequencerRequest* request)
         //
         // Independent of success, all SC operations must clear the lock
        //
-        m_dataCache_ptr->clearLocked(address);
+        e->clearLocked();
     } else if (request->m_type == RubyRequestType_Load_Linked) {
         //
         // Note: To fully follow Alpha LLSC semantics, should the LL clear any
         // previously locked cache lines?
         //
-        m_dataCache_ptr->setLocked(address, m_version);
-    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
-               (m_dataCache_ptr->isLocked(address, m_version))) {
+        e->setLocked(m_version);
+    } else if (e->isLocked(m_version)) {
         //
         // Normal writes should clear the locked address
         //
-        m_dataCache_ptr->clearLocked(address);
+        e->clearLocked();
     }
     return success;
 }