From 226981b2a65ee4c544bc595d7718de8225fda0b0 Mon Sep 17 00:00:00 2001
From: Polina Dudnik
Date: Mon, 13 Jul 2009 11:13:29 -0500
Subject: [PATCH] Reintegrated Derek's functional implementation of atomics
 with a minor change: don't clear lock on failure

---
 src/mem/ruby/common/DataBlock.hh   | 18 +++++++++++++
 src/mem/ruby/libruby.cc            | 12 ++++++---
 src/mem/ruby/libruby.hh            |  3 ++-
 src/mem/ruby/system/CacheMemory.hh | 45 ++++++++++++++++++++++++++++++++
 src/mem/ruby/system/Sequencer.cc   | 28 ++++++++++++++++---
 5 files changed, 98 insertions(+), 8 deletions(-)

diff --git a/src/mem/ruby/common/DataBlock.hh b/src/mem/ruby/common/DataBlock.hh
index 2a0811f76..c7dba8ae8 100644
--- a/src/mem/ruby/common/DataBlock.hh
+++ b/src/mem/ruby/common/DataBlock.hh
@@ -56,6 +56,9 @@ class DataBlock {
   uint8 getByte(int whichByte) const;
   const uint8* getData(int offset, int len) const;
   void setByte(int whichByte, uint8 data);
+  const uint8* getBlock() const;
+  uint8* copyData(uint8* dest, int offset, int size) const;
+  void setBlock(uint8* data) { setData(data, 0, RubySystem::getBlockSizeBytes()); }
   void setData(uint8* data, int offset, int len);
   void copyPartial(const DataBlock & dblk, int offset, int len);
   bool equal(const DataBlock& obj) const;
@@ -146,6 +149,21 @@ void DataBlock::copyPartial(const DataBlock & dblk, int offset, int len)
   setData(&dblk.m_data[offset], offset, len);
 }
 
+inline
+const uint8* DataBlock::getBlock() const
+{
+  return m_data;
+}
+
+inline
+uint8* DataBlock::copyData(uint8* dest, int offset, int size) const
+{
+  assert(offset + size <= RubySystem::getBlockSizeBytes());
+  memcpy(dest, m_data + offset, size);
+  return dest;
+}
+
+
 // ******************* Definitions *******************
 
 // Output operator definition
diff --git a/src/mem/ruby/libruby.cc b/src/mem/ruby/libruby.cc
index 987f4fd10..d21b29dec 100644
--- a/src/mem/ruby/libruby.cc
+++ b/src/mem/ruby/libruby.cc
@@ -19,8 +19,10 @@ string RubyRequestType_to_string(const RubyRequestType& obj)
     return "LD";
   case RubyRequestType_ST:
     return "ST";
-  case RubyRequestType_RMW:
-    return "RMW";
+  case RubyRequestType_RMW_Read:
+    return "RMW_Read";
+  case RubyRequestType_RMW_Write:
+    return "RMW_Write";
   case RubyRequestType_NULL:
   default:
     assert(0);
@@ -36,8 +38,10 @@ RubyRequestType string_to_RubyRequestType(std::string str)
     return RubyRequestType_LD;
  else if (str == "ST")
    return RubyRequestType_ST;
-  else if (str == "RMW")
-    return RubyRequestType_RMW;
+  else if (str == "RMW_Read")
+    return RubyRequestType_RMW_Read;
+  else if (str == "RMW_Write")
+    return RubyRequestType_RMW_Write;
  else
    assert(0);
  return RubyRequestType_NULL;
diff --git a/src/mem/ruby/libruby.hh b/src/mem/ruby/libruby.hh
index 5916c98e6..8edcfa0fc 100644
--- a/src/mem/ruby/libruby.hh
+++ b/src/mem/ruby/libruby.hh
@@ -11,7 +11,8 @@ enum RubyRequestType {
   RubyRequestType_IFETCH,
   RubyRequestType_LD,
   RubyRequestType_ST,
-  RubyRequestType_RMW
+  RubyRequestType_RMW_Read,
+  RubyRequestType_RMW_Write
 };
 
 enum RubyAccessMode {
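The DataBlock helpers above give callers whole-line access alongside the existing byte accessors: getBlock() exposes the raw storage, copyData() does a bounds-checked copy out of the line, and setBlock() rewrites the whole line through setData(). A minimal standalone sketch of those semantics, assuming a fixed 64-byte line; ToyDataBlock and kBlockSize are illustrative stand-ins for DataBlock and RubySystem::getBlockSizeBytes(), not gem5 code:

#include <cassert>
#include <cstdint>
#include <cstring>

// Toy stand-in for DataBlock with the block size hard-coded instead of
// coming from RubySystem::getBlockSizeBytes().
class ToyDataBlock {
    static const int kBlockSize = 64;
    uint8_t m_data[kBlockSize] = {};
public:
    // getBlock(): expose the raw line for whole-block transfers.
    const uint8_t* getBlock() const { return m_data; }

    // copyData(): bounds-checked copy of [offset, offset+size) out of the line.
    uint8_t* copyData(uint8_t* dest, int offset, int size) const {
        assert(offset + size <= kBlockSize);
        std::memcpy(dest, m_data + offset, size);
        return dest;
    }

    // setBlock(): overwrite the entire line, like setData(data, 0, blockSize).
    void setBlock(const uint8_t* data) { std::memcpy(m_data, data, kBlockSize); }
};

int main() {
    ToyDataBlock blk;
    uint8_t line[64] = {1, 2, 3, 4};
    blk.setBlock(line);                  // install a full cache line

    uint8_t word[4];
    blk.copyData(word, 0, sizeof(word)); // pull four bytes back out
    assert(word[2] == 3);
    return 0;
}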
diff --git a/src/mem/ruby/system/CacheMemory.hh b/src/mem/ruby/system/CacheMemory.hh
index 941073ad2..cde5b6d94 100644
--- a/src/mem/ruby/system/CacheMemory.hh
+++ b/src/mem/ruby/system/CacheMemory.hh
@@ -116,6 +116,9 @@ public:
   void setMemoryValue(const Address& addr, char* value,
                       unsigned int size_in_bytes );
 
+  void setLocked (const Address& addr, int context);
+  void clearLocked (const Address& addr);
+  bool isLocked (const Address& addr, int context);
   // Print cache contents
   void print(ostream& out) const;
   void printData(ostream& out) const;
@@ -147,6 +150,7 @@ private:
   // The first index is the # of cache lines.
   // The second index is the the amount associativity.
   Vector<Vector<AbstractCacheEntry*> > m_cache;
+  Vector<Vector<int> > m_locked;
 
   AbstractReplacementPolicy *m_replacementPolicy_ptr;
 
@@ -252,10 +256,13 @@ void CacheMemory::init(const vector<string> & argv)
     assert(false);
 
   m_cache.setSize(m_cache_num_sets);
+  m_locked.setSize(m_cache_num_sets);
   for (int i = 0; i < m_cache_num_sets; i++) {
     m_cache[i].setSize(m_cache_assoc);
+    m_locked[i].setSize(m_cache_assoc);
     for (int j = 0; j < m_cache_assoc; j++) {
       m_cache[i][j] = NULL;
+      m_locked[i][j] = -1;
     }
   }
 }
@@ -474,6 +481,7 @@ void CacheMemory::allocate(const Address& address, AbstractCacheEntry* entry)
       m_cache[cacheSet][i] = entry;  // Init entry
       m_cache[cacheSet][i]->m_Address = address;
       m_cache[cacheSet][i]->m_Permission = AccessPermission_Invalid;
+      m_locked[cacheSet][i] = -1;
 
       m_replacementPolicy_ptr->touch(cacheSet, i, g_eventQueue_ptr->getTime());
 
@@ -494,6 +502,7 @@ void CacheMemory::deallocate(const Address& address)
   if (location != -1){
     delete m_cache[cacheSet][location];
     m_cache[cacheSet][location] = NULL;
+    m_locked[cacheSet][location] = -1;
   }
 }
 
@@ -542,6 +551,9 @@ void CacheMemory::changePermission(const Address& address, AccessPermission new_perm)
 {
   assert(address == line_address(address));
   lookup(address).m_Permission = new_perm;
+  Index cacheSet = addressToCacheSet(address);
+  int loc = findTagInSet(cacheSet, address);
+  m_locked[cacheSet][loc] = -1;
   assert(getPermission(address) == new_perm);
 }
 
@@ -630,5 +642,38 @@ void CacheMemory::setMemoryValue(const Address& addr, char* value,
   //  entry = lookup(line_address(addr));
 }
 
 
+inline
+void
+CacheMemory::setLocked(const Address& address, int context)
+{
+  assert(address == line_address(address));
+  Index cacheSet = addressToCacheSet(address);
+  int loc = findTagInSet(cacheSet, address);
+  assert(loc != -1);
+  m_locked[cacheSet][loc] = context;
+}
+
+inline
+void
+CacheMemory::clearLocked(const Address& address)
+{
+  assert(address == line_address(address));
+  Index cacheSet = addressToCacheSet(address);
+  int loc = findTagInSet(cacheSet, address);
+  assert(loc != -1);
+  m_locked[cacheSet][loc] = -1;
+}
+
+inline
+bool
+CacheMemory::isLocked(const Address& address, int context)
+{
+  assert(address == line_address(address));
+  Index cacheSet = addressToCacheSet(address);
+  int loc = findTagInSet(cacheSet, address);
+  assert(loc != -1);
+  return m_locked[cacheSet][loc] == context;
+}
+
 #endif //CACHEMEMORY_H
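The m_locked side table added above carries one owner per (set, way): -1 means unlocked, anything else is the context id that last completed an RMW_Read on the line. setLocked() records the owner, isLocked() tests it, and clearLocked(), allocate(), deallocate(), and changePermission() all reset it. A standalone mock of just that table, with the Address-to-(set, way) translation (addressToCacheSet()/findTagInSet()) reduced to explicit indices; LockTable is an illustrative name, not gem5 code:

#include <cassert>
#include <vector>

// Mock of the m_locked table: one owner id per (set, way), -1 = unlocked.
class LockTable {
    std::vector<std::vector<int>> m_locked;
public:
    LockTable(int sets, int assoc)
        : m_locked(sets, std::vector<int>(assoc, -1)) {}

    // RMW_Read completion: record which context holds the line.
    void setLocked(int set, int way, int context) { m_locked[set][way] = context; }

    // Successful RMW_Write, allocate(), deallocate(), and changePermission()
    // all hand the line back to "nobody".
    void clearLocked(int set, int way) { m_locked[set][way] = -1; }

    // An RMW_Write may commit only if the same context still holds the lock.
    bool isLocked(int set, int way, int context) const {
        return m_locked[set][way] == context;
    }
};

int main() {
    LockTable t(/*sets=*/2, /*assoc=*/4);
    t.setLocked(0, 1, /*context=*/7);
    assert(t.isLocked(0, 1, 7));   // same context: the conditional write may commit
    assert(!t.isLocked(0, 1, 3));  // different context: it must fail
    t.clearLocked(0, 1);           // e.g. the line was replaced or downgraded
    assert(!t.isLocked(0, 1, 7));
    return 0;
}

Resetting the owner on allocation, replacement, and permission changes is what makes the lock behave like a load-linked reservation: any event that could let another processor touch the line also revokes it.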
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 97416d2d3..d7d4ba8e0 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -237,7 +237,8 @@ void Sequencer::removeRequest(SequencerRequest* srequest) {
   Address line_addr(ruby_request.paddr);
   line_addr.makeLineAddress();
   if ((ruby_request.type == RubyRequestType_ST) ||
-      (ruby_request.type == RubyRequestType_RMW)) {
+      (ruby_request.type == RubyRequestType_RMW_Read) ||
+      (ruby_request.type == RubyRequestType_RMW_Write)) {
     m_writeRequestTable.deallocate(line_addr);
   } else {
     m_readRequestTable.deallocate(line_addr);
@@ -256,7 +257,25 @@ void Sequencer::writeCallback(const Address& address, DataBlock& data) {
   removeRequest(request);
 
   assert((request->ruby_request.type == RubyRequestType_ST) ||
-         (request->ruby_request.type == RubyRequestType_RMW));
+         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
+         (request->ruby_request.type == RubyRequestType_RMW_Write));
+  // POLINA: the assumption is that atomics are only on the data cache and not the instruction cache
+  if (request->ruby_request.type == RubyRequestType_RMW_Read) {
+    m_dataCache_ptr->setLocked(address, m_version);
+  }
+  else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
+    if (m_dataCache_ptr->isLocked(address, m_version)) {
+      // if we are still holding the lock for this line, the write succeeds
+      request->ruby_request.atomic_success = true;
+      m_dataCache_ptr->clearLocked(address);
+    }
+    else {
+      // if we are not holding the lock for this line, the write fails
+      request->ruby_request.atomic_success = false;
+    }
+
+    // can have livelock: contexts can keep stealing each other's lock
+  }
 
   hitCallback(request, data);
 }
@@ -379,7 +398,10 @@ void Sequencer::issueRequest(const RubyRequest& request) {
   case RubyRequestType_ST:
     ctype = CacheRequestType_ST;
     break;
-  case RubyRequestType_RMW:
+  case RubyRequestType_RMW_Read:
+    ctype = CacheRequestType_ATOMIC;
+    break;
+  case RubyRequestType_RMW_Write:
     ctype = CacheRequestType_ATOMIC;
     break;
   default:
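End to end, the patch turns each atomic into a load-locked/store-conditional pair: the RMW_Read completion locks the line for this sequencer's m_version, and the later RMW_Write may only commit if that lock survived, with the outcome reported through atomic_success. A standalone sketch of the core-side retry loop that would sit on top of this protocol; ToyLine, rmw_read(), rmw_write(), and fetch_and_add() are hypothetical names that model the writeCallback() logic above, not libruby API:

#include <cstdint>
#include <cstdio>

// One cache line reduced to a lock owner (-1 = unlocked) and a single word.
struct ToyLine {
    int locked = -1;
    uint64_t data = 0;
};

// RMW_Read half: return the current value and reserve the line for ctx,
// as setLocked() does in writeCallback().
uint64_t rmw_read(ToyLine& line, int ctx) {
    line.locked = ctx;
    return line.data;
}

// RMW_Write half: commit only if ctx still holds the reservation, mirroring
// the isLocked()/clearLocked() path; on failure the lock is left untouched.
bool rmw_write(ToyLine& line, int ctx, uint64_t value) {
    if (line.locked != ctx)
        return false;      // atomic_success = false
    line.data = value;
    line.locked = -1;      // success releases the reservation
    return true;
}

// A fetch-and-add built from the two halves, retrying until the write sticks.
uint64_t fetch_and_add(ToyLine& line, int ctx, uint64_t delta) {
    for (;;) {
        uint64_t old = rmw_read(line, ctx);
        if (rmw_write(line, ctx, old + delta))
            return old;
        // Someone stole the reservation between the two halves; try again.
    }
}

int main() {
    ToyLine line;
    fetch_and_add(line, /*ctx=*/0, 5);
    std::printf("data = %llu\n", (unsigned long long)line.data); // data = 5
    return 0;
}

This retry loop is also where the livelock flagged in writeCallback() lives: two contexts can keep stealing each other's reservation between the two halves, so neither RMW_Write ever finds its own lock intact. Leaving the lock alone on failure, the minor change this patch makes, does not remove that hazard; it only avoids clobbering a reservation the failing context no longer owns.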