Ruby: Add support for locked memory accesses in X86_FS
commit 82844618fd (parent 16c1edebd0)
5 changed files with 70 additions and 21 deletions
@@ -58,6 +58,10 @@ RubyRequestType_to_string(const RubyRequestType& obj)
         return "RMW_Read";
       case RubyRequestType_RMW_Write:
         return "RMW_Write";
+      case RubyRequestType_Locked_RMW_Read:
+        return "Locked_RMW_Read";
+      case RubyRequestType_Locked_RMW_Write:
+        return "Locked_RMW_Write";
       case RubyRequestType_NULL:
       default:
         assert(0);
@@ -82,6 +86,10 @@ string_to_RubyRequestType(string str)
        return RubyRequestType_RMW_Read;
    else if (str == "RMW_Write")
        return RubyRequestType_RMW_Write;
+   else if (str == "Locked_RMW_Read")
+       return RubyRequestType_Locked_RMW_Read;
+   else if (str == "Locked_RMW_Write")
+       return RubyRequestType_Locked_RMW_Write;
    else
        assert(0);
    return RubyRequestType_NULL;
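The two hunks above extend the enum/string conversion helpers so the new request types can be printed and parsed by name. Below is a minimal, standalone sketch of the same round-trip pattern; the enum is a trimmed-down stand-in for gem5's RubyRequestType rather than the real header, and only the values relevant to this commit are listed.

#include <cassert>
#include <iostream>
#include <string>

// Trimmed-down stand-in for gem5's RubyRequestType (assumption, not the
// real definition); only the values touched by this commit are listed.
enum RubyRequestType {
    RubyRequestType_RMW_Read,
    RubyRequestType_RMW_Write,
    RubyRequestType_Locked_RMW_Read,
    RubyRequestType_Locked_RMW_Write,
    RubyRequestType_NUM
};

// Convert an enum value to its printable name; unknown values are a bug.
std::string
RubyRequestType_to_string(RubyRequestType type)
{
    switch (type) {
      case RubyRequestType_RMW_Read:         return "RMW_Read";
      case RubyRequestType_RMW_Write:        return "RMW_Write";
      case RubyRequestType_Locked_RMW_Read:  return "Locked_RMW_Read";
      case RubyRequestType_Locked_RMW_Write: return "Locked_RMW_Write";
      default: assert(0); return "";
    }
}

// Inverse mapping used when request types are parsed from text.
RubyRequestType
string_to_RubyRequestType(const std::string &str)
{
    if (str == "RMW_Read")        return RubyRequestType_RMW_Read;
    if (str == "RMW_Write")       return RubyRequestType_RMW_Write;
    if (str == "Locked_RMW_Read") return RubyRequestType_Locked_RMW_Read;
    if (str == "Locked_RMW_Write")return RubyRequestType_Locked_RMW_Write;
    assert(0);
    return RubyRequestType_NUM;
}

int main()
{
    // Round-trip check for one of the values added by this commit.
    assert(string_to_RubyRequestType(
               RubyRequestType_to_string(RubyRequestType_Locked_RMW_Read))
           == RubyRequestType_Locked_RMW_Read);
    std::cout << RubyRequestType_to_string(RubyRequestType_Locked_RMW_Write)
              << std::endl;
    return 0;
}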
@@ -44,6 +44,8 @@ enum RubyRequestType {
    RubyRequestType_Store_Conditional,
    RubyRequestType_RMW_Read,
    RubyRequestType_RMW_Write,
+   RubyRequestType_Locked_RMW_Read,
+   RubyRequestType_Locked_RMW_Write,
    RubyRequestType_NUM
 };
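Note that the new enumerators are inserted before RubyRequestType_NUM, which keeps the sentinel usable as an element count. A small sketch (with a hypothetical per-type counter table, not taken from gem5) of why that ordering matters:

#include <array>
#include <cstdint>
#include <iostream>

// Stand-in enum (assumption): Locked_RMW_* sit before the NUM sentinel so
// the sentinel still equals the number of request types.
enum RubyRequestType {
    RubyRequestType_LD,
    RubyRequestType_ST,
    RubyRequestType_Locked_RMW_Read,
    RubyRequestType_Locked_RMW_Write,
    RubyRequestType_NUM
};

int main()
{
    // Hypothetical per-type statistics table sized by the sentinel; it grows
    // automatically when new types are added before RubyRequestType_NUM.
    std::array<uint64_t, RubyRequestType_NUM> issued{};
    issued[RubyRequestType_Locked_RMW_Read]++;
    issued[RubyRequestType_Locked_RMW_Write]++;
    std::cout << "table entries: " << issued.size() << std::endl;
    return 0;
}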
@@ -70,6 +70,8 @@ DMASequencer::makeRequest(const RubyRequest &request)
      case RubyRequestType_Store_Conditional:
      case RubyRequestType_RMW_Read:
      case RubyRequestType_RMW_Write:
+     case RubyRequestType_Locked_RMW_Read:
+     case RubyRequestType_Locked_RMW_Write:
      case RubyRequestType_NUM:
        panic("DMASequencer::makeRequest does not support RubyRequestType");
        return RequestStatus_NULL;
@@ -26,6 +26,10 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include "config/the_isa.hh"
+#if THE_ISA == X86_ISA
+#include "arch/x86/insts/microldstop.hh"
+#endif // X86_ISA
 #include "cpu/testers/rubytest/RubyTester.hh"
 #include "mem/physical.hh"
 #include "mem/ruby/slicc_interface/AbstractController.hh"
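The x86-specific header is pulled in only when the build targets x86, with a compile-time guard on THE_ISA. A standalone sketch of that guard pattern follows; the macro values and the StoreCheck/FlagShift constants are stand-ins defined locally, not the real ones from config/the_isa.hh or arch/x86/insts/microldstop.hh.

#include <cstdint>
#include <iostream>

// Stand-ins for the macros normally supplied by config/the_isa.hh
// (assumed values, for illustration only).
#define X86_ISA 1
#define ALPHA_ISA 2
#ifndef THE_ISA
#define THE_ISA X86_ISA
#endif

#if THE_ISA == X86_ISA
// Stand-ins for the StoreCheck/FlagShift constants that the real code takes
// from arch/x86/insts/microldstop.hh; the numeric values are assumptions.
namespace TheISA {
    const uint32_t FlagShift = 4;
    const uint32_t StoreCheck = 1 << 2;
}
#endif // X86_ISA

// Decide whether a read should be upgraded to an RMW_Read; non-x86 builds
// compile the flag check away entirely.
bool isStoreCheckRead(uint32_t flags)
{
#if THE_ISA == X86_ISA
    return flags & (TheISA::StoreCheck << TheISA::FlagShift);
#else
    (void)flags;
    return false;
#endif
}

int main()
{
    // With the stand-in constants above, bit 0x40 is the store-check bit.
    std::cout << std::boolalpha << isStoreCheckRead(0x40) << " "
              << isStoreCheckRead(0x0) << std::endl;
    return 0;
}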
@@ -201,22 +205,38 @@ RubyPort::M5Port::recvTiming(PacketPtr pkt)
             assert(pkt->isRead());
             type = RubyRequestType_Load_Linked;
         }
+    } else if (pkt->req->isLocked()) {
+        if (pkt->isWrite()) {
+            DPRINTF(MemoryAccess, "Issuing Locked RMW Write\n");
+            type = RubyRequestType_Locked_RMW_Write;
+        } else {
+            DPRINTF(MemoryAccess, "Issuing Locked RMW Read\n");
+            assert(pkt->isRead());
+            type = RubyRequestType_Locked_RMW_Read;
+        }
     } else {
         if (pkt->isRead()) {
             if (pkt->req->isInstFetch()) {
                 type = RubyRequestType_IFETCH;
             } else {
+#if THE_ISA == X86_ISA
+                uint32_t flags = pkt->req->getFlags();
+                bool storeCheck = flags &
+                        (TheISA::StoreCheck << TheISA::FlagShift);
+#else
+                bool storeCheck = false;
+#endif // X86_ISA
+                if (storeCheck) {
+                    type = RubyRequestType_RMW_Read;
+                } else {
                     type = RubyRequestType_LD;
+                }
             }
         } else if (pkt->isWrite()) {
             //
             // Note: M5 packets do not differentiate ST from RMW_Write
             //
             type = RubyRequestType_ST;
         } else if (pkt->isReadWrite()) {
             // Fix me. This conditional will never be executed
             // because isReadWrite() is just an OR of isRead() and
             // isWrite(). Furthermore, just because the packet is a
             // read/write request does not necessary mean it is a
             // read-modify-write atomic operation.
             type = RubyRequestType_RMW_Write;
         } else {
             panic("Unsupported ruby packet type\n");
         }
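The hunk above is where packets carrying the x86 LOCK prefix are first recognized and turned into Locked_RMW_Read / Locked_RMW_Write requests. A condensed, standalone sketch of that classification order follows; the FakePacket type is a simplified stand-in for the M5 packet/request classes, with isLLSC(), isLocked() and the other predicates modeled as plain booleans.

#include <cassert>
#include <iostream>

// Trimmed stand-in for RubyRequestType (assumption, not the real header).
enum RubyRequestType {
    RubyRequestType_LD, RubyRequestType_ST, RubyRequestType_IFETCH,
    RubyRequestType_Load_Linked, RubyRequestType_Store_Conditional,
    RubyRequestType_RMW_Read, RubyRequestType_RMW_Write,
    RubyRequestType_Locked_RMW_Read, RubyRequestType_Locked_RMW_Write
};

// Simplified stand-in for the M5 packet: just the predicates the
// classification looks at.
struct FakePacket {
    bool llsc = false;       // pkt->req->isLLSC()
    bool locked = false;     // pkt->req->isLocked() (x86 LOCK prefix)
    bool read = false;       // pkt->isRead()
    bool write = false;      // pkt->isWrite()
    bool ifetch = false;     // pkt->req->isInstFetch()
    bool storeCheck = false; // x86 StoreCheck flag set on a read
};

// Same precedence as the patched recvTiming: LL/SC first, then LOCKed
// accesses, then ordinary fetches, loads and stores.
RubyRequestType classify(const FakePacket &pkt)
{
    if (pkt.llsc) {
        return pkt.write ? RubyRequestType_Store_Conditional
                         : RubyRequestType_Load_Linked;
    }
    if (pkt.locked) {
        return pkt.write ? RubyRequestType_Locked_RMW_Write
                         : RubyRequestType_Locked_RMW_Read;
    }
    if (pkt.read) {
        if (pkt.ifetch)
            return RubyRequestType_IFETCH;
        return pkt.storeCheck ? RubyRequestType_RMW_Read : RubyRequestType_LD;
    }
    assert(pkt.write);
    return RubyRequestType_ST;
}

int main()
{
    FakePacket locked_read;
    locked_read.locked = true;
    locked_read.read = true;
    std::cout << (classify(locked_read) == RubyRequestType_Locked_RMW_Read)
              << std::endl;
    return 0;
}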
@@ -232,7 +232,9 @@ Sequencer::insertRequest(SequencerRequest* request)
        (request->ruby_request.type == RubyRequestType_RMW_Read) ||
        (request->ruby_request.type == RubyRequestType_RMW_Write) ||
        (request->ruby_request.type == RubyRequestType_Load_Linked) ||
-       (request->ruby_request.type == RubyRequestType_Store_Conditional)) {
+       (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
+       (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
+       (request->ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
        bool success = r.second;
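The locked RMW types now join the group of requests tracked in the sequencer's write table, so at most one of them can be outstanding per cache line. A rough sketch of that bookkeeping follows, assuming the table behaves like a std::map keyed by line address; the real RequestTable and SequencerRequest are gem5 types, the versions below are stand-ins.

#include <cstdint>
#include <iostream>
#include <map>
#include <utility>

// Trimmed stand-ins for the gem5 types (assumptions, for illustration).
enum RubyRequestType {
    RubyRequestType_LD, RubyRequestType_ST,
    RubyRequestType_Locked_RMW_Read, RubyRequestType_Locked_RMW_Write
};

struct SequencerRequest { RubyRequestType type; };

// Stand-in for the sequencer's per-line outstanding-request tables.
using RequestTable = std::map<uint64_t, SequencerRequest *>;

// Requests that need exclusive ownership of the line (stores and both
// halves of a locked RMW) go in the write table.
bool isWriteLike(RubyRequestType type)
{
    return type == RubyRequestType_ST ||
           type == RubyRequestType_Locked_RMW_Read ||
           type == RubyRequestType_Locked_RMW_Write;
}

bool insertRequest(RequestTable &writeTable, RequestTable &readTable,
                   uint64_t lineAddr, SequencerRequest *req)
{
    RequestTable &table = isWriteLike(req->type) ? writeTable : readTable;
    // map::insert reports whether the line already had an outstanding entry.
    std::pair<RequestTable::iterator, bool> r =
        table.insert(RequestTable::value_type(lineAddr, req));
    return r.second;
}

int main()
{
    RequestTable writes, reads;
    SequencerRequest a{RubyRequestType_Locked_RMW_Read};
    SequencerRequest b{RubyRequestType_Locked_RMW_Write};
    // Second insert to the same line fails: prints "1 0".
    std::cout << insertRequest(writes, reads, 0x100, &a) << " "
              << insertRequest(writes, reads, 0x100, &b) << std::endl;
    return 0;
}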
@@ -291,7 +293,9 @@ Sequencer::removeRequest(SequencerRequest* srequest)
        (ruby_request.type == RubyRequestType_RMW_Read) ||
        (ruby_request.type == RubyRequestType_RMW_Write) ||
        (ruby_request.type == RubyRequestType_Load_Linked) ||
-       (ruby_request.type == RubyRequestType_Store_Conditional)) {
+       (ruby_request.type == RubyRequestType_Store_Conditional) ||
+       (ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
+       (ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
@@ -379,7 +383,9 @@ Sequencer::writeCallback(const Address& address,
           (request->ruby_request.type == RubyRequestType_RMW_Read) ||
           (request->ruby_request.type == RubyRequestType_RMW_Write) ||
           (request->ruby_request.type == RubyRequestType_Load_Linked) ||
-          (request->ruby_request.type == RubyRequestType_Store_Conditional));
+          (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
+          (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
+          (request->ruby_request.type == RubyRequestType_Locked_RMW_Write));
 
    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
@@ -387,9 +393,9 @@ Sequencer::writeCallback(const Address& address,
    //
    bool success = handleLlsc(address, request);
 
-   if (request->ruby_request.type == RubyRequestType_RMW_Read) {
+   if (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
-   } else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
+   } else if (request->ruby_request.type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }
 
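The block/unblock pair above now keys off the Locked_RMW types: the controller stops servicing the mandatory queue for that address between the read and write halves of a locked instruction, so no other request can steal the line mid-sequence. A sketch of that pairing follows; MiniController is a hypothetical stand-in for the blockOnQueue()/unblock() interface on gem5's AbstractController.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <set>

// Trimmed stand-in enum (assumption).
enum RubyRequestType {
    RubyRequestType_Locked_RMW_Read,
    RubyRequestType_Locked_RMW_Write
};

// Hypothetical stand-in for AbstractController's blockOnQueue()/unblock():
// it simply remembers which line addresses are currently blocked.
class MiniController {
  public:
    void blockOnQueue(uint64_t addr) { m_blocked.insert(addr); }
    void unblock(uint64_t addr) { m_blocked.erase(addr); }
    bool isBlocked(uint64_t addr) const { return m_blocked.count(addr) != 0; }
  private:
    std::set<uint64_t> m_blocked;
};

// Mirror of the writeCallback logic: the read half of a locked RMW blocks
// the line, the write half releases it.
void onWriteCallback(MiniController &ctrl, RubyRequestType type, uint64_t addr)
{
    if (type == RubyRequestType_Locked_RMW_Read)
        ctrl.blockOnQueue(addr);
    else if (type == RubyRequestType_Locked_RMW_Write)
        ctrl.unblock(addr);
}

int main()
{
    MiniController ctrl;
    const uint64_t line = 0x200;
    onWriteCallback(ctrl, RubyRequestType_Locked_RMW_Read, line);
    assert(ctrl.isBlocked(line));   // held across the locked sequence
    onWriteCallback(ctrl, RubyRequestType_Locked_RMW_Write, line);
    assert(!ctrl.isBlocked(line));  // released once the write completes
    std::cout << "locked sequence held and released the line" << std::endl;
    return 0;
}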
@@ -430,7 +436,6 @@ Sequencer::readCallback(const Address& address,
    markRemoved();
 
    assert((request->ruby_request.type == RubyRequestType_LD) ||
-          (request->ruby_request.type == RubyRequestType_RMW_Read) ||
           (request->ruby_request.type == RubyRequestType_IFETCH));
 
    hitCallback(request, mach, data, true,
@@ -501,8 +506,8 @@ Sequencer::hitCallback(SequencerRequest* srequest,
    if ((type == RubyRequestType_LD) ||
        (type == RubyRequestType_IFETCH) ||
-       (type == RubyRequestType_RMW_Read) ||
+       (type == RubyRequestType_Locked_RMW_Read) ||
        (type == RubyRequestType_Load_Linked)) {
 
        memcpy(ruby_request.data,
            data.getData(request_address.getOffset(), ruby_request.len),
            ruby_request.len);
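Read-like requests, now including Locked_RMW_Read, copy the returned cache-line bytes back into the request buffer. A small sketch of that offset-based copy follows, using a plain byte array in place of Ruby's DataBlock and a simplified request struct in place of RubyRequest.

#include <cstdint>
#include <cstring>
#include <iostream>

// Plain 64-byte line standing in for Ruby's DataBlock (assumed line size).
const int kLineBytes = 64;

// Simplified stand-in for the fields of RubyRequest used by the copy.
struct FakeRubyRequest {
    uint8_t *data;   // destination buffer supplied with the request
    int      len;    // number of bytes requested
    int      offset; // offset of the access within its cache line
};

// Copy the requested bytes out of the line, as hitCallback does for loads,
// ifetches, LL and locked RMW reads.
void copyReadData(const uint8_t (&line)[kLineBytes], FakeRubyRequest &req)
{
    std::memcpy(req.data, line + req.offset, req.len);
}

int main()
{
    uint8_t line[kLineBytes];
    for (int i = 0; i < kLineBytes; ++i)
        line[i] = static_cast<uint8_t>(i);

    uint8_t buf[8] = {};
    FakeRubyRequest req{buf, 8, 16};
    copyReadData(line, req);
    std::cout << static_cast<int>(buf[0]) << std::endl; // prints 16
    return 0;
}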
@@ -612,18 +617,30 @@ Sequencer::issueRequest(const RubyRequest& request)
        ctype = CacheRequestType_LD;
        break;
      case RubyRequestType_ST:
+     case RubyRequestType_RMW_Read:
+     case RubyRequestType_RMW_Write:
+     //
+     // x86 locked instructions are translated to store cache coherence
+     // requests because these requests should always be treated as read
+     // exclusive operations and should leverage any migratory sharing
+     // optimization built into the protocol.
+     //
+     case RubyRequestType_Locked_RMW_Read:
+     case RubyRequestType_Locked_RMW_Write:
        ctype = CacheRequestType_ST;
        break;
+     //
+     // Alpha LL/SC instructions need to be handled carefully by the cache
+     // coherence protocol to ensure they follow the proper semantics. In
+     // particular, by identifying the operations as atomic, the protocol
+     // should understand that migratory sharing optimizations should not be
+     // performed (i.e. a load between the LL and SC should not steal away
+     // exclusive permission).
+     //
      case RubyRequestType_Load_Linked:
      case RubyRequestType_Store_Conditional:
        ctype = CacheRequestType_ATOMIC;
        break;
-     case RubyRequestType_RMW_Read:
-       ctype = CacheRequestType_ATOMIC;
-       break;
-     case RubyRequestType_RMW_Write:
-       ctype = CacheRequestType_ATOMIC;
-       break;
      default:
        assert(0);
    }
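A compact sketch of the new issueRequest mapping: locked RMWs travel as ordinary read-exclusive (store) coherence requests, while LL/SC keep the ATOMIC type so the protocol suppresses migratory-sharing optimizations for them. The enums below are trimmed stand-ins for gem5's RubyRequestType and CacheRequestType, not the real definitions.

#include <cassert>
#include <iostream>

// Trimmed stand-ins for gem5's RubyRequestType / CacheRequestType enums
// (assumptions, for illustration only).
enum RubyRequestType {
    RubyRequestType_LD, RubyRequestType_IFETCH, RubyRequestType_ST,
    RubyRequestType_RMW_Read, RubyRequestType_RMW_Write,
    RubyRequestType_Locked_RMW_Read, RubyRequestType_Locked_RMW_Write,
    RubyRequestType_Load_Linked, RubyRequestType_Store_Conditional
};

enum CacheRequestType {
    CacheRequestType_LD, CacheRequestType_IFETCH,
    CacheRequestType_ST, CacheRequestType_ATOMIC
};

// Same grouping as the patched switch: locked RMWs become read-exclusive
// stores, LL/SC stay atomic so migratory optimizations are suppressed.
CacheRequestType toCacheRequestType(RubyRequestType type)
{
    switch (type) {
      case RubyRequestType_LD:
        return CacheRequestType_LD;
      case RubyRequestType_IFETCH:
        return CacheRequestType_IFETCH;
      case RubyRequestType_ST:
      case RubyRequestType_RMW_Read:
      case RubyRequestType_RMW_Write:
      case RubyRequestType_Locked_RMW_Read:
      case RubyRequestType_Locked_RMW_Write:
        return CacheRequestType_ST;
      case RubyRequestType_Load_Linked:
      case RubyRequestType_Store_Conditional:
        return CacheRequestType_ATOMIC;
      default:
        assert(0);
        return CacheRequestType_LD;
    }
}

int main()
{
    // Prints "1 1": locked RMWs map to ST, store-conditional stays ATOMIC.
    std::cout
        << (toCacheRequestType(RubyRequestType_Locked_RMW_Write)
                == CacheRequestType_ST) << " "
        << (toCacheRequestType(RubyRequestType_Store_Conditional)
                == CacheRequestType_ATOMIC) << std::endl;
    return 0;
}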