riscv: [Patch 7/5] Corrected LRSC semantics

RISC-V makes use of load-reserved and store-conditional instructions to
enable creation of lock-free concurrent data manipulation as well as
ACQUIRE and RELEASE semantics for memory ordering of LR, SC, and AMO
instructions (the latter of which do not follow LR/SC semantics). This
patch is a correction to patch 4, which added these instructions to the
implementation of RISC-V. It modifies locked_mem.hh and the
implementations of lr.w, sc.w, lr.d, and sc.d to apply the proper gem5
flags and return the proper values.

An important difference between gem5's LLSC semantics and RISC-V's LR/SC
ones, beyond the name, is that gem5 uses 0 to indicate failure and 1 to
indicate success, while RISC-V is the opposite. Strictly speaking, RISC-V
uses 0 to indicate success and a nonzero value to indicate failure, where
the value identifies the error; however, currently only 1 is reserved as a
failure code by the ISA reference.

This is the seventh patch in the series which originally consisted of five
patches that added the RISC-V ISA to gem5. The original five patches added
all of the instructions and added support for more detailed CPU models and
the sixth patch corrected the implementations of Linux constants and
structs. There will be an eighth patch that adds some regression tests
for the instructions.

[Removed some commented-out code from locked_mem.hh.]
Signed-off-by: Alec Roelke

Signed-off-by: Jason Lowe-Power <jason@lowepower.com>
This commit is contained in:
Alec Roelke 2016-11-30 17:10:28 -05:00
parent 84020a8aed
commit ee0c261e10
3 changed files with 57 additions and 63 deletions

View file

@ -218,12 +218,12 @@ decode OPCODE default Unknown::unknown() {
0x3: decode AMOFUNCT { 0x3: decode AMOFUNCT {
0x2: LoadReserved::lr_d({{ 0x2: LoadReserved::lr_d({{
Rd_sd = Mem_sd; Rd_sd = Mem_sd;
}}, aq=AQ, rl=RL); }}, mem_flags=LLSC, aq=AQ, rl=RL);
0x3: StoreCond::sc_d({{ 0x3: StoreCond::sc_d({{
Mem = Rs2; Mem = Rs2;
}}, {{ }}, {{
Rd = result; Rd = result;
}}, aq=AQ, rl=RL); }}, mem_flags=LLSC, inst_flags=IsStoreConditional, aq=AQ, rl=RL);
format AtomicMemOp { format AtomicMemOp {
0x0: amoadd_d({{Rt_sd = Mem_sd;}}, {{ 0x0: amoadd_d({{Rt_sd = Mem_sd;}}, {{
Mem_sd = Rs2_sd + Rt_sd; Mem_sd = Rs2_sd + Rt_sd;

View file

@ -363,6 +363,9 @@ def template StoreCondExecute {{
if (fault == NoFault) { if (fault == NoFault) {
fault = writeMemAtomic(xc, traceData, Mem, EA, memAccessFlags, fault = writeMemAtomic(xc, traceData, Mem, EA, memAccessFlags,
&result); &result);
// RISC-V has the opposite convention gem5 has for success flags,
// so we invert the result here.
result = !result;
} }
if (fault == NoFault) { if (fault == NoFault) {
@ -385,7 +388,9 @@ def template StoreCondCompleteAcc {{
%(op_dest_decl)s; %(op_dest_decl)s;
uint64_t result = pkt->req->getExtraData(); // RISC-V has the opposite convention gem5 has for success flags,
// so we invert the result here.
uint64_t result = !pkt->req->getExtraData();
if (fault == NoFault) { if (fault == NoFault) {
%(postacc_code)s; %(postacc_code)s;

View file

@ -48,6 +48,8 @@
#ifndef __ARCH_RISCV_LOCKED_MEM_HH__ #ifndef __ARCH_RISCV_LOCKED_MEM_HH__
#define __ARCH_RISCV_LOCKED_MEM_HH__ #define __ARCH_RISCV_LOCKED_MEM_HH__
#include <stack>
#include "arch/registers.hh" #include "arch/registers.hh"
#include "base/misc.hh" #include "base/misc.hh"
#include "base/trace.hh" #include "base/trace.hh"
@ -60,80 +62,67 @@
*/ */
namespace RiscvISA namespace RiscvISA
{ {
static bool lock_flag = false;
static Addr lock_addr = 0;
template <class XC> const int WARN_FAILURE = 10000;
inline void handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
// RISC-V allows multiple locks per hart, but each SC has to unlock the most
// recent one, so we use a stack here.
static std::stack<Addr> locked_addrs;
template <class XC> inline void
handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
{ {
if (!lock_flag) if (locked_addrs.empty())
return; return;
Addr snoop_addr = pkt->getAddr() & cacheBlockMask;
DPRINTF(LLSC, "Locked snoop on address %x.\n", DPRINTF(LLSC, "Locked snoop on address %x.\n", snoop_addr);
pkt->getAddr()&cacheBlockMask); if ((locked_addrs.top() & cacheBlockMask) == snoop_addr)
locked_addrs.pop();
Addr snoop_addr = pkt->getAddr()&cacheBlockMask;
if ((lock_addr&cacheBlockMask) == snoop_addr)
lock_flag = false;
} }
template <class XC> template <class XC> inline void
inline void handleLockedRead(XC *xc, Request *req) handleLockedRead(XC *xc, Request *req)
{ {
lock_addr = req->getPaddr()&~0xF; locked_addrs.push(req->getPaddr() & ~0xF);
lock_flag = true; DPRINTF(LLSC, "[cid:%d]: Reserved address %x.\n",
DPRINTF(LLSC, "[cid:%i]: " req->contextId(), req->getPaddr() & ~0xF);
"Load-Link Flag Set & Load-Link Address set to %x.\n",
req->contextId(), req->getPaddr()&~0xF);
} }
template <class XC> template <class XC> inline void
inline void handleLockedSnoopHit(XC *xc) handleLockedSnoopHit(XC *xc)
{} {}
template <class XC> template <class XC> inline bool
inline bool handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask) handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
{ {
if (req->isUncacheable()) { // Normally RISC-V uses zero to indicate success and nonzero to indicate
// Funky Turbolaser mailbox access...don't update // failure (right now only 1 is reserved), but in gem5 zero indicates
// result register (see stq_c in decoder.isa) // failure and one indicates success, so here we conform to that (it should
req->setExtraData(2); // be switched in the instruction's implementation)
} else {
// standard store conditional
if (!lock_flag || (req->getPaddr()&~0xF) != lock_addr) {
// Lock flag not set or addr mismatch in CPU;
// don't even bother sending to memory system
req->setExtraData(0);
lock_flag = false;
// the rest of this code is not architectural; DPRINTF(LLSC, "[cid:%d]: locked_addrs empty? %s.\n", req->contextId(),
// it's just a debugging aid to help detect locked_addrs.empty() ? "yes" : "no");
// livelock by warning on long sequences of failed if (!locked_addrs.empty()) {
// store conditionals DPRINTF(LLSC, "[cid:%d]: addr = %x.\n", req->contextId(),
int stCondFailures = xc->readStCondFailures(); req->getPaddr() & ~0xF);
stCondFailures++; DPRINTF(LLSC, "[cid:%d]: last locked addr = %x.\n", req->contextId(),
xc->setStCondFailures(stCondFailures); locked_addrs.top());
if (stCondFailures % 100000 == 0) { }
warn("%i:"" context %d:" if (locked_addrs.empty()
" %d consecutive store conditional failures\n", || locked_addrs.top() != ((req->getPaddr() & ~0xF))) {
curTick(), xc->contextId(), stCondFailures); req->setExtraData(0);
} int stCondFailures = xc->readStCondFailures();
xc->setStCondFailures(++stCondFailures);
if (!lock_flag){ if (stCondFailures % WARN_FAILURE == 0) {
DPRINTF(LLSC, "[cid:%i]:" warn("%i: context %d: %d consecutive SC failures.\n",
" Lock Flag Set, Store Conditional Failed.\n", curTick(), xc->contextId(), stCondFailures);
req->contextId()); }
} else if ((req->getPaddr() & ~0xf) != lock_addr) { return false;
DPRINTF(LLSC, "[cid:%i]: Load-Link Address Mismatch, " }
"Store Conditional Failed.\n", req->contextId()); if (req->isUncacheable()) {
} req->setExtraData(2);
// store conditional failed already, so don't issue it to mem
return false;
}
} }
return true; return true;
} }