mem: rename Locked/LOCKED to LockedRMW/LOCKED_RMW

Makes x86-style locked operations even more distinct from
LLSC operations. Using "locked" by itself should now be
obviously ambiguous.
commit 6677b9122a (parent c55749d998)
Author: Steve Reinhardt
Date:   2015-03-23 16:14:20 -07:00
4 changed files with 7 additions and 7 deletions
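For context, here is a minimal C++ sketch (not part of the commit) of how a memory-system component might tell the two kinds of atomic requests apart after this rename. The accessor names isLockedRMW() and isLLSC() come from the Request class changes in the diff below; the helper function and its use are purely illustrative.

    #include "mem/request.hh"   // gem5 Request class, as modified by this commit

    // Hypothetical helper: report which atomicity style a request uses.
    // isLockedRMW() covers x86 locked load/modify/store sequences (formerly
    // isLocked()); isLLSC() covers load-locked/store-conditional pairs.
    const char *
    atomicityKind(const Request &req)
    {
        if (req.isLockedRMW())
            return "locked RMW";    // x86-style locked operation
        if (req.isLLSC())
            return "LL/SC";         // load-linked / store-conditional
        return "plain access";
    }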

@@ -409,7 +409,7 @@ let {{
                       '(StoreCheck << FlagShift)')
     defineMicroLoadOp('Ldstl', 'Data = merge(Data, Mem, dataSize);',
                       'Data = Mem & mask(dataSize * 8);',
-                      '(StoreCheck << FlagShift) | Request::LOCKED')
+                      '(StoreCheck << FlagShift) | Request::LOCKED_RMW')
     defineMicroLoadOp('Ldfp', code='FpData_uqw = Mem', big = False)
@@ -461,7 +461,7 @@ let {{
     defineMicroStoreOp('St', 'Mem = pick(Data, 2, dataSize);')
     defineMicroStoreOp('Stul', 'Mem = pick(Data, 2, dataSize);',
-                       mem_flags="Request::LOCKED")
+                       mem_flags="Request::LOCKED_RMW")
     defineMicroStoreOp('Stfp', code='Mem = FpData_uqw;')

@@ -373,7 +373,7 @@ AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
         //If we don't need to access a second cache line, stop now.
         if (secondAddr <= addr)
         {
-            if (req->isLocked() && fault == NoFault) {
+            if (req->isLockedRMW() && fault == NoFault) {
                 assert(!locked);
                 locked = true;
             }
@@ -480,7 +480,7 @@ AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
         //stop now.
         if (fault != NoFault || secondAddr <= addr)
         {
-            if (req->isLocked() && fault == NoFault) {
+            if (req->isLockedRMW() && fault == NoFault) {
                 assert(locked);
                 locked = false;
             }

@@ -127,7 +127,7 @@ class Request
      * made up of a locked load, some operation on the data, and then a locked
      * store.
      */
-    static const FlagsType LOCKED = 0x00100000;
+    static const FlagsType LOCKED_RMW = 0x00100000;
     /** The request is a Load locked/store conditional. */
     static const FlagsType LLSC = 0x00200000;
     /** This request is for a memory swap. */
@@ -626,7 +626,7 @@ class Request
     bool isPrefetch() const { return _flags.isSet(PREFETCH); }
     bool isLLSC() const { return _flags.isSet(LLSC); }
     bool isPriv() const { return _flags.isSet(PRIVILEGED); }
-    bool isLocked() const { return _flags.isSet(LOCKED); }
+    bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
     bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
     bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
     bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
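As a usage sketch of the flag itself (again illustrative, not from the commit): a CPU model building a request for an x86 locked micro-op would set Request::LOCKED_RMW rather than the old LOCKED name, and downstream code would query it with the renamed accessor. Request::setFlags() is an existing gem5 method; the surrounding helper function is hypothetical.

    #include <cassert>
    #include "mem/request.hh"

    // Hypothetical: tag a request that belongs to a locked load/op/store
    // sequence, mirroring the Ldstl/Stul micro-ops in the x86 ISA description.
    void
    markAsLockedRMW(Request &req)
    {
        req.setFlags(Request::LOCKED_RMW);   // formerly Request::LOCKED
        assert(req.isLockedRMW());           // renamed accessor, see above
        assert(!req.isLLSC());               // assumes LLSC was not set; the
                                             // two flags describe different idioms
    }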

@@ -604,7 +604,7 @@ Sequencer::makeRequest(PacketPtr pkt)
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
-    } else if (pkt->req->isLocked()) {
+    } else if (pkt->req->isLockedRMW()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read