arm: Rewrite ERET to behave according to the ARMv8 ARM

The ERET instruction doesn't set PSTATE correctly in some cases
(particularly when returning to aarch32 code). Among other things,
this breaks EL0 thumb code when using a 64-bit kernel. This changeset
updates the ERET implementation to match the ARM ARM.

Change-Id: I408e7c69a23cce437859313dfe84e68744b07c98
Signed-off-by: Andreas Sandberg <andreas.sandberg@arm.com>
Reviewed-by: Nathanael Premillieu <nathanael.premillieu@arm.com>
This commit is contained in:
Andreas Sandberg 2016-06-02 13:41:26 +01:00
parent f48ad5b29d
commit 660fbd543f
6 changed files with 143 additions and 84 deletions

View file

@ -727,4 +727,115 @@ ArmStaticInst::checkAdvSIMDOrFPEnabled32(ThreadContext *tc,
} }
static uint8_t
getRestoredITBits(ThreadContext *tc, CPSR spsr)
{
    // See: shared/functions/system/RestoredITBits in the ARM ARM
    const ExceptionLevel el = opModeToEL((OperatingMode) (uint8_t)spsr.mode);
    const uint8_t it = itState(spsr);

    // No IT state survives a return to A32 state or a return into an
    // illegal execution state.
    if (!spsr.t || spsr.il)
        return 0;

    // Reserved IT encodings (non-zero base condition combined with a
    // zero mask) are squashed to zero.
    if (bits(it, 7, 4) != 0 && bits(it, 3, 0) == 0)
        return 0;

    // Pick the IT-disable control for the target translation regime.
    const SCTLR sctlr = el == EL2 ?
        tc->readMiscReg(MISCREG_HSCTLR) :
        tc->readMiscReg(MISCREG_SCTLR);

    // With the ITD bit set, IT state describing a multi-instruction
    // block may not be restored.
    if (sctlr.itd && bits(it, 2, 0) != 0)
        return 0;

    return it;
}
/**
 * Check whether an exception return described by the saved program
 * status register (SPSR) would be illegal.
 *
 * See: shared/functions/system/IllegalExceptionReturn in the ARM ARM
 * pseudocode library.
 *
 * @param tc Thread context performing the return.
 * @param cpsr Current PSTATE.
 * @param spsr Saved program status register describing the return.
 * @return true if the return is illegal, false otherwise.
 */
static bool
illegalExceptionReturn(ThreadContext *tc, CPSR cpsr, CPSR spsr)
{
    const OperatingMode mode = (OperatingMode) (uint8_t)spsr.mode;
    if (badMode(mode))
        return true;

    const OperatingMode cur_mode = (OperatingMode) (uint8_t)cpsr.mode;
    const ExceptionLevel target_el = opModeToEL(mode);

    // Returning to a higher exception level than the current one is
    // always illegal.
    if (target_el > opModeToEL(cur_mode))
        return true;

    // Returning to an unimplemented exception level is illegal.
    if (target_el == EL3 && !ArmSystem::haveSecurity(tc))
        return true;

    if (target_el == EL2 && !ArmSystem::haveVirtualization(tc))
        return true;

    if (!spsr.width) {
        // aarch64
        if (!ArmSystem::highestELIs64(tc))
            return true;
        // SPSR<1> is RES0 when returning to AArch64 state.
        if (spsr & 0x2)
            return true;
        // EL0 must use SP_EL0 (EL0t); a set SP bit is illegal.
        if (target_el == EL0 && spsr.sp)
            return true;
        // EL2 only exists in Non-secure state; returning to EL2 from
        // Secure state (SCR_EL3.NS == 0) is illegal. This previously
        // returned false, which made the check dead code.
        if (target_el == EL2 && !((SCR)tc->readMiscReg(MISCREG_SCR_EL3)).ns)
            return true;
    } else {
        // aarch32: any mode not valid in AArch32 makes the return
        // illegal.
        return badMode32(mode);
    }

    return false;
}
CPSR
ArmStaticInst::getPSTATEFromPSR(ThreadContext *tc, CPSR cpsr, CPSR spsr) const
{
    // See: shared/functions/system/SetPSTATEFromPSR in the ARM ARM
    // pseudocode library. Builds the PSTATE (CPSR) value to install on
    // an exception return described by spsr.
    CPSR new_cpsr = 0;

    // gem5 doesn't implement single-stepping, so force the SS bit to
    // 0.
    new_cpsr.ss = 0;

    if (illegalExceptionReturn(tc, cpsr, spsr)) {
        // Illegal return: set the illegal-execution-state bit. The
        // mode/EL fields are left at zero here; NOTE(review): confirm
        // callers handle the resulting mode on the illegal path.
        new_cpsr.il = 1;
    } else {
        new_cpsr.il = spsr.il;
        if (spsr.width && badMode32((OperatingMode)(uint8_t)spsr.mode)) {
            // Returning to AArch32 with an invalid 32-bit mode also
            // enters the illegal execution state.
            new_cpsr.il = 1;
        } else if (spsr.width) {
            // aarch32: restore the full mode field. This presumably
            // also sets the width bit, since mode overlaps the low
            // CPSR bits -- confirm against the CPSR BitUnion layout.
            new_cpsr.mode = spsr.mode;
        } else {
            // aarch64: restore the target EL and stack pointer select.
            new_cpsr.el = spsr.el;
            new_cpsr.sp = spsr.sp;
        }
    }

    // Condition flags are restored regardless of the target state.
    new_cpsr.nz = spsr.nz;
    new_cpsr.c = spsr.c;
    new_cpsr.v = spsr.v;

    if (new_cpsr.width) {
        // aarch32: restore the A32/T32-only fields and the (possibly
        // squashed) IT state.
        const ITSTATE it = getRestoredITBits(tc, spsr);
        new_cpsr.q = spsr.q;
        new_cpsr.ge = spsr.ge;
        new_cpsr.e = spsr.e;
        new_cpsr.aif = spsr.aif;
        new_cpsr.t = spsr.t;
        new_cpsr.it2 = it.top6;
        new_cpsr.it1 = it.bottom2;
    } else {
        // aarch64: restore the full DAIF mask bits.
        new_cpsr.daif = spsr.daif;
    }

    return new_cpsr;
}
} }

View file

@ -404,6 +404,15 @@ class ArmStaticInst : public StaticInst
NSACR nsacr, FPEXC fpexc, NSACR nsacr, FPEXC fpexc,
bool fpexc_check, bool advsimd) const; bool fpexc_check, bool advsimd) const;
/**
* Get the new PSTATE from a SPSR register in preparation for an
* exception return.
*
* See shared/functions/system/SetPSTATEFromPSR in the ARM ARM
* pseudocode library.
*/
CPSR getPSTATEFromPSR(ThreadContext *tc, CPSR cpsr, CPSR spsr) const;
public: public:
virtual void virtual void
annotateFault(ArmFault *fault) {} annotateFault(ArmFault *fault) {}

View file

@ -1,6 +1,6 @@
// -*- mode:c++ -*- // -*- mode:c++ -*-
// Copyright (c) 2011-2013 ARM Limited // Copyright (c) 2011-2013, 2016 ARM Limited
// All rights reserved // All rights reserved
// //
// The license below extends only to copyright in the software and shall // The license below extends only to copyright in the software and shall
@ -127,92 +127,19 @@ let {{
else else
newPc = newPc & ~mask(2); newPc = newPc & ~mask(2);
} }
spsr.q = 0;
spsr.it1 = 0;
spsr.j = 0;
spsr.res0_23_22 = 0;
spsr.ge = 0;
spsr.it2 = 0;
spsr.t = 0;
OperatingMode mode = (OperatingMode) (uint8_t) spsr.mode; CPSR new_cpsr = getPSTATEFromPSR(xc->tcBase(), cpsr, spsr);
bool illegal = false;
ExceptionLevel target_el;
if (badMode(mode)) {
illegal = true;
} else {
target_el = opModeToEL(mode);
if (((target_el == EL2) &&
!ArmSystem::haveVirtualization(xc->tcBase())) ||
(target_el > curr_el) ||
(spsr.width == 1)) {
illegal = true;
} else {
bool known = true;
bool from32 = (spsr.width == 1);
bool to32 = false;
if (false) { // TODO: !haveAArch32EL
to32 = false;
} else if (!ArmSystem::highestELIs64(xc->tcBase())) {
to32 = true;
} else {
bool scr_rw, hcr_rw;
if (ArmSystem::haveSecurity(xc->tcBase())) {
SCR scr = xc->tcBase()->readMiscReg(MISCREG_SCR_EL3);
scr_rw = scr.rw;
} else {
scr_rw = true;
}
if (ArmSystem::haveVirtualization(xc->tcBase())) { Cpsr = new_cpsr;
HCR hcr = xc->tcBase()->readMiscReg(MISCREG_HCR_EL2); CondCodesNZ = new_cpsr.nz;
hcr_rw = hcr.rw; CondCodesC = new_cpsr.c;
} else { CondCodesV = new_cpsr.v;
hcr_rw = scr_rw;
}
switch (target_el) { NextAArch64 = !new_cpsr.width;
case EL3: NextItState = itState(new_cpsr);
to32 = false;
break;
case EL2:
to32 = !scr_rw;
break;
case EL1:
to32 = !scr_rw || !hcr_rw;
break;
case EL0:
if (curr_el == EL0) {
to32 = cpsr.width;
} else if (!scr_rw || !hcr_rw) {
// EL0 using AArch32 if EL1 using AArch32
to32 = true;
} else {
known = false;
to32 = false;
}
}
}
if (known)
illegal = (from32 != to32);
}
}
if (illegal) {
uint8_t old_mode = cpsr.mode;
spsr.mode = old_mode; // Preserve old mode when invalid
spsr.il = 1;
} else {
if (cpsr.width != spsr.width)
panic("AArch32/AArch64 interprocessing not supported yet");
}
Cpsr = spsr;
CondCodesNZ = spsr.nz;
CondCodesC = spsr.c;
CondCodesV = spsr.v;
NPC = purifyTaggedAddr(newPc, xc->tcBase(), NPC = purifyTaggedAddr(newPc, xc->tcBase(),
opModeToEL((OperatingMode) (uint8_t) spsr.mode)); opModeToEL((OperatingMode) (uint8_t) new_cpsr.mode));
LLSCLock = 0; // Clear exclusive monitor LLSCLock = 0; // Clear exclusive monitor
SevMailbox = 1; //Set Event Register SevMailbox = 1; //Set Event Register
''' '''

View file

@ -440,6 +440,7 @@ def operands {{
'NextJazelle': pcStateReg('nextJazelle', srtMode), 'NextJazelle': pcStateReg('nextJazelle', srtMode),
'NextItState': pcStateReg('nextItstate', srtMode), 'NextItState': pcStateReg('nextItstate', srtMode),
'Itstate': pcStateReg('itstate', srtMode), 'Itstate': pcStateReg('itstate', srtMode),
'NextAArch64': pcStateReg('nextAArch64', srtMode),
#Register operands depending on a field in the instruction encoding. These #Register operands depending on a field in the instruction encoding. These
#should be avoided since they may not be portable across different #should be avoided since they may not be portable across different

View file

@ -1376,6 +1376,7 @@ namespace ArmISA
Bitfield<8> a; Bitfield<8> a;
Bitfield<7> i; Bitfield<7> i;
Bitfield<6> f; Bitfield<6> f;
Bitfield<8, 6> aif;
Bitfield<9, 6> daif; // AArch64 Bitfield<9, 6> daif; // AArch64
Bitfield<5> t; Bitfield<5> t;
Bitfield<4> width; // AArch64 Bitfield<4> width; // AArch64

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2010, 2012-2013 ARM Limited * Copyright (c) 2010, 2012-2013, 2016 ARM Limited
* All rights reserved * All rights reserved
* *
* The license below extends only to copyright in the software and shall * The license below extends only to copyright in the software and shall
@ -161,6 +161,16 @@ bool ELIs64(ThreadContext *tc, ExceptionLevel el);
bool isBigEndian64(ThreadContext *tc); bool isBigEndian64(ThreadContext *tc);
static inline uint8_t
itState(CPSR psr)
{
    // Reassemble the CPSR's split IT field into a single IT[7:0] byte.
    ITSTATE itbits = 0;
    itbits.bottom2 = psr.it1;
    itbits.top6 = psr.it2;

    return (uint8_t)itbits;
}
/** /**
* Removes the tag from tagged addresses if that mode is enabled. * Removes the tag from tagged addresses if that mode is enabled.
* @param addr The address to be purified. * @param addr The address to be purified.