Move store conditional result checking from the AtomicSimpleCPU write()
function into the Alpha ISA description. write() now just generically
returns a result value if the res pointer is non-null (which means
we can only provide a res pointer if we expect a valid result
value).

--HG--
extra : convert_revision : fb1c315515787f5fbbf7d1af7e428bdbfe8148b8
Steve Reinhardt 2007-02-12 09:26:47 -08:00
parent 6b37bb6710
commit f78bc80bd7
5 changed files with 98 additions and 13 deletions
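In outline, the new division of labor is: the CPU's write() only reports the raw store-conditional result through the caller-supplied res pointer, and the Alpha ISA's completion code decides what that result means. Below is a minimal sketch of that contract, using simplified hypothetical types rather than the actual m5 classes:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the m5 request and thread objects.
struct Request {
    bool     locked = false;    // true for LDx_L / STx_C accesses
    uint64_t sc_result = 0;     // filled in by the memory system
    bool isLocked() const { return locked; }
    uint64_t getScResult() const { return sc_result; }
};

struct Thread {
    unsigned st_cond_failures = 0;
    void setStCondFailures(unsigned n) { st_cond_failures = n; }
};

// CPU side after this change: no interpretation of the result, just copy
// it out if (and only if) the caller asked for it by passing res.
int cpu_write(Request *req, uint64_t *res)
{
    // ... perform the actual memory access here ...
    if (req->isLocked() && res)
        *res = req->getScResult();
    return 0;  // NoFault
}

// ISA side (Alpha stq_c completion code): interpret the result and clear
// the non-architectural failure counter on success.
void stq_c_complete(Thread *xc, uint64_t &Ra, uint64_t write_result)
{
    uint64_t tmp = write_result;
    Ra = (tmp == 0 || tmp == 1) ? tmp : Ra;  // 0 = failed, 1 = succeeded
    if (tmp == 1)
        xc->setStCondFailures(0);
}

int main()
{
    Request req{true, 1};       // pretend the SC succeeded
    Thread thread;
    uint64_t write_result = 0, Ra = 0;
    cpu_write(&req, &write_result);            // SC store: pass a result pointer
    stq_c_complete(&thread, Ra, write_result); // Ra becomes 1
    std::printf("Ra = %llu\n", (unsigned long long)Ra);
    return 0;
}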


@@ -84,6 +84,9 @@ decode OPCODE default Unknown::unknown() {
            uint64_t tmp = write_result;
            // see stq_c
            Ra = (tmp == 0 || tmp == 1) ? tmp : Ra;
            if (tmp == 1) {
                xc->setStCondFailures(0);
            }
        }}, mem_flags = LOCKED, inst_flags = IsStoreConditional);
        0x2f: stq_c({{ Mem.uq = Ra; }},
                    {{
@@ -96,6 +99,12 @@ decode OPCODE default Unknown::unknown() {
            // mailbox access, and we don't update the
            // result register at all.
            Ra = (tmp == 0 || tmp == 1) ? tmp : Ra;
            if (tmp == 1) {
                // clear failure counter... this is
                // non-architectural and for debugging
                // only.
                xc->setStCondFailures(0);
            }
        }}, mem_flags = LOCKED, inst_flags = IsStoreConditional);
}
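The result convention the comments above describe can be stated as a small worked example: a write result of 0 or 1 (store-conditional failure or success) is written to Ra, while any other value indicates a special mailbox access and leaves Ra untouched. A hypothetical helper, purely illustrative:

#include <cstdint>
#include <cassert>

// Illustrative only: the Ra update rule used by stl_c/stq_c above.
uint64_t update_result_reg(uint64_t Ra, uint64_t write_result)
{
    uint64_t tmp = write_result;
    return (tmp == 0 || tmp == 1) ? tmp : Ra;
}

int main()
{
    assert(update_result_reg(0xabcd, 0) == 0);          // SC failed
    assert(update_result_reg(0xabcd, 1) == 1);          // SC succeeded
    assert(update_result_reg(0xabcd, 0x123) == 0xabcd); // mailbox: Ra unchanged
    return 0;
}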


@@ -344,6 +344,41 @@ def template LoadCompleteAcc {{
def template StoreMemAccExecute {{
    Fault
    %(class_name)s::MemAcc::execute(%(CPU_exec_context)s *xc,
                                    Trace::InstRecord *traceData) const
    {
        Addr EA;
        Fault fault = NoFault;
        %(fp_enable_check)s;
        %(op_decl)s;
        %(op_rd)s;
        EA = xc->getEA();
        if (fault == NoFault) {
            %(memacc_code)s;
        }
        if (fault == NoFault) {
            fault = xc->write((uint%(mem_acc_size)d_t&)Mem, EA,
                              memAccessFlags, NULL);
            if (traceData) { traceData->setData(Mem); }
        }
        if (fault == NoFault) {
            %(postacc_code)s;
        }
        if (fault == NoFault) {
            %(op_wb)s;
        }
        return fault;
    }
}};
def template StoreCondMemAccExecute {{
    Fault
    %(class_name)s::MemAcc::execute(%(CPU_exec_context)s *xc,
                                    Trace::InstRecord *traceData) const
@@ -381,6 +416,40 @@ def template StoreMemAccExecute {{
def template StoreExecute {{
    Fault %(class_name)s::execute(%(CPU_exec_context)s *xc,
                                  Trace::InstRecord *traceData) const
    {
        Addr EA;
        Fault fault = NoFault;
        %(fp_enable_check)s;
        %(op_decl)s;
        %(op_rd)s;
        %(ea_code)s;
        if (fault == NoFault) {
            %(memacc_code)s;
        }
        if (fault == NoFault) {
            fault = xc->write((uint%(mem_acc_size)d_t&)Mem, EA,
                              memAccessFlags, NULL);
            if (traceData) { traceData->setData(Mem); }
        }
        if (fault == NoFault) {
            %(postacc_code)s;
        }
        if (fault == NoFault) {
            %(op_wb)s;
        }
        return fault;
    }
}};
def template StoreCondExecute {{
    Fault %(class_name)s::execute(%(CPU_exec_context)s *xc,
                                  Trace::InstRecord *traceData) const
    {
@@ -614,10 +683,8 @@ def LoadStoreBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
    # select templates
    # define aliases... most StoreCond templates are the same as the
    # corresponding Store templates (only CompleteAcc is different).
    StoreCondMemAccExecute = StoreMemAccExecute
    StoreCondExecute = StoreExecute
    # The InitiateAcc template is the same for StoreCond templates as the
    # corresponding Store template..
    StoreCondInitiateAcc = StoreInitiateAcc
    memAccExecTemplate = eval(exec_template_base + 'MemAccExecute')
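Because write() now fills in *res only when it is non-null, the plain Store templates above pass NULL (an ordinary store never expects a result value), while the StoreCond templates keep passing a pointer to write_result. That is why StoreCondMemAccExecute and StoreCondExecute can no longer simply alias the Store templates. A condensed sketch of the two call shapes, with a hypothetical stand-in for the execution context:

#include <cstdint>

// Hypothetical stand-in for the execution context's write() after this change.
struct ExecContext {
    uint64_t sc_result = 1;
    int write(uint64_t /*data*/, uint64_t /*ea*/, unsigned /*flags*/,
              uint64_t *res)
    {
        if (res)
            *res = sc_result;  // only meaningful for locked (STx_C) accesses
        return 0;              // NoFault
    }
};

// Ordinary store: no result expected, so pass a null pointer
// (NULL in the templates above).
int plain_store(ExecContext *xc, uint64_t mem, uint64_t ea, unsigned flags)
{
    return xc->write(mem, ea, flags, nullptr);
}

// Store conditional: hand write() a place to put the SC result,
// which the ISA's completion code then interprets.
int cond_store(ExecContext *xc, uint64_t mem, uint64_t ea, unsigned flags,
               uint64_t &write_result)
{
    return xc->write(mem, ea, flags, &write_result);
}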


@@ -35,6 +35,14 @@
* @file
*
* ISA-specific helper functions for locked memory accesses.
*
* Note that these functions are not embedded in the ISA description
* because they operate on the *physical* address rather than the
* virtual address. In the current M5 design, the physical address is
* not accessible from the ISA description, only from the CPU model.
* Thus the CPU is responsible for calling back to the ISA (here)
* after the address translation has been performed to allow the ISA
* to do these manipulations based on the physical address.
*/
#include "arch/alpha/miscregfile.hh"


@@ -401,15 +401,8 @@ AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
#endif
    }
    if (req->isLocked()) {
        uint64_t scResult = req->getScResult();
        if (scResult != 0) {
            // clear failure counter
            thread->setStCondFailures(0);
        }
        if (res) {
            *res = req->getScResult();
        }
        if (res) {
            *res = req->getScResult();
        }
    }


@@ -329,6 +329,14 @@ class BaseSimpleCPU : public BaseCPU
        return thread->setMiscRegWithEffect(reg_idx, val);
    }

    unsigned readStCondFailures() {
        return thread->readStCondFailures();
    }

    void setStCondFailures(unsigned sc_failures) {
        thread->setStCondFailures(sc_failures);
    }

#if FULL_SYSTEM
    Fault hwrei() { return thread->hwrei(); }
    void ev5_trap(Fault fault) { fault->invoke(tc); }
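These accessors expose the per-thread store-conditional failure counter that the ISA code above resets on a successful store conditional. One plausible (purely illustrative) use is for a CPU model or debugger to flag a thread that keeps failing its store conditionals:

#include <cstdio>

// Illustrative only: count consecutive store-conditional failures and warn
// when a thread appears stuck. The ISA resets the counter to zero on a
// successful store conditional, so a large value means no recent success.
struct FailureWatcher {
    unsigned st_cond_failures = 0;

    unsigned readStCondFailures() { return st_cond_failures; }
    void setStCondFailures(unsigned n) { st_cond_failures = n; }

    void noteStCondFailure(int thread_id)
    {
        setStCondFailures(readStCondFailures() + 1);
        if (readStCondFailures() % 100000 == 0)
            std::printf("warning: thread %d has failed %u consecutive "
                        "store conditionals\n",
                        thread_id, readStCondFailures());
    }
};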