// -*- mode:c++ -*-

// Copyright (c) 2003-2005 The Regents of The University of Michigan
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met: redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer;
// redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution;
// neither the name of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

output header {{
    /**
     * Base class for emulated call_pal calls (used only in
     * non-full-system mode).
     */
    class EmulatedCallPal : public AlphaStaticInst
    {
      protected:

        /// Constructor.
        EmulatedCallPal(const char *mnem, MachInst _machInst,
                        OpClass __opClass)
            : AlphaStaticInst(mnem, _machInst, __opClass)
        {
        }

        std::string
        generateDisassembly(Addr pc, const SymbolTable *symtab) const;
    };
}};

output decoder {{
    std::string
    EmulatedCallPal::generateDisassembly(Addr pc,
                                         const SymbolTable *symtab) const
    {
#ifdef SS_COMPATIBLE_DISASSEMBLY
        return csprintf("%s %s", "call_pal", mnemonic);
#else
        return csprintf("%-10s %s", "call_pal", mnemonic);
#endif
    }
}};

def format EmulatedCallPal(code, *flags) {{
    iop = InstObjParams(name, Name, 'EmulatedCallPal', CodeBlock(code), flags)
    header_output = BasicDeclare.subst(iop)
    decoder_output = BasicConstructor.subst(iop)
    decode_block = BasicDecode.subst(iop)
    exec_output = BasicExecute.subst(iop)
}};
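
// In each format below, header_output, decoder_output, decode_block, and
// exec_output hand the generated code back to the ISA parser, which collects
// them into the decoder header, the decoder source, the decode switch, and
// the execute() method definitions, respectively.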

output header {{
    /**
     * Base class for full-system-mode call_pal instructions.
     * Probably could turn this into a leaf class and get rid of the
     * parser template.
     */
    class CallPalBase : public AlphaStaticInst
    {
      protected:
        int palFunc;    ///< Function code part of instruction
        int palOffset;  ///< Target PC, offset from IPR_PAL_BASE
        bool palValid;  ///< Is the function code valid?
        bool palPriv;   ///< Is this call privileged?

        /// Constructor.
        CallPalBase(const char *mnem, MachInst _machInst,
                    OpClass __opClass);

        std::string
        generateDisassembly(Addr pc, const SymbolTable *symtab) const;
    };
}};

output decoder {{
    inline
    CallPalBase::CallPalBase(const char *mnem, MachInst _machInst,
                             OpClass __opClass)
        : AlphaStaticInst(mnem, _machInst, __opClass),
          palFunc(PALFUNC)
    {
        // From the 21164 HRM (paraphrased):
        // Bit 7 of the function code (mask 0x80) indicates
        // whether the call is privileged (bit 7 == 0) or
        // unprivileged (bit 7 == 1).  The privileged call table
        // starts at 0x2000, the unprivileged call table starts at
        // 0x3000.  Bits 5-0 (mask 0x3f) are used to calculate the
        // offset.
        const int palPrivMask = 0x80;
        const int palOffsetMask = 0x3f;

        // PAL call is invalid unless all other bits are 0.
        palValid = ((machInst & ~(palPrivMask | palOffsetMask)) == 0);
        palPriv = ((machInst & palPrivMask) == 0);
        int shortPalFunc = (machInst & palOffsetMask);
        // Add 1 to the base to set the PAL-mode bit.
        palOffset = (palPriv ? 0x2001 : 0x3001) + (shortPalFunc << 6);
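        // Worked example (illustrative; function codes are the standard
        // OSF/1 PAL entry points):
        //   call_pal 0x00 (halt):    bit 7 clear -> privileged,
        //     shortPalFunc = 0x00, palOffset = 0x2001 + (0x00 << 6) = 0x2001
        //   call_pal 0x83 (callsys): bit 7 set -> unprivileged,
        //     shortPalFunc = 0x03, palOffset = 0x3001 + (0x03 << 6) = 0x30c1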
    }

    std::string
    CallPalBase::generateDisassembly(Addr pc, const SymbolTable *symtab) const
    {
        return csprintf("%-10s %#x", "call_pal", palFunc);
    }
}};

def format CallPal(code, *flags) {{
    iop = InstObjParams(name, Name, 'CallPalBase', CodeBlock(code), flags)
    header_output = BasicDeclare.subst(iop)
    decoder_output = BasicConstructor.subst(iop)
    decode_block = BasicDecode.subst(iop)
    exec_output = BasicExecute.subst(iop)
}};

////////////////////////////////////////////////////////////////////
//
// hw_ld, hw_st
//

output header {{
    /**
     * Base class for hw_ld and hw_st.
     */
    class HwLoadStore : public Memory
    {
      protected:

        /// Displacement for EA calculation (signed).
        int16_t disp;

        /// Constructor
        HwLoadStore(const char *mnem, MachInst _machInst, OpClass __opClass,
                    StaticInstPtr<AlphaISA> _eaCompPtr = nullStaticInstPtr,
                    StaticInstPtr<AlphaISA> _memAccPtr = nullStaticInstPtr);

        std::string
        generateDisassembly(Addr pc, const SymbolTable *symtab) const;
    };
}};

output decoder {{
    inline
    HwLoadStore::HwLoadStore(const char *mnem, MachInst _machInst,
                             OpClass __opClass,
                             StaticInstPtr<AlphaISA> _eaCompPtr,
                             StaticInstPtr<AlphaISA> _memAccPtr)
        : Memory(mnem, _machInst, __opClass, _eaCompPtr, _memAccPtr),
          disp(HW_LDST_DISP)
    {
        memAccessFlags = 0;
        if (HW_LDST_PHYS) memAccessFlags |= PHYSICAL;
        if (HW_LDST_ALT) memAccessFlags |= ALTMODE;
        if (HW_LDST_VPTE) memAccessFlags |= VPTE;
        if (HW_LDST_LOCK) memAccessFlags |= LOCKED;
    }

    std::string
    HwLoadStore::generateDisassembly(Addr pc, const SymbolTable *symtab) const
    {
#ifdef SS_COMPATIBLE_DISASSEMBLY
        return csprintf("%-10s r%d,%d(r%d)", mnemonic, RA, disp, RB);
#else
        // HW_LDST_LOCK and HW_LDST_COND are the same bit.
        const char *lock_str =
            (HW_LDST_LOCK) ? (flags[IsLoad] ? ",LOCK" : ",COND") : "";
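        // (So the same bit disassembles as ",LOCK" on a load-locked hw_ld
        // and as ",COND" on a store-conditional hw_st.)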

        return csprintf("%-10s r%d,%d(r%d)%s%s%s%s%s",
                        mnemonic, RA, disp, RB,
                        HW_LDST_PHYS ? ",PHYS" : "",
                        HW_LDST_ALT ? ",ALT" : "",
                        HW_LDST_QUAD ? ",QUAD" : "",
                        HW_LDST_VPTE ? ",VPTE" : "",
                        lock_str);
#endif
    }
}};

def format HwLoad(ea_code, memacc_code, class_ext, *flags) {{
    (header_output, decoder_output, decode_block, exec_output) = \
        LoadStoreBase(name, Name + class_ext, ea_code, memacc_code,
                      mem_flags = [], inst_flags = flags,
                      base_class = 'HwLoadStore', exec_template_base = 'Load')
}};

def format HwStore(ea_code, memacc_code, class_ext, *flags) {{
    (header_output, decoder_output, decode_block, exec_output) = \
        LoadStoreBase(name, Name + class_ext, ea_code, memacc_code,
                      mem_flags = [], inst_flags = flags,
                      base_class = 'HwLoadStore', exec_template_base = 'Store')
}};

def format HwStoreCond(ea_code, memacc_code, postacc_code, class_ext,
                       *flags) {{
    (header_output, decoder_output, decode_block, exec_output) = \
        LoadStoreBase(name, Name + class_ext, ea_code, memacc_code,
                      postacc_code, mem_flags = [], inst_flags = flags,
                      base_class = 'HwLoadStore')
}};

output header {{
    /**
     * Base class for hw_mfpr and hw_mtpr.
     */
    class HwMoveIPR : public AlphaStaticInst
    {
      protected:
        /// Index of internal processor register.
        int ipr_index;

        /// Constructor
        HwMoveIPR(const char *mnem, MachInst _machInst, OpClass __opClass)
            : AlphaStaticInst(mnem, _machInst, __opClass),
              ipr_index(HW_IPR_IDX)
        {
        }

        std::string
        generateDisassembly(Addr pc, const SymbolTable *symtab) const;
    };
}};

output decoder {{
    std::string
    HwMoveIPR::generateDisassembly(Addr pc, const SymbolTable *symtab) const
    {
        if (_numSrcRegs > 0) {
            // must be mtpr
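            // (hw_mtpr reads its value from RA, so it has a source
            // register; hw_mfpr only writes RA.)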
            return csprintf("%-10s r%d,IPR(%#x)",
                            mnemonic, RA, ipr_index);
        }
        else {
            // must be mfpr
            return csprintf("%-10s IPR(%#x),r%d",
                            mnemonic, ipr_index, RA);
        }
    }
}};

def format HwMoveIPR(code) {{
    iop = InstObjParams(name, Name, 'HwMoveIPR', CodeBlock(code),
                        ['IprAccessOp'])
    header_output = BasicDeclare.subst(iop)
    decoder_output = BasicConstructor.subst(iop)
    decode_block = BasicDecode.subst(iop)
    exec_output = BasicExecute.subst(iop)
}};