arm: Add support for ARMv8 (AArch64 & AArch32)
Note: AArch64 and AArch32 interworking is not supported. If you use an
AArch64 kernel you are restricted to AArch64 user-mode binaries. This will
be addressed in a later patch.

Note: Virtualization is only supported in AArch32 mode. This will also be
fixed in a later patch.

Contributors:
Giacomo Gabrielli    (TrustZone, LPAE, system-level AArch64, AArch64 NEON, validation)
Thomas Grocutt       (AArch32 Virtualization, AArch64 FP, validation)
Mbou Eyole           (AArch64 NEON, validation)
Ali Saidi            (AArch64 Linux support, code integration, validation)
Edmund Grimley-Evans (AArch64 FP)
William Wang         (AArch64 Linux support)
Rene De Jong         (AArch64 Linux support, performance opt.)
Matt Horsnell        (AArch64 MP, validation)
Matt Evans           (device models, code integration, validation)
Chris Adeniyi-Jones  (AArch64 syscall-emulation)
Prakash Ramrakhyani  (validation)
Dam Sunwoo           (validation)
Chander Sudanthi     (validation)
Stephan Diestelhorst (validation)
Andreas Hansson      (code integration, performance opt.)
Eric Van Hensbergen  (performance opt.)
Gabe Black
parent f3585c841e
commit 612f8f074f
145 changed files with 39812 additions and 2579 deletions
@@ -242,7 +242,8 @@ def makeArmSystem(mem_mode, machine_type, mdesc = None,
        self.realview = VExpress_ELT()
    elif machine_type == "VExpress_EMM":
        self.realview = VExpress_EMM()
        self.load_addr_mask = 0xffffffff
    elif machine_type == "VExpress_EMM64":
        self.realview = VExpress_EMM64()
    else:
        print "Unknown Machine Type"
        sys.exit(1)
@@ -139,7 +139,7 @@ class O3_ARM_v7a_3(DerivO3CPU):
    backComSize = 5
    forwardComSize = 5
    numPhysIntRegs = 128
    numPhysFloatRegs = 128
    numPhysFloatRegs = 192
    numIQEntries = 32
    numROBEntries = 40
@@ -94,6 +94,9 @@ def addCommonOptions(parser):
                      default="512MB",
                      help="Specify the physical memory size (single memory)")

    parser.add_option("-l", "--lpae", action="store_true")
    parser.add_option("-V", "--virtualisation", action="store_true")

    # Cache Options
    parser.add_option("--caches", action="store_true")
    parser.add_option("--l2cache", action="store_true")
@@ -197,6 +200,14 @@ def addCommonOptions(parser):
    parser.add_option("--at-instruction", action="store_true", default=False,
          help="""Treat value of --checkpoint-restore or --take-checkpoint as a
          number of instructions.""")
    parser.add_option("--spec-input", default="ref", type="choice",
                      choices=["ref", "test", "train", "smred", "mdred",
                               "lgred"],
                      help="Input set size for SPEC CPU2000 benchmarks.")
    parser.add_option("--arm-iset", default="arm", type="choice",
                      choices=["arm", "thumb", "aarch64"],
                      help="ARM instruction set.")


def addSEOptions(parser):
    # Benchmark options
@@ -663,7 +663,7 @@ class vortex(Benchmark):
    stdin = None

    def __init__(self, isa, os, input_set):
        if (isa == 'alpha' or isa == 'arm'):
        if (isa in ('alpha', 'arm', 'thumb', 'aarch64')):
            self.endian = 'lendian'
        elif (isa == 'sparc' or isa == 'sparc32'):
            self.endian = 'bendian'
@@ -140,6 +140,12 @@ if options.kernel is not None:
if options.script is not None:
    test_sys.readfile = options.script

if options.lpae:
    test_sys.have_lpae = True

if options.virtualisation:
    test_sys.have_virtualization = True

test_sys.init_param = options.init_param

# For now, assign all the CPUs to the same clock domain
@@ -135,9 +135,14 @@ if options.bench:
    for app in apps:
        try:
            if buildEnv['TARGET_ISA'] == 'alpha':
                exec("workload = %s('alpha', 'tru64', 'ref')" % app)
                exec("workload = %s('alpha', 'tru64', '%s')" % (
                    app, options.spec_input))
            elif buildEnv['TARGET_ISA'] == 'arm':
                exec("workload = %s('arm_%s', 'linux', '%s')" % (
                    app, options.arm_iset, options.spec_input))
            else:
                exec("workload = %s(buildEnv['TARGET_ISA'], 'linux', 'ref')" % app)
                exec("workload = %s(buildEnv['TARGET_ISA'], 'linux', '%s')" % (
                    app, options.spec_input))
            multiprocesses.append(workload.makeLiveProcess())
        except:
            print >>sys.stderr, "Unable to find workload for %s: %s" % (buildEnv['TARGET_ISA'], app)
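For reference, a minimal sketch of what the ARM branch above expands to once the new options are supplied (the option values and the choice of the vortex benchmark are hypothetical examples; vortex and makeLiveProcess are the Benchmark helpers shown elsewhere in this diff):

    # Equivalent of the exec() above for --arm-iset=aarch64 --spec-input=test
    workload = vortex('arm_aarch64', 'linux', 'test')
    multiprocesses.append(workload.makeLiveProcess())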
@@ -172,6 +172,7 @@ typedef struct {
#define EM_TINYJ        61      /* Advanced Logic Corp. TinyJ processor. */
#define EM_X86_64       62      /* Advanced Micro Devices x86-64 */
#define EM_AMD64        EM_X86_64      /* Advanced Micro Devices x86-64 (compat) */
#define EM_AARCH64      183     /* AArch64 64 bit ARM. */

/* Non-standard or deprecated. */
#define EM_486          6       /* Intel i486. */
@@ -1,4 +1,4 @@
# Copyright (c) 2012 ARM Limited
# Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
@@ -34,8 +34,10 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
#          Giacomo Gabrielli

from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject

class ArmISA(SimObject):
@@ -43,12 +45,9 @@ class ArmISA(SimObject):
    cxx_class = 'ArmISA::ISA'
    cxx_header = "arch/arm/isa.hh"

    # 0x35 Implementor is '5' from "M5"
    # 0x0 Variant
    # 0xf Architecture from CPUID scheme
    # 0xc00 Primary part number ("c" or higher implies ARM v7)
    # 0x0 Revision
    midr = Param.UInt32(0x350fc000, "Main ID Register")
    system = Param.System(Parent.any, "System this ISA object belongs to")

    midr = Param.UInt32(0x410fc0f0, "MIDR value")

    # See section B4.1.93 - B4.1.94 of the ARM ARM
    #
@@ -56,19 +55,19 @@ class ArmISA(SimObject):
    # Note: ThumbEE is disabled for now since we don't support CP14
    # config registers and jumping to ThumbEE vectors
    id_pfr0 = Param.UInt32(0x00000031, "Processor Feature Register 0")
    # !Timer | !Virti | !M Profile | !TrustZone | ARMv4
    id_pfr1 = Param.UInt32(0x00000001, "Processor Feature Register 1")
    # !Timer | Virti | !M Profile | TrustZone | ARMv4
    id_pfr1 = Param.UInt32(0x00001011, "Processor Feature Register 1")

    # See section B4.1.89 - B4.1.92 of the ARM ARM
    # VMSAv7 support
    id_mmfr0 = Param.UInt32(0x00000003, "Memory Model Feature Register 0")
    id_mmfr0 = Param.UInt32(0x10201103, "Memory Model Feature Register 0")
    id_mmfr1 = Param.UInt32(0x00000000, "Memory Model Feature Register 1")
    # no HW access | WFI stalling | ISB and DSB |
    # all TLB maintenance | no Harvard
    id_mmfr2 = Param.UInt32(0x01230000, "Memory Model Feature Register 2")
    # SuperSec | Coherent TLB | Bcast Maint |
    # BP Maint | Cache Maint Set/way | Cache Maint MVA
    id_mmfr3 = Param.UInt32(0xF0102211, "Memory Model Feature Register 3")
    id_mmfr3 = Param.UInt32(0x02102211, "Memory Model Feature Register 3")

    # See section B4.1.84 of ARM ARM
    # All values are latest for ARMv7-A profile
@@ -79,5 +78,40 @@ class ArmISA(SimObject):
    id_isar4 = Param.UInt32(0x10010142, "Instruction Set Attribute Register 4")
    id_isar5 = Param.UInt32(0x00000000, "Instruction Set Attribute Register 5")

    fpsid = Param.UInt32(0x410430a0, "Floating-point System ID Register")
    fpsid = Param.UInt32(0x410430A0, "Floating-point System ID Register")

    # [31:0] is implementation defined
    id_aa64afr0_el1 = Param.UInt64(0x0000000000000000,
        "AArch64 Auxiliary Feature Register 0")
    # Reserved for future expansion
    id_aa64afr1_el1 = Param.UInt64(0x0000000000000000,
        "AArch64 Auxiliary Feature Register 1")

    # 1 CTX CMPs | 2 WRPs | 2 BRPs | !PMU | !Trace | Debug v8-A
    id_aa64dfr0_el1 = Param.UInt64(0x0000000000101006,
        "AArch64 Debug Feature Register 0")
    # Reserved for future expansion
    id_aa64dfr1_el1 = Param.UInt64(0x0000000000000000,
        "AArch64 Debug Feature Register 1")

    # !CRC32 | !SHA2 | !SHA1 | !AES
    id_aa64isar0_el1 = Param.UInt64(0x0000000000000000,
        "AArch64 Instruction Set Attribute Register 0")
    # Reserved for future expansion
    id_aa64isar1_el1 = Param.UInt64(0x0000000000000000,
        "AArch64 Instruction Set Attribute Register 1")

    # 4K | 64K | !16K | !BigEndEL0 | !SNSMem | !BigEnd | 8b ASID | 40b PA
    id_aa64mmfr0_el1 = Param.UInt64(0x0000000000f00002,
        "AArch64 Memory Model Feature Register 0")
    # Reserved for future expansion
    id_aa64mmfr1_el1 = Param.UInt64(0x0000000000000000,
        "AArch64 Memory Model Feature Register 1")

    # !GICv3 CP15 | AdvSIMD | FP | !EL3 | !EL2 | EL1 (AArch64) | EL0 (AArch64)
    # (no AArch32/64 interprocessing support for now)
    id_aa64pfr0_el1 = Param.UInt64(0x0000000000000011,
        "AArch64 Processor Feature Register 0")
    # Reserved for future expansion
    id_aa64pfr1_el1 = Param.UInt64(0x0000000000000000,
        "AArch64 Processor Feature Register 1")
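As a sanity check on the reset value chosen for id_aa64pfr0_el1 above, here is a small sketch (illustration only; field layout per the ARMv8 ID register format, four bits per field) of how 0x11 decomposes into the features listed in the comment:

    val = 0x0000000000000011          # id_aa64pfr0_el1 reset value from above
    el0     = (val >> 0)  & 0xf       # 0x1 -> EL0 implemented, AArch64 only
    el1     = (val >> 4)  & 0xf       # 0x1 -> EL1 implemented, AArch64 only
    el2     = (val >> 8)  & 0xf       # 0x0 -> EL2 not implemented
    el3     = (val >> 12) & 0xf       # 0x0 -> EL3 not implemented
    fp      = (val >> 16) & 0xf       # 0x0 -> floating-point implemented
    advsimd = (val >> 20) & 0xf       # 0x0 -> AdvSIMD implemented
    gic     = (val >> 24) & 0xf       # 0x0 -> no GICv3 system registers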
@@ -1,4 +1,4 @@
# Copyright (c) 2009 ARM Limited
# Copyright (c) 2009, 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
@@ -44,7 +44,8 @@ class ArmMachineType(Enum):
    'RealView_PBX' : 1901,
    'VExpress_ELT' : 2272,
    'VExpress_CA9' : 2272,
    'VExpress_EMM' : 2272}
    'VExpress_EMM' : 2272,
    'VExpress_EMM64' : 2272}

class ArmSystem(System):
    type = 'ArmSystem'
@@ -54,6 +55,23 @@ class ArmSystem(System):
    boot_loader = Param.String("", "File that contains the boot loader code if any")
    gic_cpu_addr = Param.Addr(0, "Addres of the GIC CPU interface")
    flags_addr = Param.Addr(0, "Address of the flags register for MP booting")
    have_security = Param.Bool(False,
        "True if Security Extensions are implemented")
    have_virtualization = Param.Bool(False,
        "True if Virtualization Extensions are implemented")
    have_lpae = Param.Bool(False, "True if LPAE is implemented")
    have_generic_timer = Param.Bool(False,
        "True if the Generic Timer extension is implemented")
    highest_el_is_64 = Param.Bool(False,
        "True if the register width of the highest implemented exception level "
        "is 64 bits (ARMv8)")
    reset_addr_64 = Param.UInt64(0x0,
        "Reset address if the highest implemented exception level is 64 bits "
        "(ARMv8)")
    phys_addr_range_64 = Param.UInt8(40,
        "Supported physical address range in bits when using AArch64 (ARMv8)")
    have_large_asid_64 = Param.Bool(False,
        "True if ASID is 16 bits in AArch64 (ARMv8)")

class LinuxArmSystem(ArmSystem):
    type = 'LinuxArmSystem'
@@ -61,8 +79,10 @@ class LinuxArmSystem(ArmSystem):
    load_addr_mask = 0x0fffffff
    machine_type = Param.ArmMachineType('RealView_PBX',
        "Machine id from http://www.arm.linux.org.uk/developer/machines/")
    atags_addr = Param.Addr(0x100,
        "Address where default atags structure should be written")
    atags_addr = Param.Addr("Address where default atags structure should " \
        "be written")
    boot_release_addr = Param.Addr(0xfff8, "Address where secondary CPUs " \
        "spin waiting boot in the loader")
    dtb_filename = Param.String("",
        "File that contains the Device Tree Blob. Don't use DTB if empty.")
    early_kernel_symbols = Param.Bool(False,
@@ -1,6 +1,6 @@
# -*- mode:python -*-

# Copyright (c) 2009 ARM Limited
# Copyright (c) 2009, 2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
@@ -42,10 +42,12 @@ from m5.params import *
from m5.proxy import *
from MemObject import MemObject

# Basic stage 1 translation objects
class ArmTableWalker(MemObject):
    type = 'ArmTableWalker'
    cxx_class = 'ArmISA::TableWalker'
    cxx_header = "arch/arm/table_walker.hh"
    is_stage2 = Param.Bool(False, "Is this object for stage 2 translation?")
    port = MasterPort("Port for TableWalker to do walk the translation with")
    sys = Param.System(Parent.any, "system object parameter")
    num_squash_per_cycle = Param.Unsigned(2,
@@ -57,3 +59,28 @@ class ArmTLB(SimObject):
    cxx_header = "arch/arm/tlb.hh"
    size = Param.Int(64, "TLB size")
    walker = Param.ArmTableWalker(ArmTableWalker(), "HW Table walker")
    is_stage2 = Param.Bool(False, "Is this a stage 2 TLB?")

# Stage 2 translation objects, only used when virtualisation is being used
class ArmStage2TableWalker(ArmTableWalker):
    is_stage2 = True

class ArmStage2TLB(ArmTLB):
    size = 32
    walker = ArmStage2TableWalker()
    is_stage2 = True

class ArmStage2MMU(SimObject):
    type = 'ArmStage2MMU'
    cxx_class = 'ArmISA::Stage2MMU'
    cxx_header = 'arch/arm/stage2_mmu.hh'
    tlb = Param.ArmTLB("Stage 1 TLB")
    stage2_tlb = Param.ArmTLB("Stage 2 TLB")

class ArmStage2IMMU(ArmStage2MMU):
    tlb = Parent.itb
    stage2_tlb = ArmStage2TLB(walker = ArmStage2TableWalker())

class ArmStage2DMMU(ArmStage2MMU):
    tlb = Parent.dtb
    stage2_tlb = ArmStage2TLB(walker = ArmStage2TableWalker())
@@ -1,6 +1,6 @@
# -*- mode:python -*-

# Copyright (c) 2009 ARM Limited
# Copyright (c) 2009, 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
@@ -49,12 +49,17 @@ if env['TARGET_ISA'] == 'arm':
    Dir('isa/formats')
    Source('decoder.cc')
    Source('faults.cc')
    Source('insts/branch64.cc')
    Source('insts/data64.cc')
    Source('insts/macromem.cc')
    Source('insts/mem.cc')
    Source('insts/mem64.cc')
    Source('insts/misc.cc')
    Source('insts/misc64.cc')
    Source('insts/pred_inst.cc')
    Source('insts/static_inst.cc')
    Source('insts/vfp.cc')
    Source('insts/fplib.cc')
    Source('interrupts.cc')
    Source('isa.cc')
    Source('linux/linux.cc')
@@ -67,6 +72,8 @@ if env['TARGET_ISA'] == 'arm':
    Source('stacktrace.cc')
    Source('system.cc')
    Source('table_walker.cc')
    Source('stage2_mmu.cc')
    Source('stage2_lookup.cc')
    Source('tlb.cc')
    Source('utility.cc')
    Source('vtophys.cc')
@@ -1,4 +1,16 @@
/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2012 Google
 * All rights reserved.
 *
@@ -47,9 +59,11 @@ Decoder::process()

    if (!emi.thumb) {
        emi.instBits = data;
        if (!emi.aarch64) {
            emi.sevenAndFour = bits(data, 7) && bits(data, 4);
            emi.isMisc = (bits(data, 24, 23) == 0x2 &&
                          bits(data, 20) == 0);
        }
        consumeBytes(4);
        DPRINTF(Decoder, "Arm inst: %#x.\n", (uint64_t)emi);
    } else {
@@ -112,6 +126,7 @@ Decoder::moreBytes(const PCState &pc, Addr fetchPC, MachInst inst)
    data = inst;
    offset = (fetchPC >= pc.instAddr()) ? 0 : pc.instAddr() - fetchPC;
    emi.thumb = pc.thumb();
    emi.aarch64 = pc.aarch64();
    emi.fpscrLen = fpscrLen;
    emi.fpscrStride = fpscrStride;
@@ -1,4 +1,16 @@
/*
 * Copyright (c) 2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2012 Google
 * All rights reserved.
 *
(File diff suppressed because it is too large)
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2010 ARM Limited
 * Copyright (c) 2010, 2012-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
@@ -40,12 +40,15 @@
 *
 * Authors: Ali Saidi
 *          Gabe Black
 *          Giacomo Gabrielli
 *          Thomas Grocutt
 */

#ifndef __ARM_FAULTS_HH__
#define __ARM_FAULTS_HH__

#include "arch/arm/miscregs.hh"
#include "arch/arm/pagetable.hh"
#include "arch/arm/types.hh"
#include "base/misc.hh"
#include "sim/faults.hh"
@ -60,63 +63,146 @@ typedef const Addr FaultOffset;
|
|||
class ArmFault : public FaultBase
|
||||
{
|
||||
protected:
|
||||
ExtMachInst machInst;
|
||||
uint32_t issRaw;
|
||||
|
||||
// Helper variables for ARMv8 exception handling
|
||||
bool from64; // True if the exception is generated from the AArch64 state
|
||||
bool to64; // True if the exception is taken in AArch64 state
|
||||
ExceptionLevel fromEL; // Source exception level
|
||||
ExceptionLevel toEL; // Target exception level
|
||||
OperatingMode fromMode; // Source operating mode
|
||||
|
||||
Addr getVector(ThreadContext *tc);
|
||||
Addr getVector64(ThreadContext *tc);
|
||||
|
||||
public:
|
||||
enum StatusEncoding
|
||||
/// Generic fault source enums used to index into
|
||||
/// {short/long/aarch64}DescFaultSources[] to get the actual encodings based
|
||||
/// on the current register width state and the translation table format in
|
||||
/// use
|
||||
enum FaultSource
|
||||
{
|
||||
// Fault Status register encodings
|
||||
// ARM ARM B3.9.4
|
||||
AlignmentFault = 0x1,
|
||||
DebugEvent = 0x2,
|
||||
AccessFlag0 = 0x3,
|
||||
InstructionCacheMaintenance = 0x4,
|
||||
Translation0 = 0x5,
|
||||
AccessFlag1 = 0x6,
|
||||
Translation1 = 0x7,
|
||||
SynchronousExternalAbort0 = 0x8,
|
||||
Domain0 = 0x9,
|
||||
SynchronousExternalAbort1 = 0x8,
|
||||
Domain1 = 0xb,
|
||||
TranslationTableWalkExtAbt0 = 0xc,
|
||||
Permission0 = 0xd,
|
||||
TranslationTableWalkExtAbt1 = 0xe,
|
||||
Permission1 = 0xf,
|
||||
AsynchronousExternalAbort = 0x16,
|
||||
MemoryAccessAsynchronousParityError = 0x18,
|
||||
MemoryAccessSynchronousParityError = 0x19,
|
||||
TranslationTableWalkPrtyErr0 = 0x1c,
|
||||
TranslationTableWalkPrtyErr1 = 0x1e,
|
||||
AlignmentFault = 0,
|
||||
InstructionCacheMaintenance, // Short-desc. format only
|
||||
SynchExtAbtOnTranslTableWalkLL,
|
||||
SynchPtyErrOnTranslTableWalkLL = SynchExtAbtOnTranslTableWalkLL + 4,
|
||||
TranslationLL = SynchPtyErrOnTranslTableWalkLL + 4,
|
||||
AccessFlagLL = TranslationLL + 4,
|
||||
DomainLL = AccessFlagLL + 4,
|
||||
PermissionLL = DomainLL + 4,
|
||||
DebugEvent = PermissionLL + 4,
|
||||
SynchronousExternalAbort,
|
||||
TLBConflictAbort, // Requires LPAE
|
||||
SynchPtyErrOnMemoryAccess,
|
||||
AsynchronousExternalAbort,
|
||||
AsynchPtyErrOnMemoryAccess,
|
||||
AddressSizeLL, // AArch64 only
|
||||
|
||||
// not a real fault. This is a status code
|
||||
// to allow the translation function to inform
|
||||
// the memory access function not to proceed
|
||||
// for a Prefetch that misses in the TLB.
|
||||
PrefetchTLBMiss = 0x1f,
|
||||
PrefetchUncacheable = 0x20
|
||||
// Not real faults. These are faults to allow the translation function
|
||||
// to inform the memory access function not to proceed for a prefetch
|
||||
// that misses in the TLB or that targets an uncacheable address
|
||||
PrefetchTLBMiss = AddressSizeLL + 4,
|
||||
PrefetchUncacheable,
|
||||
|
||||
NumFaultSources,
|
||||
FaultSourceInvalid = 0xff
|
||||
};
|
||||
|
||||
/// Encodings of the fault sources when the short-desc. translation table
|
||||
/// format is in use (ARM ARM Issue C B3.13.3)
|
||||
static uint8_t shortDescFaultSources[NumFaultSources];
|
||||
/// Encodings of the fault sources when the long-desc. translation table
|
||||
/// format is in use (ARM ARM Issue C B3.13.3)
|
||||
static uint8_t longDescFaultSources[NumFaultSources];
|
||||
/// Encodings of the fault sources in AArch64 state
|
||||
static uint8_t aarch64FaultSources[NumFaultSources];
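The enum above packs the per-lookup-level fault variants into blocks of four consecutive values, which the three arrays then map to the architectural encodings. A small sketch of the indexing arithmetic (Python, for illustration only; the numeric values follow directly from the enum ordering):

    AlignmentFault                 = 0
    InstructionCacheMaintenance    = 1
    SynchExtAbtOnTranslTableWalkLL = 2
    SynchPtyErrOnTranslTableWalkLL = SynchExtAbtOnTranslTableWalkLL + 4   # 6
    TranslationLL                  = SynchPtyErrOnTranslTableWalkLL + 4   # 10
    lookup_level = 2                             # fault raised while walking a level-2 table
    source = TranslationLL + lookup_level        # 12
    # encoding = shortDescFaultSources[source]   # or longDesc/aarch64 variants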
|
||||
|
||||
enum AnnotationIDs
|
||||
{
|
||||
S1PTW, // DataAbort, PrefetchAbort: Stage 1 Page Table Walk,
|
||||
OVA, // DataAbort, PrefetchAbort: stage 1 Virtual Address for stage 2 faults
|
||||
SAS, // DataAbort: Syndrome Access Size
|
||||
SSE, // DataAbort: Syndrome Sign Extend
|
||||
SRT, // DataAbort: Syndrome Register Transfer
|
||||
|
||||
// AArch64 only
|
||||
SF, // DataAbort: width of the accessed register is SixtyFour
|
||||
AR // DataAbort: Acquire/Release semantics
|
||||
};
|
||||
|
||||
enum TranMethod
|
||||
{
|
||||
LpaeTran,
|
||||
VmsaTran,
|
||||
UnknownTran
|
||||
};
|
||||
|
||||
struct FaultVals
|
||||
{
|
||||
const FaultName name;
|
||||
|
||||
const FaultOffset offset;
|
||||
|
||||
// Offsets used for exceptions taken in AArch64 state
|
||||
const uint16_t currELTOffset;
|
||||
const uint16_t currELHOffset;
|
||||
const uint16_t lowerEL64Offset;
|
||||
const uint16_t lowerEL32Offset;
|
||||
|
||||
const OperatingMode nextMode;
|
||||
|
||||
const uint8_t armPcOffset;
|
||||
const uint8_t thumbPcOffset;
|
||||
// The following two values are used in place of armPcOffset and
|
||||
// thumbPcOffset when the exception return address is saved into ELR
|
||||
// registers (exceptions taken in HYP mode or in AArch64 state)
|
||||
const uint8_t armPcElrOffset;
|
||||
const uint8_t thumbPcElrOffset;
|
||||
|
||||
const bool hypTrappable;
|
||||
const bool abortDisable;
|
||||
const bool fiqDisable;
|
||||
|
||||
// Exception class used to appropriately set the syndrome register
|
||||
// (exceptions taken in HYP mode or in AArch64 state)
|
||||
const ExceptionClass ec;
|
||||
|
||||
FaultStat count;
|
||||
};
|
||||
|
||||
ArmFault(ExtMachInst _machInst = 0, uint32_t _iss = 0) :
|
||||
machInst(_machInst), issRaw(_iss), from64(false), to64(false) {}
|
||||
|
||||
// Returns the actual syndrome register to use based on the target
|
||||
// exception level
|
||||
MiscRegIndex getSyndromeReg64() const;
|
||||
// Returns the actual fault address register to use based on the target
|
||||
// exception level
|
||||
MiscRegIndex getFaultAddrReg64() const;
|
||||
|
||||
void invoke(ThreadContext *tc,
|
||||
StaticInstPtr inst = StaticInst::nullStaticInstPtr);
|
||||
void invoke64(ThreadContext *tc,
|
||||
StaticInstPtr inst = StaticInst::nullStaticInstPtr);
|
||||
virtual void annotate(AnnotationIDs id, uint64_t val) {}
|
||||
virtual FaultStat& countStat() = 0;
|
||||
virtual FaultOffset offset() = 0;
|
||||
virtual FaultOffset offset(ThreadContext *tc) = 0;
|
||||
virtual FaultOffset offset64() = 0;
|
||||
virtual OperatingMode nextMode() = 0;
|
||||
virtual uint8_t armPcOffset() = 0;
|
||||
virtual uint8_t thumbPcOffset() = 0;
|
||||
virtual bool abortDisable() = 0;
|
||||
virtual bool fiqDisable() = 0;
|
||||
virtual bool routeToMonitor(ThreadContext *tc) const = 0;
|
||||
virtual bool routeToHyp(ThreadContext *tc) const { return false; }
|
||||
virtual uint8_t armPcOffset(bool isHyp) = 0;
|
||||
virtual uint8_t thumbPcOffset(bool isHyp) = 0;
|
||||
virtual uint8_t armPcElrOffset() = 0;
|
||||
virtual uint8_t thumbPcElrOffset() = 0;
|
||||
virtual bool abortDisable(ThreadContext *tc) = 0;
|
||||
virtual bool fiqDisable(ThreadContext *tc) = 0;
|
||||
virtual ExceptionClass ec(ThreadContext *tc) const = 0;
|
||||
virtual uint32_t iss() const = 0;
|
||||
virtual bool isStage2() const { return false; }
|
||||
virtual FSR getFsr(ThreadContext *tc) { return 0; }
|
||||
virtual void setSyndrome(ThreadContext *tc, MiscRegIndex syndrome_reg);
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
|
@ -126,14 +212,38 @@ class ArmFaultVals : public ArmFault
|
|||
static FaultVals vals;
|
||||
|
||||
public:
|
||||
ArmFaultVals<T>(ExtMachInst _machInst = 0, uint32_t _iss = 0) :
|
||||
ArmFault(_machInst, _iss) {}
|
||||
FaultName name() const { return vals.name; }
|
||||
FaultStat & countStat() {return vals.count;}
|
||||
FaultOffset offset() { return vals.offset; }
|
||||
FaultStat & countStat() { return vals.count; }
|
||||
FaultOffset offset(ThreadContext *tc);
|
||||
|
||||
FaultOffset
|
||||
offset64()
|
||||
{
|
||||
if (toEL == fromEL) {
|
||||
if (opModeIsT(fromMode))
|
||||
return vals.currELTOffset;
|
||||
return vals.currELHOffset;
|
||||
} else {
|
||||
if (from64)
|
||||
return vals.lowerEL64Offset;
|
||||
return vals.lowerEL32Offset;
|
||||
}
|
||||
}
|
||||
|
||||
OperatingMode nextMode() { return vals.nextMode; }
|
||||
uint8_t armPcOffset() { return vals.armPcOffset; }
|
||||
uint8_t thumbPcOffset() { return vals.thumbPcOffset; }
|
||||
bool abortDisable() { return vals.abortDisable; }
|
||||
bool fiqDisable() { return vals.fiqDisable; }
|
||||
virtual bool routeToMonitor(ThreadContext *tc) const { return false; }
|
||||
uint8_t armPcOffset(bool isHyp) { return isHyp ? vals.armPcElrOffset
|
||||
: vals.armPcOffset; }
|
||||
uint8_t thumbPcOffset(bool isHyp) { return isHyp ? vals.thumbPcElrOffset
|
||||
: vals.thumbPcOffset; }
|
||||
uint8_t armPcElrOffset() { return vals.armPcElrOffset; }
|
||||
uint8_t thumbPcElrOffset() { return vals.thumbPcElrOffset; }
|
||||
virtual bool abortDisable(ThreadContext* tc) { return vals.abortDisable; }
|
||||
virtual bool fiqDisable(ThreadContext* tc) { return vals.fiqDisable; }
|
||||
virtual ExceptionClass ec(ThreadContext *tc) const { return vals.ec; }
|
||||
virtual uint32_t iss() const { return issRaw; }
|
||||
};
|
||||
|
||||
class Reset : public ArmFaultVals<Reset>
|
||||
|
@ -146,61 +256,158 @@ class Reset : public ArmFaultVals<Reset>
|
|||
class UndefinedInstruction : public ArmFaultVals<UndefinedInstruction>
|
||||
{
|
||||
protected:
|
||||
ExtMachInst machInst;
|
||||
bool unknown;
|
||||
const char *mnemonic;
|
||||
bool disabled;
|
||||
ExceptionClass overrideEc;
|
||||
|
||||
public:
|
||||
UndefinedInstruction(ExtMachInst _machInst,
|
||||
bool _unknown,
|
||||
const char *_mnemonic = NULL,
|
||||
bool _disabled = false) :
|
||||
machInst(_machInst), unknown(_unknown),
|
||||
mnemonic(_mnemonic), disabled(_disabled)
|
||||
{
|
||||
}
|
||||
UndefinedInstruction() :
|
||||
machInst(0), unknown(false), mnemonic("undefined"), disabled(false)
|
||||
ArmFaultVals<UndefinedInstruction>(_machInst),
|
||||
unknown(_unknown), mnemonic(_mnemonic), disabled(_disabled),
|
||||
overrideEc(EC_INVALID)
|
||||
{}
|
||||
UndefinedInstruction(ExtMachInst _machInst, uint32_t _iss, ExceptionClass _overrideEc) :
|
||||
ArmFaultVals<UndefinedInstruction>(_machInst, _iss),
|
||||
overrideEc(_overrideEc)
|
||||
{}
|
||||
|
||||
void invoke(ThreadContext *tc,
|
||||
StaticInstPtr inst = StaticInst::nullStaticInstPtr);
|
||||
bool routeToHyp(ThreadContext *tc) const;
|
||||
ExceptionClass ec(ThreadContext *tc) const;
|
||||
uint32_t iss() const;
|
||||
};
|
||||
|
||||
class SupervisorCall : public ArmFaultVals<SupervisorCall>
|
||||
{
|
||||
protected:
|
||||
ExtMachInst machInst;
|
||||
|
||||
ExceptionClass overrideEc;
|
||||
public:
|
||||
SupervisorCall(ExtMachInst _machInst) : machInst(_machInst)
|
||||
{}
|
||||
SupervisorCall() : machInst(0)
|
||||
SupervisorCall(ExtMachInst _machInst, uint32_t _iss,
|
||||
ExceptionClass _overrideEc = EC_INVALID) :
|
||||
ArmFaultVals<SupervisorCall>(_machInst, _iss),
|
||||
overrideEc(_overrideEc)
|
||||
{}
|
||||
|
||||
void invoke(ThreadContext *tc,
|
||||
StaticInstPtr inst = StaticInst::nullStaticInstPtr);
|
||||
bool routeToHyp(ThreadContext *tc) const;
|
||||
ExceptionClass ec(ThreadContext *tc) const;
|
||||
uint32_t iss() const;
|
||||
};
|
||||
|
||||
class SecureMonitorCall : public ArmFaultVals<SecureMonitorCall>
|
||||
{
|
||||
public:
|
||||
SecureMonitorCall(ExtMachInst _machInst) :
|
||||
ArmFaultVals<SecureMonitorCall>(_machInst)
|
||||
{}
|
||||
|
||||
void invoke(ThreadContext *tc,
|
||||
StaticInstPtr inst = StaticInst::nullStaticInstPtr);
|
||||
ExceptionClass ec(ThreadContext *tc) const;
|
||||
uint32_t iss() const;
|
||||
};
|
||||
|
||||
class SupervisorTrap : public ArmFaultVals<SupervisorTrap>
|
||||
{
|
||||
protected:
|
||||
ExtMachInst machInst;
|
||||
ExceptionClass overrideEc;
|
||||
|
||||
public:
|
||||
SupervisorTrap(ExtMachInst _machInst, uint32_t _iss,
|
||||
ExceptionClass _overrideEc = EC_INVALID) :
|
||||
ArmFaultVals<SupervisorTrap>(_machInst, _iss),
|
||||
overrideEc(_overrideEc)
|
||||
{}
|
||||
|
||||
ExceptionClass ec(ThreadContext *tc) const;
|
||||
};
|
||||
|
||||
class SecureMonitorTrap : public ArmFaultVals<SecureMonitorTrap>
|
||||
{
|
||||
protected:
|
||||
ExtMachInst machInst;
|
||||
ExceptionClass overrideEc;
|
||||
|
||||
public:
|
||||
SecureMonitorTrap(ExtMachInst _machInst, uint32_t _iss,
|
||||
ExceptionClass _overrideEc = EC_INVALID) :
|
||||
ArmFaultVals<SecureMonitorTrap>(_machInst, _iss),
|
||||
overrideEc(_overrideEc)
|
||||
{}
|
||||
|
||||
ExceptionClass ec(ThreadContext *tc) const;
|
||||
};
|
||||
|
||||
class HypervisorCall : public ArmFaultVals<HypervisorCall>
|
||||
{
|
||||
public:
|
||||
HypervisorCall(ExtMachInst _machInst, uint32_t _imm);
|
||||
};
|
||||
|
||||
class HypervisorTrap : public ArmFaultVals<HypervisorTrap>
|
||||
{
|
||||
protected:
|
||||
ExtMachInst machInst;
|
||||
ExceptionClass overrideEc;
|
||||
|
||||
public:
|
||||
HypervisorTrap(ExtMachInst _machInst, uint32_t _iss,
|
||||
ExceptionClass _overrideEc = EC_INVALID) :
|
||||
ArmFaultVals<HypervisorTrap>(_machInst, _iss),
|
||||
overrideEc(_overrideEc)
|
||||
{}
|
||||
|
||||
ExceptionClass ec(ThreadContext *tc) const;
|
||||
};
|
||||
|
||||
template <class T>
|
||||
class AbortFault : public ArmFaultVals<T>
|
||||
{
|
||||
protected:
|
||||
/**
|
||||
* The virtual address the fault occured at. If 2 stages of
|
||||
* translation are being used then this is the intermediate
|
||||
* physical address that is the starting point for the second
|
||||
* stage of translation.
|
||||
*/
|
||||
Addr faultAddr;
|
||||
/**
|
||||
* Original virtual address. If the fault was generated on the
|
||||
* second stage of translation then this variable stores the
|
||||
* virtual address used in the original stage 1 translation.
|
||||
*/
|
||||
Addr OVAddr;
|
||||
bool write;
|
||||
uint8_t domain;
|
||||
uint8_t status;
|
||||
TlbEntry::DomainType domain;
|
||||
uint8_t source;
|
||||
uint8_t srcEncoded;
|
||||
bool stage2;
|
||||
bool s1ptw;
|
||||
ArmFault::TranMethod tranMethod;
|
||||
|
||||
public:
|
||||
AbortFault(Addr _faultAddr, bool _write,
|
||||
uint8_t _domain, uint8_t _status) :
|
||||
faultAddr(_faultAddr), write(_write),
|
||||
domain(_domain), status(_status)
|
||||
AbortFault(Addr _faultAddr, bool _write, TlbEntry::DomainType _domain, uint8_t _source,
|
||||
bool _stage2, ArmFault::TranMethod _tranMethod = ArmFault::UnknownTran) :
|
||||
faultAddr(_faultAddr), write(_write), domain(_domain), source(_source),
|
||||
stage2(_stage2), s1ptw(false), tranMethod(_tranMethod)
|
||||
{}
|
||||
|
||||
void invoke(ThreadContext *tc,
|
||||
StaticInstPtr inst = StaticInst::nullStaticInstPtr);
|
||||
|
||||
FSR getFsr(ThreadContext *tc);
|
||||
bool abortDisable(ThreadContext *tc);
|
||||
uint32_t iss() const;
|
||||
bool isStage2() const { return stage2; }
|
||||
void annotate(ArmFault::AnnotationIDs id, uint64_t val);
|
||||
bool isMMUFault() const;
|
||||
};
|
||||
|
||||
class PrefetchAbort : public AbortFault<PrefetchAbort>
|
||||
|
@ -208,10 +415,18 @@ class PrefetchAbort : public AbortFault<PrefetchAbort>
|
|||
public:
|
||||
static const MiscRegIndex FsrIndex = MISCREG_IFSR;
|
||||
static const MiscRegIndex FarIndex = MISCREG_IFAR;
|
||||
static const MiscRegIndex HFarIndex = MISCREG_HIFAR;
|
||||
|
||||
PrefetchAbort(Addr _addr, uint8_t _status) :
|
||||
AbortFault<PrefetchAbort>(_addr, false, 0, _status)
|
||||
PrefetchAbort(Addr _addr, uint8_t _source, bool _stage2 = false,
|
||||
ArmFault::TranMethod _tranMethod = ArmFault::UnknownTran) :
|
||||
AbortFault<PrefetchAbort>(_addr, false, TlbEntry::DomainType::NoAccess,
|
||||
_source, _stage2, _tranMethod)
|
||||
{}
|
||||
|
||||
ExceptionClass ec(ThreadContext *tc) const;
|
||||
// @todo: external aborts should be routed if SCR.EA == 1
|
||||
bool routeToMonitor(ThreadContext *tc) const;
|
||||
bool routeToHyp(ThreadContext *tc) const;
|
||||
};
|
||||
|
||||
class DataAbort : public AbortFault<DataAbort>
|
||||
|
@ -219,14 +434,105 @@ class DataAbort : public AbortFault<DataAbort>
|
|||
public:
|
||||
static const MiscRegIndex FsrIndex = MISCREG_DFSR;
|
||||
static const MiscRegIndex FarIndex = MISCREG_DFAR;
|
||||
static const MiscRegIndex HFarIndex = MISCREG_HDFAR;
|
||||
bool isv;
|
||||
uint8_t sas;
|
||||
uint8_t sse;
|
||||
uint8_t srt;
|
||||
|
||||
DataAbort(Addr _addr, uint8_t _domain, bool _write, uint8_t _status) :
|
||||
AbortFault<DataAbort>(_addr, _write, _domain, _status)
|
||||
// AArch64 only
|
||||
bool sf;
|
||||
bool ar;
|
||||
|
||||
DataAbort(Addr _addr, TlbEntry::DomainType _domain, bool _write, uint8_t _source,
|
||||
bool _stage2 = false, ArmFault::TranMethod _tranMethod = ArmFault::UnknownTran) :
|
||||
AbortFault<DataAbort>(_addr, _write, _domain, _source, _stage2,
|
||||
_tranMethod),
|
||||
isv(false), sas (0), sse(0), srt(0), sf(false), ar(false)
|
||||
{}
|
||||
|
||||
ExceptionClass ec(ThreadContext *tc) const;
|
||||
// @todo: external aborts should be routed if SCR.EA == 1
|
||||
bool routeToMonitor(ThreadContext *tc) const;
|
||||
bool routeToHyp(ThreadContext *tc) const;
|
||||
uint32_t iss() const;
|
||||
void annotate(AnnotationIDs id, uint64_t val);
|
||||
};
|
||||
|
||||
class Interrupt : public ArmFaultVals<Interrupt> {};
|
||||
class FastInterrupt : public ArmFaultVals<FastInterrupt> {};
|
||||
class VirtualDataAbort : public AbortFault<VirtualDataAbort>
|
||||
{
|
||||
public:
|
||||
static const MiscRegIndex FsrIndex = MISCREG_DFSR;
|
||||
static const MiscRegIndex FarIndex = MISCREG_DFAR;
|
||||
static const MiscRegIndex HFarIndex = MISCREG_HDFAR;
|
||||
|
||||
VirtualDataAbort(Addr _addr, TlbEntry::DomainType _domain, bool _write,
|
||||
uint8_t _source) :
|
||||
AbortFault<VirtualDataAbort>(_addr, _write, _domain, _source, false)
|
||||
{}
|
||||
|
||||
void invoke(ThreadContext *tc, StaticInstPtr inst);
|
||||
};
|
||||
|
||||
class Interrupt : public ArmFaultVals<Interrupt>
|
||||
{
|
||||
public:
|
||||
bool routeToMonitor(ThreadContext *tc) const;
|
||||
bool routeToHyp(ThreadContext *tc) const;
|
||||
bool abortDisable(ThreadContext *tc);
|
||||
};
|
||||
|
||||
class VirtualInterrupt : public ArmFaultVals<VirtualInterrupt>
|
||||
{
|
||||
public:
|
||||
VirtualInterrupt();
|
||||
};
|
||||
|
||||
class FastInterrupt : public ArmFaultVals<FastInterrupt>
|
||||
{
|
||||
public:
|
||||
bool routeToMonitor(ThreadContext *tc) const;
|
||||
bool routeToHyp(ThreadContext *tc) const;
|
||||
bool abortDisable(ThreadContext *tc);
|
||||
bool fiqDisable(ThreadContext *tc);
|
||||
};
|
||||
|
||||
class VirtualFastInterrupt : public ArmFaultVals<VirtualFastInterrupt>
|
||||
{
|
||||
public:
|
||||
VirtualFastInterrupt();
|
||||
};
|
||||
|
||||
/// PC alignment fault (AArch64 only)
|
||||
class PCAlignmentFault : public ArmFaultVals<PCAlignmentFault>
|
||||
{
|
||||
protected:
|
||||
/// The unaligned value of the PC
|
||||
Addr faultPC;
|
||||
public:
|
||||
PCAlignmentFault(Addr _faultPC) : faultPC(_faultPC)
|
||||
{}
|
||||
void invoke(ThreadContext *tc,
|
||||
StaticInstPtr inst = StaticInst::nullStaticInstPtr);
|
||||
};
|
||||
|
||||
/// Stack pointer alignment fault (AArch64 only)
|
||||
class SPAlignmentFault : public ArmFaultVals<SPAlignmentFault>
|
||||
{
|
||||
public:
|
||||
SPAlignmentFault();
|
||||
};
|
||||
|
||||
/// System error (AArch64 only)
|
||||
class SystemError : public ArmFaultVals<SystemError>
|
||||
{
|
||||
public:
|
||||
SystemError();
|
||||
void invoke(ThreadContext *tc,
|
||||
StaticInstPtr inst = StaticInst::nullStaticInstPtr);
|
||||
bool routeToMonitor(ThreadContext *tc) const;
|
||||
bool routeToHyp(ThreadContext *tc) const;
|
||||
};
|
||||
|
||||
// A fault that flushes the pipe, excluding the faulting instructions
|
||||
class FlushPipe : public ArmFaultVals<FlushPipe>
|
||||
|
@ -246,6 +552,13 @@ class ArmSev : public ArmFaultVals<ArmSev>
|
|||
StaticInstPtr inst = StaticInst::nullStaticInstPtr);
|
||||
};
|
||||
|
||||
/// Illegal Instruction Set State fault (AArch64 only)
|
||||
class IllegalInstSetStateFault : public ArmFaultVals<IllegalInstSetStateFault>
|
||||
{
|
||||
public:
|
||||
IllegalInstSetStateFault();
|
||||
};
|
||||
|
||||
} // namespace ArmISA
|
||||
|
||||
#endif // __ARM_FAULTS_HH__
|
||||
|
|
src/arch/arm/insts/branch64.cc (new file, 146 lines)
@ -0,0 +1,146 @@
|
|||
/*
|
||||
* Copyright (c) 2011-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Authors: Gabe Black
|
||||
*/
|
||||
|
||||
#include "arch/arm/insts/branch64.hh"
|
||||
|
||||
namespace ArmISA
|
||||
{
|
||||
|
||||
ArmISA::PCState
|
||||
BranchImm64::branchTarget(const ArmISA::PCState &branchPC) const
|
||||
{
|
||||
ArmISA::PCState pcs = branchPC;
|
||||
pcs.instNPC(pcs.pc() + imm);
|
||||
pcs.advance();
|
||||
return pcs;
|
||||
}
|
||||
|
||||
ArmISA::PCState
|
||||
BranchImmReg64::branchTarget(const ArmISA::PCState &branchPC) const
|
||||
{
|
||||
ArmISA::PCState pcs = branchPC;
|
||||
pcs.instNPC(pcs.pc() + imm);
|
||||
pcs.advance();
|
||||
return pcs;
|
||||
}
|
||||
|
||||
ArmISA::PCState
|
||||
BranchImmImmReg64::branchTarget(const ArmISA::PCState &branchPC) const
|
||||
{
|
||||
ArmISA::PCState pcs = branchPC;
|
||||
pcs.instNPC(pcs.pc() + imm2);
|
||||
pcs.advance();
|
||||
return pcs;
|
||||
}
|
||||
|
||||
std::string
|
||||
BranchImmCond64::generateDisassembly(
|
||||
Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss, "", false, true, condCode);
|
||||
printTarget(ss, pc + imm, symtab);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
BranchImm64::generateDisassembly(
|
||||
Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss, "", false);
|
||||
printTarget(ss, pc + imm, symtab);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
BranchReg64::generateDisassembly(
|
||||
Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss, "", false);
|
||||
printReg(ss, op1);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
BranchRet64::generateDisassembly(
|
||||
Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss, "", false);
|
||||
if (op1 != INTREG_X30)
|
||||
printReg(ss, op1);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
BranchEret64::generateDisassembly(
|
||||
Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss, "", false);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
BranchImmReg64::generateDisassembly(
|
||||
Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss, "", false);
|
||||
printReg(ss, op1);
|
||||
ccprintf(ss, ", ");
|
||||
printTarget(ss, pc + imm, symtab);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
BranchImmImmReg64::generateDisassembly(
|
||||
Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss, "", false);
|
||||
printReg(ss, op1);
|
||||
ccprintf(ss, ", #%#x, ", imm1);
|
||||
printTarget(ss, pc + imm2, symtab);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
} // namespace ArmISA
|
src/arch/arm/insts/branch64.hh (new file, 166 lines)
@ -0,0 +1,166 @@
|
|||
/*
|
||||
* Copyright (c) 2011-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Authors: Gabe Black
|
||||
*/
|
||||
#ifndef __ARCH_ARM_INSTS_BRANCH64_HH__
|
||||
#define __ARCH_ARM_INSTS_BRANCH64_HH__
|
||||
|
||||
#include "arch/arm/insts/static_inst.hh"
|
||||
|
||||
namespace ArmISA
|
||||
{
|
||||
// Branch to a target computed with an immediate
|
||||
class BranchImm64 : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
int64_t imm;
|
||||
|
||||
public:
|
||||
BranchImm64(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
int64_t _imm) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass), imm(_imm)
|
||||
{}
|
||||
|
||||
ArmISA::PCState branchTarget(const ArmISA::PCState &branchPC) const;
|
||||
|
||||
/// Explicitly import the otherwise hidden branchTarget
|
||||
using StaticInst::branchTarget;
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
// Conditionally Branch to a target computed with an immediate
|
||||
class BranchImmCond64 : public BranchImm64
|
||||
{
|
||||
protected:
|
||||
ConditionCode condCode;
|
||||
|
||||
public:
|
||||
BranchImmCond64(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
int64_t _imm, ConditionCode _condCode) :
|
||||
BranchImm64(mnem, _machInst, __opClass, _imm), condCode(_condCode)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
// Branch to a target computed with a register
|
||||
class BranchReg64 : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
IntRegIndex op1;
|
||||
|
||||
public:
|
||||
BranchReg64(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _op1) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass), op1(_op1)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
// Ret instruction
|
||||
class BranchRet64 : public BranchReg64
|
||||
{
|
||||
public:
|
||||
BranchRet64(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _op1) :
|
||||
BranchReg64(mnem, _machInst, __opClass, _op1)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
// Eret instruction
|
||||
class BranchEret64 : public ArmStaticInst
|
||||
{
|
||||
public:
|
||||
BranchEret64(const char *mnem, ExtMachInst _machInst, OpClass __opClass) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
// Branch to a target computed with an immediate and a register
|
||||
class BranchImmReg64 : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
int64_t imm;
|
||||
IntRegIndex op1;
|
||||
|
||||
public:
|
||||
BranchImmReg64(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
int64_t _imm, IntRegIndex _op1) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass), imm(_imm), op1(_op1)
|
||||
{}
|
||||
|
||||
ArmISA::PCState branchTarget(const ArmISA::PCState &branchPC) const;
|
||||
|
||||
/// Explicitly import the otherwise hidden branchTarget
|
||||
using StaticInst::branchTarget;
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
// Branch to a target computed with two immediates
|
||||
class BranchImmImmReg64 : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
int64_t imm1;
|
||||
int64_t imm2;
|
||||
IntRegIndex op1;
|
||||
|
||||
public:
|
||||
BranchImmImmReg64(const char *mnem, ExtMachInst _machInst,
|
||||
OpClass __opClass, int64_t _imm1, int64_t _imm2,
|
||||
IntRegIndex _op1) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass),
|
||||
imm1(_imm1), imm2(_imm2), op1(_op1)
|
||||
{}
|
||||
|
||||
ArmISA::PCState branchTarget(const ArmISA::PCState &branchPC) const;
|
||||
|
||||
/// Explicitly import the otherwise hidden branchTarget
|
||||
using StaticInst::branchTarget;
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif //__ARCH_ARM_INSTS_BRANCH_HH__
|
src/arch/arm/insts/data64.cc (new file, 203 lines)
@ -0,0 +1,203 @@
|
|||
/*
|
||||
* Copyright (c) 2011-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Authors: Gabe Black
|
||||
*/
|
||||
|
||||
#include "arch/arm/insts/data64.hh"
|
||||
|
||||
namespace ArmISA
|
||||
{
|
||||
|
||||
std::string
|
||||
DataXImmOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printDataInst(ss, true, false, /*XXX not really s*/ false, dest, op1,
|
||||
INTREG_ZERO, INTREG_ZERO, 0, LSL, imm);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
DataXImmOnlyOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss, "", false);
|
||||
printReg(ss, dest);
|
||||
ccprintf(ss, ", #%d", imm);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
DataXSRegOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
    printDataInst(ss, false, true, /*XXX not really s*/ false, dest, op1,
                  op2, INTREG_ZERO, shiftAmt, shiftType, 0);
    return ss.str();
}

std::string
DataXERegOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
{
    std::stringstream ss;
    printDataInst(ss, false, true, /*XXX not really s*/ false, dest, op1,
                  op2, INTREG_ZERO, shiftAmt, LSL, 0);
    return ss.str();
}

std::string
DataX1RegOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
{
    std::stringstream ss;
    printMnemonic(ss, "", false);
    printReg(ss, dest);
    ccprintf(ss, ", ");
    printReg(ss, op1);
    return ss.str();
}

std::string
DataX1RegImmOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
{
    std::stringstream ss;
    printMnemonic(ss, "", false);
    printReg(ss, dest);
    ccprintf(ss, ", ");
    printReg(ss, op1);
    ccprintf(ss, ", #%d", imm);
    return ss.str();
}

std::string
DataX1Reg2ImmOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
{
    std::stringstream ss;
    printMnemonic(ss, "", false);
    printReg(ss, dest);
    ccprintf(ss, ", ");
    printReg(ss, op1);
    ccprintf(ss, ", #%d, #%d", imm1, imm2);
    return ss.str();
}

std::string
DataX2RegOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
{
    std::stringstream ss;
    printMnemonic(ss, "", false);
    printReg(ss, dest);
    ccprintf(ss, ", ");
    printReg(ss, op1);
    ccprintf(ss, ", ");
    printReg(ss, op2);
    return ss.str();
}

std::string
DataX2RegImmOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
{
    std::stringstream ss;
    printMnemonic(ss, "", false);
    printReg(ss, dest);
    ccprintf(ss, ", ");
    printReg(ss, op1);
    ccprintf(ss, ", ");
    printReg(ss, op2);
    ccprintf(ss, ", #%d", imm);
    return ss.str();
}

std::string
DataX3RegOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
{
    std::stringstream ss;
    printMnemonic(ss, "", false);
    printReg(ss, dest);
    ccprintf(ss, ", ");
    printReg(ss, op1);
    ccprintf(ss, ", ");
    printReg(ss, op2);
    ccprintf(ss, ", ");
    printReg(ss, op3);
    return ss.str();
}

std::string
DataXCondCompImmOp::generateDisassembly(
        Addr pc, const SymbolTable *symtab) const
{
    std::stringstream ss;
    printMnemonic(ss, "", false);
    printReg(ss, op1);
    ccprintf(ss, ", #%d, #%d", imm, defCc);
    ccprintf(ss, ", ");
    printCondition(ss, condCode, true);
    return ss.str();
}

std::string
DataXCondCompRegOp::generateDisassembly(
        Addr pc, const SymbolTable *symtab) const
{
    std::stringstream ss;
    printMnemonic(ss, "", false);
    printReg(ss, op1);
    ccprintf(ss, ", ");
    printReg(ss, op2);
    ccprintf(ss, ", #%d", defCc);
    ccprintf(ss, ", ");
    printCondition(ss, condCode, true);
    return ss.str();
}

std::string
DataXCondSelOp::generateDisassembly(
        Addr pc, const SymbolTable *symtab) const
{
    std::stringstream ss;
    printMnemonic(ss, "", false);
    printReg(ss, dest);
    ccprintf(ss, ", ");
    printReg(ss, op1);
    ccprintf(ss, ", ");
    printReg(ss, op2);
    ccprintf(ss, ", ");
    printCondition(ss, condCode, true);
    return ss.str();
}

}
256 src/arch/arm/insts/data64.hh Normal file
@@ -0,0 +1,256 @@
|
|||
/*
|
||||
* Copyright (c) 2011-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Authors: Gabe Black
|
||||
*/
|
||||
#ifndef __ARCH_ARM_INSTS_DATA64_HH__
|
||||
#define __ARCH_ARM_INSTS_DATA64_HH__
|
||||
|
||||
#include "arch/arm/insts/static_inst.hh"
|
||||
#include "base/trace.hh"
|
||||
|
||||
namespace ArmISA
|
||||
{
|
||||
|
||||
class DataXImmOp : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
IntRegIndex dest, op1;
|
||||
uint64_t imm;
|
||||
|
||||
DataXImmOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _dest, IntRegIndex _op1, uint64_t _imm) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass),
|
||||
dest(_dest), op1(_op1), imm(_imm)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class DataXImmOnlyOp : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
IntRegIndex dest;
|
||||
uint64_t imm;
|
||||
|
||||
DataXImmOnlyOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _dest, uint64_t _imm) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass),
|
||||
dest(_dest), imm(_imm)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class DataXSRegOp : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
IntRegIndex dest, op1, op2;
|
||||
int32_t shiftAmt;
|
||||
ArmShiftType shiftType;
|
||||
|
||||
DataXSRegOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _dest, IntRegIndex _op1, IntRegIndex _op2,
|
||||
int32_t _shiftAmt, ArmShiftType _shiftType) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass),
|
||||
dest(_dest), op1(_op1), op2(_op2),
|
||||
shiftAmt(_shiftAmt), shiftType(_shiftType)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class DataXERegOp : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
IntRegIndex dest, op1, op2;
|
||||
ArmExtendType extendType;
|
||||
int32_t shiftAmt;
|
||||
|
||||
DataXERegOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _dest, IntRegIndex _op1, IntRegIndex _op2,
|
||||
ArmExtendType _extendType, int32_t _shiftAmt) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass),
|
||||
dest(_dest), op1(_op1), op2(_op2),
|
||||
extendType(_extendType), shiftAmt(_shiftAmt)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class DataX1RegOp : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
IntRegIndex dest, op1;
|
||||
|
||||
DataX1RegOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _dest, IntRegIndex _op1) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass), dest(_dest), op1(_op1)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class DataX1RegImmOp : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
IntRegIndex dest, op1;
|
||||
uint64_t imm;
|
||||
|
||||
DataX1RegImmOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _dest, IntRegIndex _op1, uint64_t _imm) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass), dest(_dest), op1(_op1),
|
||||
imm(_imm)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class DataX1Reg2ImmOp : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
IntRegIndex dest, op1;
|
||||
uint64_t imm1, imm2;
|
||||
|
||||
DataX1Reg2ImmOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _dest, IntRegIndex _op1, uint64_t _imm1,
|
||||
uint64_t _imm2) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass), dest(_dest), op1(_op1),
|
||||
imm1(_imm1), imm2(_imm2)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class DataX2RegOp : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
IntRegIndex dest, op1, op2;
|
||||
|
||||
DataX2RegOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _dest, IntRegIndex _op1, IntRegIndex _op2) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass),
|
||||
dest(_dest), op1(_op1), op2(_op2)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class DataX2RegImmOp : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
IntRegIndex dest, op1, op2;
|
||||
uint64_t imm;
|
||||
|
||||
DataX2RegImmOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _dest, IntRegIndex _op1, IntRegIndex _op2,
|
||||
uint64_t _imm) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass),
|
||||
dest(_dest), op1(_op1), op2(_op2), imm(_imm)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class DataX3RegOp : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
IntRegIndex dest, op1, op2, op3;
|
||||
|
||||
DataX3RegOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _dest, IntRegIndex _op1, IntRegIndex _op2,
|
||||
IntRegIndex _op3) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass),
|
||||
dest(_dest), op1(_op1), op2(_op2), op3(_op3)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class DataXCondCompImmOp : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
IntRegIndex op1;
|
||||
uint64_t imm;
|
||||
ConditionCode condCode;
|
||||
uint8_t defCc;
|
||||
|
||||
DataXCondCompImmOp(const char *mnem, ExtMachInst _machInst,
|
||||
OpClass __opClass, IntRegIndex _op1, uint64_t _imm,
|
||||
ConditionCode _condCode, uint8_t _defCc) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass),
|
||||
op1(_op1), imm(_imm), condCode(_condCode), defCc(_defCc)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class DataXCondCompRegOp : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
IntRegIndex op1, op2;
|
||||
ConditionCode condCode;
|
||||
uint8_t defCc;
|
||||
|
||||
DataXCondCompRegOp(const char *mnem, ExtMachInst _machInst,
|
||||
OpClass __opClass, IntRegIndex _op1, IntRegIndex _op2,
|
||||
ConditionCode _condCode, uint8_t _defCc) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass),
|
||||
op1(_op1), op2(_op2), condCode(_condCode), defCc(_defCc)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class DataXCondSelOp : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
IntRegIndex dest, op1, op2;
|
||||
ConditionCode condCode;
|
||||
|
||||
DataXCondSelOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _dest, IntRegIndex _op1, IntRegIndex _op2,
|
||||
ConditionCode _condCode) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass),
|
||||
dest(_dest), op1(_op1), op2(_op2), condCode(_condCode)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif //__ARCH_ARM_INSTS_DATA64_HH__
|
3086 src/arch/arm/insts/fplib.cc Normal file
File diff suppressed because it is too large

283 src/arch/arm/insts/fplib.hh Normal file
@@ -0,0 +1,283 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Authors: Edmund Grimley Evans
|
||||
* Thomas Grocutt
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* Floating-point library code, which will gradually replace vfp.hh. For
|
||||
* portability, this library does not use floating-point data types. Currently,
|
||||
* C's standard integer types are used in the API, though this could be changed
|
||||
* to something like class Fp32 { uint32_t x; }, etc.
|
||||
*/
|
||||
|
||||
#ifndef __ARCH_ARM_INSTS_FPLIB_HH__
|
||||
#define __ARCH_ARM_INSTS_FPLIB_HH__
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include "arch/arm/miscregs.hh"
|
||||
|
||||
namespace ArmISA
|
||||
{
|
||||
|
||||
enum FPRounding {
|
||||
FPRounding_TIEEVEN = 0,
|
||||
FPRounding_POSINF = 1,
|
||||
FPRounding_NEGINF = 2,
|
||||
FPRounding_ZERO = 3,
|
||||
FPRounding_TIEAWAY = 4,
|
||||
FPRounding_ODD = 5
|
||||
};
|
||||
|
||||
static inline FPRounding
|
||||
FPCRRounding(FPSCR &fpscr)
|
||||
{
|
||||
return (FPRounding)((uint32_t)fpscr >> 22 & 3);
|
||||
}
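// Illustrative usage sketch, assuming the uint32_t specializations declared
// below: callers pass raw IEEE-754 bit patterns rather than float/double, e.g.
//
//     FPSCR fpscr = 0;
//     uint32_t one = 0x3f800000, two = 0x40000000;   // 1.0f and 2.0f as bits
//     uint32_t sum = fplibAdd(one, two, fpscr);      // 0x40400000, i.e. 3.0f
//     FPRounding mode = FPCRRounding(fpscr);         // RMode field, bits 23:22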
|
||||
|
||||
/** Floating-point absolute value. */
|
||||
template <class T>
|
||||
T fplibAbs(T op);
|
||||
/** Floating-point add. */
|
||||
template <class T>
|
||||
T fplibAdd(T op1, T op2, FPSCR &fpscr);
|
||||
/** Floating-point compare (quiet and signaling). */
|
||||
template <class T>
|
||||
int fplibCompare(T op1, T op2, bool signal_nans, FPSCR &fpscr);
|
||||
/** Floating-point compare equal. */
|
||||
template <class T>
|
||||
bool fplibCompareEQ(T op1, T op2, FPSCR &fpscr);
|
||||
/** Floating-point compare greater than or equal. */
|
||||
template <class T>
|
||||
bool fplibCompareGE(T op1, T op2, FPSCR &fpscr);
|
||||
/** Floating-point compare greater than. */
|
||||
template <class T>
|
||||
bool fplibCompareGT(T op1, T op2, FPSCR &fpscr);
|
||||
/** Floating-point convert precision. */
|
||||
template <class T1, class T2>
|
||||
T2 fplibConvert(T1 op, FPRounding rounding, FPSCR &fpscr);
|
||||
/** Floating-point division. */
|
||||
template <class T>
|
||||
T fplibDiv(T op1, T op2, FPSCR &fpscr);
|
||||
/** Floating-point maximum. */
|
||||
template <class T>
|
||||
T fplibMax(T op1, T op2, FPSCR &fpscr);
|
||||
/** Floating-point maximum number. */
|
||||
template <class T>
|
||||
T fplibMaxNum(T op1, T op2, FPSCR &fpscr);
|
||||
/** Floating-point minimum. */
|
||||
template <class T>
|
||||
T fplibMin(T op1, T op2, FPSCR &fpscr);
|
||||
/** Floating-point minimum number. */
|
||||
template <class T>
|
||||
T fplibMinNum(T op1, T op2, FPSCR &fpscr);
|
||||
/** Floating-point multiply. */
|
||||
template <class T>
|
||||
T fplibMul(T op1, T op2, FPSCR &fpscr);
|
||||
/** Floating-point multiply-add. */
|
||||
template <class T>
|
||||
T fplibMulAdd(T addend, T op1, T op2, FPSCR &fpscr);
|
||||
/** Floating-point multiply extended. */
|
||||
template <class T>
|
||||
T fplibMulX(T op1, T op2, FPSCR &fpscr);
|
||||
/** Floating-point negate. */
|
||||
template <class T>
|
||||
T fplibNeg(T op);
|
||||
/** Floating-point reciprocal square root estimate. */
|
||||
template <class T>
|
||||
T fplibRSqrtEstimate(T op, FPSCR &fpscr);
|
||||
/** Floating-point reciprocal square root step. */
|
||||
template <class T>
|
||||
T fplibRSqrtStepFused(T op1, T op2, FPSCR &fpscr);
|
||||
/** Floating-point reciprocal estimate. */
|
||||
template <class T>
|
||||
T fplibRecipEstimate(T op, FPSCR &fpscr);
|
||||
/** Floating-point reciprocal step. */
|
||||
template <class T>
|
||||
T fplibRecipStepFused(T op1, T op2, FPSCR &fpscr);
|
||||
/** Floating-point reciprocal exponent. */
|
||||
template <class T>
|
||||
T fplibRecpX(T op, FPSCR &fpscr);
|
||||
/** Floating-point convert to integer. */
|
||||
template <class T>
|
||||
T fplibRoundInt(T op, FPRounding rounding, bool exact, FPSCR &fpscr);
|
||||
/** Floating-point square root. */
|
||||
template <class T>
|
||||
T fplibSqrt(T op, FPSCR &fpscr);
|
||||
/** Floating-point subtract. */
|
||||
template <class T>
|
||||
T fplibSub(T op1, T op2, FPSCR &fpscr);
|
||||
/** Floating-point convert to fixed-point. */
|
||||
template <class T1, class T2>
|
||||
T2 fplibFPToFixed(T1 op, int fbits, bool u, FPRounding rounding, FPSCR &fpscr);
|
||||
/** Floating-point convert from fixed-point. */
|
||||
template <class T>
|
||||
T fplibFixedToFP(uint64_t op, int fbits, bool u, FPRounding rounding,
|
||||
FPSCR &fpscr);
|
||||
|
||||
/* Function specializations... */
|
||||
template <>
|
||||
uint32_t fplibAbs(uint32_t op);
|
||||
template <>
|
||||
uint64_t fplibAbs(uint64_t op);
|
||||
template <>
|
||||
uint32_t fplibAdd(uint32_t op1, uint32_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibAdd(uint64_t op1, uint64_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
int fplibCompare(uint32_t op1, uint32_t op2, bool signal_nans, FPSCR &fpscr);
|
||||
template <>
|
||||
int fplibCompare(uint64_t op1, uint64_t op2, bool signal_nans, FPSCR &fpscr);
|
||||
template <>
|
||||
bool fplibCompareEQ(uint32_t op1, uint32_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
bool fplibCompareEQ(uint64_t op1, uint64_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
bool fplibCompareGE(uint32_t op1, uint32_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
bool fplibCompareGE(uint64_t op1, uint64_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
bool fplibCompareGT(uint32_t op1, uint32_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
bool fplibCompareGT(uint64_t op1, uint64_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint16_t fplibConvert(uint32_t op, FPRounding rounding, FPSCR &fpscr);
|
||||
template <>
|
||||
uint16_t fplibConvert(uint64_t op, FPRounding rounding, FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibConvert(uint16_t op, FPRounding rounding, FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibConvert(uint64_t op, FPRounding rounding, FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibConvert(uint16_t op, FPRounding rounding, FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibConvert(uint32_t op, FPRounding rounding, FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibDiv(uint32_t op1, uint32_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibDiv(uint64_t op1, uint64_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibMax(uint32_t op1, uint32_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibMax(uint64_t op1, uint64_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibMaxNum(uint32_t op1, uint32_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibMaxNum(uint64_t op1, uint64_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibMin(uint32_t op1, uint32_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibMin(uint64_t op1, uint64_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibMinNum(uint32_t op1, uint32_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibMinNum(uint64_t op1, uint64_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibMul(uint32_t op1, uint32_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibMul(uint64_t op1, uint64_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibMulAdd(uint32_t addend, uint32_t op1, uint32_t op2,
|
||||
FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibMulAdd(uint64_t addend, uint64_t op1, uint64_t op2,
|
||||
FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibMulX(uint32_t op1, uint32_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibMulX(uint64_t op1, uint64_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibNeg(uint32_t op);
|
||||
template <>
|
||||
uint64_t fplibNeg(uint64_t op);
|
||||
template <>
|
||||
uint32_t fplibRSqrtEstimate(uint32_t op, FPSCR &fpscr);
|
||||
template<>
|
||||
uint64_t fplibRSqrtEstimate(uint64_t op, FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibRSqrtStepFused(uint32_t op1, uint32_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibRSqrtStepFused(uint64_t op1, uint64_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibRecipEstimate(uint32_t op, FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibRecipEstimate(uint64_t op, FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibRecipStepFused(uint32_t op1, uint32_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibRecipStepFused(uint64_t op1, uint64_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibRecpX(uint32_t op, FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibRecpX(uint64_t op, FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibRoundInt(uint32_t op, FPRounding rounding, bool exact,
|
||||
FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibRoundInt(uint64_t op, FPRounding rounding, bool exact,
|
||||
FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibSqrt(uint32_t op, FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibSqrt(uint64_t op, FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibSub(uint32_t op1, uint32_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibSub(uint64_t op1, uint64_t op2, FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibFPToFixed(uint32_t op, int fbits, bool u, FPRounding rounding,
|
||||
FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibFPToFixed(uint64_t op, int fbits, bool u, FPRounding rounding,
|
||||
FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibFPToFixed(uint32_t op, int fbits, bool u, FPRounding rounding,
|
||||
FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibFPToFixed(uint64_t op, int fbits, bool u, FPRounding rounding,
|
||||
FPSCR &fpscr);
|
||||
template <>
|
||||
uint32_t fplibFixedToFP(uint64_t op, int fbits, bool u, FPRounding rounding,
|
||||
FPSCR &fpscr);
|
||||
template <>
|
||||
uint64_t fplibFixedToFP(uint64_t op, int fbits, bool u, FPRounding rounding,
|
||||
FPSCR &fpscr);
|
||||
}
|
||||
|
||||
#endif
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2010 ARM Limited
|
||||
* Copyright (c) 2010-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -43,7 +43,9 @@
|
|||
#include <sstream>
|
||||
|
||||
#include "arch/arm/insts/macromem.hh"
|
||||
|
||||
#include "arch/arm/generated/decoder.hh"
|
||||
#include "arch/arm/insts/neon64_mem.hh"
|
||||
|
||||
using namespace std;
|
||||
using namespace ArmISAInst;
|
||||
|
@ -177,6 +179,212 @@ MacroMemOp::MacroMemOp(const char *mnem, ExtMachInst machInst,
|
|||
}
|
||||
}
|
||||
|
||||
PairMemOp::PairMemOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
||||
uint32_t size, bool fp, bool load, bool noAlloc,
|
||||
bool signExt, bool exclusive, bool acrel,
|
||||
int64_t imm, AddrMode mode,
|
||||
IntRegIndex rn, IntRegIndex rt, IntRegIndex rt2) :
|
||||
PredMacroOp(mnem, machInst, __opClass)
|
||||
{
|
||||
bool writeback = (mode != AddrMd_Offset);
|
||||
numMicroops = 1 + (size / 4) + (writeback ? 1 : 0);
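// A sketch of the accounting above: an X-register LDP/STP (size == 8) with
// writeback becomes 1 address uop + 2 memory uops + 1 writeback uop; a
// Q-register pair (size == 16) needs four memory uops, since each 128-bit
// register is split into two 64-bit accesses; a W-register pair (size == 4)
// is handled by a single paired memory uop.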
|
||||
microOps = new StaticInstPtr[numMicroops];
|
||||
|
||||
StaticInstPtr *uop = microOps;
|
||||
|
||||
bool post = (mode == AddrMd_PostIndex);
|
||||
|
||||
rn = makeSP(rn);
|
||||
|
||||
*uop = new MicroAddXiSpAlignUop(machInst, INTREG_UREG0, rn, post ? 0 : imm);
|
||||
|
||||
if (fp) {
|
||||
if (size == 16) {
|
||||
if (load) {
|
||||
*++uop = new MicroLdrQBFpXImmUop(machInst, rt,
|
||||
INTREG_UREG0, 0, noAlloc, exclusive, acrel);
|
||||
*++uop = new MicroLdrQTFpXImmUop(machInst, rt,
|
||||
INTREG_UREG0, 0, noAlloc, exclusive, acrel);
|
||||
*++uop = new MicroLdrQBFpXImmUop(machInst, rt2,
|
||||
INTREG_UREG0, 16, noAlloc, exclusive, acrel);
|
||||
*++uop = new MicroLdrQTFpXImmUop(machInst, rt2,
|
||||
INTREG_UREG0, 16, noAlloc, exclusive, acrel);
|
||||
} else {
|
||||
*++uop = new MicroStrQBFpXImmUop(machInst, rt,
|
||||
INTREG_UREG0, 0, noAlloc, exclusive, acrel);
|
||||
*++uop = new MicroStrQTFpXImmUop(machInst, rt,
|
||||
INTREG_UREG0, 0, noAlloc, exclusive, acrel);
|
||||
*++uop = new MicroStrQBFpXImmUop(machInst, rt2,
|
||||
INTREG_UREG0, 16, noAlloc, exclusive, acrel);
|
||||
*++uop = new MicroStrQTFpXImmUop(machInst, rt2,
|
||||
INTREG_UREG0, 16, noAlloc, exclusive, acrel);
|
||||
}
|
||||
} else if (size == 8) {
|
||||
if (load) {
|
||||
*++uop = new MicroLdrFpXImmUop(machInst, rt,
|
||||
INTREG_UREG0, 0, noAlloc, exclusive, acrel);
|
||||
*++uop = new MicroLdrFpXImmUop(machInst, rt2,
|
||||
INTREG_UREG0, 8, noAlloc, exclusive, acrel);
|
||||
} else {
|
||||
*++uop = new MicroStrFpXImmUop(machInst, rt,
|
||||
INTREG_UREG0, 0, noAlloc, exclusive, acrel);
|
||||
*++uop = new MicroStrFpXImmUop(machInst, rt2,
|
||||
INTREG_UREG0, 8, noAlloc, exclusive, acrel);
|
||||
}
|
||||
} else if (size == 4) {
|
||||
if (load) {
|
||||
*++uop = new MicroLdrDFpXImmUop(machInst, rt, rt2,
|
||||
INTREG_UREG0, 0, noAlloc, exclusive, acrel);
|
||||
} else {
|
||||
*++uop = new MicroStrDFpXImmUop(machInst, rt, rt2,
|
||||
INTREG_UREG0, 0, noAlloc, exclusive, acrel);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (size == 8) {
|
||||
if (load) {
|
||||
*++uop = new MicroLdrXImmUop(machInst, rt, INTREG_UREG0,
|
||||
0, noAlloc, exclusive, acrel);
|
||||
*++uop = new MicroLdrXImmUop(machInst, rt2, INTREG_UREG0,
|
||||
size, noAlloc, exclusive, acrel);
|
||||
} else {
|
||||
*++uop = new MicroStrXImmUop(machInst, rt, INTREG_UREG0,
|
||||
0, noAlloc, exclusive, acrel);
|
||||
*++uop = new MicroStrXImmUop(machInst, rt2, INTREG_UREG0,
|
||||
size, noAlloc, exclusive, acrel);
|
||||
}
|
||||
} else if (size == 4) {
|
||||
if (load) {
|
||||
if (signExt) {
|
||||
*++uop = new MicroLdrDSXImmUop(machInst, rt, rt2,
|
||||
INTREG_UREG0, 0, noAlloc, exclusive, acrel);
|
||||
} else {
|
||||
*++uop = new MicroLdrDUXImmUop(machInst, rt, rt2,
|
||||
INTREG_UREG0, 0, noAlloc, exclusive, acrel);
|
||||
}
|
||||
} else {
|
||||
*++uop = new MicroStrDXImmUop(machInst, rt, rt2,
|
||||
INTREG_UREG0, 0, noAlloc, exclusive, acrel);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (writeback) {
|
||||
*++uop = new MicroAddXiUop(machInst, rn, INTREG_UREG0,
|
||||
post ? imm : 0);
|
||||
}
|
||||
|
||||
(*uop)->setLastMicroop();
|
||||
|
||||
for (StaticInstPtr *curUop = microOps;
|
||||
!(*curUop)->isLastMicroop(); curUop++) {
|
||||
(*curUop)->setDelayedCommit();
|
||||
}
|
||||
}
|
||||
|
||||
BigFpMemImmOp::BigFpMemImmOp(const char *mnem, ExtMachInst machInst,
|
||||
OpClass __opClass, bool load, IntRegIndex dest,
|
||||
IntRegIndex base, int64_t imm) :
|
||||
PredMacroOp(mnem, machInst, __opClass)
|
||||
{
|
||||
numMicroops = 2;
|
||||
microOps = new StaticInstPtr[numMicroops];
|
||||
|
||||
if (load) {
|
||||
microOps[0] = new MicroLdrQBFpXImmUop(machInst, dest, base, imm);
|
||||
microOps[1] = new MicroLdrQTFpXImmUop(machInst, dest, base, imm);
|
||||
} else {
|
||||
microOps[0] = new MicroStrQBFpXImmUop(machInst, dest, base, imm);
|
||||
microOps[1] = new MicroStrQTFpXImmUop(machInst, dest, base, imm);
|
||||
}
|
||||
microOps[0]->setDelayedCommit();
|
||||
microOps[1]->setLastMicroop();
|
||||
}
|
||||
|
||||
BigFpMemPostOp::BigFpMemPostOp(const char *mnem, ExtMachInst machInst,
|
||||
OpClass __opClass, bool load, IntRegIndex dest,
|
||||
IntRegIndex base, int64_t imm) :
|
||||
PredMacroOp(mnem, machInst, __opClass)
|
||||
{
|
||||
numMicroops = 3;
|
||||
microOps = new StaticInstPtr[numMicroops];
|
||||
|
||||
if (load) {
|
||||
microOps[0] = new MicroLdrQBFpXImmUop(machInst, dest, base, 0);
|
||||
microOps[1] = new MicroLdrQTFpXImmUop(machInst, dest, base, 0);
|
||||
} else {
|
||||
microOps[0] = new MicroStrQBFpXImmUop(machInst, dest, base, 0);
|
||||
microOps[1] = new MicroStrQTFpXImmUop(machInst, dest, base, 0);
|
||||
}
|
||||
microOps[2] = new MicroAddXiUop(machInst, base, base, imm);
|
||||
|
||||
microOps[0]->setDelayedCommit();
|
||||
microOps[1]->setDelayedCommit();
|
||||
microOps[2]->setLastMicroop();
|
||||
}
|
||||
|
||||
BigFpMemPreOp::BigFpMemPreOp(const char *mnem, ExtMachInst machInst,
|
||||
OpClass __opClass, bool load, IntRegIndex dest,
|
||||
IntRegIndex base, int64_t imm) :
|
||||
PredMacroOp(mnem, machInst, __opClass)
|
||||
{
|
||||
numMicroops = 3;
|
||||
microOps = new StaticInstPtr[numMicroops];
|
||||
|
||||
if (load) {
|
||||
microOps[0] = new MicroLdrQBFpXImmUop(machInst, dest, base, imm);
|
||||
microOps[1] = new MicroLdrQTFpXImmUop(machInst, dest, base, imm);
|
||||
} else {
|
||||
microOps[0] = new MicroStrQBFpXImmUop(machInst, dest, base, imm);
|
||||
microOps[1] = new MicroStrQTFpXImmUop(machInst, dest, base, imm);
|
||||
}
|
||||
microOps[2] = new MicroAddXiUop(machInst, base, base, imm);
|
||||
|
||||
microOps[0]->setDelayedCommit();
|
||||
microOps[1]->setDelayedCommit();
|
||||
microOps[2]->setLastMicroop();
|
||||
}
|
||||
|
||||
BigFpMemRegOp::BigFpMemRegOp(const char *mnem, ExtMachInst machInst,
|
||||
OpClass __opClass, bool load, IntRegIndex dest,
|
||||
IntRegIndex base, IntRegIndex offset,
|
||||
ArmExtendType type, int64_t imm) :
|
||||
PredMacroOp(mnem, machInst, __opClass)
|
||||
{
|
||||
numMicroops = 2;
|
||||
microOps = new StaticInstPtr[numMicroops];
|
||||
|
||||
if (load) {
|
||||
microOps[0] = new MicroLdrQBFpXRegUop(machInst, dest, base,
|
||||
offset, type, imm);
|
||||
microOps[1] = new MicroLdrQTFpXRegUop(machInst, dest, base,
|
||||
offset, type, imm);
|
||||
} else {
|
||||
microOps[0] = new MicroStrQBFpXRegUop(machInst, dest, base,
|
||||
offset, type, imm);
|
||||
microOps[1] = new MicroStrQTFpXRegUop(machInst, dest, base,
|
||||
offset, type, imm);
|
||||
}
|
||||
|
||||
microOps[0]->setDelayedCommit();
|
||||
microOps[1]->setLastMicroop();
|
||||
}
|
||||
|
||||
BigFpMemLitOp::BigFpMemLitOp(const char *mnem, ExtMachInst machInst,
|
||||
OpClass __opClass, IntRegIndex dest,
|
||||
int64_t imm) :
|
||||
PredMacroOp(mnem, machInst, __opClass)
|
||||
{
|
||||
numMicroops = 2;
|
||||
microOps = new StaticInstPtr[numMicroops];
|
||||
|
||||
microOps[0] = new MicroLdrQBFpXLitUop(machInst, dest, imm);
|
||||
microOps[1] = new MicroLdrQTFpXLitUop(machInst, dest, imm);
|
||||
|
||||
microOps[0]->setDelayedCommit();
|
||||
microOps[1]->setLastMicroop();
|
||||
}
|
||||
|
||||
VldMultOp::VldMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
||||
unsigned elems, RegIndex rn, RegIndex vd, unsigned regs,
|
||||
unsigned inc, uint32_t size, uint32_t align, RegIndex rm) :
|
||||
|
@ -193,7 +401,7 @@ VldMultOp::VldMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
|||
if (deinterleave) numMicroops += (regs / elems);
|
||||
microOps = new StaticInstPtr[numMicroops];
|
||||
|
||||
RegIndex rMid = deinterleave ? NumFloatArchRegs : vd * 2;
|
||||
RegIndex rMid = deinterleave ? NumFloatV7ArchRegs : vd * 2;
|
||||
|
||||
uint32_t noAlign = TLB::MustBeOne;
|
||||
|
||||
|
@ -295,7 +503,7 @@ VldSingleOp::VldSingleOp(const char *mnem, ExtMachInst machInst,
|
|||
numMicroops += (regs / elems);
|
||||
microOps = new StaticInstPtr[numMicroops];
|
||||
|
||||
RegIndex ufp0 = NumFloatArchRegs;
|
||||
RegIndex ufp0 = NumFloatV7ArchRegs;
|
||||
|
||||
unsigned uopIdx = 0;
|
||||
switch (loadSize) {
|
||||
|
@ -556,7 +764,7 @@ VstMultOp::VstMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
|||
|
||||
uint32_t noAlign = TLB::MustBeOne;
|
||||
|
||||
RegIndex rMid = interleave ? NumFloatArchRegs : vd * 2;
|
||||
RegIndex rMid = interleave ? NumFloatV7ArchRegs : vd * 2;
|
||||
|
||||
unsigned uopIdx = 0;
|
||||
if (interleave) {
|
||||
|
@ -657,7 +865,7 @@ VstSingleOp::VstSingleOp(const char *mnem, ExtMachInst machInst,
|
|||
numMicroops += (regs / elems);
|
||||
microOps = new StaticInstPtr[numMicroops];
|
||||
|
||||
RegIndex ufp0 = NumFloatArchRegs;
|
||||
RegIndex ufp0 = NumFloatV7ArchRegs;
|
||||
|
||||
unsigned uopIdx = 0;
|
||||
switch (elems) {
|
||||
|
@ -834,6 +1042,285 @@ VstSingleOp::VstSingleOp(const char *mnem, ExtMachInst machInst,
|
|||
microOps[numMicroops - 1]->setLastMicroop();
|
||||
}
|
||||
|
||||
VldMultOp64::VldMultOp64(const char *mnem, ExtMachInst machInst,
|
||||
OpClass __opClass, RegIndex rn, RegIndex vd,
|
||||
RegIndex rm, uint8_t eSize, uint8_t dataSize,
|
||||
uint8_t numStructElems, uint8_t numRegs, bool wb) :
|
||||
PredMacroOp(mnem, machInst, __opClass)
|
||||
{
|
||||
RegIndex vx = NumFloatV8ArchRegs / 4;
|
||||
RegIndex rnsp = (RegIndex) makeSP((IntRegIndex) rn);
|
||||
bool baseIsSP = isSP((IntRegIndex) rnsp);
|
||||
|
||||
numMicroops = wb ? 1 : 0;
|
||||
|
||||
int totNumBytes = numRegs * dataSize / 8;
|
||||
assert(totNumBytes <= 64);
|
||||
|
||||
// The guiding principle here is that no more than 16 bytes can be
|
||||
// transferred at a time
|
||||
int numMemMicroops = totNumBytes / 16;
|
||||
int residuum = totNumBytes % 16;
|
||||
if (residuum)
|
||||
++numMemMicroops;
|
||||
numMicroops += numMemMicroops;
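// For example, a four-register LD1 with 64-bit registers (numRegs == 4,
// dataSize == 64) moves 32 bytes, i.e. two full 16-byte memory microops and
// no residuum; a 24-byte transfer would instead take one 16-byte microop
// plus an 8-byte residuum microop.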
|
||||
|
||||
int numMarshalMicroops = numRegs / 2 + (numRegs % 2 ? 1 : 0);
|
||||
numMicroops += numMarshalMicroops;
|
||||
|
||||
microOps = new StaticInstPtr[numMicroops];
|
||||
unsigned uopIdx = 0;
|
||||
uint32_t memaccessFlags = TLB::MustBeOne | (TLB::ArmFlags) eSize |
|
||||
TLB::AllowUnaligned;
|
||||
|
||||
int i = 0;
|
||||
for (; i < numMemMicroops - 1; ++i) {
|
||||
microOps[uopIdx++] = new MicroNeonLoad64(
|
||||
machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
|
||||
baseIsSP, 16 /* accSize */, eSize);
|
||||
}
|
||||
microOps[uopIdx++] = new MicroNeonLoad64(
|
||||
machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags, baseIsSP,
|
||||
residuum ? residuum : 16 /* accSize */, eSize);
|
||||
|
||||
// Writeback microop: the post-increment amount is encoded in "Rm": a
|
||||
// 64-bit general register OR as '11111' for an immediate value equal to
|
||||
// the total number of bytes transferred (i.e. 8, 16, 24, 32, 48 or 64)
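// For instance, "ld1 {v0.16b, v1.16b}, [x0], #32" uses the '11111' form with
// an implied immediate of 32 (== totNumBytes), whereas
// "ld1 {v0.16b, v1.16b}, [x0], x2" post-increments the base by x2.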
|
||||
if (wb) {
|
||||
if (rm != ((RegIndex) INTREG_X31)) {
|
||||
microOps[uopIdx++] = new MicroAddXERegUop(machInst, rnsp, rnsp, rm,
|
||||
UXTX, 0);
|
||||
} else {
|
||||
microOps[uopIdx++] = new MicroAddXiUop(machInst, rnsp, rnsp,
|
||||
totNumBytes);
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < numMarshalMicroops; ++i) {
|
||||
microOps[uopIdx++] = new MicroDeintNeon64(
|
||||
machInst, vd + (RegIndex) (2 * i), vx, eSize, dataSize,
|
||||
numStructElems, numRegs, i /* step */);
|
||||
}
|
||||
|
||||
assert(uopIdx == numMicroops);
|
||||
|
||||
for (int i = 0; i < numMicroops - 1; ++i) {
|
||||
microOps[i]->setDelayedCommit();
|
||||
}
|
||||
microOps[numMicroops - 1]->setLastMicroop();
|
||||
}
|
||||
|
||||
VstMultOp64::VstMultOp64(const char *mnem, ExtMachInst machInst,
|
||||
OpClass __opClass, RegIndex rn, RegIndex vd,
|
||||
RegIndex rm, uint8_t eSize, uint8_t dataSize,
|
||||
uint8_t numStructElems, uint8_t numRegs, bool wb) :
|
||||
PredMacroOp(mnem, machInst, __opClass)
|
||||
{
|
||||
RegIndex vx = NumFloatV8ArchRegs / 4;
|
||||
RegIndex rnsp = (RegIndex) makeSP((IntRegIndex) rn);
|
||||
bool baseIsSP = isSP((IntRegIndex) rnsp);
|
||||
|
||||
numMicroops = wb ? 1 : 0;
|
||||
|
||||
int totNumBytes = numRegs * dataSize / 8;
|
||||
assert(totNumBytes <= 64);
|
||||
|
||||
// The guiding principle here is that no more than 16 bytes can be
|
||||
// transferred at a time
|
||||
int numMemMicroops = totNumBytes / 16;
|
||||
int residuum = totNumBytes % 16;
|
||||
if (residuum)
|
||||
++numMemMicroops;
|
||||
numMicroops += numMemMicroops;
|
||||
|
||||
int numMarshalMicroops = totNumBytes > 32 ? 2 : 1;
|
||||
numMicroops += numMarshalMicroops;
|
||||
|
||||
microOps = new StaticInstPtr[numMicroops];
|
||||
unsigned uopIdx = 0;
|
||||
|
||||
for (int i = 0; i < numMarshalMicroops; ++i) {
|
||||
microOps[uopIdx++] = new MicroIntNeon64(
|
||||
machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
|
||||
numStructElems, numRegs, i /* step */);
|
||||
}
|
||||
|
||||
uint32_t memaccessFlags = TLB::MustBeOne | (TLB::ArmFlags) eSize |
|
||||
TLB::AllowUnaligned;
|
||||
|
||||
int i = 0;
|
||||
for (; i < numMemMicroops - 1; ++i) {
|
||||
microOps[uopIdx++] = new MicroNeonStore64(
|
||||
machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
|
||||
baseIsSP, 16 /* accSize */, eSize);
|
||||
}
|
||||
microOps[uopIdx++] = new MicroNeonStore64(
|
||||
machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags, baseIsSP,
|
||||
residuum ? residuum : 16 /* accSize */, eSize);
|
||||
|
||||
// Writeback microop: the post-increment amount is encoded in "Rm": a
|
||||
// 64-bit general register OR as '11111' for an immediate value equal to
|
||||
// the total number of bytes transferred (i.e. 8, 16, 24, 32, 48 or 64)
|
||||
if (wb) {
|
||||
if (rm != ((RegIndex) INTREG_X31)) {
|
||||
microOps[uopIdx++] = new MicroAddXERegUop(machInst, rnsp, rnsp, rm,
|
||||
UXTX, 0);
|
||||
} else {
|
||||
microOps[uopIdx++] = new MicroAddXiUop(machInst, rnsp, rnsp,
|
||||
totNumBytes);
|
||||
}
|
||||
}
|
||||
|
||||
assert(uopIdx == numMicroops);
|
||||
|
||||
for (int i = 0; i < numMicroops - 1; i++) {
|
||||
microOps[i]->setDelayedCommit();
|
||||
}
|
||||
microOps[numMicroops - 1]->setLastMicroop();
|
||||
}
|
||||
|
||||
VldSingleOp64::VldSingleOp64(const char *mnem, ExtMachInst machInst,
|
||||
OpClass __opClass, RegIndex rn, RegIndex vd,
|
||||
RegIndex rm, uint8_t eSize, uint8_t dataSize,
|
||||
uint8_t numStructElems, uint8_t index, bool wb,
|
||||
bool replicate) :
|
||||
PredMacroOp(mnem, machInst, __opClass)
|
||||
{
|
||||
RegIndex vx = NumFloatV8ArchRegs / 4;
|
||||
RegIndex rnsp = (RegIndex) makeSP((IntRegIndex) rn);
|
||||
bool baseIsSP = isSP((IntRegIndex) rnsp);
|
||||
|
||||
numMicroops = wb ? 1 : 0;
|
||||
|
||||
int eSizeBytes = 1 << eSize;
|
||||
int totNumBytes = numStructElems * eSizeBytes;
|
||||
assert(totNumBytes <= 64);
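// e.g. a single-structure LD4 of 32-bit elements (eSize == 2, so
// eSizeBytes == 4, and numStructElems == 4) transfers 16 bytes and needs
// only one memory microop below.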
|
||||
|
||||
// The guiding principle here is that no more than 16 bytes can be
|
||||
// transferred at a time
|
||||
int numMemMicroops = totNumBytes / 16;
|
||||
int residuum = totNumBytes % 16;
|
||||
if (residuum)
|
||||
++numMemMicroops;
|
||||
numMicroops += numMemMicroops;
|
||||
|
||||
int numMarshalMicroops = numStructElems / 2 + (numStructElems % 2 ? 1 : 0);
|
||||
numMicroops += numMarshalMicroops;
|
||||
|
||||
microOps = new StaticInstPtr[numMicroops];
|
||||
unsigned uopIdx = 0;
|
||||
|
||||
uint32_t memaccessFlags = TLB::MustBeOne | (TLB::ArmFlags) eSize |
|
||||
TLB::AllowUnaligned;
|
||||
|
||||
int i = 0;
|
||||
for (; i < numMemMicroops - 1; ++i) {
|
||||
microOps[uopIdx++] = new MicroNeonLoad64(
|
||||
machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
|
||||
baseIsSP, 16 /* accSize */, eSize);
|
||||
}
|
||||
microOps[uopIdx++] = new MicroNeonLoad64(
|
||||
machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags, baseIsSP,
|
||||
residuum ? residuum : 16 /* accSize */, eSize);
|
||||
|
||||
// Writeback microop: the post-increment amount is encoded in "Rm": a
|
||||
// 64-bit general register OR as '11111' for an immediate value equal to
|
||||
// the total number of bytes transferred (i.e. 8, 16, 24, 32, 48 or 64)
|
||||
if (wb) {
|
||||
if (rm != ((RegIndex) INTREG_X31)) {
|
||||
microOps[uopIdx++] = new MicroAddXERegUop(machInst, rnsp, rnsp, rm,
|
||||
UXTX, 0);
|
||||
} else {
|
||||
microOps[uopIdx++] = new MicroAddXiUop(machInst, rnsp, rnsp,
|
||||
totNumBytes);
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < numMarshalMicroops; ++i) {
|
||||
microOps[uopIdx++] = new MicroUnpackNeon64(
|
||||
machInst, vd + (RegIndex) (2 * i), vx, eSize, dataSize,
|
||||
numStructElems, index, i /* step */, replicate);
|
||||
}
|
||||
|
||||
assert(uopIdx == numMicroops);
|
||||
|
||||
for (int i = 0; i < numMicroops - 1; i++) {
|
||||
microOps[i]->setDelayedCommit();
|
||||
}
|
||||
microOps[numMicroops - 1]->setLastMicroop();
|
||||
}
|
||||
|
||||
VstSingleOp64::VstSingleOp64(const char *mnem, ExtMachInst machInst,
|
||||
OpClass __opClass, RegIndex rn, RegIndex vd,
|
||||
RegIndex rm, uint8_t eSize, uint8_t dataSize,
|
||||
uint8_t numStructElems, uint8_t index, bool wb,
|
||||
bool replicate) :
|
||||
PredMacroOp(mnem, machInst, __opClass)
|
||||
{
|
||||
RegIndex vx = NumFloatV8ArchRegs / 4;
|
||||
RegIndex rnsp = (RegIndex) makeSP((IntRegIndex) rn);
|
||||
bool baseIsSP = isSP((IntRegIndex) rnsp);
|
||||
|
||||
numMicroops = wb ? 1 : 0;
|
||||
|
||||
int eSizeBytes = 1 << eSize;
|
||||
int totNumBytes = numStructElems * eSizeBytes;
|
||||
assert(totNumBytes <= 64);
|
||||
|
||||
// The guiding principle here is that no more than 16 bytes can be
|
||||
// transferred at a time
|
||||
int numMemMicroops = totNumBytes / 16;
|
||||
int residuum = totNumBytes % 16;
|
||||
if (residuum)
|
||||
++numMemMicroops;
|
||||
numMicroops += numMemMicroops;
|
||||
|
||||
int numMarshalMicroops = totNumBytes > 32 ? 2 : 1;
|
||||
numMicroops += numMarshalMicroops;
|
||||
|
||||
microOps = new StaticInstPtr[numMicroops];
|
||||
unsigned uopIdx = 0;
|
||||
|
||||
for (int i = 0; i < numMarshalMicroops; ++i) {
|
||||
microOps[uopIdx++] = new MicroPackNeon64(
|
||||
machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
|
||||
numStructElems, index, i /* step */, replicate);
|
||||
}
|
||||
|
||||
uint32_t memaccessFlags = TLB::MustBeOne | (TLB::ArmFlags) eSize |
|
||||
TLB::AllowUnaligned;
|
||||
|
||||
int i = 0;
|
||||
for (; i < numMemMicroops - 1; ++i) {
|
||||
microOps[uopIdx++] = new MicroNeonStore64(
|
||||
machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
|
||||
baseIsSP, 16 /* accSize */, eSize);
|
||||
}
|
||||
microOps[uopIdx++] = new MicroNeonStore64(
|
||||
machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags, baseIsSP,
|
||||
residuum ? residuum : 16 /* accSize */, eSize);
|
||||
|
||||
// Writeback microop: the post-increment amount is encoded in "Rm": a
|
||||
// 64-bit general register OR as '11111' for an immediate value equal to
|
||||
// the total number of bytes transferred (i.e. 8, 16, 24, 32, 48 or 64)
|
||||
if (wb) {
|
||||
if (rm != ((RegIndex) INTREG_X31)) {
|
||||
microOps[uopIdx++] = new MicroAddXERegUop(machInst, rnsp, rnsp, rm,
|
||||
UXTX, 0);
|
||||
} else {
|
||||
microOps[uopIdx++] = new MicroAddXiUop(machInst, rnsp, rnsp,
|
||||
totNumBytes);
|
||||
}
|
||||
}
|
||||
|
||||
assert(uopIdx == numMicroops);
|
||||
|
||||
for (int i = 0; i < numMicroops - 1; i++) {
|
||||
microOps[i]->setDelayedCommit();
|
||||
}
|
||||
microOps[numMicroops - 1]->setLastMicroop();
|
||||
}
|
||||
|
||||
MacroVFPMemOp::MacroVFPMemOp(const char *mnem, ExtMachInst machInst,
|
||||
OpClass __opClass, IntRegIndex rn,
|
||||
RegIndex vd, bool single, bool up,
|
||||
|
@ -846,14 +1333,14 @@ MacroVFPMemOp::MacroVFPMemOp(const char *mnem, ExtMachInst machInst,
|
|||
// to be functionally identical except that fldmx is deprecated. For now
|
||||
// we'll assume they're otherwise interchangeable.
|
||||
int count = (single ? offset : (offset / 2));
|
||||
if (count == 0 || count > NumFloatArchRegs)
|
||||
if (count == 0 || count > NumFloatV7ArchRegs)
|
||||
warn_once("Bad offset field for VFP load/store multiple.\n");
|
||||
if (count == 0) {
|
||||
// Force there to be at least one microop so the macroop makes sense.
|
||||
writeback = true;
|
||||
}
|
||||
if (count > NumFloatArchRegs)
|
||||
count = NumFloatArchRegs;
|
||||
if (count > NumFloatV7ArchRegs)
|
||||
count = NumFloatV7ArchRegs;
|
||||
|
||||
numMicroops = count * (single ? 1 : 2) + (writeback ? 1 : 0);
|
||||
microOps = new StaticInstPtr[numMicroops];
|
||||
|
@ -933,6 +1420,19 @@ MicroIntImmOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
|||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
MicroIntImmXOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss);
|
||||
printReg(ss, ura);
|
||||
ss << ", ";
|
||||
printReg(ss, urb);
|
||||
ss << ", ";
|
||||
ccprintf(ss, "#%d", imm);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
MicroSetPCCPSR::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
|
@ -942,6 +1442,18 @@ MicroSetPCCPSR::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
|||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
MicroIntRegXOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss);
|
||||
printReg(ss, ura);
|
||||
ccprintf(ss, ", ");
|
||||
printReg(ss, urb);
|
||||
printExtendOperand(false, ss, (IntRegIndex)urc, type, shiftAmt);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
MicroIntMov::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2010 ARM Limited
|
||||
* Copyright (c) 2010-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -85,6 +85,27 @@ class MicroOp : public PredOp
|
|||
}
|
||||
};
|
||||
|
||||
class MicroOpX : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
MicroOpX(const char *mnem, ExtMachInst machInst, OpClass __opClass)
|
||||
: ArmStaticInst(mnem, machInst, __opClass)
|
||||
{}
|
||||
|
||||
public:
|
||||
void
|
||||
advancePC(PCState &pcState) const
|
||||
{
|
||||
if (flags[IsLastMicroop]) {
|
||||
pcState.uEnd();
|
||||
} else if (flags[IsMicroop]) {
|
||||
pcState.uAdvance();
|
||||
} else {
|
||||
pcState.advance();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Microops for Neon loads/stores
|
||||
*/
|
||||
|
@ -135,6 +156,96 @@ class MicroNeonMixLaneOp : public MicroNeonMixOp
|
|||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Microops for AArch64 NEON load/store (de)interleaving
|
||||
*/
|
||||
class MicroNeonMixOp64 : public MicroOp
|
||||
{
|
||||
protected:
|
||||
RegIndex dest, op1;
|
||||
uint8_t eSize, dataSize, numStructElems, numRegs, step;
|
||||
|
||||
MicroNeonMixOp64(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
||||
RegIndex _dest, RegIndex _op1, uint8_t _eSize,
|
||||
uint8_t _dataSize, uint8_t _numStructElems,
|
||||
uint8_t _numRegs, uint8_t _step)
|
||||
: MicroOp(mnem, machInst, __opClass), dest(_dest), op1(_op1),
|
||||
eSize(_eSize), dataSize(_dataSize), numStructElems(_numStructElems),
|
||||
numRegs(_numRegs), step(_step)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
class MicroNeonMixLaneOp64 : public MicroOp
|
||||
{
|
||||
protected:
|
||||
RegIndex dest, op1;
|
||||
uint8_t eSize, dataSize, numStructElems, lane, step;
|
||||
bool replicate;
|
||||
|
||||
MicroNeonMixLaneOp64(const char *mnem, ExtMachInst machInst,
|
||||
OpClass __opClass, RegIndex _dest, RegIndex _op1,
|
||||
uint8_t _eSize, uint8_t _dataSize,
|
||||
uint8_t _numStructElems, uint8_t _lane, uint8_t _step,
|
||||
bool _replicate = false)
|
||||
: MicroOp(mnem, machInst, __opClass), dest(_dest), op1(_op1),
|
||||
eSize(_eSize), dataSize(_dataSize), numStructElems(_numStructElems),
|
||||
lane(_lane), step(_step), replicate(_replicate)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Base classes for microcoded AArch64 NEON memory instructions.
|
||||
*/
|
||||
class VldMultOp64 : public PredMacroOp
|
||||
{
|
||||
protected:
|
||||
uint8_t eSize, dataSize, numStructElems, numRegs;
|
||||
bool wb;
|
||||
|
||||
VldMultOp64(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
||||
RegIndex rn, RegIndex vd, RegIndex rm, uint8_t eSize,
|
||||
uint8_t dataSize, uint8_t numStructElems, uint8_t numRegs,
|
||||
bool wb);
|
||||
};
|
||||
|
||||
class VstMultOp64 : public PredMacroOp
|
||||
{
|
||||
protected:
|
||||
uint8_t eSize, dataSize, numStructElems, numRegs;
|
||||
bool wb;
|
||||
|
||||
VstMultOp64(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
||||
RegIndex rn, RegIndex vd, RegIndex rm, uint8_t eSize,
|
||||
uint8_t dataSize, uint8_t numStructElems, uint8_t numRegs,
|
||||
bool wb);
|
||||
};
|
||||
|
||||
class VldSingleOp64 : public PredMacroOp
|
||||
{
|
||||
protected:
|
||||
uint8_t eSize, dataSize, numStructElems, index;
|
||||
bool wb, replicate;
|
||||
|
||||
VldSingleOp64(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
||||
RegIndex rn, RegIndex vd, RegIndex rm, uint8_t eSize,
|
||||
uint8_t dataSize, uint8_t numStructElems, uint8_t index,
|
||||
bool wb, bool replicate = false);
|
||||
};
|
||||
|
||||
class VstSingleOp64 : public PredMacroOp
|
||||
{
|
||||
protected:
|
||||
uint8_t eSize, dataSize, numStructElems, index;
|
||||
bool wb, replicate;
|
||||
|
||||
VstSingleOp64(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
||||
RegIndex rn, RegIndex vd, RegIndex rm, uint8_t eSize,
|
||||
uint8_t dataSize, uint8_t numStructElems, uint8_t index,
|
||||
bool wb, bool replicate = false);
|
||||
};
|
||||
|
||||
/**
|
||||
* Microops of the form
|
||||
* PC = IntRegA
|
||||
|
@ -180,10 +291,10 @@ class MicroIntImmOp : public MicroOp
|
|||
{
|
||||
protected:
|
||||
RegIndex ura, urb;
|
||||
uint32_t imm;
|
||||
int32_t imm;
|
||||
|
||||
MicroIntImmOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
||||
RegIndex _ura, RegIndex _urb, uint32_t _imm)
|
||||
RegIndex _ura, RegIndex _urb, int32_t _imm)
|
||||
: MicroOp(mnem, machInst, __opClass),
|
||||
ura(_ura), urb(_urb), imm(_imm)
|
||||
{
|
||||
|
@ -192,6 +303,22 @@ class MicroIntImmOp : public MicroOp
|
|||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class MicroIntImmXOp : public MicroOpX
|
||||
{
|
||||
protected:
|
||||
RegIndex ura, urb;
|
||||
int64_t imm;
|
||||
|
||||
MicroIntImmXOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
||||
RegIndex _ura, RegIndex _urb, int64_t _imm)
|
||||
: MicroOpX(mnem, machInst, __opClass),
|
||||
ura(_ura), urb(_urb), imm(_imm)
|
||||
{
|
||||
}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
/**
|
||||
* Microops of the form IntRegA = IntRegB op IntRegC
|
||||
*/
|
||||
|
@ -210,6 +337,25 @@ class MicroIntOp : public MicroOp
|
|||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class MicroIntRegXOp : public MicroOp
|
||||
{
|
||||
protected:
|
||||
RegIndex ura, urb, urc;
|
||||
ArmExtendType type;
|
||||
uint32_t shiftAmt;
|
||||
|
||||
MicroIntRegXOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
||||
RegIndex _ura, RegIndex _urb, RegIndex _urc,
|
||||
ArmExtendType _type, uint32_t _shiftAmt)
|
||||
: MicroOp(mnem, machInst, __opClass),
|
||||
ura(_ura), urb(_urb), urc(_urc),
|
||||
type(_type), shiftAmt(_shiftAmt)
|
||||
{
|
||||
}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
/**
|
||||
* Microops of the form IntRegA = IntRegB op shifted IntRegC
|
||||
*/
|
||||
|
@ -260,6 +406,61 @@ class MacroMemOp : public PredMacroOp
|
|||
bool writeback, bool load, uint32_t reglist);
|
||||
};
|
||||
|
||||
/**
|
||||
* Base class for pair load/store instructions.
|
||||
*/
|
||||
class PairMemOp : public PredMacroOp
|
||||
{
|
||||
public:
|
||||
enum AddrMode {
|
||||
AddrMd_Offset,
|
||||
AddrMd_PreIndex,
|
||||
AddrMd_PostIndex
|
||||
};
|
||||
|
||||
protected:
|
||||
PairMemOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
||||
uint32_t size, bool fp, bool load, bool noAlloc, bool signExt,
|
||||
bool exclusive, bool acrel, int64_t imm, AddrMode mode,
|
||||
IntRegIndex rn, IntRegIndex rt, IntRegIndex rt2);
|
||||
};
|
||||
|
||||
class BigFpMemImmOp : public PredMacroOp
|
||||
{
|
||||
protected:
|
||||
BigFpMemImmOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
||||
bool load, IntRegIndex dest, IntRegIndex base, int64_t imm);
|
||||
};
|
||||
|
||||
class BigFpMemPostOp : public PredMacroOp
|
||||
{
|
||||
protected:
|
||||
BigFpMemPostOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
||||
bool load, IntRegIndex dest, IntRegIndex base, int64_t imm);
|
||||
};
|
||||
|
||||
class BigFpMemPreOp : public PredMacroOp
|
||||
{
|
||||
protected:
|
||||
BigFpMemPreOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
||||
bool load, IntRegIndex dest, IntRegIndex base, int64_t imm);
|
||||
};
|
||||
|
||||
class BigFpMemRegOp : public PredMacroOp
|
||||
{
|
||||
protected:
|
||||
BigFpMemRegOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
||||
bool load, IntRegIndex dest, IntRegIndex base,
|
||||
IntRegIndex offset, ArmExtendType type, int64_t imm);
|
||||
};
|
||||
|
||||
class BigFpMemLitOp : public PredMacroOp
|
||||
{
|
||||
protected:
|
||||
BigFpMemLitOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
|
||||
IntRegIndex dest, int64_t imm);
|
||||
};
|
||||
|
||||
/**
|
||||
* Base classes for microcoded integer memory instructions.
|
||||
*/
|
||||
|
|
|
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2010 ARM Limited
 * Copyright (c) 2010, 2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall

@@ -157,6 +157,9 @@ SrsOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
      case MODE_ABORT:
        ss << "abort";
        break;
      case MODE_HYP:
        ss << "hyp";
        break;
      case MODE_UNDEFINED:
        ss << "undefined";
        break;
193 src/arch/arm/insts/mem64.cc Normal file
@@ -0,0 +1,193 @@
|
|||
/*
|
||||
* Copyright (c) 2011-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Authors: Gabe Black
|
||||
*/
|
||||
|
||||
#include "arch/arm/insts/mem64.hh"
|
||||
#include "arch/arm/tlb.hh"
|
||||
#include "base/loader/symtab.hh"
|
||||
#include "mem/request.hh"
|
||||
|
||||
using namespace std;
|
||||
|
||||
namespace ArmISA
|
||||
{
|
||||
|
||||
std::string
|
||||
SysDC64::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss, "", false);
|
||||
ccprintf(ss, ", [");
|
||||
printReg(ss, base);
|
||||
ccprintf(ss, "]");
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
|
||||
|
||||
void
|
||||
Memory64::startDisassembly(std::ostream &os) const
|
||||
{
|
||||
printMnemonic(os, "", false);
|
||||
printReg(os, dest);
|
||||
ccprintf(os, ", [");
|
||||
printReg(os, base);
|
||||
}
|
||||
|
||||
void
|
||||
Memory64::setExcAcRel(bool exclusive, bool acrel)
|
||||
{
|
||||
if (exclusive)
|
||||
memAccessFlags |= Request::LLSC;
|
||||
else
|
||||
memAccessFlags |= ArmISA::TLB::AllowUnaligned;
|
||||
if (acrel) {
|
||||
flags[IsMemBarrier] = true;
|
||||
flags[IsWriteBarrier] = true;
|
||||
flags[IsReadBarrier] = true;
|
||||
}
|
||||
}
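A minimal standalone sketch (not gem5 code) of the flag-combination pattern setExcAcRel() implements: exclusive accesses get an LLSC-style flag, ordinary accesses are allowed to be unaligned, and acquire/release accesses pick up barrier flags. The enum names and values below are illustrative only.

// Standalone sketch: flag classification in the style of setExcAcRel().
#include <cstdint>
#include <iostream>

enum AccessFlag : uint32_t {
    Exclusive    = 1u << 0,  // load/store-exclusive (LLSC-style)
    AllowUnalign = 1u << 1,  // ordinary accesses may be unaligned
    MemBarrier   = 1u << 2,  // acquire/release acts as a memory barrier
    ReadBarrier  = 1u << 3,
    WriteBarrier = 1u << 4,
};

uint32_t
classifyAccess(bool exclusive, bool acrel)
{
    uint32_t flags = 0;
    if (exclusive)
        flags |= Exclusive;       // exclusives must be naturally aligned
    else
        flags |= AllowUnalign;
    if (acrel)
        flags |= MemBarrier | ReadBarrier | WriteBarrier;
    return flags;
}

int main()
{
    std::cout << std::hex
              << classifyAccess(true, false) << " "    // 0x1
              << classifyAccess(false, true) << "\n";  // 0x1e
    return 0;
}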
|
||||
|
||||
std::string
|
||||
MemoryImm64::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
startDisassembly(ss);
|
||||
if (imm)
|
||||
ccprintf(ss, ", #%d", imm);
|
||||
ccprintf(ss, "]");
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
MemoryDImm64::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss, "", false);
|
||||
printReg(ss, dest);
|
||||
ccprintf(ss, ", ");
|
||||
printReg(ss, dest2);
|
||||
ccprintf(ss, ", [");
|
||||
printReg(ss, base);
|
||||
if (imm)
|
||||
ccprintf(ss, ", #%d", imm);
|
||||
ccprintf(ss, "]");
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
MemoryDImmEx64::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss, "", false);
|
||||
printReg(ss, result);
|
||||
ccprintf(ss, ", ");
|
||||
printReg(ss, dest);
|
||||
ccprintf(ss, ", ");
|
||||
printReg(ss, dest2);
|
||||
ccprintf(ss, ", [");
|
||||
printReg(ss, base);
|
||||
if (imm)
|
||||
ccprintf(ss, ", #%d", imm);
|
||||
ccprintf(ss, "]");
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
MemoryPreIndex64::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
startDisassembly(ss);
|
||||
ccprintf(ss, ", #%d]!", imm);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
MemoryPostIndex64::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
startDisassembly(ss);
|
||||
if (imm)
|
||||
ccprintf(ss, "], #%d", imm);
|
||||
ccprintf(ss, "]");
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
MemoryReg64::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
startDisassembly(ss);
|
||||
printExtendOperand(false, ss, offset, type, shiftAmt);
|
||||
ccprintf(ss, "]");
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
MemoryRaw64::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
startDisassembly(ss);
|
||||
ccprintf(ss, "]");
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
MemoryEx64::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss, "", false);
|
||||
printReg(ss, dest);
|
||||
ccprintf(ss, ", ");
|
||||
printReg(ss, result);
|
||||
ccprintf(ss, ", [");
|
||||
printReg(ss, base);
|
||||
ccprintf(ss, "]");
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
MemoryLiteral64::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss, "", false);
|
||||
printReg(ss, dest);
|
||||
ccprintf(ss, ", #%d", pc + imm);
|
||||
return ss.str();
|
||||
}
|
||||
}
|
src/arch/arm/insts/mem64.hh (new file, 253 lines)
@@ -0,0 +1,253 @@
/*
|
||||
* Copyright (c) 2011-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Authors: Gabe Black
|
||||
*/
|
||||
#ifndef __ARCH_ARM_MEM64_HH__
|
||||
#define __ARCH_ARM_MEM64_HH__
|
||||
|
||||
#include "arch/arm/insts/static_inst.hh"
|
||||
|
||||
namespace ArmISA
|
||||
{
|
||||
|
||||
class SysDC64 : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
IntRegIndex base;
|
||||
IntRegIndex dest;
|
||||
uint64_t imm;
|
||||
|
||||
SysDC64(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _base, IntRegIndex _dest, uint64_t _imm)
|
||||
: ArmStaticInst(mnem, _machInst, __opClass), base(_base), dest(_dest),
|
||||
imm(_imm)
|
||||
{}
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class MightBeMicro64 : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
MightBeMicro64(const char *mnem, ExtMachInst _machInst, OpClass __opClass)
|
||||
: ArmStaticInst(mnem, _machInst, __opClass)
|
||||
{}
|
||||
|
||||
void
|
||||
advancePC(PCState &pcState) const
|
||||
{
|
||||
if (flags[IsLastMicroop]) {
|
||||
pcState.uEnd();
|
||||
} else if (flags[IsMicroop]) {
|
||||
pcState.uAdvance();
|
||||
} else {
|
||||
pcState.advance();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
class Memory64 : public MightBeMicro64
|
||||
{
|
||||
public:
|
||||
enum AddrMode {
|
||||
AddrMd_Offset,
|
||||
AddrMd_PreIndex,
|
||||
AddrMd_PostIndex
|
||||
};
|
||||
|
||||
protected:
|
||||
|
||||
IntRegIndex dest;
|
||||
IntRegIndex base;
|
||||
/// True if the base register is SP (used for SP alignment checking).
|
||||
bool baseIsSP;
|
||||
static const unsigned numMicroops = 3;
|
||||
|
||||
StaticInstPtr *uops;
|
||||
|
||||
Memory64(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _dest, IntRegIndex _base)
|
||||
: MightBeMicro64(mnem, _machInst, __opClass),
|
||||
dest(_dest), base(_base), uops(NULL)
|
||||
{
|
||||
baseIsSP = isSP(_base);
|
||||
}
|
||||
|
||||
virtual
|
||||
~Memory64()
|
||||
{
|
||||
delete [] uops;
|
||||
}
|
||||
|
||||
StaticInstPtr
|
||||
fetchMicroop(MicroPC microPC) const
|
||||
{
|
||||
assert(uops != NULL && microPC < numMicroops);
|
||||
return uops[microPC];
|
||||
}
|
||||
|
||||
void startDisassembly(std::ostream &os) const;
|
||||
|
||||
unsigned memAccessFlags;
|
||||
|
||||
void setExcAcRel(bool exclusive, bool acrel);
|
||||
};
|
||||
|
||||
class MemoryImm64 : public Memory64
|
||||
{
|
||||
protected:
|
||||
int64_t imm;
|
||||
|
||||
MemoryImm64(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _dest, IntRegIndex _base, int64_t _imm)
|
||||
: Memory64(mnem, _machInst, __opClass, _dest, _base), imm(_imm)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class MemoryDImm64 : public MemoryImm64
|
||||
{
|
||||
protected:
|
||||
IntRegIndex dest2;
|
||||
|
||||
MemoryDImm64(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _dest, IntRegIndex _dest2, IntRegIndex _base,
|
||||
int64_t _imm)
|
||||
: MemoryImm64(mnem, _machInst, __opClass, _dest, _base, _imm),
|
||||
dest2(_dest2)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class MemoryDImmEx64 : public MemoryDImm64
|
||||
{
|
||||
protected:
|
||||
IntRegIndex result;
|
||||
|
||||
MemoryDImmEx64(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _result, IntRegIndex _dest, IntRegIndex _dest2,
|
||||
IntRegIndex _base, int32_t _imm)
|
||||
: MemoryDImm64(mnem, _machInst, __opClass, _dest, _dest2,
|
||||
_base, _imm), result(_result)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class MemoryPreIndex64 : public MemoryImm64
|
||||
{
|
||||
protected:
|
||||
MemoryPreIndex64(const char *mnem, ExtMachInst _machInst,
|
||||
OpClass __opClass, IntRegIndex _dest, IntRegIndex _base,
|
||||
int64_t _imm)
|
||||
: MemoryImm64(mnem, _machInst, __opClass, _dest, _base, _imm)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class MemoryPostIndex64 : public MemoryImm64
|
||||
{
|
||||
protected:
|
||||
MemoryPostIndex64(const char *mnem, ExtMachInst _machInst,
|
||||
OpClass __opClass, IntRegIndex _dest, IntRegIndex _base,
|
||||
int64_t _imm)
|
||||
: MemoryImm64(mnem, _machInst, __opClass, _dest, _base, _imm)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class MemoryReg64 : public Memory64
|
||||
{
|
||||
protected:
|
||||
IntRegIndex offset;
|
||||
ArmExtendType type;
|
||||
uint64_t shiftAmt;
|
||||
|
||||
MemoryReg64(const char *mnem, ExtMachInst _machInst,
|
||||
OpClass __opClass, IntRegIndex _dest, IntRegIndex _base,
|
||||
IntRegIndex _offset, ArmExtendType _type,
|
||||
uint64_t _shiftAmt)
|
||||
: Memory64(mnem, _machInst, __opClass, _dest, _base),
|
||||
offset(_offset), type(_type), shiftAmt(_shiftAmt)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class MemoryRaw64 : public Memory64
|
||||
{
|
||||
protected:
|
||||
MemoryRaw64(const char *mnem, ExtMachInst _machInst,
|
||||
OpClass __opClass, IntRegIndex _dest, IntRegIndex _base)
|
||||
: Memory64(mnem, _machInst, __opClass, _dest, _base)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class MemoryEx64 : public Memory64
|
||||
{
|
||||
protected:
|
||||
IntRegIndex result;
|
||||
|
||||
MemoryEx64(const char *mnem, ExtMachInst _machInst,
|
||||
OpClass __opClass, IntRegIndex _dest, IntRegIndex _base,
|
||||
IntRegIndex _result)
|
||||
: Memory64(mnem, _machInst, __opClass, _dest, _base), result(_result)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class MemoryLiteral64 : public Memory64
|
||||
{
|
||||
protected:
|
||||
int64_t imm;
|
||||
|
||||
MemoryLiteral64(const char *mnem, ExtMachInst _machInst,
|
||||
OpClass __opClass, IntRegIndex _dest, int64_t _imm)
|
||||
: Memory64(mnem, _machInst, __opClass, _dest, INTREG_ZERO), imm(_imm)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
}
|
||||
|
||||
#endif //__ARCH_ARM_INSTS_MEM_HH__
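For reference, a small standalone sketch (not part of this commit) of the three AArch64 addressing-mode spellings the Memory*64 disassembly classes above are expected to produce: plain offset, pre-index with a trailing '!', and post-index with the immediate after the bracket.

// Standalone sketch: offset / pre-index / post-index operand spellings.
#include <cstdio>

enum AddrMode { Offset, PreIndex, PostIndex };

void printLoad(const char *rt, const char *rn, long imm, AddrMode mode)
{
    switch (mode) {
      case Offset:
        std::printf("  ldr %s, [%s, #%ld]\n", rt, rn, imm);
        break;
      case PreIndex:
        std::printf("  ldr %s, [%s, #%ld]!\n", rt, rn, imm);
        break;
      case PostIndex:
        std::printf("  ldr %s, [%s], #%ld\n", rt, rn, imm);
        break;
    }
}

int main()
{
    printLoad("x0", "sp", 16, Offset);     //  ldr x0, [sp, #16]
    printLoad("x0", "sp", 16, PreIndex);   //  ldr x0, [sp, #16]!
    printLoad("x0", "sp", 16, PostIndex);  //  ldr x0, [sp], #16
    return 0;
}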
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2010 ARM Limited
|
||||
* Copyright (c) 2010, 2012-2013 ARM Limited
|
||||
* Copyright (c) 2013 Advanced Micro Devices, Inc.
|
||||
* All rights reserved
|
||||
*
|
||||
|
@ -145,6 +145,32 @@ MsrRegOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
|||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
MrrcOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss);
|
||||
printReg(ss, dest);
|
||||
ss << ", ";
|
||||
printReg(ss, dest2);
|
||||
ss << ", ";
|
||||
printReg(ss, op1);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
McrrOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss);
|
||||
printReg(ss, dest);
|
||||
ss << ", ";
|
||||
printReg(ss, op1);
|
||||
ss << ", ";
|
||||
printReg(ss, op2);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
ImmOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
|
@ -229,6 +255,16 @@ RegRegImmOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
|||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
RegImmImmOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss);
|
||||
printReg(ss, dest);
|
||||
ccprintf(ss, ", #%d, #%d", imm1, imm2);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
RegRegImmImmOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2010 ARM Limited
|
||||
* Copyright (c) 2010, 2012-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -94,6 +94,42 @@ class MsrRegOp : public MsrBase
|
|||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class MrrcOp : public PredOp
|
||||
{
|
||||
protected:
|
||||
IntRegIndex op1;
|
||||
IntRegIndex dest;
|
||||
IntRegIndex dest2;
|
||||
uint32_t imm;
|
||||
|
||||
MrrcOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _op1, IntRegIndex _dest, IntRegIndex _dest2,
|
||||
uint32_t _imm) :
|
||||
PredOp(mnem, _machInst, __opClass), op1(_op1), dest(_dest),
|
||||
dest2(_dest2), imm(_imm)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class McrrOp : public PredOp
|
||||
{
|
||||
protected:
|
||||
IntRegIndex op1;
|
||||
IntRegIndex op2;
|
||||
IntRegIndex dest;
|
||||
uint32_t imm;
|
||||
|
||||
McrrOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _op1, IntRegIndex _op2, IntRegIndex _dest,
|
||||
uint32_t _imm) :
|
||||
PredOp(mnem, _machInst, __opClass), op1(_op1), op2(_op2),
|
||||
dest(_dest), imm(_imm)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class ImmOp : public PredOp
|
||||
{
|
||||
protected:
|
||||
|
@ -220,6 +256,23 @@ class RegRegImmOp : public PredOp
|
|||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class RegImmImmOp : public PredOp
|
||||
{
|
||||
protected:
|
||||
IntRegIndex dest;
|
||||
IntRegIndex op1;
|
||||
uint64_t imm1;
|
||||
uint64_t imm2;
|
||||
|
||||
RegImmImmOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _dest, uint64_t _imm1, uint64_t _imm2) :
|
||||
PredOp(mnem, _machInst, __opClass),
|
||||
dest(_dest), imm1(_imm1), imm2(_imm2)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class RegRegImmImmOp : public PredOp
|
||||
{
|
||||
protected:
|
||||
|
|
src/arch/arm/insts/misc64.cc (new file, 73 lines)
@@ -0,0 +1,73 @@
/*
|
||||
* Copyright (c) 2011-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Authors: Gabe Black
|
||||
*/
|
||||
|
||||
#include "arch/arm/insts/misc64.hh"
|
||||
|
||||
std::string
|
||||
RegRegImmImmOp64::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss, "", false);
|
||||
printReg(ss, dest);
|
||||
ss << ", ";
|
||||
printReg(ss, op1);
|
||||
ccprintf(ss, ", #%d, #%d", imm1, imm2);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
RegRegRegImmOp64::generateDisassembly(
|
||||
Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss, "", false);
|
||||
printReg(ss, dest);
|
||||
ss << ", ";
|
||||
printReg(ss, op1);
|
||||
ss << ", ";
|
||||
printReg(ss, op2);
|
||||
ccprintf(ss, ", #%d", imm);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
UnknownOp64::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
return csprintf("%-10s (inst %#08x)", "unknown", machInst);
|
||||
}
|
src/arch/arm/insts/misc64.hh (new file, 92 lines)
@@ -0,0 +1,92 @@
/*
|
||||
* Copyright (c) 2011-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Authors: Gabe Black
|
||||
*/
|
||||
|
||||
#ifndef __ARCH_ARM_INSTS_MISC64_HH__
|
||||
#define __ARCH_ARM_INSTS_MISC64_HH__
|
||||
|
||||
#include "arch/arm/insts/static_inst.hh"
|
||||
|
||||
class RegRegImmImmOp64 : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
IntRegIndex dest;
|
||||
IntRegIndex op1;
|
||||
uint64_t imm1;
|
||||
uint64_t imm2;
|
||||
|
||||
RegRegImmImmOp64(const char *mnem, ExtMachInst _machInst,
|
||||
OpClass __opClass, IntRegIndex _dest, IntRegIndex _op1,
|
||||
uint64_t _imm1, uint64_t _imm2) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass),
|
||||
dest(_dest), op1(_op1), imm1(_imm1), imm2(_imm2)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class RegRegRegImmOp64 : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
IntRegIndex dest;
|
||||
IntRegIndex op1;
|
||||
IntRegIndex op2;
|
||||
uint64_t imm;
|
||||
|
||||
RegRegRegImmOp64(const char *mnem, ExtMachInst _machInst,
|
||||
OpClass __opClass, IntRegIndex _dest, IntRegIndex _op1,
|
||||
IntRegIndex _op2, uint64_t _imm) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass),
|
||||
dest(_dest), op1(_op1), op2(_op2), imm(_imm)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class UnknownOp64 : public ArmStaticInst
|
||||
{
|
||||
protected:
|
||||
|
||||
UnknownOp64(const char *mnem, ExtMachInst _machInst, OpClass __opClass) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
#endif
|
src/arch/arm/insts/neon64_mem.hh (new file, 128 lines)
@@ -0,0 +1,128 @@
/*
|
||||
* Copyright (c) 2012-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Authors: Mbou Eyole
|
||||
* Giacomo Gabrielli
|
||||
*/
|
||||
|
||||
/// @file
/// Utility functions and datatypes used by AArch64 NEON memory instructions.

#ifndef __ARCH_ARM_INSTS_NEON64_MEM_HH__
#define __ARCH_ARM_INSTS_NEON64_MEM_HH__

namespace ArmISA
{

typedef uint64_t XReg;

/// 128-bit NEON vector register.
struct VReg {
    XReg hi;
    XReg lo;
};

/// Write a single NEON vector element leaving the others untouched.
inline void
writeVecElem(VReg *dest, XReg src, int index, int eSize)
{
    // eSize must be less than 4:
    // 0 -> 8-bit elems,
    // 1 -> 16-bit elems,
    // 2 -> 32-bit elems,
    // 3 -> 64-bit elems
    assert(eSize <= 3);

    int eBits = 8 << eSize;
    int lsbPos = index * eBits;
    assert(lsbPos < 128);
    int shiftAmt = lsbPos % 64;

    XReg maskBits = -1;
    if (eBits == 64) {
        maskBits = 0;
    } else {
        maskBits = maskBits << eBits;
    }
    maskBits = ~maskBits;

    XReg sMask = maskBits;
    maskBits = sMask << shiftAmt;

    if (lsbPos < 64) {
        dest->lo = (dest->lo & (~maskBits)) | ((src & sMask) << shiftAmt);
    } else {
        dest->hi = (dest->hi & (~maskBits)) | ((src & sMask) << shiftAmt);
    }
}

/// Read a single NEON vector element.
inline XReg
readVecElem(VReg src, int index, int eSize)
{
    // eSize must be less than 4:
    // 0 -> 8-bit elems,
    // 1 -> 16-bit elems,
    // 2 -> 32-bit elems,
    // 3 -> 64-bit elems
    assert(eSize <= 3);

    XReg data;

    int eBits = 8 << eSize;
    int lsbPos = index * eBits;
    assert(lsbPos < 128);
    int shiftAmt = lsbPos % 64;

    XReg maskBits = -1;
    if (eBits == 64) {
        maskBits = 0;
    } else {
        maskBits = maskBits << eBits;
    }
    maskBits = ~maskBits;

    if (lsbPos < 64) {
        data = (src.lo >> shiftAmt) & maskBits;
    } else {
        data = (src.hi >> shiftAmt) & maskBits;
    }
    return data;
}

} // namespace ArmISA

#endif // __ARCH_ARM_INSTS_NEON64_MEM_HH__
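A minimal standalone sketch (independent of the gem5 types above) that exercises the same element-masking arithmetic on a 128-bit value split into two 64-bit halves; handy for checking the eSize/index encoding by hand. The helper names and test values are illustrative only.

// Standalone sketch: writing/reading one lane of a 128-bit vector value.
#include <cassert>
#include <cstdint>
#include <cstdio>

struct V128 { uint64_t hi, lo; };

void writeElem(V128 &v, uint64_t src, int index, int eSize)
{
    assert(eSize <= 3);                       // 0..3 -> 8/16/32/64-bit lanes
    int eBits = 8 << eSize;
    int lsb = index * eBits;
    assert(lsb < 128);
    int sh = lsb % 64;
    uint64_t m = (eBits == 64) ? ~0ull : ((1ull << eBits) - 1);
    uint64_t &half = (lsb < 64) ? v.lo : v.hi;
    half = (half & ~(m << sh)) | ((src & m) << sh);
}

uint64_t readElem(const V128 &v, int index, int eSize)
{
    assert(eSize <= 3);
    int eBits = 8 << eSize;
    int lsb = index * eBits;
    assert(lsb < 128);
    int sh = lsb % 64;
    uint64_t m = (eBits == 64) ? ~0ull : ((1ull << eBits) - 1);
    return ((lsb < 64 ? v.lo : v.hi) >> sh) & m;
}

int main()
{
    V128 v = {0, 0};
    writeElem(v, 0xdeadbeef, 3, 2);           // lane 3 of a 4 x 32-bit vector
    std::printf("%016llx %016llx\n",
                (unsigned long long)v.hi, (unsigned long long)v.lo);
    std::printf("%llx\n", (unsigned long long)readElem(v, 3, 2));
    return 0;
}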
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2010 ARM Limited
|
||||
* Copyright (c) 2010, 2012-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -78,7 +78,8 @@ modified_imm(uint8_t ctrlImm, uint8_t dataImm)
|
|||
}
|
||||
|
||||
static inline uint64_t
|
||||
simd_modified_imm(bool op, uint8_t cmode, uint8_t data, bool &immValid)
|
||||
simd_modified_imm(bool op, uint8_t cmode, uint8_t data, bool &immValid,
|
||||
bool isAarch64 = false)
|
||||
{
|
||||
uint64_t bigData = data;
|
||||
immValid = true;
|
||||
|
@ -133,12 +134,20 @@ simd_modified_imm(bool op, uint8_t cmode, uint8_t data, bool &immValid)
|
|||
}
|
||||
break;
|
||||
case 0xf:
|
||||
{
|
||||
uint64_t bVal = 0;
|
||||
if (!op) {
|
||||
uint64_t bVal = bits(bigData, 6) ? (0x1F) : (0x20);
|
||||
bVal = bits(bigData, 6) ? (0x1F) : (0x20);
|
||||
bigData = (bits(bigData, 5, 0) << 19) |
|
||||
(bVal << 25) | (bits(bigData, 7) << 31);
|
||||
bigData |= (bigData << 32);
|
||||
break;
|
||||
} else if (isAarch64) {
|
||||
bVal = bits(bigData, 6) ? (0x0FF) : (0x100);
|
||||
bigData = (bits(bigData, 5, 0) << 48) |
|
||||
(bVal << 54) | (bits(bigData, 7) << 63);
|
||||
break;
|
||||
}
|
||||
}
|
||||
// Fall through, immediate encoding is invalid.
|
||||
default:
|
||||
|
@ -179,11 +188,14 @@ class PredOp : public ArmStaticInst
|
|||
|
||||
/// Constructor
|
||||
PredOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass) :
|
||||
ArmStaticInst(mnem, _machInst, __opClass),
|
||||
condCode(machInst.itstateMask ?
|
||||
(ConditionCode)(uint8_t)machInst.itstateCond :
|
||||
(ConditionCode)(unsigned)machInst.condCode)
|
||||
ArmStaticInst(mnem, _machInst, __opClass)
|
||||
{
|
||||
if (machInst.aarch64)
|
||||
condCode = COND_UC;
|
||||
else if (machInst.itstateMask)
|
||||
condCode = (ConditionCode)(uint8_t)machInst.itstateCond;
|
||||
else
|
||||
condCode = (ConditionCode)(unsigned)machInst.condCode;
|
||||
}
|
||||
};
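A standalone sketch (not gem5 code) of the condition-code selection the reworked PredOp constructor performs: AArch64 instructions are unconditional, Thumb instructions inside an IT block take the IT-state condition, and everything else uses the instruction's own condition field. The enum values are illustrative.

// Standalone sketch: picking the effective condition code.
#include <cstdio>

enum Cond { COND_EQ = 0, COND_AL = 14, COND_UC = 15 };

Cond effectiveCond(bool aarch64, unsigned itMask, Cond itCond, Cond instCond)
{
    if (aarch64)
        return COND_UC;        // AArch64: unconditional
    if (itMask != 0)
        return itCond;         // inside a Thumb IT block
    return instCond;           // plain ARM/Thumb condition field
}

int main()
{
    std::printf("%d %d %d\n",
                effectiveCond(true, 0, COND_EQ, COND_AL),     // 15
                effectiveCond(false, 0x8, COND_EQ, COND_AL),  // 0
                effectiveCond(false, 0, COND_EQ, COND_AL));   // 14
    return 0;
}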
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2010 ARM Limited
|
||||
* Copyright (c) 2010-2013 ARM Limited
|
||||
* Copyright (c) 2013 Advanced Micro Devices, Inc.
|
||||
* All rights reserved
|
||||
*
|
||||
|
@ -86,6 +86,90 @@ ArmStaticInst::shift_rm_imm(uint32_t base, uint32_t shamt,
|
|||
return 0;
|
||||
}
|
||||
|
||||
int64_t
|
||||
ArmStaticInst::shiftReg64(uint64_t base, uint64_t shiftAmt,
|
||||
ArmShiftType type, uint8_t width) const
|
||||
{
|
||||
shiftAmt = shiftAmt % width;
|
||||
ArmShiftType shiftType;
|
||||
shiftType = (ArmShiftType)type;
|
||||
|
||||
switch (shiftType)
|
||||
{
|
||||
case LSL:
|
||||
return base << shiftAmt;
|
||||
case LSR:
|
||||
if (shiftAmt == 0)
|
||||
return base;
|
||||
else
|
||||
return (base & mask(width)) >> shiftAmt;
|
||||
case ASR:
|
||||
if (shiftAmt == 0) {
|
||||
return base;
|
||||
} else {
|
||||
int sign_bit = bits(base, intWidth - 1);
|
||||
base >>= shiftAmt;
|
||||
base = sign_bit ? (base | ~mask(intWidth - shiftAmt)) : base;
|
||||
return base & mask(intWidth);
|
||||
}
|
||||
case ROR:
|
||||
if (shiftAmt == 0)
|
||||
return base;
|
||||
else
|
||||
return (base << (width - shiftAmt)) | (base >> shiftAmt);
|
||||
default:
|
||||
ccprintf(std::cerr, "Unhandled shift type\n");
|
||||
exit(1);
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int64_t
|
||||
ArmStaticInst::extendReg64(uint64_t base, ArmExtendType type,
|
||||
uint64_t shiftAmt, uint8_t width) const
|
||||
{
|
||||
bool sign_extend = false;
|
||||
int len = 0;
|
||||
switch (type) {
|
||||
case UXTB:
|
||||
len = 8;
|
||||
break;
|
||||
case UXTH:
|
||||
len = 16;
|
||||
break;
|
||||
case UXTW:
|
||||
len = 32;
|
||||
break;
|
||||
case UXTX:
|
||||
len = 64;
|
||||
break;
|
||||
case SXTB:
|
||||
len = 8;
|
||||
sign_extend = true;
|
||||
break;
|
||||
case SXTH:
|
||||
len = 16;
|
||||
sign_extend = true;
|
||||
break;
|
||||
case SXTW:
|
||||
len = 32;
|
||||
sign_extend = true;
|
||||
break;
|
||||
case SXTX:
|
||||
len = 64;
|
||||
sign_extend = true;
|
||||
break;
|
||||
}
|
||||
len = len <= width - shiftAmt ? len : width - shiftAmt;
|
||||
uint64_t tmp = (uint64_t) bits(base, len - 1, 0) << shiftAmt;
|
||||
if (sign_extend) {
|
||||
int sign_bit = bits(tmp, len + shiftAmt - 1);
|
||||
tmp = sign_bit ? (tmp | ~mask(len + shiftAmt)) : tmp;
|
||||
}
|
||||
return tmp & mask(width);
|
||||
}
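A standalone sketch (not gem5 code) of the extend-then-shift behaviour extendReg64() implements for AArch64 extended-register operands, assuming a 64-bit destination width; the helper name and test values are illustrative.

// Standalone sketch: UXT*/SXT* extension followed by an optional left shift.
#include <cstdint>
#include <cstdio>

enum ExtendType { UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX };

uint64_t extendReg(uint64_t base, ExtendType type, unsigned shift)
{
    bool sign = (type >= SXTB);
    unsigned len;
    switch (type) {
      case UXTB: case SXTB: len = 8;  break;
      case UXTH: case SXTH: len = 16; break;
      case UXTW: case SXTW: len = 32; break;
      default:              len = 64; break;
    }
    // Keep only the low 'len' bits, shift them up, then sign-extend if asked.
    if (len > 64 - shift)
        len = 64 - shift;
    uint64_t low = (len == 64) ? base : (base & ((1ull << len) - 1));
    uint64_t tmp = low << shift;
    if (sign && len + shift < 64 && ((tmp >> (len + shift - 1)) & 1))
        tmp |= ~0ull << (len + shift);
    return tmp;
}

int main()
{
    // A register holding 0x80: SXTB sees -128, then LSL #2 gives -512.
    std::printf("%lld\n", (long long)extendReg(0x80, SXTB, 2));             // -512
    std::printf("%llx\n", (unsigned long long)extendReg(0x1ffff, UXTH, 0)); // ffff
    return 0;
}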
|
||||
|
||||
// Shift Rm by Rs
|
||||
int32_t
|
||||
ArmStaticInst::shift_rm_rs(uint32_t base, uint32_t shamt,
|
||||
|
@ -214,6 +298,16 @@ ArmStaticInst::printReg(std::ostream &os, int reg) const
|
|||
|
||||
switch (regIdxToClass(reg, &rel_reg)) {
|
||||
case IntRegClass:
|
||||
if (aarch64) {
|
||||
if (reg == INTREG_UREG0)
|
||||
ccprintf(os, "ureg0");
|
||||
else if (reg == INTREG_SPX)
|
||||
ccprintf(os, "%s%s", (intWidth == 32) ? "w" : "", "sp");
|
||||
else if (reg == INTREG_X31)
|
||||
ccprintf(os, "%szr", (intWidth == 32) ? "w" : "x");
|
||||
else
|
||||
ccprintf(os, "%s%d", (intWidth == 32) ? "w" : "x", reg);
|
||||
} else {
|
||||
switch (rel_reg) {
|
||||
case PCReg:
|
||||
ccprintf(os, "pc");
|
||||
|
@ -231,6 +325,7 @@ ArmStaticInst::printReg(std::ostream &os, int reg) const
|
|||
ccprintf(os, "r%d", reg);
|
||||
break;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case FloatRegClass:
|
||||
ccprintf(os, "f%d", rel_reg);
|
||||
|
@ -247,12 +342,48 @@ ArmStaticInst::printReg(std::ostream &os, int reg) const
|
|||
void
|
||||
ArmStaticInst::printMnemonic(std::ostream &os,
|
||||
const std::string &suffix,
|
||||
bool withPred) const
|
||||
bool withPred,
|
||||
bool withCond64,
|
||||
ConditionCode cond64) const
|
||||
{
|
||||
os << " " << mnemonic;
|
||||
if (withPred) {
|
||||
unsigned condCode = machInst.condCode;
|
||||
switch (condCode) {
|
||||
if (withPred && !aarch64) {
|
||||
printCondition(os, machInst.condCode);
|
||||
os << suffix;
|
||||
} else if (withCond64) {
|
||||
os << ".";
|
||||
printCondition(os, cond64);
|
||||
os << suffix;
|
||||
}
|
||||
if (machInst.bigThumb)
|
||||
os << ".w";
|
||||
os << " ";
|
||||
}
|
||||
|
||||
void
|
||||
ArmStaticInst::printTarget(std::ostream &os, Addr target,
|
||||
const SymbolTable *symtab) const
|
||||
{
|
||||
Addr symbolAddr;
|
||||
std::string symbol;
|
||||
|
||||
if (symtab && symtab->findNearestSymbol(target, symbol, symbolAddr)) {
|
||||
ccprintf(os, "<%s", symbol);
|
||||
if (symbolAddr != target)
|
||||
ccprintf(os, "+%d>", target - symbolAddr);
|
||||
else
|
||||
ccprintf(os, ">");
|
||||
} else {
|
||||
ccprintf(os, "%#x", target);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
ArmStaticInst::printCondition(std::ostream &os,
|
||||
unsigned code,
|
||||
bool noImplicit) const
|
||||
{
|
||||
switch (code) {
|
||||
case COND_EQ:
|
||||
os << "eq";
|
||||
break;
|
||||
|
@ -297,17 +428,16 @@ ArmStaticInst::printMnemonic(std::ostream &os,
|
|||
break;
|
||||
case COND_AL:
|
||||
// This one is implicit.
|
||||
if (noImplicit)
|
||||
os << "al";
|
||||
break;
|
||||
case COND_UC:
|
||||
// Unconditional.
|
||||
if (noImplicit)
|
||||
os << "uc";
|
||||
break;
|
||||
default:
|
||||
panic("Unrecognized condition code %d.\n", condCode);
|
||||
}
|
||||
os << suffix;
|
||||
if (machInst.bigThumb)
|
||||
os << ".w";
|
||||
os << " ";
|
||||
panic("Unrecognized condition code %d.\n", code);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -392,6 +522,38 @@ ArmStaticInst::printShiftOperand(std::ostream &os,
|
|||
}
|
||||
}
|
||||
|
||||
void
|
||||
ArmStaticInst::printExtendOperand(bool firstOperand, std::ostream &os,
|
||||
IntRegIndex rm, ArmExtendType type,
|
||||
int64_t shiftAmt) const
|
||||
{
|
||||
if (!firstOperand)
|
||||
ccprintf(os, ", ");
|
||||
printReg(os, rm);
|
||||
if (type == UXTX && shiftAmt == 0)
|
||||
return;
|
||||
switch (type) {
|
||||
case UXTB: ccprintf(os, ", UXTB");
|
||||
break;
|
||||
case UXTH: ccprintf(os, ", UXTH");
|
||||
break;
|
||||
case UXTW: ccprintf(os, ", UXTW");
|
||||
break;
|
||||
case UXTX: ccprintf(os, ", LSL");
|
||||
break;
|
||||
case SXTB: ccprintf(os, ", SXTB");
|
||||
break;
|
||||
case SXTH: ccprintf(os, ", SXTH");
|
||||
break;
|
||||
case SXTW: ccprintf(os, ", SXTW");
|
||||
break;
|
||||
case SXTX: ccprintf(os, ", SXTW");
|
||||
break;
|
||||
}
|
||||
if (type == UXTX || shiftAmt)
|
||||
ccprintf(os, " #%d", shiftAmt);
|
||||
}
|
||||
|
||||
void
|
||||
ArmStaticInst::printDataInst(std::ostream &os, bool withImm,
|
||||
bool immShift, bool s, IntRegIndex rd, IntRegIndex rn,
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2010 ARM Limited
|
||||
* Copyright (c) 2010-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -44,6 +44,7 @@
|
|||
|
||||
#include "arch/arm/faults.hh"
|
||||
#include "arch/arm/utility.hh"
|
||||
#include "arch/arm/system.hh"
|
||||
#include "base/trace.hh"
|
||||
#include "cpu/static_inst.hh"
|
||||
#include "sim/byteswap.hh"
|
||||
|
@ -55,6 +56,9 @@ namespace ArmISA
|
|||
class ArmStaticInst : public StaticInst
|
||||
{
|
||||
protected:
|
||||
bool aarch64;
|
||||
uint8_t intWidth;
|
||||
|
||||
int32_t shift_rm_imm(uint32_t base, uint32_t shamt,
|
||||
uint32_t type, uint32_t cfval) const;
|
||||
int32_t shift_rm_rs(uint32_t base, uint32_t shamt,
|
||||
|
@ -65,6 +69,11 @@ class ArmStaticInst : public StaticInst
|
|||
bool shift_carry_rs(uint32_t base, uint32_t shamt,
|
||||
uint32_t type, uint32_t cfval) const;
|
||||
|
||||
int64_t shiftReg64(uint64_t base, uint64_t shiftAmt,
|
||||
ArmShiftType type, uint8_t width) const;
|
||||
int64_t extendReg64(uint64_t base, ArmExtendType type,
|
||||
uint64_t shiftAmt, uint8_t width) const;
|
||||
|
||||
template<int width>
|
||||
static inline bool
|
||||
saturateOp(int32_t &res, int64_t op1, int64_t op2, bool sub=false)
|
||||
|
@ -135,6 +144,11 @@ class ArmStaticInst : public StaticInst
|
|||
OpClass __opClass)
|
||||
: StaticInst(mnem, _machInst, __opClass)
|
||||
{
|
||||
aarch64 = machInst.aarch64;
|
||||
if (bits(machInst, 28, 24) == 0x10)
|
||||
intWidth = 64; // Force 64-bit width for ADR/ADRP
|
||||
else
|
||||
intWidth = (aarch64 && bits(machInst, 31)) ? 64 : 32;
|
||||
}
|
||||
|
||||
/// Print a register name for disassembly given the unique
|
||||
|
@ -142,13 +156,22 @@ class ArmStaticInst : public StaticInst
|
|||
void printReg(std::ostream &os, int reg) const;
|
||||
void printMnemonic(std::ostream &os,
|
||||
const std::string &suffix = "",
|
||||
bool withPred = true) const;
|
||||
bool withPred = true,
|
||||
bool withCond64 = false,
|
||||
ConditionCode cond64 = COND_UC) const;
|
||||
void printTarget(std::ostream &os, Addr target,
|
||||
const SymbolTable *symtab) const;
|
||||
void printCondition(std::ostream &os, unsigned code,
|
||||
bool noImplicit=false) const;
|
||||
void printMemSymbol(std::ostream &os, const SymbolTable *symtab,
|
||||
const std::string &prefix, const Addr addr,
|
||||
const std::string &suffix) const;
|
||||
void printShiftOperand(std::ostream &os, IntRegIndex rm,
|
||||
bool immShift, uint32_t shiftAmt,
|
||||
IntRegIndex rs, ArmShiftType type) const;
|
||||
void printExtendOperand(bool firstOperand, std::ostream &os,
|
||||
IntRegIndex rm, ArmExtendType type,
|
||||
int64_t shiftAmt) const;
|
||||
|
||||
|
||||
void printDataInst(std::ostream &os, bool withImm) const;
|
||||
|
@ -166,10 +189,13 @@ class ArmStaticInst : public StaticInst
|
|||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
|
||||
static inline uint32_t
|
||||
cpsrWriteByInstr(CPSR cpsr, uint32_t val,
|
||||
uint8_t byteMask, bool affectState, bool nmfi)
|
||||
cpsrWriteByInstr(CPSR cpsr, uint32_t val, SCR scr, NSACR nsacr,
|
||||
uint8_t byteMask, bool affectState, bool nmfi, ThreadContext *tc)
|
||||
{
|
||||
bool privileged = (cpsr.mode != MODE_USER);
|
||||
bool haveVirt = ArmSystem::haveVirtualization(tc);
|
||||
bool haveSecurity = ArmSystem::haveSecurity(tc);
|
||||
bool isSecure = inSecureState(scr, cpsr) || !haveSecurity;
|
||||
|
||||
uint32_t bitMask = 0;
|
||||
|
||||
|
@ -182,14 +208,53 @@ class ArmStaticInst : public StaticInst
|
|||
}
|
||||
if (bits(byteMask, 1)) {
|
||||
unsigned highIdx = affectState ? 15 : 9;
|
||||
unsigned lowIdx = privileged ? 8 : 9;
|
||||
unsigned lowIdx = (privileged && (isSecure || scr.aw || haveVirt))
|
||||
? 8 : 9;
|
||||
bitMask = bitMask | mask(highIdx, lowIdx);
|
||||
}
|
||||
if (bits(byteMask, 0)) {
|
||||
if (privileged) {
|
||||
bitMask = bitMask | mask(7, 6);
|
||||
if (!badMode((OperatingMode)(val & mask(5)))) {
|
||||
bitMask |= 1 << 7;
|
||||
if ( (!nmfi || !((val >> 6) & 0x1)) &&
|
||||
(isSecure || scr.fw || haveVirt) ) {
|
||||
bitMask |= 1 << 6;
|
||||
}
|
||||
// Now check the new mode is allowed
|
||||
OperatingMode newMode = (OperatingMode) (val & mask(5));
|
||||
OperatingMode oldMode = (OperatingMode)(uint32_t)cpsr.mode;
|
||||
if (!badMode(newMode)) {
|
||||
bool validModeChange = true;
|
||||
// Check for attempts to enter modes only permitted in
|
||||
// Secure state from Non-secure state. These are Monitor
|
||||
// mode ('10110'), and FIQ mode ('10001') if the Security
|
||||
// Extensions have reserved it.
|
||||
if (!isSecure && newMode == MODE_MON)
|
||||
validModeChange = false;
|
||||
if (!isSecure && newMode == MODE_FIQ && nsacr.rfr == '1')
|
||||
validModeChange = false;
|
||||
// There is no Hyp mode ('11010') in Secure state, so that
|
||||
// is UNPREDICTABLE
|
||||
if (scr.ns == '0' && newMode == MODE_HYP)
|
||||
validModeChange = false;
|
||||
// Cannot move into Hyp mode directly from a Non-secure
|
||||
// PL1 mode
|
||||
if (!isSecure && oldMode != MODE_HYP && newMode == MODE_HYP)
|
||||
validModeChange = false;
|
||||
// Cannot move out of Hyp mode with this function except
|
||||
// on an exception return
|
||||
if (oldMode == MODE_HYP && newMode != MODE_HYP && !affectState)
|
||||
validModeChange = false;
|
||||
// Must not change to 64 bit when running in 32 bit mode
|
||||
if (!opModeIs64(oldMode) && opModeIs64(newMode))
|
||||
validModeChange = false;
|
||||
|
||||
// If we passed all of the above then set the bit mask to
|
||||
// copy the mode accross
|
||||
if (validModeChange) {
|
||||
bitMask = bitMask | mask(5);
|
||||
} else {
|
||||
warn_once("Illegal change to CPSR mode attempted\n");
|
||||
}
|
||||
} else {
|
||||
warn_once("Ignoring write of bad mode to CPSR.\n");
|
||||
}
|
||||
|
@ -198,11 +263,7 @@ class ArmStaticInst : public StaticInst
|
|||
bitMask = bitMask | (1 << 5);
|
||||
}
|
||||
|
||||
bool cpsr_f = cpsr.f;
|
||||
uint32_t new_cpsr = ((uint32_t)cpsr & ~bitMask) | (val & bitMask);
|
||||
if (nmfi && !cpsr_f)
|
||||
new_cpsr &= ~(1 << 6);
|
||||
return new_cpsr;
|
||||
return ((uint32_t)cpsr & ~bitMask) | (val & bitMask);
|
||||
}
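Stripped of the privilege, security and mode-change checks above, cpsrWriteByInstr() reduces to a masked read-modify-write: build a bit mask from the MSR byte mask, then splice the new value into the old. A standalone sketch of that core pattern (illustrative only; the real function gates each byte on privilege and security state):

// Standalone sketch: byte-masked register update.
#include <cstdint>
#include <cstdio>

uint32_t maskedWrite(uint32_t oldVal, uint32_t newVal, uint8_t byteMask)
{
    uint32_t bitMask = 0;
    for (int b = 0; b < 4; ++b)
        if (byteMask & (1 << b))
            bitMask |= 0xffu << (8 * b);
    return (oldVal & ~bitMask) | (newVal & bitMask);
}

int main()
{
    // Update only the flags byte (bits 31:24); mode/control bits are kept.
    std::printf("%08x\n", maskedWrite(0x000001d3, 0xf00000ff, 0x8)); // f00001d3
    return 0;
}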
|
||||
|
||||
static inline uint32_t
|
||||
|
@ -296,12 +357,12 @@ class ArmStaticInst : public StaticInst
|
|||
inline Fault
|
||||
disabledFault() const
|
||||
{
|
||||
if (FullSystem) {
|
||||
return new UndefinedInstruction();
|
||||
} else {
|
||||
return new UndefinedInstruction(machInst, false, mnemonic, true);
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
virtual void
|
||||
annotateFault(ArmFault *fault) {}
|
||||
};
|
||||
}
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2010 ARM Limited
|
||||
* Copyright (c) 2010-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -45,6 +45,37 @@
|
|||
* exception bits read before it, etc.
|
||||
*/
|
||||
|
||||
std::string
|
||||
FpCondCompRegOp::generateDisassembly(
|
||||
Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss, "", false);
|
||||
printReg(ss, op1);
|
||||
ccprintf(ss, ", ");
|
||||
printReg(ss, op2);
|
||||
ccprintf(ss, ", #%d", defCc);
|
||||
ccprintf(ss, ", ");
|
||||
printCondition(ss, condCode, true);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
FpCondSelOp::generateDisassembly(
|
||||
Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss, "", false);
|
||||
printReg(ss, dest);
|
||||
ccprintf(ss, ", ");
|
||||
printReg(ss, op1);
|
||||
ccprintf(ss, ", ");
|
||||
printReg(ss, op2);
|
||||
ccprintf(ss, ", ");
|
||||
printCondition(ss, condCode, true);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
FpRegRegOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
|
@ -91,6 +122,21 @@ FpRegRegRegOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
|||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
FpRegRegRegRegOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
std::stringstream ss;
|
||||
printMnemonic(ss);
|
||||
printReg(ss, dest + FP_Reg_Base);
|
||||
ss << ", ";
|
||||
printReg(ss, op1 + FP_Reg_Base);
|
||||
ss << ", ";
|
||||
printReg(ss, op2 + FP_Reg_Base);
|
||||
ss << ", ";
|
||||
printReg(ss, op3 + FP_Reg_Base);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
std::string
|
||||
FpRegRegRegImmOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
|
||||
{
|
||||
|
@ -131,24 +177,25 @@ prepFpState(uint32_t rMode)
|
|||
}
|
||||
|
||||
void
|
||||
finishVfp(FPSCR &fpscr, VfpSavedState state, bool flush)
|
||||
finishVfp(FPSCR &fpscr, VfpSavedState state, bool flush, FPSCR mask)
|
||||
{
|
||||
int exceptions = fetestexcept(FeAllExceptions);
|
||||
bool underflow = false;
|
||||
if (exceptions & FeInvalid) {
|
||||
if ((exceptions & FeInvalid) && mask.ioc) {
|
||||
fpscr.ioc = 1;
|
||||
}
|
||||
if (exceptions & FeDivByZero) {
|
||||
if ((exceptions & FeDivByZero) && mask.dzc) {
|
||||
fpscr.dzc = 1;
|
||||
}
|
||||
if (exceptions & FeOverflow) {
|
||||
if ((exceptions & FeOverflow) && mask.ofc) {
|
||||
fpscr.ofc = 1;
|
||||
}
|
||||
if (exceptions & FeUnderflow) {
|
||||
underflow = true;
|
||||
if (mask.ufc)
|
||||
fpscr.ufc = 1;
|
||||
}
|
||||
if ((exceptions & FeInexact) && !(underflow && flush)) {
|
||||
if ((exceptions & FeInexact) && !(underflow && flush) && mask.ixc) {
|
||||
fpscr.ixc = 1;
|
||||
}
|
||||
fesetround(state);
|
||||
|
@ -329,19 +376,33 @@ fixFpSFpDDest(FPSCR fpscr, float val)
|
|||
return mid;
|
||||
}
|
||||
|
||||
uint16_t
|
||||
vcvtFpSFpH(FPSCR &fpscr, bool flush, bool defaultNan,
|
||||
uint32_t rMode, bool ahp, float op)
|
||||
static inline uint16_t
|
||||
vcvtFpFpH(FPSCR &fpscr, bool flush, bool defaultNan,
|
||||
uint32_t rMode, bool ahp, uint64_t opBits, bool isDouble)
|
||||
{
|
||||
uint32_t opBits = fpToBits(op);
|
||||
uint32_t mWidth;
|
||||
uint32_t eWidth;
|
||||
uint32_t eHalfRange;
|
||||
uint32_t sBitPos;
|
||||
|
||||
if (isDouble) {
|
||||
mWidth = 52;
|
||||
eWidth = 11;
|
||||
} else {
|
||||
mWidth = 23;
|
||||
eWidth = 8;
|
||||
}
|
||||
sBitPos = eWidth + mWidth;
|
||||
eHalfRange = (1 << (eWidth-1)) - 1;
|
||||
|
||||
// Extract the operand.
|
||||
bool neg = bits(opBits, 31);
|
||||
uint32_t exponent = bits(opBits, 30, 23);
|
||||
uint32_t oldMantissa = bits(opBits, 22, 0);
|
||||
uint32_t mantissa = oldMantissa >> (23 - 10);
|
||||
bool neg = bits(opBits, sBitPos);
|
||||
uint32_t exponent = bits(opBits, sBitPos-1, mWidth);
|
||||
uint64_t oldMantissa = bits(opBits, mWidth-1, 0);
|
||||
uint32_t mantissa = oldMantissa >> (mWidth - 10);
|
||||
// Do the conversion.
|
||||
uint32_t extra = oldMantissa & mask(23 - 10);
|
||||
if (exponent == 0xff) {
|
||||
uint64_t extra = oldMantissa & mask(mWidth - 10);
|
||||
if (exponent == mask(eWidth)) {
|
||||
if (oldMantissa != 0) {
|
||||
// Nans.
|
||||
if (bits(mantissa, 9) == 0) {
|
||||
|
@ -379,7 +440,6 @@ vcvtFpSFpH(FPSCR &fpscr, bool flush, bool defaultNan,
|
|||
|
||||
if (exponent == 0) {
|
||||
// Denormalized.
|
||||
|
||||
// If flush to zero is on, this shouldn't happen.
|
||||
assert(!flush);
|
||||
|
||||
|
@ -407,13 +467,13 @@ vcvtFpSFpH(FPSCR &fpscr, bool flush, bool defaultNan,
|
|||
|
||||
// We need to track the dropped bits differently since
|
||||
// more can be dropped by denormalizing.
|
||||
bool topOne = bits(extra, 12);
|
||||
bool restZeros = bits(extra, 11, 0) == 0;
|
||||
bool topOne = bits(extra, mWidth - 10 - 1);
|
||||
bool restZeros = bits(extra, mWidth - 10 - 2, 0) == 0;
|
||||
|
||||
if (exponent <= (127 - 15)) {
|
||||
if (exponent <= (eHalfRange - 15)) {
|
||||
// The result is too small. Denormalize.
|
||||
mantissa |= (1 << 10);
|
||||
while (mantissa && exponent <= (127 - 15)) {
|
||||
while (mantissa && exponent <= (eHalfRange - 15)) {
|
||||
restZeros = restZeros && !topOne;
|
||||
topOne = bits(mantissa, 0);
|
||||
mantissa = mantissa >> 1;
|
||||
|
@ -424,7 +484,7 @@ vcvtFpSFpH(FPSCR &fpscr, bool flush, bool defaultNan,
|
|||
exponent = 0;
|
||||
} else {
|
||||
// Change bias.
|
||||
exponent -= (127 - 15);
|
||||
exponent -= (eHalfRange - 15);
|
||||
}
|
||||
|
||||
if (exponent == 0 && (inexact || fpscr.ufe)) {
|
||||
|
@ -488,155 +548,115 @@ vcvtFpSFpH(FPSCR &fpscr, bool flush, bool defaultNan,
|
|||
return result;
|
||||
}
|
||||
|
||||
float
|
||||
vcvtFpHFpS(FPSCR &fpscr, bool defaultNan, bool ahp, uint16_t op)
|
||||
uint16_t
|
||||
vcvtFpSFpH(FPSCR &fpscr, bool flush, bool defaultNan,
|
||||
uint32_t rMode, bool ahp, float op)
|
||||
{
|
||||
float junk = 0.0;
|
||||
uint64_t opBits = fpToBits(op);
|
||||
return vcvtFpFpH(fpscr, flush, defaultNan, rMode, ahp, opBits, false);
|
||||
}
|
||||
|
||||
uint16_t
|
||||
vcvtFpDFpH(FPSCR &fpscr, bool flush, bool defaultNan,
|
||||
uint32_t rMode, bool ahp, double op)
|
||||
{
|
||||
uint64_t opBits = fpToBits(op);
|
||||
return vcvtFpFpH(fpscr, flush, defaultNan, rMode, ahp, opBits, true);
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
vcvtFpHFp(FPSCR &fpscr, bool defaultNan, bool ahp, uint16_t op, bool isDouble)
|
||||
{
|
||||
uint32_t mWidth;
|
||||
uint32_t eWidth;
|
||||
uint32_t eHalfRange;
|
||||
uint32_t sBitPos;
|
||||
|
||||
if (isDouble) {
|
||||
mWidth = 52;
|
||||
eWidth = 11;
|
||||
} else {
|
||||
mWidth = 23;
|
||||
eWidth = 8;
|
||||
}
|
||||
sBitPos = eWidth + mWidth;
|
||||
eHalfRange = (1 << (eWidth-1)) - 1;
|
||||
|
||||
// Extract the bitfields.
|
||||
bool neg = bits(op, 15);
|
||||
uint32_t exponent = bits(op, 14, 10);
|
||||
uint32_t mantissa = bits(op, 9, 0);
|
||||
uint64_t mantissa = bits(op, 9, 0);
|
||||
// Do the conversion.
|
||||
if (exponent == 0) {
|
||||
if (mantissa != 0) {
|
||||
// Normalize the value.
|
||||
exponent = exponent + (127 - 15) + 1;
|
||||
exponent = exponent + (eHalfRange - 15) + 1;
|
||||
while (mantissa < (1 << 10)) {
|
||||
mantissa = mantissa << 1;
|
||||
exponent--;
|
||||
}
|
||||
}
|
||||
mantissa = mantissa << (23 - 10);
|
||||
mantissa = mantissa << (mWidth - 10);
|
||||
} else if (exponent == 0x1f && !ahp) {
|
||||
// Infinities and nans.
|
||||
exponent = 0xff;
|
||||
exponent = mask(eWidth);
|
||||
if (mantissa != 0) {
|
||||
// Nans.
|
||||
mantissa = mantissa << (23 - 10);
|
||||
if (bits(mantissa, 22) == 0) {
|
||||
mantissa = mantissa << (mWidth - 10);
|
||||
if (bits(mantissa, mWidth-1) == 0) {
|
||||
// Signalling nan.
|
||||
fpscr.ioc = 1;
|
||||
mantissa |= (1 << 22);
|
||||
mantissa |= (((uint64_t) 1) << (mWidth-1));
|
||||
}
|
||||
if (defaultNan) {
|
||||
mantissa &= ~mask(22);
|
||||
mantissa &= ~mask(mWidth-1);
|
||||
neg = false;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
exponent = exponent + (127 - 15);
|
||||
mantissa = mantissa << (23 - 10);
|
||||
exponent = exponent + (eHalfRange - 15);
|
||||
mantissa = mantissa << (mWidth - 10);
|
||||
}
|
||||
// Reassemble the result.
|
||||
uint32_t result = bits(mantissa, 22, 0);
|
||||
replaceBits(result, 30, 23, exponent);
|
||||
if (neg)
|
||||
result |= (1 << 31);
|
||||
uint64_t result = bits(mantissa, mWidth-1, 0);
|
||||
replaceBits(result, sBitPos-1, mWidth, exponent);
|
||||
if (neg) {
|
||||
result |= (((uint64_t) 1) << sBitPos);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
double
|
||||
vcvtFpHFpD(FPSCR &fpscr, bool defaultNan, bool ahp, uint16_t op)
|
||||
{
|
||||
double junk = 0.0;
|
||||
uint64_t result;
|
||||
|
||||
result = vcvtFpHFp(fpscr, defaultNan, ahp, op, true);
|
||||
return bitsToFp(result, junk);
|
||||
}
|
||||
|
||||
uint64_t
|
||||
vfpFpSToFixed(float val, bool isSigned, bool half,
|
||||
uint8_t imm, bool rzero)
|
||||
float
|
||||
vcvtFpHFpS(FPSCR &fpscr, bool defaultNan, bool ahp, uint16_t op)
|
||||
{
|
||||
int rmode = rzero ? FeRoundZero : fegetround();
|
||||
__asm__ __volatile__("" : "=m" (rmode) : "m" (rmode));
|
||||
fesetround(FeRoundNearest);
|
||||
val = val * powf(2.0, imm);
|
||||
__asm__ __volatile__("" : "=m" (val) : "m" (val));
|
||||
fesetround(rmode);
|
||||
feclearexcept(FeAllExceptions);
|
||||
__asm__ __volatile__("" : "=m" (val) : "m" (val));
|
||||
float origVal = val;
|
||||
val = rintf(val);
|
||||
int fpType = std::fpclassify(val);
|
||||
if (fpType == FP_SUBNORMAL || fpType == FP_NAN) {
|
||||
if (fpType == FP_NAN) {
|
||||
feraiseexcept(FeInvalid);
|
||||
}
|
||||
val = 0.0;
|
||||
} else if (origVal != val) {
|
||||
switch (rmode) {
|
||||
case FeRoundNearest:
|
||||
if (origVal - val > 0.5)
|
||||
val += 1.0;
|
||||
else if (val - origVal > 0.5)
|
||||
val -= 1.0;
|
||||
break;
|
||||
case FeRoundDown:
|
||||
if (origVal < val)
|
||||
val -= 1.0;
|
||||
break;
|
||||
case FeRoundUpward:
|
||||
if (origVal > val)
|
||||
val += 1.0;
|
||||
break;
|
||||
}
|
||||
feraiseexcept(FeInexact);
|
||||
}
|
||||
float junk = 0.0;
|
||||
uint64_t result;
|
||||
|
||||
if (isSigned) {
|
||||
if (half) {
|
||||
if ((double)val < (int16_t)(1 << 15)) {
|
||||
feraiseexcept(FeInvalid);
|
||||
feclearexcept(FeInexact);
|
||||
return (int16_t)(1 << 15);
|
||||
}
|
||||
if ((double)val > (int16_t)mask(15)) {
|
||||
feraiseexcept(FeInvalid);
|
||||
feclearexcept(FeInexact);
|
||||
return (int16_t)mask(15);
|
||||
}
|
||||
return (int16_t)val;
|
||||
} else {
|
||||
if ((double)val < (int32_t)(1 << 31)) {
|
||||
feraiseexcept(FeInvalid);
|
||||
feclearexcept(FeInexact);
|
||||
return (int32_t)(1 << 31);
|
||||
}
|
||||
if ((double)val > (int32_t)mask(31)) {
|
||||
feraiseexcept(FeInvalid);
|
||||
feclearexcept(FeInexact);
|
||||
return (int32_t)mask(31);
|
||||
}
|
||||
return (int32_t)val;
|
||||
}
|
||||
} else {
|
||||
if (half) {
|
||||
if ((double)val < 0) {
|
||||
feraiseexcept(FeInvalid);
|
||||
feclearexcept(FeInexact);
|
||||
return 0;
|
||||
}
|
||||
if ((double)val > (mask(16))) {
|
||||
feraiseexcept(FeInvalid);
|
||||
feclearexcept(FeInexact);
|
||||
return mask(16);
|
||||
}
|
||||
return (uint16_t)val;
|
||||
} else {
|
||||
if ((double)val < 0) {
|
||||
feraiseexcept(FeInvalid);
|
||||
feclearexcept(FeInexact);
|
||||
return 0;
|
||||
}
|
||||
if ((double)val > (mask(32))) {
|
||||
feraiseexcept(FeInvalid);
|
||||
feclearexcept(FeInexact);
|
||||
return mask(32);
|
||||
}
|
||||
return (uint32_t)val;
|
||||
}
|
||||
}
|
||||
result = vcvtFpHFp(fpscr, defaultNan, ahp, op, false);
|
||||
return bitsToFp(result, junk);
|
||||
}
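A standalone sketch (not gem5 code) of the half-precision to single-precision rebiasing that vcvtFpHFp() generalises over both single and double destinations (half bias 15 versus single bias 127); it omits the FPSCR, AHP-format and default-NaN handling of the real code.

// Standalone sketch: IEEE binary16 -> binary32 conversion.
#include <cstdint>
#include <cstring>
#include <cstdio>

float halfToFloat(uint16_t h)
{
    uint32_t sign = (h >> 15) & 0x1;
    uint32_t exp  = (h >> 10) & 0x1f;
    uint32_t mant = h & 0x3ff;
    uint32_t out;

    if (exp == 0) {
        if (mant == 0) {
            out = sign << 31;                        // +/- zero
        } else {
            exp = 127 - 15 + 1;                      // normalize the subnormal
            while (!(mant & 0x400)) { mant <<= 1; exp--; }
            mant &= 0x3ff;                           // drop the implicit bit
            out = (sign << 31) | (exp << 23) | (mant << 13);
        }
    } else if (exp == 0x1f) {
        out = (sign << 31) | (0xffu << 23) | (mant << 13);   // inf / NaN
    } else {
        out = (sign << 31) | ((exp + 127 - 15) << 23) | (mant << 13);
    }

    float f;
    std::memcpy(&f, &out, sizeof(f));
    return f;
}

int main()
{
    std::printf("%g %g %g\n",
                halfToFloat(0x3c00),   // 1.0
                halfToFloat(0xc000),   // -2.0
                halfToFloat(0x0001));  // smallest subnormal, ~5.96e-08
    return 0;
}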
|
||||
|
||||
float
|
||||
vfpUFixedToFpS(bool flush, bool defaultNan,
|
||||
uint32_t val, bool half, uint8_t imm)
|
||||
uint64_t val, uint8_t width, uint8_t imm)
|
||||
{
|
||||
fesetround(FeRoundNearest);
|
||||
if (half)
|
||||
if (width == 16)
|
||||
val = (uint16_t)val;
|
||||
else if (width == 32)
|
||||
val = (uint32_t)val;
|
||||
else if (width != 64)
|
||||
panic("Unsupported width %d", width);
|
||||
float scale = powf(2.0, imm);
|
||||
__asm__ __volatile__("" : "=m" (scale) : "m" (scale));
|
||||
feclearexcept(FeAllExceptions);
|
||||
|
@ -646,11 +666,16 @@ vfpUFixedToFpS(bool flush, bool defaultNan,
|
|||
|
||||
float
|
||||
vfpSFixedToFpS(bool flush, bool defaultNan,
|
||||
int32_t val, bool half, uint8_t imm)
|
||||
int64_t val, uint8_t width, uint8_t imm)
|
||||
{
|
||||
fesetround(FeRoundNearest);
|
||||
if (half)
|
||||
if (width == 16)
|
||||
val = sext<16>(val & mask(16));
|
||||
else if (width == 32)
|
||||
val = sext<32>(val & mask(32));
|
||||
else if (width != 64)
|
||||
panic("Unsupported width %d", width);
|
||||
|
||||
float scale = powf(2.0, imm);
|
||||
__asm__ __volatile__("" : "=m" (scale) : "m" (scale));
|
||||
feclearexcept(FeAllExceptions);
|
||||
|
@ -658,106 +683,19 @@ vfpSFixedToFpS(bool flush, bool defaultNan,
|
|||
return fixDivDest(flush, defaultNan, val / scale, (float)val, scale);
|
||||
}
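A standalone sketch (not gem5 code) of the fixed-point to floating-point conversion idiom vfpSFixedToFpS() uses once the new width parameter is applied: sign-extend the low 'width' bits, then scale down by 2^fbits. The function name and test values are illustrative.

// Standalone sketch: signed fixed-point -> float conversion.
#include <cmath>
#include <cstdint>
#include <cstdio>

float sFixedToFloat(int64_t val, unsigned width, unsigned fbits)
{
    if (width < 64) {
        uint64_t m = (1ull << width) - 1;
        val &= m;
        if (val & (1ull << (width - 1)))      // sign-extend the low bits
            val |= ~m;
    }
    return (float)val / std::pow(2.0f, (float)fbits);
}

int main()
{
    // 16-bit value 0xff00 with 8 fraction bits is -1.0 in Q8.8.
    std::printf("%g\n", sFixedToFloat(0xff00, 16, 8));  // -1
    std::printf("%g\n", sFixedToFloat(0x180, 32, 8));   // 1.5
    return 0;
}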
|
||||
|
||||
uint64_t
|
||||
vfpFpDToFixed(double val, bool isSigned, bool half,
|
||||
uint8_t imm, bool rzero)
|
||||
{
|
||||
int rmode = rzero ? FeRoundZero : fegetround();
|
||||
fesetround(FeRoundNearest);
|
||||
val = val * pow(2.0, imm);
|
||||
__asm__ __volatile__("" : "=m" (val) : "m" (val));
|
||||
fesetround(rmode);
|
||||
feclearexcept(FeAllExceptions);
|
||||
__asm__ __volatile__("" : "=m" (val) : "m" (val));
|
||||
double origVal = val;
|
||||
val = rint(val);
|
||||
int fpType = std::fpclassify(val);
|
||||
if (fpType == FP_SUBNORMAL || fpType == FP_NAN) {
|
||||
if (fpType == FP_NAN) {
|
||||
feraiseexcept(FeInvalid);
|
||||
}
|
||||
val = 0.0;
|
||||
} else if (origVal != val) {
|
||||
switch (rmode) {
|
||||
case FeRoundNearest:
|
||||
if (origVal - val > 0.5)
|
||||
val += 1.0;
|
||||
else if (val - origVal > 0.5)
|
||||
val -= 1.0;
|
||||
break;
|
||||
case FeRoundDown:
|
||||
if (origVal < val)
|
||||
val -= 1.0;
|
||||
break;
|
||||
case FeRoundUpward:
|
||||
if (origVal > val)
|
||||
val += 1.0;
|
||||
break;
|
||||
}
|
||||
feraiseexcept(FeInexact);
|
||||
}
|
||||
if (isSigned) {
|
||||
if (half) {
|
||||
if (val < (int16_t)(1 << 15)) {
|
||||
feraiseexcept(FeInvalid);
|
||||
feclearexcept(FeInexact);
|
||||
return (int16_t)(1 << 15);
|
||||
}
|
||||
if (val > (int16_t)mask(15)) {
|
||||
feraiseexcept(FeInvalid);
|
||||
feclearexcept(FeInexact);
|
||||
return (int16_t)mask(15);
|
||||
}
|
||||
return (int16_t)val;
|
||||
} else {
|
||||
if (val < (int32_t)(1 << 31)) {
|
||||
feraiseexcept(FeInvalid);
|
||||
feclearexcept(FeInexact);
|
||||
return (int32_t)(1 << 31);
|
||||
}
|
||||
if (val > (int32_t)mask(31)) {
|
||||
feraiseexcept(FeInvalid);
|
||||
feclearexcept(FeInexact);
|
||||
return (int32_t)mask(31);
|
||||
}
|
||||
return (int32_t)val;
|
||||
}
|
||||
} else {
|
||||
if (half) {
|
||||
if (val < 0) {
|
||||
feraiseexcept(FeInvalid);
|
||||
feclearexcept(FeInexact);
|
||||
return 0;
|
||||
}
|
||||
if (val > mask(16)) {
|
||||
feraiseexcept(FeInvalid);
|
||||
feclearexcept(FeInexact);
|
||||
return mask(16);
|
||||
}
|
||||
return (uint16_t)val;
|
||||
} else {
|
||||
if (val < 0) {
|
||||
feraiseexcept(FeInvalid);
|
||||
feclearexcept(FeInexact);
|
||||
return 0;
|
||||
}
|
||||
if (val > mask(32)) {
|
||||
feraiseexcept(FeInvalid);
|
||||
feclearexcept(FeInexact);
|
||||
return mask(32);
|
||||
}
|
||||
return (uint32_t)val;
|
||||
}
|
||||
}
|
||||
}
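For reference, the fixed-point encoding these helpers target is simply the value scaled by 2^imm before integer rounding. A minimal standalone sketch, not part of this patch and assuming only the host's <cmath>, of that scaling step:

#include <cmath>
#include <cstdio>

int main()
{
    double val = 1.25;
    unsigned imm = 4;  // four fractional bits in the fixed-point result
    // Scale by 2^imm, then round to an integer, as the helpers above do
    // before any saturation checks.
    double scaled = val * std::pow(2.0, imm);
    long long fixed = (long long)std::rint(scaled);
    std::printf("%g scaled by 2^%u -> %lld\n", val, imm, fixed);  // 20
    return 0;
}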
|
||||
|
||||
double
|
||||
vfpUFixedToFpD(bool flush, bool defaultNan,
|
||||
uint32_t val, bool half, uint8_t imm)
|
||||
uint64_t val, uint8_t width, uint8_t imm)
|
||||
{
|
||||
fesetround(FeRoundNearest);
|
||||
if (half)
|
||||
if (width == 16)
|
||||
val = (uint16_t)val;
|
||||
else if (width == 32)
|
||||
val = (uint32_t)val;
|
||||
else if (width != 64)
|
||||
panic("Unsupported width %d", width);
|
||||
|
||||
double scale = pow(2.0, imm);
|
||||
__asm__ __volatile__("" : "=m" (scale) : "m" (scale));
|
||||
feclearexcept(FeAllExceptions);
|
||||
|
@ -767,11 +705,16 @@ vfpUFixedToFpD(bool flush, bool defaultNan,
|
|||
|
||||
double
|
||||
vfpSFixedToFpD(bool flush, bool defaultNan,
|
||||
int32_t val, bool half, uint8_t imm)
|
||||
int64_t val, uint8_t width, uint8_t imm)
|
||||
{
|
||||
fesetround(FeRoundNearest);
|
||||
if (half)
|
||||
if (width == 16)
|
||||
val = sext<16>(val & mask(16));
|
||||
else if (width == 32)
|
||||
val = sext<32>(val & mask(32));
|
||||
else if (width != 64)
|
||||
panic("Unsupported width %d", width);
|
||||
|
||||
double scale = pow(2.0, imm);
|
||||
__asm__ __volatile__("" : "=m" (scale) : "m" (scale));
|
||||
feclearexcept(FeAllExceptions);
|
||||
|
@ -976,6 +919,85 @@ template
|
|||
double FpOp::processNans(FPSCR &fpscr, bool &done, bool defaultNan,
|
||||
double op1, double op2) const;
|
||||
|
||||
// @TODO remove this function when we've finished switching all FMA code to use the new FPLIB
|
||||
template <class fpType>
|
||||
fpType
|
||||
FpOp::ternaryOp(FPSCR &fpscr, fpType op1, fpType op2, fpType op3,
|
||||
fpType (*func)(fpType, fpType, fpType),
|
||||
bool flush, bool defaultNan, uint32_t rMode) const
|
||||
{
|
||||
const bool single = (sizeof(fpType) == sizeof(float));
|
||||
fpType junk = 0.0;
|
||||
|
||||
if (flush && (flushToZero(op1, op2) || flushToZero(op3)))
|
||||
fpscr.idc = 1;
|
||||
VfpSavedState state = prepFpState(rMode);
|
||||
__asm__ __volatile__ ("" : "=m" (op1), "=m" (op2), "=m" (op3), "=m" (state)
|
||||
: "m" (op1), "m" (op2), "m" (op3), "m" (state));
|
||||
fpType dest = func(op1, op2, op3);
|
||||
__asm__ __volatile__ ("" : "=m" (dest) : "m" (dest));
|
||||
|
||||
int fpClass = std::fpclassify(dest);
|
||||
// Get NaN behavior right. This varies between x86 and ARM.
|
||||
if (fpClass == FP_NAN) {
|
||||
const uint64_t qnan =
|
||||
single ? 0x7fc00000 : ULL(0x7ff8000000000000);
|
||||
const bool nan1 = std::isnan(op1);
|
||||
const bool nan2 = std::isnan(op2);
|
||||
const bool nan3 = std::isnan(op3);
|
||||
const bool signal1 = nan1 && ((fpToBits(op1) & qnan) != qnan);
|
||||
const bool signal2 = nan2 && ((fpToBits(op2) & qnan) != qnan);
|
||||
const bool signal3 = nan3 && ((fpToBits(op3) & qnan) != qnan);
|
||||
if ((!nan1 && !nan2 && !nan3) || (defaultNan == 1)) {
|
||||
dest = bitsToFp(qnan, junk);
|
||||
} else if (signal1) {
|
||||
dest = bitsToFp(fpToBits(op1) | qnan, junk);
|
||||
} else if (signal2) {
|
||||
dest = bitsToFp(fpToBits(op2) | qnan, junk);
|
||||
} else if (signal3) {
|
||||
dest = bitsToFp(fpToBits(op3) | qnan, junk);
|
||||
} else if (nan1) {
|
||||
dest = op1;
|
||||
} else if (nan2) {
|
||||
dest = op2;
|
||||
} else if (nan3) {
|
||||
dest = op3;
|
||||
}
|
||||
} else if (flush && flushToZero(dest)) {
|
||||
feraiseexcept(FeUnderflow);
|
||||
} else if ((
|
||||
(single && (dest == bitsToFp(0x00800000, junk) ||
|
||||
dest == bitsToFp(0x80800000, junk))) ||
|
||||
(!single &&
|
||||
(dest == bitsToFp(ULL(0x0010000000000000), junk) ||
|
||||
dest == bitsToFp(ULL(0x8010000000000000), junk)))
|
||||
) && rMode != VfpRoundZero) {
|
||||
/*
|
||||
* Correct for the fact that underflow is detected -before- rounding
|
||||
* in ARM and -after- rounding in x86.
|
||||
*/
|
||||
fesetround(FeRoundZero);
|
||||
__asm__ __volatile__ ("" : "=m" (op1), "=m" (op2), "=m" (op3)
|
||||
: "m" (op1), "m" (op2), "m" (op3));
|
||||
fpType temp = func(op1, op2, op2);
|
||||
__asm__ __volatile__ ("" : "=m" (temp) : "m" (temp));
|
||||
if (flush && flushToZero(temp)) {
|
||||
dest = temp;
|
||||
}
|
||||
}
|
||||
finishVfp(fpscr, state, flush);
|
||||
return dest;
|
||||
}
|
||||
|
||||
template
|
||||
float FpOp::ternaryOp(FPSCR &fpscr, float op1, float op2, float op3,
|
||||
float (*func)(float, float, float),
|
||||
bool flush, bool defaultNan, uint32_t rMode) const;
|
||||
template
|
||||
double FpOp::ternaryOp(FPSCR &fpscr, double op1, double op2, double op3,
|
||||
double (*func)(double, double, double),
|
||||
bool flush, bool defaultNan, uint32_t rMode) const;
|
||||
|
||||
template <class fpType>
|
||||
fpType
|
||||
FpOp::binaryOp(FPSCR &fpscr, fpType op1, fpType op2,
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2010 ARM Limited
|
||||
* Copyright (c) 2010-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -104,7 +104,8 @@ enum VfpRoundingMode
|
|||
VfpRoundNearest = 0,
|
||||
VfpRoundUpward = 1,
|
||||
VfpRoundDown = 2,
|
||||
VfpRoundZero = 3
|
||||
VfpRoundZero = 3,
|
||||
VfpRoundAway = 4
|
||||
};
|
||||
|
||||
static inline float bitsToFp(uint64_t, float);
|
||||
|
@ -212,7 +213,7 @@ isSnan(fpType val)
|
|||
typedef int VfpSavedState;
|
||||
|
||||
VfpSavedState prepFpState(uint32_t rMode);
|
||||
void finishVfp(FPSCR &fpscr, VfpSavedState state, bool flush);
|
||||
void finishVfp(FPSCR &fpscr, VfpSavedState state, bool flush, FPSCR mask = FpscrExcMask);
|
||||
|
||||
template <class fpType>
|
||||
fpType fixDest(FPSCR fpscr, fpType val, fpType op1);
|
||||
|
@ -228,7 +229,11 @@ double fixFpSFpDDest(FPSCR fpscr, float val);
|
|||
|
||||
uint16_t vcvtFpSFpH(FPSCR &fpscr, bool flush, bool defaultNan,
|
||||
uint32_t rMode, bool ahp, float op);
|
||||
uint16_t vcvtFpDFpH(FPSCR &fpscr, bool flush, bool defaultNan,
|
||||
uint32_t rMode, bool ahp, double op);
|
||||
|
||||
float vcvtFpHFpS(FPSCR &fpscr, bool defaultNan, bool ahp, uint16_t op);
|
||||
double vcvtFpHFpD(FPSCR &fpscr, bool defaultNan, bool ahp, uint16_t op);
|
||||
|
||||
static inline double
|
||||
makeDouble(uint32_t low, uint32_t high)
|
||||
|
@ -249,19 +254,192 @@ highFromDouble(double val)
|
|||
return fpToBits(val) >> 32;
|
||||
}
|
||||
|
||||
uint64_t vfpFpSToFixed(float val, bool isSigned, bool half,
|
||||
uint8_t imm, bool rzero = true);
|
||||
float vfpUFixedToFpS(bool flush, bool defaultNan,
|
||||
uint32_t val, bool half, uint8_t imm);
|
||||
float vfpSFixedToFpS(bool flush, bool defaultNan,
|
||||
int32_t val, bool half, uint8_t imm);
|
||||
static inline void
|
||||
setFPExceptions(int exceptions) {
|
||||
feclearexcept(FeAllExceptions);
|
||||
feraiseexcept(exceptions);
|
||||
}
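setFPExceptions() is a thin wrapper over the C99 floating-point environment calls. A minimal standalone sketch, not from the patch and assuming a host where <cfenv> access behaves as expected, of the same clear-then-raise pattern:

#include <cfenv>
#include <cstdio>

int main()
{
    // Clear any stale flags, then raise exactly the ones we want visible.
    std::feclearexcept(FE_ALL_EXCEPT);
    std::feraiseexcept(FE_INVALID | FE_INEXACT);
    std::printf("invalid=%d inexact=%d\n",
                !!std::fetestexcept(FE_INVALID),
                !!std::fetestexcept(FE_INEXACT));
    return 0;
}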
|
||||
|
||||
template <typename T>
|
||||
uint64_t
|
||||
vfpFpToFixed(T val, bool isSigned, uint8_t width, uint8_t imm, bool
|
||||
useRmode = true, VfpRoundingMode roundMode = VfpRoundZero,
|
||||
bool aarch64 = false)
|
||||
{
|
||||
int rmode;
|
||||
bool roundAwayFix = false;
|
||||
|
||||
if (!useRmode) {
|
||||
rmode = fegetround();
|
||||
} else {
|
||||
switch (roundMode)
|
||||
{
|
||||
case VfpRoundNearest:
|
||||
rmode = FeRoundNearest;
|
||||
break;
|
||||
case VfpRoundUpward:
|
||||
rmode = FeRoundUpward;
|
||||
break;
|
||||
case VfpRoundDown:
|
||||
rmode = FeRoundDown;
|
||||
break;
|
||||
case VfpRoundZero:
|
||||
rmode = FeRoundZero;
|
||||
break;
|
||||
case VfpRoundAway:
|
||||
// There is no equivalent rounding mode; use round down and we'll
|
||||
// fix it later
|
||||
rmode = FeRoundDown;
|
||||
roundAwayFix = true;
|
||||
break;
|
||||
default:
|
||||
panic("Unsupported roundMode %d\n", roundMode);
|
||||
}
|
||||
}
|
||||
__asm__ __volatile__("" : "=m" (rmode) : "m" (rmode));
|
||||
fesetround(FeRoundNearest);
|
||||
val = val * pow(2.0, imm);
|
||||
__asm__ __volatile__("" : "=m" (val) : "m" (val));
|
||||
fesetround(rmode);
|
||||
feclearexcept(FeAllExceptions);
|
||||
__asm__ __volatile__("" : "=m" (val) : "m" (val));
|
||||
T origVal = val;
|
||||
val = rint(val);
|
||||
__asm__ __volatile__("" : "=m" (val) : "m" (val));
|
||||
|
||||
int exceptions = fetestexcept(FeAllExceptions);
|
||||
|
||||
int fpType = std::fpclassify(val);
|
||||
if (fpType == FP_SUBNORMAL || fpType == FP_NAN) {
|
||||
if (fpType == FP_NAN) {
|
||||
exceptions |= FeInvalid;
|
||||
}
|
||||
val = 0.0;
|
||||
} else if (origVal != val) {
|
||||
switch (rmode) {
|
||||
case FeRoundNearest:
|
||||
if (origVal - val > 0.5)
|
||||
val += 1.0;
|
||||
else if (val - origVal > 0.5)
|
||||
val -= 1.0;
|
||||
break;
|
||||
case FeRoundDown:
|
||||
if (roundAwayFix) {
|
||||
// The ordering on the subtraction looks a bit odd in that we
|
||||
// don't do the obvious origVal - val; instead we do
|
||||
// -(val - origVal). This is required to get the correct bit-
|
||||
// exact behaviour when very close to the 0.5 threshold.
|
||||
volatile T error = val;
|
||||
error -= origVal;
|
||||
error = -error;
|
||||
if ( (error > 0.5) ||
|
||||
((error == 0.5) && (val >= 0)) )
|
||||
val += 1.0;
|
||||
} else {
|
||||
if (origVal < val)
|
||||
val -= 1.0;
|
||||
}
|
||||
break;
|
||||
case FeRoundUpward:
|
||||
if (origVal > val)
|
||||
val += 1.0;
|
||||
break;
|
||||
}
|
||||
exceptions |= FeInexact;
|
||||
}
|
||||
|
||||
__asm__ __volatile__("" : "=m" (val) : "m" (val));
|
||||
|
||||
if (isSigned) {
|
||||
bool outOfRange = false;
|
||||
int64_t result = (int64_t) val;
|
||||
uint64_t finalVal;
|
||||
|
||||
if (!aarch64) {
|
||||
if (width == 16) {
|
||||
finalVal = (int16_t)val;
|
||||
} else if (width == 32) {
|
||||
finalVal =(int32_t)val;
|
||||
} else if (width == 64) {
|
||||
finalVal = result;
|
||||
} else {
|
||||
panic("Unsupported width %d\n", width);
|
||||
}
|
||||
|
||||
// check if value is in range
|
||||
int64_t minVal = ~mask(width-1);
|
||||
if ((double)val < minVal) {
|
||||
outOfRange = true;
|
||||
finalVal = minVal;
|
||||
}
|
||||
int64_t maxVal = mask(width-1);
|
||||
if ((double)val > maxVal) {
|
||||
outOfRange = true;
|
||||
finalVal = maxVal;
|
||||
}
|
||||
} else {
|
||||
bool isNeg = val < 0;
|
||||
finalVal = result & mask(width);
|
||||
// If the result is supposed to be less than 64 bits, check that the
|
||||
// upper bits that got thrown away are just sign extension bits
|
||||
if (width != 64) {
|
||||
outOfRange = ((uint64_t) result >> (width - 1)) !=
|
||||
(isNeg ? mask(64-width+1) : 0);
|
||||
}
|
||||
// If the original floating point value doesn't match the
|
||||
// integer version, we are also out of range, so create a saturated
|
||||
// result.
|
||||
if (isNeg) {
|
||||
outOfRange |= val < result;
|
||||
if (outOfRange) {
|
||||
finalVal = 1LL << (width-1);
|
||||
}
|
||||
} else {
|
||||
outOfRange |= val > result;
|
||||
if (outOfRange) {
|
||||
finalVal = mask(width-1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Raise an exception if the value was out of range
|
||||
if (outOfRange) {
|
||||
exceptions |= FeInvalid;
|
||||
exceptions &= ~FeInexact;
|
||||
}
|
||||
setFPExceptions(exceptions);
|
||||
return finalVal;
|
||||
} else {
|
||||
if ((double)val < 0) {
|
||||
exceptions |= FeInvalid;
|
||||
exceptions &= ~FeInexact;
|
||||
setFPExceptions(exceptions);
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint64_t result = ((uint64_t) val) & mask(width);
|
||||
if (val > result) {
|
||||
exceptions |= FeInvalid;
|
||||
exceptions &= ~FeInexact;
|
||||
setFPExceptions(exceptions);
|
||||
return mask(width);
|
||||
}
|
||||
|
||||
setFPExceptions(exceptions);
|
||||
return result;
|
||||
}
|
||||
};
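The saturation bounds above come from the target width: a signed width-w result is clamped to [-(2^(w-1)), 2^(w-1) - 1]. A minimal standalone sketch, not part of the patch, with a hypothetical helper that applies the same clamp:

#include <cmath>
#include <cstdint>
#include <cstdio>

// Hypothetical helper: round and clamp a value into a signed 'width'-bit
// integer, mirroring the in-range checks used by vfpFpToFixed().
static int64_t saturateSigned(double val, unsigned width)
{
    const int64_t maxVal = (int64_t{1} << (width - 1)) - 1;
    const int64_t minVal = -(int64_t{1} << (width - 1));
    double r = std::rint(val);
    if (r > (double)maxVal)
        return maxVal;
    if (r < (double)minVal)
        return minVal;
    return (int64_t)r;
}

int main()
{
    std::printf("%lld\n", (long long)saturateSigned(40000.0, 16));  // 32767
    std::printf("%lld\n", (long long)saturateSigned(-1.6, 16));     // -2
    return 0;
}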
|
||||
|
||||
|
||||
float vfpUFixedToFpS(bool flush, bool defaultNan,
|
||||
uint64_t val, uint8_t width, uint8_t imm);
|
||||
float vfpSFixedToFpS(bool flush, bool defaultNan,
|
||||
int64_t val, uint8_t width, uint8_t imm);
|
||||
|
||||
uint64_t vfpFpDToFixed(double val, bool isSigned, bool half,
|
||||
uint8_t imm, bool rzero = true);
|
||||
double vfpUFixedToFpD(bool flush, bool defaultNan,
|
||||
uint32_t val, bool half, uint8_t imm);
|
||||
uint64_t val, uint8_t width, uint8_t imm);
|
||||
double vfpSFixedToFpD(bool flush, bool defaultNan,
|
||||
int32_t val, bool half, uint8_t imm);
|
||||
int64_t val, uint8_t width, uint8_t imm);
|
||||
|
||||
float fprSqrtEstimate(FPSCR &fpscr, float op);
|
||||
uint32_t unsignedRSqrtEstimate(uint32_t op);
|
||||
|
@ -292,6 +470,20 @@ class VfpMacroOp : public PredMacroOp
|
|||
void nextIdxs(IntRegIndex &dest);
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
static inline T
|
||||
fpAdd(T a, T b)
|
||||
{
|
||||
return a + b;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
static inline T
|
||||
fpSub(T a, T b)
|
||||
{
|
||||
return a - b;
|
||||
};
|
||||
|
||||
static inline float
|
||||
fpAddS(float a, float b)
|
||||
{
|
||||
|
@ -328,6 +520,54 @@ fpDivD(double a, double b)
|
|||
return a / b;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static inline T
|
||||
fpDiv(T a, T b)
|
||||
{
|
||||
return a / b;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
static inline T
|
||||
fpMulX(T a, T b)
|
||||
{
|
||||
uint64_t opData;
|
||||
uint32_t sign1;
|
||||
uint32_t sign2;
|
||||
const bool single = (sizeof(T) == sizeof(float));
|
||||
if (single) {
|
||||
opData = (fpToBits(a));
|
||||
sign1 = opData>>31;
|
||||
opData = (fpToBits(b));
|
||||
sign2 = opData>>31;
|
||||
} else {
|
||||
opData = (fpToBits(a));
|
||||
sign1 = opData>>63;
|
||||
opData = (fpToBits(b));
|
||||
sign2 = opData>>63;
|
||||
}
|
||||
bool inf1 = (std::fpclassify(a) == FP_INFINITE);
|
||||
bool inf2 = (std::fpclassify(b) == FP_INFINITE);
|
||||
bool zero1 = (std::fpclassify(a) == FP_ZERO);
|
||||
bool zero2 = (std::fpclassify(b) == FP_ZERO);
|
||||
if ((inf1 && zero2) || (zero1 && inf2)) {
|
||||
if(sign1 ^ sign2)
|
||||
return (T)(-2.0);
|
||||
else
|
||||
return (T)(2.0);
|
||||
} else {
|
||||
return (a * b);
|
||||
}
|
||||
};
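fpMulX() implements the FMULX special case where infinity times zero returns +/-2.0 instead of the default NaN. A minimal standalone sketch, not from the patch and restricted to single precision for brevity:

#include <cmath>
#include <cstdio>

// Hypothetical float-only version of the same rule.
static float mulx(float a, float b)
{
    bool anyInf  = std::isinf(a) || std::isinf(b);
    bool anyZero = (a == 0.0f) || (b == 0.0f);
    if (anyInf && anyZero)
        return (std::signbit(a) != std::signbit(b)) ? -2.0f : 2.0f;
    return a * b;
}

int main()
{
    std::printf("%g\n", mulx(INFINITY, 0.0f));    // 2
    std::printf("%g\n", mulx(-INFINITY, 0.0f));   // -2
    std::printf("%g\n", mulx(3.0f, 2.0f));        // 6
    return 0;
}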
|
||||
|
||||
|
||||
template <typename T>
|
||||
static inline T
|
||||
fpMul(T a, T b)
|
||||
{
|
||||
return a * b;
|
||||
};
|
||||
|
||||
static inline float
|
||||
fpMulS(float a, float b)
|
||||
{
|
||||
|
@ -340,23 +580,140 @@ fpMulD(double a, double b)
|
|||
return a * b;
|
||||
}
|
||||
|
||||
static inline float
|
||||
fpMaxS(float a, float b)
|
||||
template <typename T>
|
||||
static inline T
|
||||
// @todo remove this when all calls to it have been replaced with the new fplib implementation
|
||||
fpMulAdd(T op1, T op2, T addend)
|
||||
{
|
||||
T result;
|
||||
|
||||
if (sizeof(T) == sizeof(float))
|
||||
result = fmaf(op1, op2, addend);
|
||||
else
|
||||
result = fma(op1, op2, addend);
|
||||
|
||||
// ARM doesn't generate signed NaNs from this operation, so fix up the result
|
||||
if (std::isnan(result) && !std::isnan(op1) &&
|
||||
!std::isnan(op2) && !std::isnan(addend))
|
||||
{
|
||||
uint64_t bitMask = ULL(0x1) << ((sizeof(T) * 8) - 1);
|
||||
result = bitsToFp(fpToBits(result) & ~bitMask, op1);
|
||||
}
|
||||
return result;
|
||||
}
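fpMulAdd() relies on the host's fused multiply-add, which rounds once at the end rather than after both the multiply and the add. A minimal standalone sketch, not from the patch, showing where that single rounding becomes visible:

#include <cmath>
#include <cstdio>

int main()
{
    // The split version rounds the product first, so the subtraction
    // cancels exactly to zero; the fused version keeps the rounding error
    // of the product and typically returns a nonzero value.
    double fused = std::fma(1e16, 1e16, -1e32);
    double split = 1e16 * 1e16 + -1e32;
    std::printf("fused=%g split=%g\n", fused, split);
    return 0;
}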
|
||||
|
||||
template <typename T>
|
||||
static inline T
|
||||
fpRIntX(T a, FPSCR &fpscr)
|
||||
{
|
||||
T rVal;
|
||||
|
||||
rVal = rint(a);
|
||||
if (rVal != a && !std::isnan(a))
|
||||
fpscr.ixc = 1;
|
||||
return (rVal);
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
static inline T
|
||||
fpMaxNum(T a, T b)
|
||||
{
|
||||
const bool single = (sizeof(T) == sizeof(float));
|
||||
const uint64_t qnan = single ? 0x7fc00000 : ULL(0x7ff8000000000000);
|
||||
|
||||
if (std::isnan(a))
|
||||
return ((fpToBits(a) & qnan) == qnan) ? b : a;
|
||||
if (std::isnan(b))
|
||||
return ((fpToBits(b) & qnan) == qnan) ? a : b;
|
||||
// Handle comparisons of +0 and -0.
|
||||
if (!std::signbit(a) && std::signbit(b))
|
||||
return a;
|
||||
return fmaxf(a, b);
|
||||
}
|
||||
return fmax(a, b);
|
||||
};
|
||||
|
||||
static inline float
|
||||
fpMinS(float a, float b)
|
||||
template <typename T>
|
||||
static inline T
|
||||
fpMax(T a, T b)
|
||||
{
|
||||
if (std::isnan(a))
|
||||
return a;
|
||||
if (std::isnan(b))
|
||||
return b;
|
||||
return fpMaxNum<T>(a, b);
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
static inline T
|
||||
fpMinNum(T a, T b)
|
||||
{
|
||||
const bool single = (sizeof(T) == sizeof(float));
|
||||
const uint64_t qnan = single ? 0x7fc00000 : ULL(0x7ff8000000000000);
|
||||
|
||||
if (std::isnan(a))
|
||||
return ((fpToBits(a) & qnan) == qnan) ? b : a;
|
||||
if (std::isnan(b))
|
||||
return ((fpToBits(b) & qnan) == qnan) ? a : b;
|
||||
// Handle comparisons of +0 and -0.
|
||||
if (std::signbit(a) && !std::signbit(b))
|
||||
return a;
|
||||
return fminf(a, b);
|
||||
}
|
||||
return fmin(a, b);
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
static inline T
|
||||
fpMin(T a, T b)
|
||||
{
|
||||
if (std::isnan(a))
|
||||
return a;
|
||||
if (std::isnan(b))
|
||||
return b;
|
||||
return fpMinNum<T>(a, b);
|
||||
};
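The quiet-NaN test used by fpMaxNum()/fpMinNum() checks that both the exponent field and the top mantissa bit are set. A minimal standalone sketch, not from the patch and assuming IEEE-754 single precision on the host:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

static uint32_t bits(float f)
{
    uint32_t u;
    std::memcpy(&u, &f, sizeof(u));
    return u;
}

int main()
{
    const uint32_t qnan = 0x7fc00000;  // same constant as above
    float quiet = std::numeric_limits<float>::quiet_NaN();
    // A quiet NaN keeps all of these bits set; a signalling NaN clears
    // the top mantissa bit and so fails the test.
    std::printf("quiet? %d\n", (bits(quiet) & qnan) == qnan);
    return 0;
}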
|
||||
|
||||
template <typename T>
|
||||
static inline T
|
||||
fpRSqrts(T a, T b)
|
||||
{
|
||||
int fpClassA = std::fpclassify(a);
|
||||
int fpClassB = std::fpclassify(b);
|
||||
T aXb;
|
||||
int fpClassAxB;
|
||||
|
||||
if ((fpClassA == FP_ZERO && fpClassB == FP_INFINITE) ||
|
||||
(fpClassA == FP_INFINITE && fpClassB == FP_ZERO)) {
|
||||
return 1.5;
|
||||
}
|
||||
aXb = a*b;
|
||||
fpClassAxB = std::fpclassify(aXb);
|
||||
if(fpClassAxB == FP_SUBNORMAL) {
|
||||
feraiseexcept(FeUnderflow);
|
||||
return 1.5;
|
||||
}
|
||||
return (3.0 - (a * b)) / 2.0;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
static inline T
|
||||
fpRecps(T a, T b)
|
||||
{
|
||||
int fpClassA = std::fpclassify(a);
|
||||
int fpClassB = std::fpclassify(b);
|
||||
T aXb;
|
||||
int fpClassAxB;
|
||||
|
||||
if ((fpClassA == FP_ZERO && fpClassB == FP_INFINITE) ||
|
||||
(fpClassA == FP_INFINITE && fpClassB == FP_ZERO)) {
|
||||
return 2.0;
|
||||
}
|
||||
aXb = a*b;
|
||||
fpClassAxB = std::fpclassify(aXb);
|
||||
if(fpClassAxB == FP_SUBNORMAL) {
|
||||
feraiseexcept(FeUnderflow);
|
||||
return 2.0;
|
||||
}
|
||||
return 2.0 - (a * b);
|
||||
};
|
||||
|
||||
|
||||
static inline float
|
||||
fpRSqrtsS(float a, float b)
|
||||
|
@ -400,6 +757,23 @@ fpRecpsS(float a, float b)
|
|||
return 2.0 - (a * b);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static inline T
|
||||
roundNEven(T a) {
|
||||
T val;
|
||||
|
||||
val = round(a);
|
||||
if (a - val == 0.5) {
|
||||
if ( (((int) a) & 1) == 0 ) val += 1.0;
|
||||
}
|
||||
else if (a - val == -0.5) {
|
||||
if ( (((int) a) & 1) == 0 ) val -= 1.0;
|
||||
}
|
||||
return val;
|
||||
}
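roundNEven() reproduces round-to-nearest with ties broken to even. A minimal standalone sketch, not from the patch, showing the same tie-breaking via the C library with the rounding mode set to nearest:

#include <cfenv>
#include <cmath>
#include <cstdio>

int main()
{
    std::fesetround(FE_TONEAREST);
    const double vals[] = {0.5, 1.5, 2.5, 3.5, -2.5};
    for (double a : vals) {
        // nearbyint() under FE_TONEAREST rounds halfway cases to the
        // nearest even integer.
        std::printf("%5.1f -> %5.1f\n", a, std::nearbyint(a));
    }
    return 0;
}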
|
||||
|
||||
|
||||
|
||||
class FpOp : public PredOp
|
||||
{
|
||||
protected:
|
||||
|
@ -455,6 +829,12 @@ class FpOp : public PredOp
|
|||
processNans(FPSCR &fpscr, bool &done, bool defaultNan,
|
||||
fpType op1, fpType op2) const;
|
||||
|
||||
template <class fpType>
|
||||
fpType
|
||||
ternaryOp(FPSCR &fpscr, fpType op1, fpType op2, fpType op3,
|
||||
fpType (*func)(fpType, fpType, fpType),
|
||||
bool flush, bool defaultNan, uint32_t rMode) const;
|
||||
|
||||
template <class fpType>
|
||||
fpType
|
||||
binaryOp(FPSCR &fpscr, fpType op1, fpType op2,
|
||||
|
@ -478,6 +858,55 @@ class FpOp : public PredOp
|
|||
pcState.advance();
|
||||
}
|
||||
}
|
||||
|
||||
float
|
||||
fpSqrt (FPSCR fpscr,float x) const
|
||||
{
|
||||
|
||||
return unaryOp(fpscr,x,sqrtf,fpscr.fz,fpscr.rMode);
|
||||
|
||||
}
|
||||
|
||||
double
|
||||
fpSqrt (FPSCR fpscr,double x) const
|
||||
{
|
||||
|
||||
return unaryOp(fpscr,x,sqrt,fpscr.fz,fpscr.rMode);
|
||||
|
||||
}
|
||||
};
|
||||
|
||||
class FpCondCompRegOp : public FpOp
|
||||
{
|
||||
protected:
|
||||
IntRegIndex op1, op2;
|
||||
ConditionCode condCode;
|
||||
uint8_t defCc;
|
||||
|
||||
FpCondCompRegOp(const char *mnem, ExtMachInst _machInst,
|
||||
OpClass __opClass, IntRegIndex _op1, IntRegIndex _op2,
|
||||
ConditionCode _condCode, uint8_t _defCc) :
|
||||
FpOp(mnem, _machInst, __opClass),
|
||||
op1(_op1), op2(_op2), condCode(_condCode), defCc(_defCc)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class FpCondSelOp : public FpOp
|
||||
{
|
||||
protected:
|
||||
IntRegIndex dest, op1, op2;
|
||||
ConditionCode condCode;
|
||||
|
||||
FpCondSelOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _dest, IntRegIndex _op1, IntRegIndex _op2,
|
||||
ConditionCode _condCode) :
|
||||
FpOp(mnem, _machInst, __opClass),
|
||||
dest(_dest), op1(_op1), op2(_op2), condCode(_condCode)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class FpRegRegOp : public FpOp
|
||||
|
@ -550,6 +979,26 @@ class FpRegRegRegOp : public FpOp
|
|||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class FpRegRegRegRegOp : public FpOp
|
||||
{
|
||||
protected:
|
||||
IntRegIndex dest;
|
||||
IntRegIndex op1;
|
||||
IntRegIndex op2;
|
||||
IntRegIndex op3;
|
||||
|
||||
FpRegRegRegRegOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
|
||||
IntRegIndex _dest, IntRegIndex _op1, IntRegIndex _op2,
|
||||
IntRegIndex _op3, VfpMicroMode mode = VfpNotAMicroop) :
|
||||
FpOp(mnem, _machInst, __opClass), dest(_dest), op1(_op1), op2(_op2),
|
||||
op3(_op3)
|
||||
{
|
||||
setVfpMicroFlags(mode, flags);
|
||||
}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;
|
||||
};
|
||||
|
||||
class FpRegRegRegImmOp : public FpOp
|
||||
{
|
||||
protected:
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2009 ARM Limited
|
||||
* Copyright (c) 2009, 2012-2013 ARM Limited
|
||||
* All rights reserved.
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -38,9 +38,128 @@
|
|||
*/
|
||||
|
||||
#include "arch/arm/interrupts.hh"
|
||||
#include "arch/arm/system.hh"
|
||||
|
||||
ArmISA::Interrupts *
|
||||
ArmInterruptsParams::create()
|
||||
{
|
||||
return new ArmISA::Interrupts(this);
|
||||
}
|
||||
|
||||
bool
|
||||
ArmISA::Interrupts::takeInt(ThreadContext *tc, InterruptTypes int_type) const
|
||||
{
|
||||
// Table G1-17~19 of ARM V8 ARM
|
||||
InterruptMask mask;
|
||||
bool highest_el_is_64 = ArmSystem::highestELIs64(tc);
|
||||
|
||||
CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
|
||||
SCR scr;
|
||||
HCR hcr;
|
||||
hcr = tc->readMiscReg(MISCREG_HCR);
|
||||
ExceptionLevel el = (ExceptionLevel) ((uint32_t) cpsr.el);
|
||||
bool cpsr_mask_bit, scr_routing_bit, scr_fwaw_bit, hcr_mask_override_bit;
|
||||
|
||||
if (!highest_el_is_64)
|
||||
scr = tc->readMiscReg(MISCREG_SCR);
|
||||
else
|
||||
scr = tc->readMiscReg(MISCREG_SCR_EL3);
|
||||
|
||||
bool is_secure = inSecureState(scr, cpsr);
|
||||
|
||||
switch(int_type) {
|
||||
case INT_FIQ:
|
||||
cpsr_mask_bit = cpsr.f;
|
||||
scr_routing_bit = scr.fiq;
|
||||
scr_fwaw_bit = scr.fw;
|
||||
hcr_mask_override_bit = hcr.fmo;
|
||||
break;
|
||||
case INT_IRQ:
|
||||
cpsr_mask_bit = cpsr.i;
|
||||
scr_routing_bit = scr.irq;
|
||||
scr_fwaw_bit = 1;
|
||||
hcr_mask_override_bit = hcr.imo;
|
||||
break;
|
||||
case INT_ABT:
|
||||
cpsr_mask_bit = cpsr.a;
|
||||
scr_routing_bit = scr.ea;
|
||||
scr_fwaw_bit = scr.aw;
|
||||
hcr_mask_override_bit = hcr.amo;
|
||||
break;
|
||||
default:
|
||||
panic("Unhandled interrupt type!");
|
||||
}
|
||||
|
||||
if (hcr.tge)
|
||||
hcr_mask_override_bit = 1;
|
||||
|
||||
if (!highest_el_is_64) {
|
||||
// AArch32
|
||||
if (!scr_routing_bit) {
|
||||
// SCR IRQ == 0
|
||||
if (!hcr_mask_override_bit)
|
||||
mask = INT_MASK_M;
|
||||
else {
|
||||
if (!is_secure && (el == EL0 || el == EL1))
|
||||
mask = INT_MASK_T;
|
||||
else
|
||||
mask = INT_MASK_M;
|
||||
}
|
||||
} else {
|
||||
// SCR IRQ == 1
|
||||
if ((!is_secure) &&
|
||||
(hcr_mask_override_bit ||
|
||||
(!scr_fwaw_bit && !hcr_mask_override_bit)))
|
||||
mask = INT_MASK_T;
|
||||
else
|
||||
mask = INT_MASK_M;
|
||||
}
|
||||
} else {
|
||||
// AArch64
|
||||
if (!scr_routing_bit) {
|
||||
// SCR IRQ == 0
|
||||
if (!scr.rw) {
|
||||
// SCR RW == 0
|
||||
if (!hcr_mask_override_bit) {
|
||||
if (el == EL3)
|
||||
mask = INT_MASK_P;
|
||||
else
|
||||
mask = INT_MASK_M;
|
||||
} else {
|
||||
if (el == EL3)
|
||||
mask = INT_MASK_T;
|
||||
else if (is_secure || el == EL2)
|
||||
mask = INT_MASK_M;
|
||||
else
|
||||
mask = INT_MASK_T;
|
||||
}
|
||||
} else {
|
||||
// SCR RW == 1
|
||||
if (!hcr_mask_override_bit) {
|
||||
if (el == EL3 || el == EL2)
|
||||
mask = INT_MASK_P;
|
||||
else
|
||||
mask = INT_MASK_M;
|
||||
} else {
|
||||
if (el == EL3)
|
||||
mask = INT_MASK_P;
|
||||
else if (is_secure || el == EL2)
|
||||
mask = INT_MASK_M;
|
||||
else
|
||||
mask = INT_MASK_T;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// SCR IRQ == 1
|
||||
if (el == EL3)
|
||||
mask = INT_MASK_M;
|
||||
else
|
||||
mask = INT_MASK_T;
|
||||
}
|
||||
}
|
||||
|
||||
return ((mask == INT_MASK_T) ||
|
||||
((mask == INT_MASK_M) && !cpsr_mask_bit)) &&
|
||||
(mask != INT_MASK_P);
|
||||
}
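The return expression above reduces the routing tables to a single predicate. A minimal standalone sketch, not from the patch, that evaluates the same predicate for the three mask outcomes:

#include <cstdio>

enum Mask { MASK_M, MASK_T, MASK_P };

// An interrupt is taken if it is unmaskable here (MASK_T), or maskable
// (MASK_M) with the relevant PSTATE mask bit clear; a merely pending
// (MASK_P) interrupt is never taken.
static bool taken(Mask mask, bool cpsrMaskBit)
{
    return ((mask == MASK_T) ||
            ((mask == MASK_M) && !cpsrMaskBit)) &&
           (mask != MASK_P);
}

int main()
{
    std::printf("%d %d %d\n",
                taken(MASK_T, true),    // 1
                taken(MASK_M, true),    // 0
                taken(MASK_P, false));  // 0
    return 0;
}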
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2010,2012 ARM Limited
|
||||
* Copyright (c) 2010, 2012-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -47,6 +47,7 @@
|
|||
#include "arch/arm/isa_traits.hh"
|
||||
#include "arch/arm/miscregs.hh"
|
||||
#include "arch/arm/registers.hh"
|
||||
#include "arch/arm/utility.hh"
|
||||
#include "cpu/thread_context.hh"
|
||||
#include "debug/Interrupt.hh"
|
||||
#include "params/ArmInterrupts.hh"
|
||||
|
@ -123,31 +124,79 @@ class Interrupts : public SimObject
|
|||
memset(interrupts, 0, sizeof(interrupts));
|
||||
}
|
||||
|
||||
enum InterruptMask {
|
||||
INT_MASK_M, // masked (subject to PSTATE.{A,I,F} mask bit)
|
||||
INT_MASK_T, // taken regardless of mask
|
||||
INT_MASK_P // pending
|
||||
};
|
||||
|
||||
bool takeInt(ThreadContext *tc, InterruptTypes int_type) const;
|
||||
|
||||
bool
|
||||
checkInterrupts(ThreadContext *tc) const
|
||||
{
|
||||
if (!intStatus)
|
||||
HCR hcr = tc->readMiscReg(MISCREG_HCR);
|
||||
|
||||
if (!(intStatus || hcr.va || hcr.vi || hcr.vf))
|
||||
return false;
|
||||
|
||||
CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
|
||||
SCR scr = tc->readMiscReg(MISCREG_SCR);
|
||||
|
||||
return ((interrupts[INT_IRQ] && !cpsr.i) ||
|
||||
(interrupts[INT_FIQ] && !cpsr.f) ||
|
||||
(interrupts[INT_ABT] && !cpsr.a) ||
|
||||
bool isHypMode = cpsr.mode == MODE_HYP;
|
||||
bool isSecure = inSecureState(scr, cpsr);
|
||||
bool allowVIrq = !cpsr.i && hcr.imo && !isSecure && !isHypMode;
|
||||
bool allowVFiq = !cpsr.f && hcr.fmo && !isSecure && !isHypMode;
|
||||
bool allowVAbort = !cpsr.a && hcr.amo && !isSecure && !isHypMode;
|
||||
|
||||
bool take_irq = takeInt(tc, INT_IRQ);
|
||||
bool take_fiq = takeInt(tc, INT_FIQ);
|
||||
bool take_ea = takeInt(tc, INT_ABT);
|
||||
|
||||
return ((interrupts[INT_IRQ] && take_irq) ||
|
||||
(interrupts[INT_FIQ] && take_fiq) ||
|
||||
(interrupts[INT_ABT] && take_ea) ||
|
||||
((interrupts[INT_VIRT_IRQ] || hcr.vi) && allowVIrq) ||
|
||||
((interrupts[INT_VIRT_FIQ] || hcr.vf) && allowVFiq) ||
|
||||
(hcr.va && allowVAbort) ||
|
||||
(interrupts[INT_RST]) ||
|
||||
(interrupts[INT_SEV]));
|
||||
(interrupts[INT_SEV])
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check the raw interrupt state.
|
||||
* This function is used to check if a wfi operation should sleep. If there
|
||||
* is an interrupt pending, even if it's masked, wfi doesn't sleep.
|
||||
* @return any interrupts pending
|
||||
*/
|
||||
bool
|
||||
checkRaw() const
|
||||
checkWfiWake(HCR hcr, CPSR cpsr, SCR scr) const
|
||||
{
|
||||
return intStatus;
|
||||
uint64_t maskedIntStatus;
|
||||
bool virtWake;
|
||||
|
||||
maskedIntStatus = intStatus & ~((1 << INT_VIRT_IRQ) |
|
||||
(1 << INT_VIRT_FIQ));
|
||||
virtWake = (hcr.vi || interrupts[INT_VIRT_IRQ]) && hcr.imo;
|
||||
virtWake |= (hcr.vf || interrupts[INT_VIRT_FIQ]) && hcr.fmo;
|
||||
virtWake |= hcr.va && hcr.amo;
|
||||
virtWake &= (cpsr.mode != MODE_HYP) && !inSecureState(scr, cpsr);
|
||||
return maskedIntStatus || virtWake;
|
||||
}
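checkWfiWake() first strips the virtual IRQ/FIQ bits out of the raw status before asking whether anything physical is pending. A minimal standalone sketch, not from the patch, with hypothetical bit positions:

#include <cstdint>
#include <cstdio>

int main()
{
    // Hypothetical bit positions; the real values come from the
    // InterruptTypes enum.
    enum { INT_IRQ = 0, INT_FIQ = 1, INT_VIRT_IRQ = 2, INT_VIRT_FIQ = 3 };

    uint64_t intStatus = 1ULL << INT_VIRT_IRQ;  // only a virtual IRQ pending
    uint64_t masked = intStatus & ~((1ULL << INT_VIRT_IRQ) |
                                    (1ULL << INT_VIRT_FIQ));
    std::printf("physical interrupt pending: %d\n", masked != 0);  // 0
    return 0;
}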
|
||||
|
||||
uint32_t
|
||||
getISR(HCR hcr, CPSR cpsr, SCR scr)
|
||||
{
|
||||
bool useHcrMux;
|
||||
CPSR isr = 0; // ARM ARM states ISR reg uses same bit positions as CPSR
|
||||
|
||||
useHcrMux = (cpsr.mode != MODE_HYP) && !inSecureState(scr, cpsr);
|
||||
isr.i = (useHcrMux & hcr.imo) ? (interrupts[INT_VIRT_IRQ] || hcr.vi)
|
||||
: interrupts[INT_IRQ];
|
||||
isr.f = (useHcrMux & hcr.fmo) ? (interrupts[INT_VIRT_FIQ] || hcr.vf)
|
||||
: interrupts[INT_FIQ];
|
||||
isr.a = (useHcrMux & hcr.amo) ? hcr.va : interrupts[INT_ABT];
|
||||
return isr;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -172,17 +221,40 @@ class Interrupts : public SimObject
|
|||
Fault
|
||||
getInterrupt(ThreadContext *tc)
|
||||
{
|
||||
if (!intStatus)
|
||||
HCR hcr = tc->readMiscReg(MISCREG_HCR);
|
||||
CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
|
||||
SCR scr = tc->readMiscReg(MISCREG_SCR);
|
||||
|
||||
// Calculate a few temp vars so we can work out if there's a pending
|
||||
// virtual interrupt, and if it's allowed to happen
|
||||
// ARM ARM Issue C section B1.9.9, B1.9.11, and B1.9.13
|
||||
bool isHypMode = cpsr.mode == MODE_HYP;
|
||||
bool isSecure = inSecureState(scr, cpsr);
|
||||
bool allowVIrq = !cpsr.i && hcr.imo && !isSecure && !isHypMode;
|
||||
bool allowVFiq = !cpsr.f && hcr.fmo && !isSecure && !isHypMode;
|
||||
bool allowVAbort = !cpsr.a && hcr.amo && !isSecure && !isHypMode;
|
||||
|
||||
if ( !(intStatus || (hcr.vi && allowVIrq) || (hcr.vf && allowVFiq) ||
|
||||
(hcr.va && allowVAbort)) )
|
||||
return NoFault;
|
||||
|
||||
CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
|
||||
bool take_irq = takeInt(tc, INT_IRQ);
|
||||
bool take_fiq = takeInt(tc, INT_FIQ);
|
||||
bool take_ea = takeInt(tc, INT_ABT);
|
||||
|
||||
if (interrupts[INT_IRQ] && !cpsr.i)
|
||||
|
||||
if (interrupts[INT_IRQ] && take_irq)
|
||||
return new Interrupt;
|
||||
if (interrupts[INT_FIQ] && !cpsr.f)
|
||||
if ((interrupts[INT_VIRT_IRQ] || hcr.vi) && allowVIrq)
|
||||
return new VirtualInterrupt;
|
||||
if (interrupts[INT_FIQ] && take_fiq)
|
||||
return new FastInterrupt;
|
||||
if (interrupts[INT_ABT] && !cpsr.a)
|
||||
return new DataAbort(0, false, 0,
|
||||
if ((interrupts[INT_VIRT_FIQ] || hcr.vf) && allowVFiq)
|
||||
return new VirtualFastInterrupt;
|
||||
if (interrupts[INT_ABT] && take_ea)
|
||||
return new SystemError;
|
||||
if (hcr.va && allowVAbort)
|
||||
return new VirtualDataAbort(0, TlbEntry::DomainType::NoAccess, false,
|
||||
ArmFault::AsynchronousExternalAbort);
|
||||
if (interrupts[INT_RST])
|
||||
return new Reset;
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2010 ARM Limited
|
||||
* Copyright (c) 2010-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -83,6 +83,9 @@ enum IntRegIndex
|
|||
INTREG_R14_MON,
|
||||
INTREG_LR_MON = INTREG_R14_MON,
|
||||
|
||||
INTREG_R13_HYP,
|
||||
INTREG_SP_HYP = INTREG_R13_HYP,
|
||||
|
||||
INTREG_R13_ABT,
|
||||
INTREG_SP_ABT = INTREG_R13_ABT,
|
||||
INTREG_R14_ABT,
|
||||
|
@ -108,7 +111,7 @@ enum IntRegIndex
|
|||
INTREG_R14_FIQ,
|
||||
INTREG_LR_FIQ = INTREG_R14_FIQ,
|
||||
|
||||
INTREG_ZERO, // Dummy zero reg since there has to be one.
|
||||
INTREG_ZERO,
|
||||
INTREG_UREG0,
|
||||
INTREG_UREG1,
|
||||
INTREG_UREG2,
|
||||
|
@ -117,9 +120,51 @@ enum IntRegIndex
|
|||
INTREG_CONDCODES_V,
|
||||
INTREG_CONDCODES_GE,
|
||||
INTREG_FPCONDCODES,
|
||||
INTREG_DUMMY, // Dummy reg used to throw away int reg results
|
||||
|
||||
INTREG_SP0,
|
||||
INTREG_SP1,
|
||||
INTREG_SP2,
|
||||
INTREG_SP3,
|
||||
|
||||
NUM_INTREGS,
|
||||
NUM_ARCH_INTREGS = INTREG_PC + 1,
|
||||
NUM_ARCH_INTREGS = 32,
|
||||
|
||||
/* AArch64 registers */
|
||||
INTREG_X0 = 0,
|
||||
INTREG_X1,
|
||||
INTREG_X2,
|
||||
INTREG_X3,
|
||||
INTREG_X4,
|
||||
INTREG_X5,
|
||||
INTREG_X6,
|
||||
INTREG_X7,
|
||||
INTREG_X8,
|
||||
INTREG_X9,
|
||||
INTREG_X10,
|
||||
INTREG_X11,
|
||||
INTREG_X12,
|
||||
INTREG_X13,
|
||||
INTREG_X14,
|
||||
INTREG_X15,
|
||||
INTREG_X16,
|
||||
INTREG_X17,
|
||||
INTREG_X18,
|
||||
INTREG_X19,
|
||||
INTREG_X20,
|
||||
INTREG_X21,
|
||||
INTREG_X22,
|
||||
INTREG_X23,
|
||||
INTREG_X24,
|
||||
INTREG_X25,
|
||||
INTREG_X26,
|
||||
INTREG_X27,
|
||||
INTREG_X28,
|
||||
INTREG_X29,
|
||||
INTREG_X30,
|
||||
INTREG_X31,
|
||||
|
||||
INTREG_SPX = NUM_INTREGS,
|
||||
|
||||
/* All the aliased indexes. */
|
||||
|
||||
|
@ -195,6 +240,25 @@ enum IntRegIndex
|
|||
INTREG_PC_ABT = INTREG_PC,
|
||||
INTREG_R15_ABT = INTREG_R15,
|
||||
|
||||
/* HYP mode */
|
||||
INTREG_R0_HYP = INTREG_R0,
|
||||
INTREG_R1_HYP = INTREG_R1,
|
||||
INTREG_R2_HYP = INTREG_R2,
|
||||
INTREG_R3_HYP = INTREG_R3,
|
||||
INTREG_R4_HYP = INTREG_R4,
|
||||
INTREG_R5_HYP = INTREG_R5,
|
||||
INTREG_R6_HYP = INTREG_R6,
|
||||
INTREG_R7_HYP = INTREG_R7,
|
||||
INTREG_R8_HYP = INTREG_R8,
|
||||
INTREG_R9_HYP = INTREG_R9,
|
||||
INTREG_R10_HYP = INTREG_R10,
|
||||
INTREG_R11_HYP = INTREG_R11,
|
||||
INTREG_R12_HYP = INTREG_R12,
|
||||
INTREG_LR_HYP = INTREG_LR,
|
||||
INTREG_R14_HYP = INTREG_R14,
|
||||
INTREG_PC_HYP = INTREG_PC,
|
||||
INTREG_R15_HYP = INTREG_R15,
|
||||
|
||||
/* UND mode */
|
||||
INTREG_R0_UND = INTREG_R0,
|
||||
INTREG_R1_UND = INTREG_R1,
|
||||
|
@ -244,11 +308,26 @@ enum IntRegIndex
|
|||
|
||||
typedef IntRegIndex IntRegMap[NUM_ARCH_INTREGS];
|
||||
|
||||
const IntRegMap IntReg64Map = {
|
||||
INTREG_R0, INTREG_R1, INTREG_R2, INTREG_R3,
|
||||
INTREG_R4, INTREG_R5, INTREG_R6, INTREG_R7,
|
||||
INTREG_R8_USR, INTREG_R9_USR, INTREG_R10_USR, INTREG_R11_USR,
|
||||
INTREG_R12_USR, INTREG_R13_USR, INTREG_R14_USR, INTREG_R13_HYP,
|
||||
INTREG_R14_IRQ, INTREG_R13_IRQ, INTREG_R14_SVC, INTREG_R13_SVC,
|
||||
INTREG_R14_ABT, INTREG_R13_ABT, INTREG_R14_UND, INTREG_R13_UND,
|
||||
INTREG_R8_FIQ, INTREG_R9_FIQ, INTREG_R10_FIQ, INTREG_R11_FIQ,
|
||||
INTREG_R12_FIQ, INTREG_R13_FIQ, INTREG_R14_FIQ, INTREG_ZERO
|
||||
};
|
||||
|
||||
const IntRegMap IntRegUsrMap = {
|
||||
INTREG_R0_USR, INTREG_R1_USR, INTREG_R2_USR, INTREG_R3_USR,
|
||||
INTREG_R4_USR, INTREG_R5_USR, INTREG_R6_USR, INTREG_R7_USR,
|
||||
INTREG_R8_USR, INTREG_R9_USR, INTREG_R10_USR, INTREG_R11_USR,
|
||||
INTREG_R12_USR, INTREG_R13_USR, INTREG_R14_USR, INTREG_R15_USR
|
||||
INTREG_R12_USR, INTREG_R13_USR, INTREG_R14_USR, INTREG_R15_USR,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO
|
||||
};
|
||||
|
||||
static inline IntRegIndex
|
||||
|
@ -258,11 +337,33 @@ INTREG_USR(unsigned index)
|
|||
return IntRegUsrMap[index];
|
||||
}
|
||||
|
||||
const IntRegMap IntRegHypMap = {
|
||||
INTREG_R0_HYP, INTREG_R1_HYP, INTREG_R2_HYP, INTREG_R3_HYP,
|
||||
INTREG_R4_HYP, INTREG_R5_HYP, INTREG_R6_HYP, INTREG_R7_HYP,
|
||||
INTREG_R8_HYP, INTREG_R9_HYP, INTREG_R10_HYP, INTREG_R11_HYP,
|
||||
INTREG_R12_HYP, INTREG_R13_HYP, INTREG_R14_HYP, INTREG_R15_HYP,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO
|
||||
};
|
||||
|
||||
static inline IntRegIndex
|
||||
INTREG_HYP(unsigned index)
|
||||
{
|
||||
assert(index < NUM_ARCH_INTREGS);
|
||||
return IntRegHypMap[index];
|
||||
}
|
||||
|
||||
const IntRegMap IntRegSvcMap = {
|
||||
INTREG_R0_SVC, INTREG_R1_SVC, INTREG_R2_SVC, INTREG_R3_SVC,
|
||||
INTREG_R4_SVC, INTREG_R5_SVC, INTREG_R6_SVC, INTREG_R7_SVC,
|
||||
INTREG_R8_SVC, INTREG_R9_SVC, INTREG_R10_SVC, INTREG_R11_SVC,
|
||||
INTREG_R12_SVC, INTREG_R13_SVC, INTREG_R14_SVC, INTREG_R15_SVC
|
||||
INTREG_R12_SVC, INTREG_R13_SVC, INTREG_R14_SVC, INTREG_R15_SVC,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO
|
||||
};
|
||||
|
||||
static inline IntRegIndex
|
||||
|
@ -276,7 +377,11 @@ const IntRegMap IntRegMonMap = {
|
|||
INTREG_R0_MON, INTREG_R1_MON, INTREG_R2_MON, INTREG_R3_MON,
|
||||
INTREG_R4_MON, INTREG_R5_MON, INTREG_R6_MON, INTREG_R7_MON,
|
||||
INTREG_R8_MON, INTREG_R9_MON, INTREG_R10_MON, INTREG_R11_MON,
|
||||
INTREG_R12_MON, INTREG_R13_MON, INTREG_R14_MON, INTREG_R15_MON
|
||||
INTREG_R12_MON, INTREG_R13_MON, INTREG_R14_MON, INTREG_R15_MON,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO
|
||||
};
|
||||
|
||||
static inline IntRegIndex
|
||||
|
@ -290,7 +395,11 @@ const IntRegMap IntRegAbtMap = {
|
|||
INTREG_R0_ABT, INTREG_R1_ABT, INTREG_R2_ABT, INTREG_R3_ABT,
|
||||
INTREG_R4_ABT, INTREG_R5_ABT, INTREG_R6_ABT, INTREG_R7_ABT,
|
||||
INTREG_R8_ABT, INTREG_R9_ABT, INTREG_R10_ABT, INTREG_R11_ABT,
|
||||
INTREG_R12_ABT, INTREG_R13_ABT, INTREG_R14_ABT, INTREG_R15_ABT
|
||||
INTREG_R12_ABT, INTREG_R13_ABT, INTREG_R14_ABT, INTREG_R15_ABT,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO
|
||||
};
|
||||
|
||||
static inline IntRegIndex
|
||||
|
@ -304,7 +413,11 @@ const IntRegMap IntRegUndMap = {
|
|||
INTREG_R0_UND, INTREG_R1_UND, INTREG_R2_UND, INTREG_R3_UND,
|
||||
INTREG_R4_UND, INTREG_R5_UND, INTREG_R6_UND, INTREG_R7_UND,
|
||||
INTREG_R8_UND, INTREG_R9_UND, INTREG_R10_UND, INTREG_R11_UND,
|
||||
INTREG_R12_UND, INTREG_R13_UND, INTREG_R14_UND, INTREG_R15_UND
|
||||
INTREG_R12_UND, INTREG_R13_UND, INTREG_R14_UND, INTREG_R15_UND,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO
|
||||
};
|
||||
|
||||
static inline IntRegIndex
|
||||
|
@ -318,7 +431,11 @@ const IntRegMap IntRegIrqMap = {
|
|||
INTREG_R0_IRQ, INTREG_R1_IRQ, INTREG_R2_IRQ, INTREG_R3_IRQ,
|
||||
INTREG_R4_IRQ, INTREG_R5_IRQ, INTREG_R6_IRQ, INTREG_R7_IRQ,
|
||||
INTREG_R8_IRQ, INTREG_R9_IRQ, INTREG_R10_IRQ, INTREG_R11_IRQ,
|
||||
INTREG_R12_IRQ, INTREG_R13_IRQ, INTREG_R14_IRQ, INTREG_R15_IRQ
|
||||
INTREG_R12_IRQ, INTREG_R13_IRQ, INTREG_R14_IRQ, INTREG_R15_IRQ,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO
|
||||
};
|
||||
|
||||
static inline IntRegIndex
|
||||
|
@ -332,7 +449,11 @@ const IntRegMap IntRegFiqMap = {
|
|||
INTREG_R0_FIQ, INTREG_R1_FIQ, INTREG_R2_FIQ, INTREG_R3_FIQ,
|
||||
INTREG_R4_FIQ, INTREG_R5_FIQ, INTREG_R6_FIQ, INTREG_R7_FIQ,
|
||||
INTREG_R8_FIQ, INTREG_R9_FIQ, INTREG_R10_FIQ, INTREG_R11_FIQ,
|
||||
INTREG_R12_FIQ, INTREG_R13_FIQ, INTREG_R14_FIQ, INTREG_R15_FIQ
|
||||
INTREG_R12_FIQ, INTREG_R13_FIQ, INTREG_R14_FIQ, INTREG_R15_FIQ,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO,
|
||||
INTREG_ZERO, INTREG_ZERO, INTREG_ZERO, INTREG_ZERO
|
||||
};
|
||||
|
||||
static inline IntRegIndex
|
||||
|
@ -351,6 +472,51 @@ intRegInMode(OperatingMode mode, int reg)
|
|||
return mode * intRegsPerMode + reg;
|
||||
}
|
||||
|
||||
static inline int
|
||||
flattenIntRegModeIndex(int reg)
|
||||
{
|
||||
int mode = reg / intRegsPerMode;
|
||||
reg = reg % intRegsPerMode;
|
||||
switch (mode) {
|
||||
case MODE_USER:
|
||||
case MODE_SYSTEM:
|
||||
return INTREG_USR(reg);
|
||||
case MODE_FIQ:
|
||||
return INTREG_FIQ(reg);
|
||||
case MODE_IRQ:
|
||||
return INTREG_IRQ(reg);
|
||||
case MODE_SVC:
|
||||
return INTREG_SVC(reg);
|
||||
case MODE_MON:
|
||||
return INTREG_MON(reg);
|
||||
case MODE_ABORT:
|
||||
return INTREG_ABT(reg);
|
||||
case MODE_HYP:
|
||||
return INTREG_HYP(reg);
|
||||
case MODE_UNDEFINED:
|
||||
return INTREG_UND(reg);
|
||||
default:
|
||||
panic("%d: Flattening into an unknown mode: reg:%#x mode:%#x\n",
|
||||
curTick(), reg, mode);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static inline IntRegIndex
|
||||
makeSP(IntRegIndex reg)
|
||||
{
|
||||
if (reg == INTREG_X31)
|
||||
reg = INTREG_SPX;
|
||||
return reg;
|
||||
}
|
||||
|
||||
|
||||
static inline bool
|
||||
isSP(IntRegIndex reg)
|
||||
{
|
||||
return reg == INTREG_SPX;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
src/arch/arm/isa.cc: 1601 changed lines (file diff suppressed because it is too large)
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2010 ARM Limited
|
||||
* Copyright (c) 2010, 2012-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -44,9 +44,11 @@
|
|||
#define __ARCH_ARM_ISA_HH__
|
||||
|
||||
#include "arch/arm/registers.hh"
|
||||
#include "arch/arm/system.hh"
|
||||
#include "arch/arm/tlb.hh"
|
||||
#include "arch/arm/types.hh"
|
||||
#include "debug/Checkpoint.hh"
|
||||
#include "dev/arm/generic_timer.hh"
|
||||
#include "sim/sim_object.hh"
|
||||
|
||||
struct ArmISAParams;
|
||||
|
@ -56,15 +58,113 @@ class EventManager;
|
|||
|
||||
namespace ArmISA
|
||||
{
|
||||
|
||||
/**
|
||||
* At the moment there are 57 registers which need to be aliased/
|
||||
* translated with other registers in the ISA. This enum helps with that
|
||||
* translation.
|
||||
*/
|
||||
enum translateTable {
|
||||
miscRegTranslateCSSELR_EL1,
|
||||
miscRegTranslateSCTLR_EL1,
|
||||
miscRegTranslateSCTLR_EL2,
|
||||
miscRegTranslateACTLR_EL1,
|
||||
miscRegTranslateACTLR_EL2,
|
||||
miscRegTranslateCPACR_EL1,
|
||||
miscRegTranslateCPTR_EL2,
|
||||
miscRegTranslateHCR_EL2,
|
||||
miscRegTranslateMDCR_EL2,
|
||||
miscRegTranslateHSTR_EL2,
|
||||
miscRegTranslateHACR_EL2,
|
||||
miscRegTranslateTTBR0_EL1,
|
||||
miscRegTranslateTTBR1_EL1,
|
||||
miscRegTranslateTTBR0_EL2,
|
||||
miscRegTranslateVTTBR_EL2,
|
||||
miscRegTranslateTCR_EL1,
|
||||
miscRegTranslateTCR_EL2,
|
||||
miscRegTranslateVTCR_EL2,
|
||||
miscRegTranslateAFSR0_EL1,
|
||||
miscRegTranslateAFSR1_EL1,
|
||||
miscRegTranslateAFSR0_EL2,
|
||||
miscRegTranslateAFSR1_EL2,
|
||||
miscRegTranslateESR_EL2,
|
||||
miscRegTranslateFAR_EL1,
|
||||
miscRegTranslateFAR_EL2,
|
||||
miscRegTranslateHPFAR_EL2,
|
||||
miscRegTranslatePAR_EL1,
|
||||
miscRegTranslateMAIR_EL1,
|
||||
miscRegTranslateMAIR_EL2,
|
||||
miscRegTranslateAMAIR_EL1,
|
||||
miscRegTranslateVBAR_EL1,
|
||||
miscRegTranslateVBAR_EL2,
|
||||
miscRegTranslateCONTEXTIDR_EL1,
|
||||
miscRegTranslateTPIDR_EL0,
|
||||
miscRegTranslateTPIDRRO_EL0,
|
||||
miscRegTranslateTPIDR_EL1,
|
||||
miscRegTranslateTPIDR_EL2,
|
||||
miscRegTranslateTEECR32_EL1,
|
||||
miscRegTranslateCNTFRQ_EL0,
|
||||
miscRegTranslateCNTPCT_EL0,
|
||||
miscRegTranslateCNTVCT_EL0,
|
||||
miscRegTranslateCNTVOFF_EL2,
|
||||
miscRegTranslateCNTKCTL_EL1,
|
||||
miscRegTranslateCNTHCTL_EL2,
|
||||
miscRegTranslateCNTP_TVAL_EL0,
|
||||
miscRegTranslateCNTP_CTL_EL0,
|
||||
miscRegTranslateCNTP_CVAL_EL0,
|
||||
miscRegTranslateCNTV_TVAL_EL0,
|
||||
miscRegTranslateCNTV_CTL_EL0,
|
||||
miscRegTranslateCNTV_CVAL_EL0,
|
||||
miscRegTranslateCNTHP_TVAL_EL2,
|
||||
miscRegTranslateCNTHP_CTL_EL2,
|
||||
miscRegTranslateCNTHP_CVAL_EL2,
|
||||
miscRegTranslateDACR32_EL2,
|
||||
miscRegTranslateIFSR32_EL2,
|
||||
miscRegTranslateTEEHBR32_EL1,
|
||||
miscRegTranslateSDER32_EL3,
|
||||
miscRegTranslateMax
|
||||
};
|
||||
|
||||
class ISA : public SimObject
|
||||
{
|
||||
protected:
|
||||
// Parent system
|
||||
ArmSystem *system;
|
||||
|
||||
// Cached copies of system-level properties
|
||||
bool haveSecurity;
|
||||
bool haveLPAE;
|
||||
bool haveVirtualization;
|
||||
bool haveLargeAsid64;
|
||||
uint8_t physAddrRange64;
|
||||
|
||||
/** Register translation entry used in lookUpMiscReg */
|
||||
struct MiscRegLUTEntry {
|
||||
uint32_t lower;
|
||||
uint32_t upper;
|
||||
};
|
||||
|
||||
struct MiscRegInitializerEntry {
|
||||
uint32_t index;
|
||||
struct MiscRegLUTEntry entry;
|
||||
};
|
||||
|
||||
/** Register table noting all translations */
|
||||
static const struct MiscRegInitializerEntry
|
||||
MiscRegSwitch[miscRegTranslateMax];
|
||||
|
||||
/** Translation table accessible via the value of the register */
|
||||
std::vector<struct MiscRegLUTEntry> lookUpMiscReg;
|
||||
|
||||
MiscReg miscRegs[NumMiscRegs];
|
||||
const IntRegIndex *intRegMap;
|
||||
|
||||
void
|
||||
updateRegMap(CPSR cpsr)
|
||||
{
|
||||
if (cpsr.width == 0) {
|
||||
intRegMap = IntReg64Map;
|
||||
} else {
|
||||
switch (cpsr.mode) {
|
||||
case MODE_USER:
|
||||
case MODE_SYSTEM:
|
||||
|
@ -85,6 +185,9 @@ namespace ArmISA
|
|||
case MODE_ABORT:
|
||||
intRegMap = IntRegAbtMap;
|
||||
break;
|
||||
case MODE_HYP:
|
||||
intRegMap = IntRegHypMap;
|
||||
break;
|
||||
case MODE_UNDEFINED:
|
||||
intRegMap = IntRegUndMap;
|
||||
break;
|
||||
|
@ -92,9 +195,37 @@ namespace ArmISA
|
|||
panic("Unrecognized mode setting in CPSR.\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
::GenericTimer::SystemCounter * getSystemCounter(ThreadContext *tc);
|
||||
::GenericTimer::ArchTimer * getArchTimer(ThreadContext *tc,
|
||||
int cpu_id);
|
||||
|
||||
|
||||
private:
|
||||
inline void assert32(ThreadContext *tc) {
|
||||
CPSR cpsr M5_VAR_USED = readMiscReg(MISCREG_CPSR, tc);
|
||||
assert(cpsr.width);
|
||||
}
|
||||
|
||||
inline void assert64(ThreadContext *tc) {
|
||||
CPSR cpsr M5_VAR_USED = readMiscReg(MISCREG_CPSR, tc);
|
||||
assert(!cpsr.width);
|
||||
}
|
||||
|
||||
void tlbiVA(ThreadContext *tc, MiscReg newVal, uint8_t asid,
|
||||
bool secure_lookup, uint8_t target_el);
|
||||
|
||||
void tlbiALL(ThreadContext *tc, bool secure_lookup, uint8_t target_el);
|
||||
|
||||
void tlbiALLN(ThreadContext *tc, bool hyp, uint8_t target_el);
|
||||
|
||||
void tlbiMVA(ThreadContext *tc, MiscReg newVal, bool secure_lookup,
|
||||
bool hyp, uint8_t target_el);
|
||||
|
||||
public:
|
||||
void clear();
|
||||
void clear64(const ArmISAParams *p);
|
||||
|
||||
MiscReg readMiscRegNoEffect(int misc_reg) const;
|
||||
MiscReg readMiscReg(int misc_reg, ThreadContext *tc);
|
||||
|
@ -109,28 +240,28 @@ namespace ArmISA
|
|||
return intRegMap[reg];
|
||||
} else if (reg < NUM_INTREGS) {
|
||||
return reg;
|
||||
} else {
|
||||
int mode = reg / intRegsPerMode;
|
||||
reg = reg % intRegsPerMode;
|
||||
switch (mode) {
|
||||
case MODE_USER:
|
||||
case MODE_SYSTEM:
|
||||
return INTREG_USR(reg);
|
||||
case MODE_FIQ:
|
||||
return INTREG_FIQ(reg);
|
||||
case MODE_IRQ:
|
||||
return INTREG_IRQ(reg);
|
||||
case MODE_SVC:
|
||||
return INTREG_SVC(reg);
|
||||
case MODE_MON:
|
||||
return INTREG_MON(reg);
|
||||
case MODE_ABORT:
|
||||
return INTREG_ABT(reg);
|
||||
case MODE_UNDEFINED:
|
||||
return INTREG_UND(reg);
|
||||
} else if (reg == INTREG_SPX) {
|
||||
CPSR cpsr = miscRegs[MISCREG_CPSR];
|
||||
ExceptionLevel el = opModeToEL(
|
||||
(OperatingMode) (uint8_t) cpsr.mode);
|
||||
if (!cpsr.sp && el != EL0)
|
||||
return INTREG_SP0;
|
||||
switch (el) {
|
||||
case EL3:
|
||||
return INTREG_SP3;
|
||||
// @todo: uncomment this to enable Virtualization
|
||||
// case EL2:
|
||||
// return INTREG_SP2;
|
||||
case EL1:
|
||||
return INTREG_SP1;
|
||||
case EL0:
|
||||
return INTREG_SP0;
|
||||
default:
|
||||
panic("Flattening into an unknown mode.\n");
|
||||
panic("Invalid exception level");
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
return flattenIntRegModeIndex(reg);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -150,47 +281,127 @@ namespace ArmISA
|
|||
int
|
||||
flattenMiscIndex(int reg) const
|
||||
{
|
||||
int flat_idx = reg;
|
||||
|
||||
if (reg == MISCREG_SPSR) {
|
||||
int spsr_idx = NUM_MISCREGS;
|
||||
CPSR cpsr = miscRegs[MISCREG_CPSR];
|
||||
switch (cpsr.mode) {
|
||||
case MODE_EL0T:
|
||||
warn("User mode does not have SPSR\n");
|
||||
flat_idx = MISCREG_SPSR;
|
||||
break;
|
||||
case MODE_EL1T:
|
||||
case MODE_EL1H:
|
||||
flat_idx = MISCREG_SPSR_EL1;
|
||||
break;
|
||||
case MODE_EL2T:
|
||||
case MODE_EL2H:
|
||||
flat_idx = MISCREG_SPSR_EL2;
|
||||
break;
|
||||
case MODE_EL3T:
|
||||
case MODE_EL3H:
|
||||
flat_idx = MISCREG_SPSR_EL3;
|
||||
break;
|
||||
case MODE_USER:
|
||||
warn("User mode does not have SPSR\n");
|
||||
spsr_idx = MISCREG_SPSR;
|
||||
flat_idx = MISCREG_SPSR;
|
||||
break;
|
||||
case MODE_FIQ:
|
||||
spsr_idx = MISCREG_SPSR_FIQ;
|
||||
flat_idx = MISCREG_SPSR_FIQ;
|
||||
break;
|
||||
case MODE_IRQ:
|
||||
spsr_idx = MISCREG_SPSR_IRQ;
|
||||
flat_idx = MISCREG_SPSR_IRQ;
|
||||
break;
|
||||
case MODE_SVC:
|
||||
spsr_idx = MISCREG_SPSR_SVC;
|
||||
flat_idx = MISCREG_SPSR_SVC;
|
||||
break;
|
||||
case MODE_MON:
|
||||
spsr_idx = MISCREG_SPSR_MON;
|
||||
flat_idx = MISCREG_SPSR_MON;
|
||||
break;
|
||||
case MODE_ABORT:
|
||||
spsr_idx = MISCREG_SPSR_ABT;
|
||||
flat_idx = MISCREG_SPSR_ABT;
|
||||
break;
|
||||
case MODE_HYP:
|
||||
flat_idx = MISCREG_SPSR_HYP;
|
||||
break;
|
||||
case MODE_UNDEFINED:
|
||||
spsr_idx = MISCREG_SPSR_UND;
|
||||
flat_idx = MISCREG_SPSR_UND;
|
||||
break;
|
||||
default:
|
||||
warn("Trying to access SPSR in an invalid mode: %d\n",
|
||||
cpsr.mode);
|
||||
spsr_idx = MISCREG_SPSR;
|
||||
flat_idx = MISCREG_SPSR;
|
||||
break;
|
||||
}
|
||||
return spsr_idx;
|
||||
} else if (miscRegInfo[reg][MISCREG_MUTEX]) {
|
||||
// Mutually exclusive CP15 register
|
||||
switch (reg) {
|
||||
case MISCREG_PRRR_MAIR0:
|
||||
case MISCREG_PRRR_MAIR0_NS:
|
||||
case MISCREG_PRRR_MAIR0_S:
|
||||
{
|
||||
TTBCR ttbcr = readMiscRegNoEffect(MISCREG_TTBCR);
|
||||
// If the muxed reg has been flattened, work out the
|
||||
// offset and apply it to the unmuxed reg
|
||||
int idxOffset = reg - MISCREG_PRRR_MAIR0;
|
||||
if (ttbcr.eae)
|
||||
flat_idx = flattenMiscIndex(MISCREG_MAIR0 +
|
||||
idxOffset);
|
||||
else
|
||||
flat_idx = flattenMiscIndex(MISCREG_PRRR +
|
||||
idxOffset);
|
||||
}
|
||||
return reg;
|
||||
break;
|
||||
case MISCREG_NMRR_MAIR1:
|
||||
case MISCREG_NMRR_MAIR1_NS:
|
||||
case MISCREG_NMRR_MAIR1_S:
|
||||
{
|
||||
TTBCR ttbcr = readMiscRegNoEffect(MISCREG_TTBCR);
|
||||
// If the muxed reg has been flattened, work out the
|
||||
// offset and apply it to the unmuxed reg
|
||||
int idxOffset = reg - MISCREG_NMRR_MAIR1;
|
||||
if (ttbcr.eae)
|
||||
flat_idx = flattenMiscIndex(MISCREG_MAIR1 +
|
||||
idxOffset);
|
||||
else
|
||||
flat_idx = flattenMiscIndex(MISCREG_NMRR +
|
||||
idxOffset);
|
||||
}
|
||||
break;
|
||||
case MISCREG_PMXEVTYPER_PMCCFILTR:
|
||||
{
|
||||
PMSELR pmselr = miscRegs[MISCREG_PMSELR];
|
||||
if (pmselr.sel == 31)
|
||||
flat_idx = flattenMiscIndex(MISCREG_PMCCFILTR);
|
||||
else
|
||||
flat_idx = flattenMiscIndex(MISCREG_PMXEVTYPER);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
panic("Unrecognized misc. register.\n");
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
if (miscRegInfo[reg][MISCREG_BANKED]) {
|
||||
bool secureReg = haveSecurity &&
|
||||
inSecureState(miscRegs[MISCREG_SCR],
|
||||
miscRegs[MISCREG_CPSR]);
|
||||
flat_idx += secureReg ? 2 : 1;
|
||||
}
|
||||
}
|
||||
return flat_idx;
|
||||
}
|
||||
|
||||
void serialize(std::ostream &os)
|
||||
{
|
||||
DPRINTF(Checkpoint, "Serializing Arm Misc Registers\n");
|
||||
SERIALIZE_ARRAY(miscRegs, NumMiscRegs);
|
||||
|
||||
SERIALIZE_SCALAR(haveSecurity);
|
||||
SERIALIZE_SCALAR(haveLPAE);
|
||||
SERIALIZE_SCALAR(haveVirtualization);
|
||||
SERIALIZE_SCALAR(haveLargeAsid64);
|
||||
SERIALIZE_SCALAR(physAddrRange64);
|
||||
}
|
||||
void unserialize(Checkpoint *cp, const std::string §ion)
|
||||
{
|
||||
|
@ -198,6 +409,12 @@ namespace ArmISA
|
|||
UNSERIALIZE_ARRAY(miscRegs, NumMiscRegs);
|
||||
CPSR tmp_cpsr = miscRegs[MISCREG_CPSR];
|
||||
updateRegMap(tmp_cpsr);
|
||||
|
||||
UNSERIALIZE_SCALAR(haveSecurity);
|
||||
UNSERIALIZE_SCALAR(haveLPAE);
|
||||
UNSERIALIZE_SCALAR(haveVirtualization);
|
||||
UNSERIALIZE_SCALAR(haveLargeAsid64);
|
||||
UNSERIALIZE_SCALAR(physAddrRange64);
|
||||
}
|
||||
|
||||
void startup(ThreadContext *tc) {}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010, 2011 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -73,6 +73,7 @@ def bitfield SEVEN_AND_FOUR sevenAndFour;
|
|||
|
||||
def bitfield THUMB thumb;
|
||||
def bitfield BIGTHUMB bigThumb;
|
||||
def bitfield AARCH64 aarch64;
|
||||
|
||||
// Other
|
||||
def bitfield COND_CODE condCode;
|
||||
|
|
src/arch/arm/isa/decoder/aarch64.isa: 48 lines (new file)
|
@ -0,0 +1,48 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2011 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
// not be construed as granting a license to any other intellectual
|
||||
// property including but not limited to intellectual property relating
|
||||
// to a hardware implementation of the functionality of the software
|
||||
// licensed hereunder. You may use the software subject to the license
|
||||
// terms below provided that you ensure that this notice is replicated
|
||||
// unmodified and in its entirety in all distributions of the software,
|
||||
// modified or unmodified, in source code or in binary form.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met: redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer;
|
||||
// redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution;
|
||||
// neither the name of the copyright holders nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Authors: Gabe Black
|
||||
|
||||
////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// The 64 bit ARM decoder
|
||||
// --------------------------
|
||||
//
|
||||
|
||||
|
||||
Aarch64::aarch64();
|
||||
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010-2012 ARM Limited
|
||||
// Copyright (c) 2010-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -73,7 +73,11 @@ format DataOp {
|
|||
0x9: ArmBlxReg::armBlxReg();
|
||||
}
|
||||
0x5: ArmSatAddSub::armSatAddSub();
|
||||
0x7: Breakpoint::bkpt();
|
||||
0x6: ArmERet::armERet();
|
||||
0x7: decode OPCODE_22 {
|
||||
0: Breakpoint::bkpt();
|
||||
1: ArmSmcHyp::armSmcHyp();
|
||||
}
|
||||
}
|
||||
0x1: ArmHalfWordMultAndMultAcc::armHalfWordMultAndMultAcc();
|
||||
}
|
||||
|
@ -105,6 +109,10 @@ format DataOp {
|
|||
}
|
||||
0x6: decode CPNUM {
|
||||
0xa, 0xb: ExtensionRegLoadStore::extensionRegLoadStore();
|
||||
0xf: decode OPCODE_20 {
|
||||
0: Mcrr15::Mcrr15();
|
||||
1: Mrrc15::Mrrc15();
|
||||
}
|
||||
}
|
||||
0x7: decode OPCODE_24 {
|
||||
0: decode OPCODE_4 {
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010-2011 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -41,8 +41,12 @@
// Authors: Gabe Black

decode THUMB default Unknown::unknown() {
    0:
        ##include "arm.isa"
    0: decode AARCH64 {
        0:
            ##include "arm.isa"
        1:
            ##include "aarch64.isa"
    }
    1:
        ##include "thumb.isa"
}
@ -95,8 +95,14 @@ decode BIGTHUMB {
|
|||
0xa, 0xb: ExtensionRegLoadStore::extensionRegLoadStre();
|
||||
0xf: decode HTOPCODE_9_4 {
|
||||
0x00: Unknown::undefined();
|
||||
0x04: WarnUnimpl::mcrr(); // mcrr2
|
||||
0x05: WarnUnimpl::mrrc(); // mrrc2
|
||||
0x04: decode LTCOPROC {
|
||||
0xf: Mcrr15::Mcrr15();
|
||||
default: WarnUnimpl::mcrr(); // mcrr2
|
||||
}
|
||||
0x05: decode LTCOPROC {
|
||||
0xf: Mrrc15::Mrrc15();
|
||||
default: WarnUnimpl::mrrc(); // mrrc2
|
||||
}
|
||||
0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10,
|
||||
0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e:
|
||||
WarnUnimpl::stc(); // stc2
|
||||
|
|
2035 src/arch/arm/isa/formats/aarch64.isa (new file)
File diff suppressed because it is too large
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010, 2012-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -101,7 +101,7 @@ def format Thumb16CondBranchAndSvc() {{
|
|||
return new B(machInst, sext<9>(bits(machInst, 7, 0) << 1),
|
||||
(ConditionCode)(uint32_t)bits(machInst, 11, 8));
|
||||
} else if (bits(machInst, 8)) {
|
||||
return new Svc(machInst);
|
||||
return new Svc(machInst, bits(machInst, 7, 0));
|
||||
} else {
|
||||
// This space will not be allocated in the future.
|
||||
return new Unknown(machInst);
|
||||
|
@ -127,7 +127,7 @@ def format Thumb32BranchesAndMiscCtrl() {{
|
|||
// Permanently undefined.
|
||||
return new Unknown(machInst);
|
||||
} else {
|
||||
return new WarnUnimplemented("smc", machInst);
|
||||
return new Smc(machInst);
|
||||
}
|
||||
} else if ((op & 0x38) != 0x38) {
|
||||
const uint32_t s = bits(machInst, 26);
|
||||
|
@ -141,20 +141,26 @@ def format Thumb32BranchesAndMiscCtrl() {{
|
|||
return new B(machInst, imm,
|
||||
(ConditionCode)(uint32_t)bits(machInst, 25, 22));
|
||||
} else {
|
||||
// HIGH: 12-11=10, LOW: 15-14=00, 12=0
|
||||
switch (op) {
|
||||
case 0x38:
|
||||
{
|
||||
const IntRegIndex rn =
|
||||
(IntRegIndex)(uint32_t)bits(machInst, 19, 16);
|
||||
const uint8_t byteMask = bits(machInst, 11, 8);
|
||||
return new MsrCpsrReg(machInst, rn, byteMask);
|
||||
}
|
||||
case 0x39:
|
||||
{
|
||||
const IntRegIndex rn =
|
||||
(IntRegIndex)(uint32_t)bits(machInst, 19, 16);
|
||||
const uint8_t byteMask = bits(machInst, 11, 8);
|
||||
const bool r = bits(machInst, 20);
|
||||
if (bits(machInst, 5)) {
|
||||
const uint8_t sysM = (bits(machInst, 4) << 4) |
|
||||
byteMask;
|
||||
return new MsrBankedReg(machInst, rn, sysM, r);
|
||||
} else {
|
||||
if (r) {
|
||||
return new MsrSpsrReg(machInst, rn, byteMask);
|
||||
} else {
|
||||
return new MsrCpsrReg(machInst, rn, byteMask);
|
||||
}
|
||||
}
|
||||
}
|
||||
case 0x3a:
|
||||
{
|
||||
|
@ -196,11 +202,11 @@ def format Thumb32BranchesAndMiscCtrl() {{
|
|||
case 0x2:
|
||||
return new Clrex(machInst);
|
||||
case 0x4:
|
||||
return new Dsb(machInst);
|
||||
return new Dsb(machInst, 0);
|
||||
case 0x5:
|
||||
return new Dmb(machInst);
|
||||
return new Dmb(machInst, 0);
|
||||
case 0x6:
|
||||
return new Isb(machInst);
|
||||
return new Isb(machInst, 0);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -208,28 +214,44 @@ def format Thumb32BranchesAndMiscCtrl() {{
|
|||
}
|
||||
case 0x3c:
|
||||
{
|
||||
// On systems that don't support bxj, bxj == bx
|
||||
return new BxReg(machInst,
|
||||
return new BxjReg(machInst,
|
||||
(IntRegIndex)(uint32_t)bits(machInst, 19, 16),
|
||||
COND_UC);
|
||||
}
|
||||
case 0x3d:
|
||||
{
|
||||
const uint32_t imm32 = bits(machInst, 7, 0);
|
||||
return new SubsImmPclr(machInst, INTREG_PC, INTREG_LR,
|
||||
imm32, false);
|
||||
if (imm32 == 0) {
|
||||
return new Eret(machInst);
|
||||
} else {
|
||||
return new SubsImmPclr(machInst, INTREG_PC,
|
||||
INTREG_LR, imm32, false);
|
||||
}
|
||||
}
|
||||
case 0x3e:
|
||||
{
|
||||
const IntRegIndex rd =
|
||||
(IntRegIndex)(uint32_t)bits(machInst, 11, 8);
|
||||
return new MrsCpsr(machInst, rd);
|
||||
}
|
||||
case 0x3f:
|
||||
{
|
||||
|
||||
const IntRegIndex rd =
|
||||
(IntRegIndex)(uint32_t)bits(machInst, 11, 8);
|
||||
const bool r = bits(machInst, 20);
|
||||
if (bits(machInst, 5)) {
|
||||
const uint8_t sysM = (bits(machInst, 4) << 4) |
|
||||
bits(machInst, 11, 8);
|
||||
return new MrsBankedReg(machInst, rd, sysM, r);
|
||||
} else {
|
||||
if (r) {
|
||||
return new MrsSpsr(machInst, rd);
|
||||
} else {
|
||||
return new MrsCpsr(machInst, rd);
|
||||
}
|
||||
}
|
||||
}
|
||||
case 0xfe:
|
||||
{
|
||||
uint32_t imm16 = (bits(machInst, 19, 16) << 12) |
|
||||
(bits(machInst, 11, 0) << 0);
|
||||
return new Hvc(machInst, imm16);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010-2011 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -44,6 +44,12 @@
|
|||
//Include the basic format
|
||||
##include "basic.isa"
|
||||
|
||||
//Include support for decoding AArch64 instructions
|
||||
##include "aarch64.isa"
|
||||
|
||||
//Include support for decoding AArch64 NEON instructions
|
||||
##include "neon64.isa"
|
||||
|
||||
//Include support for predicated instructions
|
||||
##include "pred.isa"
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010-2011 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -151,8 +151,7 @@ let {{
|
|||
if (singleAll) {
|
||||
size = bits(machInst, 7, 6);
|
||||
bool t = bits(machInst, 5);
|
||||
unsigned eBytes = (1 << size);
|
||||
align = (eBytes - 1) | TLB::AllowUnaligned;
|
||||
align = size | TLB::AllowUnaligned;
|
||||
if (width == 1) {
|
||||
regs = t ? 2 : 1;
|
||||
inc = 1;
|
||||
|
@ -164,7 +163,7 @@ let {{
|
|||
case 1:
|
||||
case 2:
|
||||
if (bits(machInst, 4))
|
||||
align = width * eBytes - 1;
|
||||
align = size + width - 1;
|
||||
break;
|
||||
case 3:
|
||||
break;
|
||||
|
@ -173,20 +172,19 @@ let {{
|
|||
if (bits(machInst, 4) == 0)
|
||||
return new Unknown(machInst);
|
||||
size = 2;
|
||||
align = 0xf;
|
||||
align = 0x4;
|
||||
} else if (size == 2) {
|
||||
if (bits(machInst, 4))
|
||||
align = 7;
|
||||
align = 0x3;
|
||||
} else {
|
||||
if (bits(machInst, 4))
|
||||
align = 4 * eBytes - 1;
|
||||
align = size + 2;
|
||||
}
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
size = bits(machInst, 11, 10);
|
||||
unsigned eBytes = (1 << size);
|
||||
align = (eBytes - 1) | TLB::AllowUnaligned;
|
||||
align = size | TLB::AllowUnaligned;
|
||||
regs = width;
|
||||
unsigned indexAlign = bits(machInst, 7, 4);
|
||||
// If width is 1, inc is always 1. That's overridden later.
|
||||
|
@ -219,13 +217,13 @@ let {{
|
|||
break;
|
||||
case 2:
|
||||
if (bits(indexAlign, 1, 0))
|
||||
align = 3;
|
||||
align = 2;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 2:
|
||||
if (bits(indexAlign, 0))
|
||||
align = (2 * eBytes) - 1;
|
||||
align = size + 1;
|
||||
break;
|
||||
case 3:
|
||||
break;
|
||||
|
@ -234,11 +232,11 @@ let {{
|
|||
case 0:
|
||||
case 1:
|
||||
if (bits(indexAlign, 0))
|
||||
align = (4 * eBytes) - 1;
|
||||
align = size + 2;
|
||||
break;
|
||||
case 2:
|
||||
if (bits(indexAlign, 0))
|
||||
align = (4 << bits(indexAlign, 1, 0)) - 1;
|
||||
align = bits(indexAlign, 1, 0) + 2;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
@ -252,9 +250,9 @@ let {{
|
|||
align = bits(machInst, 5, 4);
|
||||
if (align == 0) {
|
||||
// @align wasn't specified, so alignment can be turned off.
|
||||
align = ((1 << size) - 1) | TLB::AllowUnaligned;
|
||||
align = size | TLB::AllowUnaligned;
|
||||
} else {
|
||||
align = ((4 << align) - 1);
|
||||
align = align + 2;
|
||||
}
|
||||
switch (width) {
|
||||
case 1:
|
||||
|
@ -588,6 +586,23 @@ let {{
|
|||
}
|
||||
}
|
||||
case 0xc:
|
||||
if (b) {
|
||||
if (!u) {
|
||||
if (bits(c, 1) == 0) {
|
||||
if (q) {
|
||||
return new NVfmaQFp<float>(machInst, vd, vn, vm);
|
||||
} else {
|
||||
return new NVfmaDFp<float>(machInst, vd, vn, vm);
|
||||
}
|
||||
} else {
|
||||
if (q) {
|
||||
return new NVfmsQFp<float>(machInst, vd, vn, vm);
|
||||
} else {
|
||||
return new NVfmsDFp<float>(machInst, vd, vn, vm);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return new Unknown(machInst);
|
||||
case 0xd:
|
||||
if (b) {
|
||||
|
@ -1827,7 +1842,7 @@ let {{
|
|||
break;
|
||||
case 0x1:
|
||||
{
|
||||
if (offset == 0 || vd + offset/2 > NumFloatArchRegs) {
|
||||
if (offset == 0 || vd + offset/2 > NumFloatV7ArchRegs) {
|
||||
break;
|
||||
}
|
||||
switch (bits(opcode, 1, 0)) {
|
||||
|
@ -1951,8 +1966,9 @@ let {{
|
|||
} else if (a == 0x7) {
|
||||
const IntRegIndex rt =
|
||||
(IntRegIndex)(uint32_t)bits(machInst, 15, 12);
|
||||
uint32_t specReg = bits(machInst, 19, 16);
|
||||
switch (specReg) {
|
||||
uint32_t reg = bits(machInst, 19, 16);
|
||||
uint32_t specReg;
|
||||
switch (reg) {
|
||||
case 0:
|
||||
specReg = MISCREG_FPSID;
|
||||
break;
|
||||
|
@ -1974,7 +1990,9 @@ let {{
|
|||
if (specReg == MISCREG_FPSCR) {
|
||||
return new VmsrFpscr(machInst, (IntRegIndex)specReg, rt);
|
||||
} else {
|
||||
return new Vmsr(machInst, (IntRegIndex)specReg, rt);
|
||||
uint32_t iss = mcrMrcIssBuild(0, bits(machInst, 3, 0), rt,
|
||||
reg, a, bits(machInst, 7, 5));
|
||||
return new Vmsr(machInst, (IntRegIndex)specReg, rt, iss);
|
||||
}
|
||||
}
|
||||
} else if (l == 0 && c == 1) {
|
||||
|
@ -2041,8 +2059,9 @@ let {{
|
|||
} else if (a == 7) {
|
||||
const IntRegIndex rt =
|
||||
(IntRegIndex)(uint32_t)bits(machInst, 15, 12);
|
||||
uint32_t specReg = bits(machInst, 19, 16);
|
||||
switch (specReg) {
|
||||
uint32_t reg = bits(machInst, 19, 16);
|
||||
uint32_t specReg;
|
||||
switch (reg) {
|
||||
case 0:
|
||||
specReg = MISCREG_FPSID;
|
||||
break;
|
||||
|
@ -2070,7 +2089,9 @@ let {{
|
|||
} else if (specReg == MISCREG_FPSCR) {
|
||||
return new VmrsFpscr(machInst, rt, (IntRegIndex)specReg);
|
||||
} else {
|
||||
return new Vmrs(machInst, rt, (IntRegIndex)specReg);
|
||||
uint32_t iss = mcrMrcIssBuild(l, bits(machInst, 3, 0), rt,
|
||||
reg, a, bits(machInst, 7, 5));
|
||||
return new Vmrs(machInst, rt, (IntRegIndex)specReg, iss);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
@ -2235,6 +2256,44 @@ let {{
|
|||
}
|
||||
}
|
||||
break;
|
||||
case 0x9:
|
||||
if ((opc3 & 0x1) == 0) {
|
||||
if (single) {
|
||||
return decodeVfpRegRegRegOp<VfnmaS>(
|
||||
machInst, vd, vn, vm, false);
|
||||
} else {
|
||||
return decodeVfpRegRegRegOp<VfnmaD>(
|
||||
machInst, vd, vn, vm, true);
|
||||
}
|
||||
} else {
|
||||
if (single) {
|
||||
return decodeVfpRegRegRegOp<VfnmsS>(
|
||||
machInst, vd, vn, vm, false);
|
||||
} else {
|
||||
return decodeVfpRegRegRegOp<VfnmsD>(
|
||||
machInst, vd, vn, vm, true);
|
||||
}
|
||||
}
|
||||
break;
|
||||
case 0xa:
|
||||
if ((opc3 & 0x1) == 0) {
|
||||
if (single) {
|
||||
return decodeVfpRegRegRegOp<VfmaS>(
|
||||
machInst, vd, vn, vm, false);
|
||||
} else {
|
||||
return decodeVfpRegRegRegOp<VfmaD>(
|
||||
machInst, vd, vn, vm, true);
|
||||
}
|
||||
} else {
|
||||
if (single) {
|
||||
return decodeVfpRegRegRegOp<VfmsS>(
|
||||
machInst, vd, vn, vm, false);
|
||||
} else {
|
||||
return decodeVfpRegRegRegOp<VfmsD>(
|
||||
machInst, vd, vn, vm, true);
|
||||
}
|
||||
}
|
||||
break;
|
||||
case 0xb:
|
||||
if ((opc3 & 0x1) == 0) {
|
||||
const uint32_t baseImm =
|
||||
|
|
|
@ -282,7 +282,7 @@ def format Thumb32SrsRfe() {{
|
|||
}
|
||||
} else {
|
||||
const uint32_t mode = bits(machInst, 4, 0);
|
||||
if (badMode((OperatingMode)mode))
|
||||
if (badMode32((OperatingMode)mode))
|
||||
return new Unknown(machInst);
|
||||
if (!add && !wb) {
|
||||
return new %(srs)s(machInst, mode,
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010-2012 ARM Limited
|
||||
// Copyright (c) 2010-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -36,19 +36,42 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Authors: Gabe Black
//          Giacomo Gabrielli

def format ArmERet() {{
    decode_block = "return new Eret(machInst);"
}};

def format Svc() {{
    decode_block = "return new Svc(machInst);"
    decode_block = "return new Svc(machInst, bits(machInst, 23, 0));"
}};

def format ArmSmcHyp() {{
    decode_block = '''
    {
        if (bits(machInst, 21))
        {
            return new Smc(machInst);
        } else {
            uint32_t imm16 = (bits(machInst, 19, 8) << 4) |
                             (bits(machInst, 3, 0) << 0);
            return new Hvc(machInst, imm16);
        }
    }
    '''
}};

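In the ArmSmcHyp format above, the HVC immediate is not contiguous in the A32 encoding: its upper twelve bits live in machInst[19:8] and its lower four in machInst[3:0], so the decode block stitches them back together. A tiny standalone C++ sketch of that reassembly (bits(x, hi, lo) here is a local helper that extracts an inclusive bit range, mirroring the bits() used above):

#include <cassert>
#include <cstdint>

// Extract bits [hi:lo] (inclusive) of 'val'.
static uint32_t bits(uint32_t val, unsigned hi, unsigned lo) {
    return (val >> lo) & ((1u << (hi - lo + 1)) - 1);
}

// Rebuild the 16-bit HVC immediate from its two fields in the instruction word.
static uint32_t hvcImm16(uint32_t machInst) {
    return (bits(machInst, 19, 8) << 4) | bits(machInst, 3, 0);
}

int main() {
    // Hypothetical encoding with imm12 = 0xABC in [19:8] and imm4 = 0xD in [3:0].
    uint32_t inst = (0xABCu << 8) | 0xDu;
    assert(hvcImm16(inst) == 0xABCD);
    return 0;
}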
def format ArmMsrMrs() {{
|
||||
decode_block = '''
|
||||
{
|
||||
const uint8_t byteMask = bits(machInst, 19, 16);
|
||||
const uint8_t sysM = byteMask | (bits(machInst, 8) << 4);
|
||||
const IntRegIndex rn = (IntRegIndex)(uint32_t)bits(machInst, 3, 0);
|
||||
const IntRegIndex rd = (IntRegIndex)(uint32_t)bits(machInst, 15, 12);
|
||||
const uint32_t opcode = bits(machInst, 24, 21);
|
||||
const bool useImm = bits(machInst, 25);
|
||||
const bool r = bits(machInst, 22);
|
||||
const bool isBanked = bits(machInst, 9);
|
||||
|
||||
const uint32_t unrotated = bits(machInst, 7, 0);
|
||||
const uint32_t rotation = (bits(machInst, 11, 8) << 1);
|
||||
|
@ -56,21 +79,37 @@ def format ArmMsrMrs() {{
|
|||
|
||||
switch (opcode) {
|
||||
case 0x8:
|
||||
if (isBanked) {
|
||||
return new MrsBankedReg(machInst, rd, sysM, r!=0);
|
||||
} else {
|
||||
return new MrsCpsr(machInst, rd);
|
||||
}
|
||||
case 0x9:
|
||||
if (useImm) {
|
||||
return new MsrCpsrImm(machInst, imm, byteMask);
|
||||
} else {
|
||||
if (isBanked) {
|
||||
return new MsrBankedReg(machInst, rn, sysM, r!=0);
|
||||
} else {
|
||||
return new MsrCpsrReg(machInst, rn, byteMask);
|
||||
}
|
||||
}
|
||||
case 0xa:
|
||||
if (isBanked) {
|
||||
return new MrsBankedReg(machInst, rd, sysM, r!=0);
|
||||
} else {
|
||||
return new MrsSpsr(machInst, rd);
|
||||
}
|
||||
case 0xb:
|
||||
if (useImm) {
|
||||
return new MsrSpsrImm(machInst, imm, byteMask);
|
||||
} else {
|
||||
if (isBanked) {
|
||||
return new MsrBankedReg(machInst, rn, sysM, r!=0);
|
||||
} else {
|
||||
return new MsrSpsrReg(machInst, rn, byteMask);
|
||||
}
|
||||
}
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
|
@ -99,16 +138,17 @@ let {{
|
|||
switch (miscReg) {
|
||||
case MISCREG_NOP:
|
||||
return new NopInst(machInst);
|
||||
case NUM_MISCREGS:
|
||||
case MISCREG_CP14_UNIMPL:
|
||||
return new FailUnimplemented(
|
||||
csprintf("miscreg crn:%d opc1:%d crm:%d opc2:%d %s unknown",
|
||||
crn, opc1, crm, opc2, isRead ? "read" : "write").c_str(),
|
||||
machInst);
|
||||
default:
|
||||
uint32_t iss = mcrMrcIssBuild(isRead, crm, rt, crn, opc1, opc2);
|
||||
if (isRead) {
|
||||
return new Mrc14(machInst, rt, (IntRegIndex)miscReg);
|
||||
return new Mrc14(machInst, rt, (IntRegIndex)miscReg, iss);
|
||||
} else {
|
||||
return new Mcr14(machInst, (IntRegIndex)miscReg, rt);
|
||||
return new Mcr14(machInst, (IntRegIndex)miscReg, rt, iss);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -123,8 +163,8 @@ def format McrMrc14() {{
|
|||
|
||||
let {{
|
||||
header_output = '''
|
||||
StaticInstPtr
|
||||
decodeMcrMrc15(ExtMachInst machInst);
|
||||
StaticInstPtr decodeMcrMrc14(ExtMachInst machInst);
|
||||
StaticInstPtr decodeMcrMrc15(ExtMachInst machInst);
|
||||
'''
|
||||
decoder_output = '''
|
||||
StaticInstPtr
|
||||
|
@ -136,107 +176,50 @@ let {{
|
|||
const uint32_t crm = bits(machInst, 3, 0);
|
||||
const MiscRegIndex miscReg = decodeCP15Reg(crn, opc1, crm, opc2);
|
||||
const IntRegIndex rt = (IntRegIndex)(uint32_t)bits(machInst, 15, 12);
|
||||
|
||||
const bool isRead = bits(machInst, 20);
|
||||
uint32_t iss = mcrMrcIssBuild(isRead, crm, rt, crn, opc1, opc2);
|
||||
|
||||
switch (miscReg) {
|
||||
case MISCREG_NOP:
|
||||
return new NopInst(machInst);
|
||||
case NUM_MISCREGS:
|
||||
case MISCREG_CP15_UNIMPL:
|
||||
return new FailUnimplemented(
|
||||
csprintf("miscreg crn:%d opc1:%d crm:%d opc2:%d %s unknown",
|
||||
crn, opc1, crm, opc2, isRead ? "read" : "write").c_str(),
|
||||
machInst);
|
||||
case MISCREG_DCCISW:
|
||||
return new WarnUnimplemented(
|
||||
isRead ? "mrc dccisw" : "mcr dcisw", machInst);
|
||||
case MISCREG_DCCIMVAC:
|
||||
return new WarnUnimplemented(
|
||||
isRead ? "mrc dccimvac" : "mcr dccimvac", machInst);
|
||||
case MISCREG_DCIMVAC:
|
||||
return new WarnUnimplemented(
|
||||
isRead ? "mrc dcimvac" : "mcr dcimvac", machInst);
|
||||
case MISCREG_DCCMVAC:
|
||||
return new FlushPipeInst(
|
||||
isRead ? "mrc dccmvac" : "mcr dccmvac", machInst);
|
||||
case MISCREG_DCCMVAU:
|
||||
return new WarnUnimplemented(
|
||||
isRead ? "mrc dccmvau" : "mcr dccmvau", machInst);
|
||||
case MISCREG_CP15ISB:
|
||||
return new Isb(machInst);
|
||||
return new Isb(machInst, iss);
|
||||
case MISCREG_CP15DSB:
|
||||
return new Dsb(machInst);
|
||||
return new Dsb(machInst, iss);
|
||||
case MISCREG_CP15DMB:
|
||||
return new Dmb(machInst);
|
||||
case MISCREG_ICIALLUIS:
|
||||
return new WarnUnimplemented(
|
||||
isRead ? "mrc icialluis" : "mcr icialluis", machInst);
|
||||
case MISCREG_ICIMVAU:
|
||||
return new WarnUnimplemented(
|
||||
isRead ? "mrc icimvau" : "mcr icimvau", machInst);
|
||||
case MISCREG_BPIMVA:
|
||||
return new WarnUnimplemented(
|
||||
isRead ? "mrc bpimva" : "mcr bpimva", machInst);
|
||||
case MISCREG_BPIALLIS:
|
||||
return new WarnUnimplemented(
|
||||
isRead ? "mrc bpiallis" : "mcr bpiallis", machInst);
|
||||
case MISCREG_BPIALL:
|
||||
return new WarnUnimplemented(
|
||||
isRead ? "mrc bpiall" : "mcr bpiall", machInst);
|
||||
case MISCREG_L2LATENCY:
|
||||
return new WarnUnimplemented(
|
||||
isRead ? "mrc l2latency" : "mcr l2latency", machInst);
|
||||
case MISCREG_CRN15:
|
||||
return new WarnUnimplemented(
|
||||
isRead ? "mrc crn15" : "mcr crn15", machInst);
|
||||
|
||||
// Write only.
|
||||
case MISCREG_TLBIALLIS:
|
||||
case MISCREG_TLBIMVAIS:
|
||||
case MISCREG_TLBIASIDIS:
|
||||
case MISCREG_TLBIMVAAIS:
|
||||
case MISCREG_ITLBIALL:
|
||||
case MISCREG_ITLBIMVA:
|
||||
case MISCREG_ITLBIASID:
|
||||
case MISCREG_DTLBIALL:
|
||||
case MISCREG_DTLBIMVA:
|
||||
case MISCREG_DTLBIASID:
|
||||
case MISCREG_TLBIALL:
|
||||
case MISCREG_TLBIMVA:
|
||||
case MISCREG_TLBIASID:
|
||||
case MISCREG_TLBIMVAA:
|
||||
if (isRead) {
|
||||
return new Unknown(machInst);
|
||||
} else {
|
||||
return new Mcr15(machInst, (IntRegIndex)miscReg, rt);
|
||||
}
|
||||
|
||||
// Read only in user mode.
|
||||
case MISCREG_TPIDRURO:
|
||||
if (isRead) {
|
||||
return new Mrc15User(machInst, rt, (IntRegIndex)miscReg);
|
||||
} else {
|
||||
return new Mcr15(machInst, (IntRegIndex)miscReg, rt);
|
||||
}
|
||||
|
||||
// Read/write in user mode.
|
||||
case MISCREG_TPIDRURW:
|
||||
if (isRead) {
|
||||
return new Mrc15User(machInst, rt, (IntRegIndex)miscReg);
|
||||
} else {
|
||||
return new Mcr15User(machInst, (IntRegIndex)miscReg, rt);
|
||||
}
|
||||
|
||||
// Read/write, privileged only.
|
||||
return new Dmb(machInst, iss);
|
||||
default:
|
||||
if (miscReg >= MISCREG_CP15_UNIMP_START)
|
||||
if (miscRegInfo[miscReg][MISCREG_WARN_NOT_FAIL]) {
|
||||
std::string full_mnem = csprintf("%s %s",
|
||||
isRead ? "mrc" : "mcr", miscRegName[miscReg]);
|
||||
warn("\\tinstruction '%s' unimplemented\\n", full_mnem);
|
||||
|
||||
// Remove the warn flag and set the implemented flag. This
// prevents the instruction from warning a second time, and it also
// means the instruction is actually generated. Actually
// creating the instruction to access a register that isn't
// implemented sounds a bit silly, but it's required to get
// the correct behaviour for hyp traps and undef exceptions.
|
||||
miscRegInfo[miscReg][MISCREG_IMPLEMENTED] = true;
|
||||
miscRegInfo[miscReg][MISCREG_WARN_NOT_FAIL] = false;
|
||||
}
|
||||
|
||||
if (miscRegInfo[miscReg][MISCREG_IMPLEMENTED]) {
|
||||
if (isRead)
|
||||
return new Mrc15(machInst, rt, (IntRegIndex)miscReg, iss);
|
||||
return new Mcr15(machInst, (IntRegIndex)miscReg, rt, iss);
|
||||
} else {
|
||||
return new FailUnimplemented(csprintf("%s %s",
|
||||
isRead ? "mrc" : "mcr", miscRegName[miscReg]).c_str(),
|
||||
machInst);
|
||||
if (isRead) {
|
||||
return new Mrc15(machInst, rt, (IntRegIndex)miscReg);
|
||||
} else {
|
||||
return new Mcr15(machInst, (IntRegIndex)miscReg, rt);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
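The default case above implements a "warn once, then behave as implemented" policy: the first access to such a register prints a warning and then rewrites the register's info flags so that later accesses take the normal MCR/MRC path, which is what hyp traps and undefined-instruction checks need. A minimal C++ sketch of that flag-flipping pattern, using hypothetical names rather than the real miscRegInfo table:

#include <bitset>
#include <cstdio>

// Hypothetical per-register attribute flags, loosely modelled on
// miscRegInfo[reg][...] above.
enum RegFlag { IMPLEMENTED, WARN_NOT_FAIL, NUM_FLAGS };

static std::bitset<NUM_FLAGS> regInfo[2] = {
    std::bitset<NUM_FLAGS>(),                       // reg 0: unimplemented
    std::bitset<NUM_FLAGS>().set(WARN_NOT_FAIL)     // reg 1: warn on first use
};

static void access(int reg) {
    if (regInfo[reg][WARN_NOT_FAIL]) {
        std::printf("warning: register %d unimplemented\n", reg);
        // Flip the flags so this warning fires only once and later
        // accesses are decoded as ordinary (implemented) accesses.
        regInfo[reg][IMPLEMENTED] = true;
        regInfo[reg][WARN_NOT_FAIL] = false;
    }
    if (regInfo[reg][IMPLEMENTED])
        std::printf("normal access to register %d\n", reg);
    else
        std::printf("undefined access to register %d\n", reg);
}

int main() {
    access(1);  // warns, then behaves as implemented
    access(1);  // no warning this time
    access(0);  // stays undefined
    return 0;
}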
@ -248,3 +231,70 @@ def format McrMrc15() {{
|
|||
return decodeMcrMrc15(machInst);
|
||||
'''
|
||||
}};
|
||||
|
||||
let {{
|
||||
header_output = '''
|
||||
StaticInstPtr
|
||||
decodeMcrrMrrc15(ExtMachInst machInst);
|
||||
'''
|
||||
decoder_output = '''
|
||||
StaticInstPtr
|
||||
decodeMcrrMrrc15(ExtMachInst machInst)
|
||||
{
|
||||
const uint32_t crm = bits(machInst, 3, 0);
|
||||
const uint32_t opc1 = bits(machInst, 7, 4);
|
||||
const MiscRegIndex miscReg = decodeCP15Reg64(crm, opc1);
|
||||
const IntRegIndex rt = (IntRegIndex) (uint32_t) bits(machInst, 15, 12);
|
||||
const IntRegIndex rt2 = (IntRegIndex) (uint32_t) bits(machInst, 19, 16);
|
||||
|
||||
const bool isRead = bits(machInst, 20);
|
||||
|
||||
switch (miscReg) {
|
||||
case MISCREG_CP15_UNIMPL:
|
||||
return new FailUnimplemented(
|
||||
csprintf("miscreg crm:%d opc1:%d 64-bit %s unknown",
|
||||
crm, opc1, isRead ? "read" : "write").c_str(),
|
||||
machInst);
|
||||
default:
|
||||
if (miscRegInfo[miscReg][MISCREG_WARN_NOT_FAIL]) {
|
||||
std::string full_mnem = csprintf("%s %s",
|
||||
isRead ? "mrrc" : "mcrr", miscRegName[miscReg]);
|
||||
warn("\\tinstruction '%s' unimplemented\\n", full_mnem);
|
||||
|
||||
// Remove the warn flag and set the implemented flag. This
// prevents the instruction from warning a second time, and it also
// means the instruction is actually generated. Actually
// creating the instruction to access a register that isn't
// implemented sounds a bit silly, but it's required to get
// the correct behaviour for hyp traps and undef exceptions.
|
||||
miscRegInfo[miscReg][MISCREG_IMPLEMENTED] = true;
|
||||
miscRegInfo[miscReg][MISCREG_WARN_NOT_FAIL] = false;
|
||||
}
|
||||
|
||||
if (miscRegInfo[miscReg][MISCREG_IMPLEMENTED]) {
|
||||
uint32_t iss = mcrrMrrcIssBuild(isRead, crm, rt, rt2, opc1);
|
||||
|
||||
if (isRead)
|
||||
return new Mrrc15(machInst, (IntRegIndex) miscReg, rt2, rt, iss);
|
||||
return new Mcrr15(machInst, rt2, rt, (IntRegIndex) miscReg, iss);
|
||||
} else {
|
||||
return new FailUnimplemented(csprintf("%s %s",
|
||||
isRead ? "mrrc" : "mcrr", miscRegName[miscReg]).c_str(),
|
||||
machInst);
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
}};
|
||||
|
||||
def format Mcrr15() {{
|
||||
decode_block = '''
|
||||
return decodeMcrrMrrc15(machInst);
|
||||
'''
|
||||
}};
|
||||
|
||||
def format Mrrc15() {{
|
||||
decode_block = '''
|
||||
return decodeMcrrMrrc15(machInst);
|
||||
'''
|
||||
}};
|
||||
|
|
2626 src/arch/arm/isa/formats/neon64.isa (new file)
File diff suppressed because it is too large
|
@ -99,11 +99,11 @@ def format ArmUnconditional() {{
|
|||
case 0x1:
|
||||
return new Clrex(machInst);
|
||||
case 0x4:
|
||||
return new Dsb(machInst);
|
||||
return new Dsb(machInst, 0);
|
||||
case 0x5:
|
||||
return new Dmb(machInst);
|
||||
return new Dmb(machInst, 0);
|
||||
case 0x6:
|
||||
return new Isb(machInst);
|
||||
return new Isb(machInst, 0);
|
||||
}
|
||||
}
|
||||
} else if (bits(op2, 0) == 0) {
|
||||
|
@ -166,7 +166,7 @@ def format ArmUnconditional() {{
|
|||
const uint32_t val = ((machInst >> 20) & 0x5);
|
||||
if (val == 0x4) {
|
||||
const uint32_t mode = bits(machInst, 4, 0);
|
||||
if (badMode((OperatingMode)mode))
|
||||
if (badMode32((OperatingMode)mode))
|
||||
return new Unknown(machInst);
|
||||
switch (bits(machInst, 24, 21)) {
|
||||
case 0x2:
|
||||
|
@ -250,17 +250,10 @@ def format ArmUnconditional() {{
|
|||
"ldc, ldc2 (immediate)", machInst);
|
||||
}
|
||||
}
|
||||
if (op1 == 0xC5) {
|
||||
return new WarnUnimplemented(
|
||||
"mrrc, mrrc2", machInst);
|
||||
}
|
||||
} else {
|
||||
if (bits(op1, 4, 3) != 0 || bits(op1, 1) == 1) {
|
||||
return new WarnUnimplemented(
|
||||
"stc, stc2", machInst);
|
||||
} else if (op1 == 0xC4) {
|
||||
return new WarnUnimplemented(
|
||||
"mcrr, mcrrc", machInst);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010, 2012 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -85,6 +85,9 @@ output header {{
|
|||
private:
|
||||
/// Have we warned on this instruction yet?
|
||||
mutable bool warned;
|
||||
/// Full mnemonic for MRC and MCR instructions including the
|
||||
/// coproc. register name
|
||||
std::string fullMnemonic;
|
||||
|
||||
public:
|
||||
/// Constructor
|
||||
|
@ -96,6 +99,16 @@ output header {{
|
|||
flags[IsNonSpeculative] = true;
|
||||
}
|
||||
|
||||
WarnUnimplemented(const char *_mnemonic, ExtMachInst _machInst,
|
||||
const std::string& _fullMnemonic)
|
||||
: ArmStaticInst(_mnemonic, _machInst, No_OpClass), warned(false),
|
||||
fullMnemonic(_fullMnemonic)
|
||||
{
|
||||
// don't call execute() (which panics) if we're on a
|
||||
// speculative path
|
||||
flags[IsNonSpeculative] = true;
|
||||
}
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
|
||||
std::string
|
||||
|
@ -147,9 +160,6 @@ output exec {{
|
|||
FailUnimplemented::execute(%(CPU_exec_context)s *xc,
|
||||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
if (FullSystem)
|
||||
return new UndefinedInstruction;
|
||||
else
|
||||
return new UndefinedInstruction(machInst, false, mnemonic);
|
||||
}
|
||||
|
||||
|
@ -158,7 +168,8 @@ output exec {{
|
|||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
if (!warned) {
|
||||
warn("\tinstruction '%s' unimplemented\n", mnemonic);
|
||||
warn("\tinstruction '%s' unimplemented\n",
|
||||
fullMnemonic.size() ? fullMnemonic.c_str() : mnemonic);
|
||||
warned = true;
|
||||
}
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010, 2012 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -50,10 +50,16 @@ output header {{
|
|||
#include <sstream>
|
||||
|
||||
#include "arch/arm/insts/branch.hh"
|
||||
#include "arch/arm/insts/branch64.hh"
|
||||
#include "arch/arm/insts/data64.hh"
|
||||
#include "arch/arm/insts/fplib.hh"
|
||||
#include "arch/arm/insts/macromem.hh"
|
||||
#include "arch/arm/insts/mem.hh"
|
||||
#include "arch/arm/insts/mem64.hh"
|
||||
#include "arch/arm/insts/misc.hh"
|
||||
#include "arch/arm/insts/misc64.hh"
|
||||
#include "arch/arm/insts/mult.hh"
|
||||
#include "arch/arm/insts/neon64_mem.hh"
|
||||
#include "arch/arm/insts/pred_inst.hh"
|
||||
#include "arch/arm/insts/static_inst.hh"
|
||||
#include "arch/arm/insts/vfp.hh"
|
||||
|
@ -63,6 +69,7 @@ output header {{
|
|||
}};
|
||||
|
||||
output decoder {{
|
||||
#include <string>
|
||||
#include "arch/arm/decoder.hh"
|
||||
#include "arch/arm/faults.hh"
|
||||
#include "arch/arm/intregs.hh"
|
||||
|
|
58 src/arch/arm/isa/insts/aarch64.isa (new file)
|
@ -0,0 +1,58 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2011 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
// not be construed as granting a license to any other intellectual
|
||||
// property including but not limited to intellectual property relating
|
||||
// to a hardware implementation of the functionality of the software
|
||||
// licensed hereunder. You may use the software subject to the license
|
||||
// terms below provided that you ensure that this notice is replicated
|
||||
// unmodified and in its entirety in all distributions of the software,
|
||||
// modified or unmodified, in source code or in binary form.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met: redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer;
|
||||
// redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution;
|
||||
// neither the name of the copyright holders nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Authors: Gabe Black
|
||||
|
||||
let {{
    movzCode = 'Dest64 = ((uint64_t)imm1) << imm2;'
    movzIop = InstObjParams("movz", "Movz", "RegImmImmOp", movzCode, [])
    header_output += RegImmImmOpDeclare.subst(movzIop)
    decoder_output += RegImmImmOpConstructor.subst(movzIop)
    exec_output += BasicExecute.subst(movzIop)

    movkCode = 'Dest64 = insertBits(Dest64, imm2 + 15, imm2, imm1);'
    movkIop = InstObjParams("movk", "Movk", "RegImmImmOp", movkCode, [])
    header_output += RegImmImmOpDeclare.subst(movkIop)
    decoder_output += RegImmImmOpConstructor.subst(movkIop)
    exec_output += BasicExecute.subst(movkIop)

    movnCode = 'Dest64 = ~(((uint64_t)imm1) << imm2);'
    movnIop = InstObjParams("movn", "Movn", "RegImmImmOp", movnCode, [])
    header_output += RegImmImmOpDeclare.subst(movnIop)
    decoder_output += RegImmImmOpConstructor.subst(movnIop)
    exec_output += BasicExecute.subst(movnIop)
}};

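The three move-wide forms above differ only in how the 16-bit immediate (imm1) is combined at the given bit position (imm2, a multiple of 16). A small standalone C++ sketch of the same semantics, assuming insertBits(dest, hi, lo, val) means "replace bits hi:lo of dest with val" as it is used here:

#include <cassert>
#include <cstdint>

// Replace bits [hi:lo] of 'dest' with 'val' (what insertBits() is used for above).
static uint64_t insertBits(uint64_t dest, unsigned hi, unsigned lo, uint64_t val) {
    const uint64_t mask = (~0ULL >> (63 - hi)) & (~0ULL << lo);
    return (dest & ~mask) | ((val << lo) & mask);
}

static uint64_t movz(uint64_t imm16, unsigned shift) { return imm16 << shift; }
static uint64_t movn(uint64_t imm16, unsigned shift) { return ~(imm16 << shift); }
static uint64_t movk(uint64_t old, uint64_t imm16, unsigned shift) {
    return insertBits(old, shift + 15, shift, imm16);   // keep the other 48 bits
}

int main() {
    uint64_t x = movz(0x1234, 16);      // x == 0x0000000012340000
    x = movk(x, 0xbeef, 0);             // x == 0x000000001234beef
    assert(x == 0x1234beefULL);
    assert(movn(0, 0) == ~0ULL);        // MOVN of zero gives all ones
    return 0;
}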
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010-2012 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -48,7 +48,7 @@ let {{
|
|||
bCode = '''
|
||||
NPC = (uint32_t)(PC + imm);
|
||||
'''
|
||||
br_tgt_code = '''pcs.instNPC(branchPC.instPC() + imm);'''
|
||||
br_tgt_code = '''pcs.instNPC((uint32_t)(branchPC.instPC() + imm));'''
|
||||
instFlags = ["IsDirectControl"]
|
||||
if (link):
|
||||
bCode += '''
|
||||
|
@ -86,9 +86,9 @@ let {{
|
|||
Name += "Imm"
|
||||
# Since we're switching ISAs, the target ISA will be the opposite
|
||||
# of the current ISA. Thumb is whether the target is ARM.
|
||||
newPC = '(Thumb ? (roundDown(PC, 4) + imm) : (PC + imm))'
|
||||
newPC = '(uint32_t)(Thumb ? (roundDown(PC, 4) + imm) : (PC + imm))'
|
||||
br_tgt_code = '''
|
||||
pcs.instNPC((branchPC.thumb() ? (roundDown(branchPC.instPC(),4) + imm) :
|
||||
pcs.instNPC((uint32_t)(branchPC.thumb() ? (roundDown(branchPC.instPC(),4) + imm) :
|
||||
(branchPC.instPC() + imm)));
|
||||
'''
|
||||
base = "BranchImmCond"
|
||||
|
@ -150,7 +150,26 @@ let {{
|
|||
if imm:
|
||||
decoder_output += BranchTarget.subst(blxIop)
|
||||
|
||||
#Ignore BXJ for now
|
||||
bxjcode = '''
|
||||
HSTR hstr = Hstr;
|
||||
CPSR cpsr = Cpsr;
|
||||
SCR scr = Scr;
|
||||
|
||||
if (ArmSystem::haveVirtualization(xc->tcBase()) && hstr.tjdbx &&
|
||||
!inSecureState(scr, cpsr) && (cpsr.mode != MODE_HYP)) {
|
||||
fault = new HypervisorTrap(machInst, op1, EC_TRAPPED_BXJ);
|
||||
}
|
||||
IWNPC = Op1;
|
||||
'''
|
||||
|
||||
bxjIop = InstObjParams("bxj", "BxjReg", "BranchRegCond",
|
||||
{"code": bxjcode,
|
||||
"predicate_test": predicateTest,
|
||||
"is_ras_pop": "op1 == INTREG_LR" },
|
||||
["IsIndirectControl"])
|
||||
header_output += BranchRegCondDeclare.subst(bxjIop)
|
||||
decoder_output += BranchRegCondConstructor.subst(bxjIop)
|
||||
exec_output += PredOpExecute.subst(bxjIop)
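The BXJ implementation above behaves like BX, except that with virtualization enabled the HSTR.TJDBX bit lets the hypervisor trap the instruction when it executes in a non-secure mode below Hyp. A condensed C++ sketch of just that predicate, with simplified stand-in names for the state consulted above:

#include <cassert>

// Simplified stand-ins for the state consulted by the BXJ code above.
struct State {
    bool haveVirtualization; // EL2 implemented
    bool hstrTjdbx;          // HSTR.TJDBX: trap BXJ to Hyp
    bool secure;             // currently in Secure state
    bool inHypMode;          // currently executing in Hyp mode
};

// True when BXJ should trap to the hypervisor instead of branching.
static bool bxjTrapsToHyp(const State &s) {
    return s.haveVirtualization && s.hstrTjdbx && !s.secure && !s.inHypMode;
}

int main() {
    assert(bxjTrapsToHyp({true, true, false, false}));   // NS, below Hyp: trap
    assert(!bxjTrapsToHyp({true, true, false, true}));   // already in Hyp: no trap
    assert(!bxjTrapsToHyp({true, true, true, false}));   // Secure state: no trap
    assert(!bxjTrapsToHyp({false, true, false, false})); // no virtualization: no trap
    return 0;
}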
|
||||
|
||||
#CBNZ, CBZ. These are always unconditional as far as predicates
|
||||
for (mnem, test) in (("cbz", "=="), ("cbnz", "!=")):
|
||||
|
|
248 src/arch/arm/isa/insts/branch64.isa (new file)
|
@ -0,0 +1,248 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2011-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
// not be construed as granting a license to any other intellectual
|
||||
// property including but not limited to intellectual property relating
|
||||
// to a hardware implementation of the functionality of the software
|
||||
// licensed hereunder. You may use the software subject to the license
|
||||
// terms below provided that you ensure that this notice is replicated
|
||||
// unmodified and in its entirety in all distributions of the software,
|
||||
// modified or unmodified, in source code or in binary form.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met: redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer;
|
||||
// redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution;
|
||||
// neither the name of the copyright holders nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Authors: Gabe Black
|
||||
// Giacomo Gabrielli
|
||||
|
||||
let {{
|
||||
|
||||
header_output = ""
|
||||
decoder_output = ""
|
||||
exec_output = ""
|
||||
|
||||
# B, BL
|
||||
for (mnem, link) in (("b", False), ("bl", True)):
|
||||
bCode = ('NPC = purifyTaggedAddr(RawPC + imm, xc->tcBase(), '
|
||||
'currEL(xc->tcBase()));\n')
|
||||
instFlags = ['IsDirectControl', 'IsUncondControl']
|
||||
if (link):
|
||||
bCode += 'XLR = RawPC + 4;\n'
|
||||
instFlags += ['IsCall']
|
||||
|
||||
bIop = InstObjParams(mnem, mnem.capitalize() + "64",
|
||||
"BranchImm64", bCode, instFlags)
|
||||
header_output += BranchImm64Declare.subst(bIop)
|
||||
decoder_output += BranchImm64Constructor.subst(bIop)
|
||||
exec_output += BasicExecute.subst(bIop)
|
||||
|
||||
# BR, BLR
|
||||
for (mnem, link) in (("br", False), ("blr", True)):
|
||||
bCode = ('NPC = purifyTaggedAddr(XOp1, xc->tcBase(), '
|
||||
'currEL(xc->tcBase()));\n')
|
||||
instFlags = ['IsIndirectControl', 'IsUncondControl']
|
||||
if (link):
|
||||
bCode += 'XLR = RawPC + 4;\n'
|
||||
instFlags += ['IsCall']
|
||||
|
||||
bIop = InstObjParams(mnem, mnem.capitalize() + "64",
|
||||
"BranchReg64", bCode, instFlags)
|
||||
header_output += BranchReg64Declare.subst(bIop)
|
||||
decoder_output += BranchReg64Constructor.subst(bIop)
|
||||
exec_output += BasicExecute.subst(bIop)
|
||||
|
||||
# B conditional
|
||||
bCode = '''
|
||||
if (testPredicate(CondCodesNZ, CondCodesC, CondCodesV, condCode))
|
||||
NPC = purifyTaggedAddr(RawPC + imm, xc->tcBase(),
|
||||
currEL(xc->tcBase()));
|
||||
else
|
||||
NPC = NPC;
|
||||
'''
|
||||
bIop = InstObjParams("b", "BCond64", "BranchImmCond64", bCode,
|
||||
['IsCondControl', 'IsDirectControl'])
|
||||
header_output += BranchImmCond64Declare.subst(bIop)
|
||||
decoder_output += BranchImmCond64Constructor.subst(bIop)
|
||||
exec_output += BasicExecute.subst(bIop)
|
||||
|
||||
# RET
|
||||
bCode = ('NPC = purifyTaggedAddr(XOp1, xc->tcBase(), '
|
||||
'currEL(xc->tcBase()));\n')
|
||||
instFlags = ['IsIndirectControl', 'IsUncondControl', 'IsReturn']
|
||||
|
||||
bIop = InstObjParams('ret', 'Ret64', "BranchRet64", bCode, instFlags)
|
||||
header_output += BranchReg64Declare.subst(bIop)
|
||||
decoder_output += BranchReg64Constructor.subst(bIop)
|
||||
exec_output += BasicExecute.subst(bIop)
|
||||
|
||||
# ERET
|
||||
bCode = '''Addr newPc;
|
||||
CPSR cpsr = Cpsr;
|
||||
CPSR spsr = Spsr;
|
||||
|
||||
ExceptionLevel curr_el = opModeToEL((OperatingMode) (uint8_t) cpsr.mode);
|
||||
switch (curr_el) {
|
||||
case EL3:
|
||||
newPc = xc->tcBase()->readMiscReg(MISCREG_ELR_EL3);
|
||||
break;
|
||||
case EL2:
|
||||
newPc = xc->tcBase()->readMiscReg(MISCREG_ELR_EL2);
|
||||
break;
|
||||
case EL1:
|
||||
newPc = xc->tcBase()->readMiscReg(MISCREG_ELR_EL1);
|
||||
break;
|
||||
default:
|
||||
return new UndefinedInstruction(machInst, false, mnemonic);
|
||||
break;
|
||||
}
|
||||
if (spsr.width && (newPc & mask(2))) {
|
||||
// To avoid PC Alignment fault when returning to AArch32
|
||||
if (spsr.t)
|
||||
newPc = newPc & ~mask(1);
|
||||
else
|
||||
newPc = newPc & ~mask(2);
|
||||
}
|
||||
spsr.q = 0;
|
||||
spsr.it1 = 0;
|
||||
spsr.j = 0;
|
||||
spsr.res0_23_22 = 0;
|
||||
spsr.ge = 0;
|
||||
spsr.it2 = 0;
|
||||
spsr.t = 0;
|
||||
|
||||
OperatingMode mode = (OperatingMode) (uint8_t) spsr.mode;
|
||||
bool illegal = false;
|
||||
ExceptionLevel target_el;
|
||||
if (badMode(mode)) {
|
||||
illegal = true;
|
||||
} else {
|
||||
target_el = opModeToEL(mode);
|
||||
if (((target_el == EL2) &&
|
||||
!ArmSystem::haveVirtualization(xc->tcBase())) ||
|
||||
(target_el > curr_el) ||
|
||||
(spsr.width == 1)) {
|
||||
illegal = true;
|
||||
} else {
|
||||
bool known = true;
|
||||
bool from32 = (spsr.width == 1);
|
||||
bool to32 = false;
|
||||
if (false) { // TODO: !haveAArch32EL
|
||||
to32 = false;
|
||||
} else if (!ArmSystem::highestELIs64(xc->tcBase())) {
|
||||
to32 = true;
|
||||
} else {
|
||||
bool scr_rw, hcr_rw;
|
||||
if (ArmSystem::haveSecurity(xc->tcBase())) {
|
||||
SCR scr = xc->tcBase()->readMiscReg(MISCREG_SCR_EL3);
|
||||
scr_rw = scr.rw;
|
||||
} else {
|
||||
scr_rw = true;
|
||||
}
|
||||
|
||||
if (ArmSystem::haveVirtualization(xc->tcBase())) {
|
||||
HCR hcr = xc->tcBase()->readMiscReg(MISCREG_HCR_EL2);
|
||||
hcr_rw = hcr.rw;
|
||||
} else {
|
||||
hcr_rw = scr_rw;
|
||||
}
|
||||
|
||||
switch (target_el) {
|
||||
case EL3:
|
||||
to32 = false;
|
||||
break;
|
||||
case EL2:
|
||||
to32 = !scr_rw;
|
||||
break;
|
||||
case EL1:
|
||||
to32 = !scr_rw || !hcr_rw;
|
||||
break;
|
||||
case EL0:
|
||||
if (curr_el == EL0) {
|
||||
to32 = cpsr.width;
|
||||
} else if (!scr_rw || !hcr_rw) {
|
||||
// EL0 using AArch32 if EL1 using AArch32
|
||||
to32 = true;
|
||||
} else {
|
||||
known = false;
|
||||
to32 = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (known)
|
||||
illegal = (from32 != to32);
|
||||
}
|
||||
}
|
||||
|
||||
if (illegal) {
|
||||
uint8_t old_mode = cpsr.mode;
|
||||
spsr.mode = old_mode; // Preserve old mode when invalid
|
||||
spsr.il = 1;
|
||||
} else {
|
||||
if (cpsr.width != spsr.width)
|
||||
panic("AArch32/AArch64 interprocessing not supported yet");
|
||||
}
|
||||
Cpsr = spsr;
|
||||
|
||||
CondCodesNZ = spsr.nz;
|
||||
CondCodesC = spsr.c;
|
||||
CondCodesV = spsr.v;
|
||||
NPC = purifyTaggedAddr(newPc, xc->tcBase(),
|
||||
opModeToEL((OperatingMode) (uint8_t) spsr.mode));
|
||||
LLSCLock = 0; // Clear exclusive monitor
|
||||
SevMailbox = 1; //Set Event Register
|
||||
'''
|
||||
instFlags = ['IsSerializeAfter', 'IsNonSpeculative', 'IsSquashAfter']
|
||||
bIop = InstObjParams('eret', 'Eret64', "BranchEret64", bCode, instFlags)
|
||||
header_output += BasicDeclare.subst(bIop)
|
||||
decoder_output += BasicConstructor64.subst(bIop)
|
||||
exec_output += BasicExecute.subst(bIop)
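One small detail in the ERET body above is the return-address fix-up when returning to AArch32: a misaligned ELR is silently aligned to the instruction size of the target state (halfword for Thumb, word for ARM) rather than faulting. A standalone C++ sketch of just that masking, assuming mask(n) means the low n bits set, as in the code above:

#include <cassert>
#include <cstdint>

static constexpr uint64_t mask(unsigned n) { return (1ULL << n) - 1; }

// Align a return address for an exception return to AArch32
// (Thumb selects halfword alignment, otherwise word alignment).
static uint64_t fixupAArch32ReturnPC(uint64_t pc, bool thumb) {
    if (pc & mask(2))                  // only touch addresses that are misaligned
        pc &= thumb ? ~mask(1) : ~mask(2);
    return pc;
}

int main() {
    assert(fixupAArch32ReturnPC(0x1003, true)  == 0x1002); // Thumb: clear bit 0
    assert(fixupAArch32ReturnPC(0x1003, false) == 0x1000); // ARM: clear bits 1:0
    assert(fixupAArch32ReturnPC(0x1004, false) == 0x1004); // already aligned
    return 0;
}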
|
||||
|
||||
# CBNZ, CBZ
|
||||
for (mnem, test) in (("cbz", "=="), ("cbnz", "!=")):
|
||||
code = ('NPC = (Op164 %(test)s 0) ? '
|
||||
'purifyTaggedAddr(RawPC + imm, xc->tcBase(), '
|
||||
'currEL(xc->tcBase())) : NPC;\n')
|
||||
code = code % {"test": test}
|
||||
iop = InstObjParams(mnem, mnem.capitalize() + "64",
|
||||
"BranchImmReg64", code,
|
||||
['IsCondControl', 'IsDirectControl'])
|
||||
header_output += BranchImmReg64Declare.subst(iop)
|
||||
decoder_output += BranchImmReg64Constructor.subst(iop)
|
||||
exec_output += BasicExecute.subst(iop)
|
||||
|
||||
# TBNZ, TBZ
|
||||
for (mnem, test) in (("tbz", "=="), ("tbnz", "!=")):
|
||||
code = ('NPC = ((Op164 & imm1) %(test)s 0) ? '
|
||||
'purifyTaggedAddr(RawPC + imm2, xc->tcBase(), '
|
||||
'currEL(xc->tcBase())) : NPC;\n')
|
||||
code = code % {"test": test}
|
||||
iop = InstObjParams(mnem, mnem.capitalize() + "64",
|
||||
"BranchImmImmReg64", code,
|
||||
['IsCondControl', 'IsDirectControl'])
|
||||
header_output += BranchImmImmReg64Declare.subst(iop)
|
||||
decoder_output += BranchImmImmReg64Constructor.subst(iop)
|
||||
exec_output += BasicExecute.subst(iop)
|
||||
}};
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010, 2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -257,7 +257,8 @@ let {{
|
|||
CPSR old_cpsr = Cpsr;
|
||||
|
||||
CPSR new_cpsr =
|
||||
cpsrWriteByInstr(old_cpsr, Spsr, 0xF, true, sctlr.nmfi);
|
||||
cpsrWriteByInstr(old_cpsr, Spsr, Scr, Nsacr, 0xF, true,
|
||||
sctlr.nmfi, xc->tcBase());
|
||||
Cpsr = ~CondCodesMask & new_cpsr;
|
||||
CondCodesNZ = new_cpsr.nz;
|
||||
CondCodesC = new_cpsr.c;
|
||||
|
|
465 src/arch/arm/isa/insts/data64.isa (new file)
|
@ -0,0 +1,465 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2011-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
// not be construed as granting a license to any other intellectual
|
||||
// property including but not limited to intellectual property relating
|
||||
// to a hardware implementation of the functionality of the software
|
||||
// licensed hereunder. You may use the software subject to the license
|
||||
// terms below provided that you ensure that this notice is replicated
|
||||
// unmodified and in its entirety in all distributions of the software,
|
||||
// modified or unmodified, in source code or in binary form.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met: redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer;
|
||||
// redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution;
|
||||
// neither the name of the copyright holders nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Authors: Gabe Black
|
||||
|
||||
let {{
|
||||
|
||||
header_output = ""
|
||||
decoder_output = ""
|
||||
exec_output = ""
|
||||
|
||||
def createCcCode64(carry, overflow):
|
||||
code = ""
|
||||
code += '''
|
||||
uint16_t _iz, _in;
|
||||
_in = bits(resTemp, intWidth - 1);
|
||||
_iz = ((resTemp & mask(intWidth)) == 0);
|
||||
CondCodesNZ = (_in << 1) | _iz;
|
||||
DPRINTF(Arm, "(in, iz) = (%%d, %%d)\\n", _in, _iz);
|
||||
'''
|
||||
if overflow and overflow != "none":
|
||||
code += '''
|
||||
uint16_t _iv;
|
||||
_iv = %s & 1;
|
||||
CondCodesV = _iv;
|
||||
DPRINTF(Arm, "(iv) = (%%d)\\n", _iv);
|
||||
''' % overflow
|
||||
if carry and carry != "none":
|
||||
code += '''
|
||||
uint16_t _ic;
|
||||
_ic = %s & 1;
|
||||
CondCodesC = _ic;
|
||||
DPRINTF(Arm, "(ic) = (%%d)\\n", _ic);
|
||||
''' % carry
|
||||
return code
|
||||
|
||||
oldC = 'CondCodesC'
|
||||
oldV = 'CondCodesV'
|
||||
# Dicts of ways to set the carry flag.
|
||||
carryCode64 = {
|
||||
"none": "none",
|
||||
"add": 'findCarry(intWidth, resTemp, Op164, secOp)',
|
||||
"sub": 'findCarry(intWidth, resTemp, Op164, ~secOp)',
|
||||
"logic": '0'
|
||||
}
|
||||
# Dict of ways to set the overflow flag.
|
||||
overflowCode64 = {
|
||||
"none": "none",
|
||||
"add": 'findOverflow(intWidth, resTemp, Op164, secOp)',
|
||||
"sub": 'findOverflow(intWidth, resTemp, Op164, ~secOp)',
|
||||
"logic": '0'
|
||||
}
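The two dictionaries above only select which expression feeds the C and V flags; for "add" and "sub" they use the usual width-parameterised carry and signed-overflow rules, with subtraction treated as addition of the bitwise-complemented second operand. A self-contained C++ sketch of those rules for a given width (an illustration, not gem5's findCarry()/findOverflow() helpers themselves):

#include <cassert>
#include <cstdint>

// Carry out of the most significant bit of an addition, given only the
// result and operand bits (works for a - b when called with ~b).
static bool carryOut(unsigned width, uint64_t res, uint64_t a, uint64_t b) {
    uint64_t m = 1ULL << (width - 1);
    bool an = a & m, bn = b & m, rn = res & m;
    return (an && bn) || (an && !rn) || (bn && !rn);
}

// Signed overflow: same-sign operands producing a different-sign result.
static bool overflow(unsigned width, uint64_t res, uint64_t a, uint64_t b) {
    uint64_t m = 1ULL << (width - 1);
    bool an = a & m, bn = b & m, rn = res & m;
    return an == bn && rn != an;
}

int main() {
    // 32-bit examples: 0xffffffff + 1 carries out but does not overflow;
    // 0x7fffffff + 1 overflows but does not carry out.
    uint32_t a = 0xffffffffu, b = 1, r = a + b;
    assert(carryOut(32, r, a, b) && !overflow(32, r, a, b));
    a = 0x7fffffffu; r = a + b;
    assert(!carryOut(32, r, a, b) && overflow(32, r, a, b));
    // Subtraction 5 - 7 borrows, so with the ~b convention the carry is 0.
    uint32_t x = 5, y = 7, d = x - y;
    assert(!carryOut(32, d, x, (uint32_t)~y) && !overflow(32, d, x, (uint32_t)~y));
    return 0;
}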
|
||||
|
||||
immOp2 = "uint64_t secOp M5_VAR_USED = imm;"
|
||||
sRegOp2 = "uint64_t secOp M5_VAR_USED = " + \
|
||||
"shiftReg64(Op264, shiftAmt, shiftType, intWidth);"
|
||||
eRegOp2 = "uint64_t secOp M5_VAR_USED = " + \
|
||||
"extendReg64(Op264, extendType, shiftAmt, intWidth);"
|
||||
|
||||
def buildDataWork(mnem, code, flagType, suffix, buildCc, buildNonCc,
|
||||
base, templateBase):
|
||||
code = '''
|
||||
uint64_t resTemp M5_VAR_USED = 0;
|
||||
''' + code
|
||||
ccCode = createCcCode64(carryCode64[flagType], overflowCode64[flagType])
|
||||
Name = mnem.capitalize() + suffix
|
||||
iop = InstObjParams(mnem, Name, base, code)
|
||||
iopCc = InstObjParams(mnem + "s", Name + "Cc", base, code + ccCode)
|
||||
|
||||
def subst(iop):
|
||||
global header_output, decoder_output, exec_output
|
||||
header_output += eval(templateBase + "Declare").subst(iop)
|
||||
decoder_output += eval(templateBase + "Constructor").subst(iop)
|
||||
exec_output += BasicExecute.subst(iop)
|
||||
|
||||
if buildNonCc:
|
||||
subst(iop)
|
||||
if buildCc:
|
||||
subst(iopCc)
|
||||
|
||||
def buildXImmDataInst(mnem, code, flagType = "logic", \
|
||||
buildCc = True, buildNonCc = True, \
|
||||
suffix = "XImm"):
|
||||
buildDataWork(mnem, immOp2 + code, flagType, suffix,
|
||||
buildCc, buildNonCc, "DataXImmOp", "DataXImm")
|
||||
|
||||
def buildXSRegDataInst(mnem, code, flagType = "logic", \
|
||||
buildCc = True, buildNonCc = True, \
|
||||
suffix = "XSReg"):
|
||||
buildDataWork(mnem, sRegOp2 + code, flagType, suffix,
|
||||
buildCc, buildNonCc, "DataXSRegOp", "DataXSReg")
|
||||
|
||||
def buildXERegDataInst(mnem, code, flagType = "logic", \
|
||||
buildCc = True, buildNonCc = True, \
|
||||
suffix = "XEReg"):
|
||||
buildDataWork(mnem, eRegOp2 + code, flagType, suffix,
|
||||
buildCc, buildNonCc, "DataXERegOp", "DataXEReg")
|
||||
|
||||
def buildDataInst(mnem, code, flagType = "logic",
|
||||
buildCc = True, buildNonCc = True):
|
||||
buildXImmDataInst(mnem, code, flagType, buildCc, buildNonCc)
|
||||
buildXSRegDataInst(mnem, code, flagType, buildCc, buildNonCc)
|
||||
buildXERegDataInst(mnem, code, flagType, buildCc, buildNonCc)
|
||||
|
||||
buildXImmDataInst("adr", "Dest64 = RawPC + imm", buildCc = False);
|
||||
buildXImmDataInst("adrp", "Dest64 = (RawPC & ~mask(12)) + imm",
|
||||
buildCc = False);
|
||||
buildDataInst("and", "Dest64 = resTemp = Op164 & secOp;")
|
||||
buildDataInst("eor", "Dest64 = Op164 ^ secOp;", buildCc = False)
|
||||
buildXSRegDataInst("eon", "Dest64 = Op164 ^ ~secOp;", buildCc = False)
|
||||
buildDataInst("sub", "Dest64 = resTemp = Op164 - secOp;", "sub")
|
||||
buildDataInst("add", "Dest64 = resTemp = Op164 + secOp;", "add")
|
||||
buildXSRegDataInst("adc",
|
||||
"Dest64 = resTemp = Op164 + secOp + %s;" % oldC, "add")
|
||||
buildXSRegDataInst("sbc",
|
||||
"Dest64 = resTemp = Op164 - secOp - !%s;" % oldC, "sub")
|
||||
buildDataInst("orr", "Dest64 = Op164 | secOp;", buildCc = False)
|
||||
buildXSRegDataInst("orn", "Dest64 = Op164 | ~secOp;", buildCc = False)
|
||||
buildXSRegDataInst("bic", "Dest64 = resTemp = Op164 & ~secOp;")
|
||||
|
||||
def buildDataXImmInst(mnem, code, optArgs = []):
|
||||
global header_output, decoder_output, exec_output
|
||||
classNamePrefix = mnem[0].upper() + mnem[1:]
|
||||
templateBase = "DataXImm"
|
||||
iop = InstObjParams(mnem, classNamePrefix + "64",
|
||||
templateBase + "Op", code, optArgs)
|
||||
header_output += eval(templateBase + "Declare").subst(iop)
|
||||
decoder_output += eval(templateBase + "Constructor").subst(iop)
|
||||
exec_output += BasicExecute.subst(iop)
|
||||
|
||||
def buildDataXRegInst(mnem, regOps, code, optArgs = [],
|
||||
overrideOpClass=None):
|
||||
global header_output, decoder_output, exec_output
|
||||
templateBase = "DataX%dReg" % regOps
|
||||
classNamePrefix = mnem[0].upper() + mnem[1:]
|
||||
if overrideOpClass:
|
||||
iop = InstObjParams(mnem, classNamePrefix + "64",
|
||||
templateBase + "Op",
|
||||
{ 'code': code, 'op_class': overrideOpClass},
|
||||
optArgs)
|
||||
else:
|
||||
iop = InstObjParams(mnem, classNamePrefix + "64",
|
||||
templateBase + "Op", code, optArgs)
|
||||
header_output += eval(templateBase + "Declare").subst(iop)
|
||||
decoder_output += eval(templateBase + "Constructor").subst(iop)
|
||||
exec_output += BasicExecute.subst(iop)
|
||||
|
||||
buildDataXRegInst("madd", 3, "Dest64 = Op164 + Op264 * Op364",
|
||||
overrideOpClass="IntMultOp")
|
||||
buildDataXRegInst("msub", 3, "Dest64 = Op164 - Op264 * Op364",
|
||||
overrideOpClass="IntMultOp")
|
||||
buildDataXRegInst("smaddl", 3,
|
||||
"XDest = XOp1 + sext<32>(WOp2) * sext<32>(WOp3)",
|
||||
overrideOpClass="IntMultOp")
|
||||
buildDataXRegInst("smsubl", 3,
|
||||
"XDest = XOp1 - sext<32>(WOp2) * sext<32>(WOp3)",
|
||||
overrideOpClass="IntMultOp")
|
||||
buildDataXRegInst("smulh", 2, '''
|
||||
uint64_t op1H = (int32_t)(XOp1 >> 32);
|
||||
uint64_t op1L = (uint32_t)XOp1;
|
||||
uint64_t op2H = (int32_t)(XOp2 >> 32);
|
||||
uint64_t op2L = (uint32_t)XOp2;
|
||||
uint64_t mid1 = ((op1L * op2L) >> 32) + op1H * op2L;
|
||||
uint64_t mid2 = op1L * op2H;
|
||||
uint64_t result = ((uint64_t)(uint32_t)mid1 + (uint32_t)mid2) >> 32;
|
||||
result += shiftReg64(mid1, 32, ASR, intWidth);
|
||||
result += shiftReg64(mid2, 32, ASR, intWidth);
|
||||
XDest = result + op1H * op2H;
|
||||
''', overrideOpClass="IntMultOp")
|
||||
buildDataXRegInst("umaddl", 3, "XDest = XOp1 + WOp2 * WOp3",
|
||||
overrideOpClass="IntMultOp")
|
||||
buildDataXRegInst("umsubl", 3, "XDest = XOp1 - WOp2 * WOp3",
|
||||
overrideOpClass="IntMultOp")
|
||||
buildDataXRegInst("umulh", 2, '''
|
||||
uint64_t op1H = (uint32_t)(XOp1 >> 32);
|
||||
uint64_t op1L = (uint32_t)XOp1;
|
||||
uint64_t op2H = (uint32_t)(XOp2 >> 32);
|
||||
uint64_t op2L = (uint32_t)XOp2;
|
||||
uint64_t mid1 = ((op1L * op2L) >> 32) + op1H * op2L;
|
||||
uint64_t mid2 = op1L * op2H;
|
||||
uint64_t result = ((uint64_t)(uint32_t)mid1 + (uint32_t)mid2) >> 32;
|
||||
result += mid1 >> 32;
|
||||
result += mid2 >> 32;
|
||||
XDest = result + op1H * op2H;
|
||||
''', overrideOpClass="IntMultOp")
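The umulh body above builds the high 64 bits of a 64x64-bit product out of 32-bit partial products, because the ISA code only has 64-bit integer arithmetic to work with. A quick standalone C++ check of that decomposition against a 128-bit multiply (unsigned __int128 is a GCC/Clang extension, used here only for verification):

#include <cassert>
#include <cstdint>

// High 64 bits of a*b, computed with 32-bit partial products only
// (same decomposition as the umulh code above).
static uint64_t umulh(uint64_t a, uint64_t b) {
    uint64_t aH = a >> 32, aL = (uint32_t)a;
    uint64_t bH = b >> 32, bL = (uint32_t)b;
    uint64_t mid1 = ((aL * bL) >> 32) + aH * bL;
    uint64_t mid2 = aL * bH;
    uint64_t result = ((uint64_t)(uint32_t)mid1 + (uint32_t)mid2) >> 32;
    result += mid1 >> 32;
    result += mid2 >> 32;
    return result + aH * bH;
}

int main() {
    uint64_t a = 0xdeadbeefcafebabeULL, b = 0x0123456789abcdefULL;
    unsigned __int128 p = (unsigned __int128)a * b;
    assert(umulh(a, b) == (uint64_t)(p >> 64));
    return 0;
}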
|
||||
|
||||
buildDataXRegInst("asrv", 2,
|
||||
"Dest64 = shiftReg64(Op164, Op264, ASR, intWidth)")
|
||||
buildDataXRegInst("lslv", 2,
|
||||
"Dest64 = shiftReg64(Op164, Op264, LSL, intWidth)")
|
||||
buildDataXRegInst("lsrv", 2,
|
||||
"Dest64 = shiftReg64(Op164, Op264, LSR, intWidth)")
|
||||
buildDataXRegInst("rorv", 2,
|
||||
"Dest64 = shiftReg64(Op164, Op264, ROR, intWidth)")
|
||||
buildDataXRegInst("sdiv", 2, '''
|
||||
int64_t op1 = Op164;
|
||||
int64_t op2 = Op264;
|
||||
if (intWidth == 32) {
|
||||
op1 = sext<32>(op1);
|
||||
op2 = sext<32>(op2);
|
||||
}
|
||||
Dest64 = op2 == -1 ? -op1 : op2 ? op1 / op2 : 0;
|
||||
''', overrideOpClass="IntDivOp")
|
||||
buildDataXRegInst("udiv", 2, "Dest64 = Op264 ? Op164 / Op264 : 0",
|
||||
overrideOpClass="IntDivOp")
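The sdiv body above special-cases a divisor of -1 and a divisor of 0 before falling back to C++ division; this matters because the AArch64 instructions define INT64_MIN / -1 to wrap and x / 0 to yield 0, exactly the inputs for which the host's '/' operator would trap or invoke undefined behaviour. A standalone C++ sketch of the same guards:

#include <cassert>
#include <cstdint>

// AArch64-style 64-bit divides: divide-by-zero yields 0, and INT64_MIN / -1
// wraps instead of trapping (mirrors the guards in the code above).
static int64_t sdiv64(int64_t a, int64_t b) {
    if (b == 0)
        return 0;
    if (b == -1)
        return -(uint64_t)a;   // negate in unsigned arithmetic: two's-complement wrap
    return a / b;
}

static uint64_t udiv64(uint64_t a, uint64_t b) {
    return b ? a / b : 0;
}

int main() {
    assert(sdiv64(INT64_MIN, -1) == INT64_MIN);  // wraps, no SIGFPE
    assert(sdiv64(7, 0) == 0);
    assert(udiv64(7, 0) == 0);
    assert(sdiv64(-7, 2) == -3);                 // truncates toward zero
    return 0;
}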
|
||||
|
||||
buildDataXRegInst("cls", 1, '''
|
||||
uint64_t op1 = Op164;
|
||||
if (bits(op1, intWidth - 1))
|
||||
op1 ^= mask(intWidth);
|
||||
Dest64 = (op1 == 0) ? intWidth - 1 : (intWidth - 2 - findMsbSet(op1));
|
||||
''')
|
||||
buildDataXRegInst("clz", 1, '''
|
||||
Dest64 = (Op164 == 0) ? intWidth : (intWidth - 1 - findMsbSet(Op164));
|
||||
''')
|
||||
buildDataXRegInst("rbit", 1, '''
|
||||
uint64_t result = Op164;
|
||||
uint64_t lBit = 1ULL << (intWidth - 1);
|
||||
uint64_t rBit = 1ULL;
|
||||
while (lBit > rBit) {
|
||||
uint64_t maskBits = lBit | rBit;
|
||||
uint64_t testBits = result & maskBits;
|
||||
// If these bits are different, swap them by toggling them.
|
||||
if (testBits && testBits != maskBits)
|
||||
result ^= maskBits;
|
||||
lBit >>= 1; rBit <<= 1;
|
||||
}
|
||||
Dest64 = result;
|
||||
''')
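The rbit code string above reverses the bit order by walking a left and a right cursor toward each other and toggling any pair of bits that differ. A small Python sketch of the same loop, checked against plain string reversal; rbit_ref is an illustrative name and assumes the value already fits in width bits.
import random

def rbit_ref(value, width):
    result = value
    l_bit, r_bit = 1 << (width - 1), 1
    while l_bit > r_bit:
        mask_bits = l_bit | r_bit
        test_bits = result & mask_bits
        if test_bits and test_bits != mask_bits:
            # The two bits differ, so toggling both swaps them.
            result ^= mask_bits
        l_bit >>= 1
        r_bit <<= 1
    return result

for _ in range(1000):
    v = random.getrandbits(32)
    assert rbit_ref(v, 32) == int(format(v, "032b")[::-1], 2)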
|
||||
buildDataXRegInst("rev", 1, '''
|
||||
if (intWidth == 32)
|
||||
Dest64 = betole<uint32_t>(Op164);
|
||||
else
|
||||
Dest64 = betole<uint64_t>(Op164);
|
||||
''')
|
||||
buildDataXRegInst("rev16", 1, '''
|
||||
int count = intWidth / 16;
|
||||
uint64_t result = 0;
|
||||
for (unsigned i = 0; i < count; i++) {
|
||||
uint16_t hw = Op164 >> (i * 16);
|
||||
result |= (uint64_t)betole<uint16_t>(hw) << (i * 16);
|
||||
}
|
||||
Dest64 = result;
|
||||
''')
|
||||
buildDataXRegInst("rev32", 1, '''
|
||||
int count = intWidth / 32;
|
||||
uint64_t result = 0;
|
||||
for (unsigned i = 0; i < count; i++) {
|
||||
uint32_t hw = Op164 >> (i * 32);
|
||||
result |= (uint64_t)betole<uint32_t>(hw) << (i * 32);
|
||||
}
|
||||
Dest64 = result;
|
||||
''')
|
||||
|
||||
msrMrs64EnabledCheckCode = '''
|
||||
// Check for read/write access right
|
||||
if (!can%sAArch64SysReg(flat_idx, Scr64, cpsr, xc->tcBase())) {
|
||||
if (flat_idx == MISCREG_DAIF ||
|
||||
flat_idx == MISCREG_DC_ZVA_Xt ||
|
||||
flat_idx == MISCREG_DC_CVAC_Xt ||
|
||||
flat_idx == MISCREG_DC_CIVAC_Xt
|
||||
)
|
||||
return new UndefinedInstruction(machInst, 0, EC_TRAPPED_MSR_MRS_64);
|
||||
return new UndefinedInstruction(machInst, false, mnemonic);
|
||||
}
|
||||
|
||||
// Check for traps to supervisor (FP/SIMD regs)
|
||||
if (el <= EL1 && msrMrs64TrapToSup(flat_idx, el, Cpacr64))
|
||||
return new SupervisorTrap(machInst, 0x1E00000, EC_TRAPPED_SIMD_FP);
|
||||
|
||||
bool is_vfp_neon = false;
|
||||
|
||||
// Check for traps to hypervisor
|
||||
if ((ArmSystem::haveVirtualization(xc->tcBase()) && el <= EL2) &&
|
||||
msrMrs64TrapToHyp(flat_idx, %s, CptrEl264, Hcr64, &is_vfp_neon)) {
|
||||
return new HypervisorTrap(machInst, is_vfp_neon ? 0x1E00000 : imm,
|
||||
is_vfp_neon ? EC_TRAPPED_SIMD_FP : EC_TRAPPED_MSR_MRS_64);
|
||||
}
|
||||
|
||||
// Check for traps to secure monitor
|
||||
if ((ArmSystem::haveSecurity(xc->tcBase()) && el <= EL3) &&
|
||||
msrMrs64TrapToMon(flat_idx, CptrEl364, el, &is_vfp_neon)) {
|
||||
return new SecureMonitorTrap(machInst,
|
||||
is_vfp_neon ? 0x1E00000 : imm,
|
||||
is_vfp_neon ? EC_TRAPPED_SIMD_FP : EC_TRAPPED_MSR_MRS_64);
|
||||
}
|
||||
'''
|
||||
|
||||
buildDataXImmInst("mrs", '''
|
||||
MiscRegIndex flat_idx = (MiscRegIndex) xc->tcBase()->
|
||||
flattenMiscIndex(op1);
|
||||
CPSR cpsr = Cpsr;
|
||||
ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el;
|
||||
%s
|
||||
XDest = MiscOp1_ud;
|
||||
''' % (msrMrs64EnabledCheckCode % ('Read', 'true'),),
|
||||
["IsSerializeBefore"])
|
||||
|
||||
buildDataXRegInst("mrsNZCV", 1, '''
|
||||
CPSR cpsr = 0;
|
||||
cpsr.nz = CondCodesNZ;
|
||||
cpsr.c = CondCodesC;
|
||||
cpsr.v = CondCodesV;
|
||||
XDest = cpsr;
|
||||
''')
|
||||
|
||||
buildDataXImmInst("msr", '''
|
||||
MiscRegIndex flat_idx = (MiscRegIndex) xc->tcBase()->
|
||||
flattenMiscIndex(dest);
|
||||
CPSR cpsr = Cpsr;
|
||||
ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el;
|
||||
%s
|
||||
MiscDest_ud = XOp1;
|
||||
''' % (msrMrs64EnabledCheckCode % ('Write', 'false'),),
|
||||
["IsSerializeAfter", "IsNonSpeculative"])
|
||||
|
||||
buildDataXRegInst("msrNZCV", 1, '''
|
||||
CPSR cpsr = XOp1;
|
||||
CondCodesNZ = cpsr.nz;
|
||||
CondCodesC = cpsr.c;
|
||||
CondCodesV = cpsr.v;
|
||||
''')
|
||||
|
||||
msrdczva_ea_code = '''
|
||||
MiscRegIndex flat_idx = (MiscRegIndex) xc->tcBase()->flattenMiscIndex(dest);
|
||||
CPSR cpsr = Cpsr;
|
||||
ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el;
|
||||
'''
|
||||
|
||||
msrdczva_ea_code += msrMrs64EnabledCheckCode % ('Write', 'false')
|
||||
msrdczva_ea_code += '''
|
||||
Request::Flags memAccessFlags = Request::CACHE_BLOCK_ZERO|ArmISA::TLB::MustBeOne;
|
||||
EA = XBase;
|
||||
assert(!(Dczid & 0x10));
|
||||
uint64_t op_size = power(2, Dczid + 2);
|
||||
EA &= ~(op_size - 1);
|
||||
|
||||
'''
|
||||
|
||||
msrDCZVAIop = InstObjParams("dczva", "Dczva", "SysDC64",
|
||||
{ "ea_code" : msrdczva_ea_code,
|
||||
"memacc_code" : ";", "use_uops" : 0,
|
||||
"op_wb" : ";", "fa_code" : ";"}, ['IsStore', 'IsMemRef']);
|
||||
header_output += DCStore64Declare.subst(msrDCZVAIop);
|
||||
decoder_output += DCStore64Constructor.subst(msrDCZVAIop);
|
||||
exec_output += DCStore64Execute.subst(msrDCZVAIop);
|
||||
exec_output += DCStore64InitiateAcc.subst(msrDCZVAIop);
|
||||
exec_output += Store64CompleteAcc.subst(msrDCZVAIop);
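The dczva effective-address code above derives the size of the block to zero from DCZID (log2 of the block size in words, hence 2^(Dczid + 2) bytes) and aligns the base register down to that block. A toy illustration of just that address arithmetic; dc_zva_region is an illustrative name, not part of the patch.
def dc_zva_region(base, dczid_bs):
    size = 1 << (dczid_bs + 2)    # bytes; mirrors power(2, Dczid + 2)
    start = base & ~(size - 1)    # mirrors EA &= ~(op_size - 1)
    return start, size

# With DCZID.BS = 4 (64-byte blocks), an unaligned pointer is rounded
# down to the start of the containing 64-byte block.
assert dc_zva_region(0x1234, 4) == (0x1200, 64)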
|
||||
|
||||
|
||||
|
||||
buildDataXImmInst("msrSP", '''
|
||||
if (!canWriteAArch64SysReg(
|
||||
(MiscRegIndex) xc->tcBase()->flattenMiscIndex(dest),
|
||||
Scr64, Cpsr, xc->tcBase())) {
|
||||
return new UndefinedInstruction(machInst, false, mnemonic);
|
||||
}
|
||||
MiscDest_ud = imm;
|
||||
''', optArgs = ["IsSerializeAfter", "IsNonSpeculative"])
|
||||
|
||||
buildDataXImmInst("msrDAIFSet", '''
|
||||
if (!canWriteAArch64SysReg(
|
||||
(MiscRegIndex) xc->tcBase()->flattenMiscIndex(dest),
|
||||
Scr64, Cpsr, xc->tcBase())) {
|
||||
return new UndefinedInstruction(machInst, 0, EC_TRAPPED_MSR_MRS_64);
|
||||
}
|
||||
CPSR cpsr = Cpsr;
|
||||
cpsr.daif = cpsr.daif | imm;
|
||||
Cpsr = cpsr;
|
||||
''', optArgs = ["IsSerializeAfter", "IsNonSpeculative"])
|
||||
|
||||
buildDataXImmInst("msrDAIFClr", '''
|
||||
if (!canWriteAArch64SysReg(
|
||||
(MiscRegIndex) xc->tcBase()->flattenMiscIndex(dest),
|
||||
Scr64, Cpsr, xc->tcBase())) {
|
||||
return new UndefinedInstruction(machInst, 0, EC_TRAPPED_MSR_MRS_64);
|
||||
}
|
||||
CPSR cpsr = Cpsr;
|
||||
cpsr.daif = cpsr.daif & ~imm;
|
||||
Cpsr = cpsr;
|
||||
''', optArgs = ["IsSerializeAfter", "IsNonSpeculative"])
|
||||
|
||||
def buildDataXCompInst(mnem, instType, suffix, code):
|
||||
global header_output, decoder_output, exec_output
|
||||
templateBase = "DataXCond%s" % instType
|
||||
iop = InstObjParams(mnem, mnem.capitalize() + suffix + "64",
|
||||
templateBase + "Op", code)
|
||||
header_output += eval(templateBase + "Declare").subst(iop)
|
||||
decoder_output += eval(templateBase + "Constructor").subst(iop)
|
||||
exec_output += BasicExecute.subst(iop)
|
||||
|
||||
def buildDataXCondImmInst(mnem, code):
|
||||
buildDataXCompInst(mnem, "CompImm", "Imm", code)
|
||||
def buildDataXCondRegInst(mnem, code):
|
||||
buildDataXCompInst(mnem, "CompReg", "Reg", code)
|
||||
def buildDataXCondSelInst(mnem, code):
|
||||
buildDataXCompInst(mnem, "Sel", "", code)
|
||||
|
||||
def condCompCode(flagType, op, imm):
|
||||
ccCode = createCcCode64(carryCode64[flagType], overflowCode64[flagType])
|
||||
opDecl = "uint64_t secOp M5_VAR_USED = imm;"
|
||||
if not imm:
|
||||
opDecl = "uint64_t secOp M5_VAR_USED = Op264;"
|
||||
return opDecl + '''
|
||||
if (testPredicate(CondCodesNZ, CondCodesC, CondCodesV, condCode)) {
|
||||
uint64_t resTemp = Op164 ''' + op + ''' secOp;
|
||||
''' + ccCode + '''
|
||||
} else {
|
||||
CondCodesNZ = (defCc >> 2) & 0x3;
|
||||
CondCodesC = (defCc >> 1) & 0x1;
|
||||
CondCodesV = defCc & 0x1;
|
||||
}
|
||||
'''
|
||||
|
||||
buildDataXCondImmInst("ccmn", condCompCode("add", "+", True))
|
||||
buildDataXCondImmInst("ccmp", condCompCode("sub", "-", True))
|
||||
buildDataXCondRegInst("ccmn", condCompCode("add", "+", False))
|
||||
buildDataXCondRegInst("ccmp", condCompCode("sub", "-", False))
|
||||
|
||||
condSelCode = '''
|
||||
if (testPredicate(CondCodesNZ, CondCodesC, CondCodesV, condCode)) {
|
||||
Dest64 = Op164;
|
||||
} else {
|
||||
Dest64 = %(altVal)s;
|
||||
}
|
||||
'''
|
||||
buildDataXCondSelInst("csel", condSelCode % {"altVal" : "Op264"})
|
||||
buildDataXCondSelInst("csinc", condSelCode % {"altVal" : "Op264 + 1"})
|
||||
buildDataXCondSelInst("csinv", condSelCode % {"altVal" : "~Op264"})
|
||||
buildDataXCondSelInst("csneg", condSelCode % {"altVal" : "-Op264"})
|
||||
}};
|
|
@@ -40,12 +40,6 @@
let {{
|
||||
sdivCode = '''
|
||||
if (Op2_sw == 0) {
|
||||
if (((SCTLR)Sctlr).dz) {
|
||||
if (FullSystem)
|
||||
return new UndefinedInstruction;
|
||||
else
|
||||
return new UndefinedInstruction(false, mnemonic);
|
||||
}
|
||||
Dest_sw = 0;
|
||||
} else if (Op1_sw == INT_MIN && Op2_sw == -1) {
|
||||
Dest_sw = INT_MIN;
|
||||
|
@@ -63,12 +57,6 @@ let {{
|
||||
udivCode = '''
|
||||
if (Op2_uw == 0) {
|
||||
if (((SCTLR)Sctlr).dz) {
|
||||
if (FullSystem)
|
||||
return new UndefinedInstruction;
|
||||
else
|
||||
return new UndefinedInstruction(false, mnemonic);
|
||||
}
|
||||
Dest_uw = 0;
|
||||
} else {
|
||||
Dest_uw = Op1_uw / Op2_uw;
|
||||
|
|
|
@@ -1,6 +1,6 @@
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@@ -191,14 +191,17 @@ let {{
decoder_output = ""
|
||||
exec_output = ""
|
||||
|
||||
vmsrIop = InstObjParams("vmsr", "Vmsr", "FpRegRegOp",
|
||||
{ "code": vmsrEnabledCheckCode + \
|
||||
"MiscDest = Op1;",
|
||||
vmsrCode = vmsrEnabledCheckCode + '''
|
||||
MiscDest = Op1;
|
||||
'''
|
||||
|
||||
vmsrIop = InstObjParams("vmsr", "Vmsr", "FpRegRegImmOp",
|
||||
{ "code": vmsrCode,
|
||||
"predicate_test": predicateTest,
|
||||
"op_class": "SimdFloatMiscOp" },
|
||||
["IsSerializeAfter","IsNonSpeculative"])
|
||||
header_output += FpRegRegOpDeclare.subst(vmsrIop);
|
||||
decoder_output += FpRegRegOpConstructor.subst(vmsrIop);
|
||||
header_output += FpRegRegImmOpDeclare.subst(vmsrIop);
|
||||
decoder_output += FpRegRegImmOpConstructor.subst(vmsrIop);
|
||||
exec_output += PredOpExecute.subst(vmsrIop);
|
||||
|
||||
vmsrFpscrCode = vmsrEnabledCheckCode + '''
|
||||
|
@@ -215,14 +218,36 @@ let {{
decoder_output += FpRegRegOpConstructor.subst(vmsrFpscrIop);
|
||||
exec_output += PredOpExecute.subst(vmsrFpscrIop);
|
||||
|
||||
vmrsIop = InstObjParams("vmrs", "Vmrs", "FpRegRegOp",
|
||||
{ "code": vmrsEnabledCheckCode + \
|
||||
"Dest = MiscOp1;",
|
||||
vmrsCode = vmrsEnabledCheckCode + '''
|
||||
CPSR cpsr = Cpsr;
|
||||
SCR scr = Scr;
|
||||
if (!inSecureState(scr, cpsr) && (cpsr.mode != MODE_HYP)) {
|
||||
HCR hcr = Hcr;
|
||||
bool hypTrap = false;
|
||||
switch(xc->tcBase()->flattenMiscIndex(op1)) {
|
||||
case MISCREG_FPSID:
|
||||
hypTrap = hcr.tid0;
|
||||
break;
|
||||
case MISCREG_MVFR0:
|
||||
case MISCREG_MVFR1:
|
||||
hypTrap = hcr.tid3;
|
||||
break;
|
||||
}
|
||||
if (hypTrap) {
|
||||
return new HypervisorTrap(machInst, imm,
|
||||
EC_TRAPPED_CP10_MRC_VMRS);
|
||||
}
|
||||
}
|
||||
Dest = MiscOp1;
|
||||
'''
|
||||
|
||||
vmrsIop = InstObjParams("vmrs", "Vmrs", "FpRegRegImmOp",
|
||||
{ "code": vmrsCode,
|
||||
"predicate_test": predicateTest,
|
||||
"op_class": "SimdFloatMiscOp" },
|
||||
["IsSerializeBefore"])
|
||||
header_output += FpRegRegOpDeclare.subst(vmrsIop);
|
||||
decoder_output += FpRegRegOpConstructor.subst(vmrsIop);
|
||||
header_output += FpRegRegImmOpDeclare.subst(vmrsIop);
|
||||
decoder_output += FpRegRegImmOpConstructor.subst(vmrsIop);
|
||||
exec_output += PredOpExecute.subst(vmrsIop);
|
||||
|
||||
vmrsFpscrIop = InstObjParams("vmrs", "VmrsFpscr", "FpRegRegOp",
|
||||
|
@@ -323,7 +348,7 @@ let {{
decoder_output += FpRegRegOpConstructor.subst(vmovRegQIop);
|
||||
exec_output += PredOpExecute.subst(vmovRegQIop);
|
||||
|
||||
vmovCoreRegBCode = vfpEnabledCheckCode + '''
|
||||
vmovCoreRegBCode = simdEnabledCheckCode + '''
|
||||
FpDest_uw = insertBits(FpDest_uw, imm * 8 + 7, imm * 8, Op1_ub);
|
||||
'''
|
||||
vmovCoreRegBIop = InstObjParams("vmov", "VmovCoreRegB", "FpRegRegImmOp",
|
||||
|
@@ -334,7 +359,7 @@ let {{
decoder_output += FpRegRegImmOpConstructor.subst(vmovCoreRegBIop);
|
||||
exec_output += PredOpExecute.subst(vmovCoreRegBIop);
|
||||
|
||||
vmovCoreRegHCode = vfpEnabledCheckCode + '''
|
||||
vmovCoreRegHCode = simdEnabledCheckCode + '''
|
||||
FpDest_uw = insertBits(FpDest_uw, imm * 16 + 15, imm * 16, Op1_uh);
|
||||
'''
|
||||
vmovCoreRegHIop = InstObjParams("vmov", "VmovCoreRegH", "FpRegRegImmOp",
|
||||
|
@@ -453,6 +478,17 @@ let {{
singleCode = singleSimpleCode + '''
|
||||
FpscrExc = fpscr;
|
||||
'''
|
||||
singleTernOp = vfpEnabledCheckCode + '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
float cOp1 = FpOp1;
|
||||
float cOp2 = FpOp2;
|
||||
float cOp3 = FpDestP0;
|
||||
FpDestP0 = ternaryOp(fpscr, %(palam)s, %(op)s,
|
||||
fpscr.fz, fpscr.dn, fpscr.rMode);
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpscrExc = fpscr;
|
||||
'''
|
||||
singleBinOp = "binaryOp(fpscr, FpOp1, FpOp2," + \
|
||||
"%(func)s, fpscr.fz, fpscr.dn, fpscr.rMode)"
|
||||
singleUnaryOp = "unaryOp(fpscr, FpOp1, %(func)s, fpscr.fz, fpscr.rMode)"
|
||||
|
@@ -463,6 +499,19 @@ let {{
FpDestP1_uw = dblHi(dest);
|
||||
FpscrExc = fpscr;
|
||||
'''
|
||||
doubleTernOp = vfpEnabledCheckCode + '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
double cOp1 = dbl(FpOp1P0_uw, FpOp1P1_uw);
|
||||
double cOp2 = dbl(FpOp2P0_uw, FpOp2P1_uw);
|
||||
double cOp3 = dbl(FpDestP0_uw, FpDestP1_uw);
|
||||
double cDest = ternaryOp(fpscr, %(palam)s, %(op)s,
|
||||
fpscr.fz, fpscr.dn, fpscr.rMode);
|
||||
FpDestP0_uw = dblLow(cDest);
|
||||
FpDestP1_uw = dblHi(cDest);
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpscrExc = fpscr;
|
||||
'''
|
||||
doubleBinOp = '''
|
||||
binaryOp(fpscr, dbl(FpOp1P0_uw, FpOp1P1_uw),
|
||||
dbl(FpOp2P0_uw, FpOp2P1_uw),
|
||||
|
@@ -473,6 +522,37 @@ let {{
fpscr.fz, fpscr.rMode)
|
||||
'''
|
||||
|
||||
def buildTernaryFpOp(Name, base, opClass, singleOp, doubleOp, paramStr):
|
||||
global header_output, decoder_output, exec_output
|
||||
|
||||
code = singleTernOp % { "op": singleOp, "palam": paramStr }
|
||||
sIop = InstObjParams(Name.lower() + "s", Name + "S", base,
|
||||
{ "code": code,
|
||||
"predicate_test": predicateTest,
|
||||
"op_class": opClass }, [])
|
||||
code = doubleTernOp % { "op": doubleOp, "palam": paramStr }
|
||||
dIop = InstObjParams(Name.lower() + "d", Name + "D", base,
|
||||
{ "code": code,
|
||||
"predicate_test": predicateTest,
|
||||
"op_class": opClass }, [])
|
||||
|
||||
declareTempl = eval(base + "Declare");
|
||||
constructorTempl = eval(base + "Constructor");
|
||||
|
||||
for iop in sIop, dIop:
|
||||
header_output += declareTempl.subst(iop)
|
||||
decoder_output += constructorTempl.subst(iop)
|
||||
exec_output += PredOpExecute.subst(iop)
|
||||
|
||||
buildTernaryFpOp("Vfma", "FpRegRegRegOp", "SimdFloatMultAccOp",
|
||||
"fpMulAdd<float>", "fpMulAdd<double>", " cOp1, cOp2, cOp3" )
|
||||
buildTernaryFpOp("Vfms", "FpRegRegRegOp", "SimdFloatMultAccOp",
|
||||
"fpMulAdd<float>", "fpMulAdd<double>", "-cOp1, cOp2, cOp3" )
|
||||
buildTernaryFpOp("Vfnma", "FpRegRegRegOp", "SimdFloatMultAccOp",
|
||||
"fpMulAdd<float>", "fpMulAdd<double>", "-cOp1, cOp2, -cOp3" )
|
||||
buildTernaryFpOp("Vfnms", "FpRegRegRegOp", "SimdFloatMultAccOp",
|
||||
"fpMulAdd<float>", "fpMulAdd<double>", " cOp1, cOp2, -cOp3" )
|
||||
|
||||
def buildBinFpOp(name, Name, base, opClass, singleOp, doubleOp):
|
||||
global header_output, decoder_output, exec_output
|
||||
|
||||
|
@@ -830,7 +910,7 @@ let {{
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
vfpFlushToZero(fpscr, FpOp1);
|
||||
__asm__ __volatile__("" : "=m" (FpOp1) : "m" (FpOp1));
|
||||
FpDest_uw = vfpFpSToFixed(FpOp1, false, false, 0, false);
|
||||
FpDest_uw = vfpFpToFixed<float>(FpOp1, false, 32, 0, false);
|
||||
__asm__ __volatile__("" :: "m" (FpDest_uw));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpscrExc = fpscr;
|
||||
|
@@ -849,7 +929,7 @@ let {{
vfpFlushToZero(fpscr, cOp1);
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (cOp1) : "m" (cOp1));
|
||||
uint64_t result = vfpFpDToFixed(cOp1, false, false, 0, false);
|
||||
uint64_t result = vfpFpToFixed<double>(cOp1, false, 32, 0, false);
|
||||
__asm__ __volatile__("" :: "m" (result));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpDestP0_uw = result;
|
||||
|
@@ -868,7 +948,7 @@ let {{
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
vfpFlushToZero(fpscr, FpOp1);
|
||||
__asm__ __volatile__("" : "=m" (FpOp1) : "m" (FpOp1));
|
||||
FpDest_sw = vfpFpSToFixed(FpOp1, true, false, 0, false);
|
||||
FpDest_sw = vfpFpToFixed<float>(FpOp1, true, 32, 0, false);
|
||||
__asm__ __volatile__("" :: "m" (FpDest_sw));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpscrExc = fpscr;
|
||||
|
@@ -887,7 +967,7 @@ let {{
vfpFlushToZero(fpscr, cOp1);
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (cOp1) : "m" (cOp1));
|
||||
int64_t result = vfpFpDToFixed(cOp1, true, false, 0, false);
|
||||
int64_t result = vfpFpToFixed<double>(cOp1, true, 32, 0, false);
|
||||
__asm__ __volatile__("" :: "m" (result));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpDestP0_uw = result;
|
||||
|
@@ -907,7 +987,7 @@ let {{
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
fesetround(FeRoundZero);
|
||||
__asm__ __volatile__("" : "=m" (FpOp1) : "m" (FpOp1));
|
||||
FpDest_uw = vfpFpSToFixed(FpOp1, false, false, 0);
|
||||
FpDest_uw = vfpFpToFixed<float>(FpOp1, false, 32, 0);
|
||||
__asm__ __volatile__("" :: "m" (FpDest_uw));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpscrExc = fpscr;
|
||||
|
@@ -927,7 +1007,7 @@ let {{
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
fesetround(FeRoundZero);
|
||||
__asm__ __volatile__("" : "=m" (cOp1) : "m" (cOp1));
|
||||
uint64_t result = vfpFpDToFixed(cOp1, false, false, 0);
|
||||
uint64_t result = vfpFpToFixed<double>(cOp1, false, 32, 0);
|
||||
__asm__ __volatile__("" :: "m" (result));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpDestP0_uw = result;
|
||||
|
@@ -947,7 +1027,7 @@ let {{
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
fesetround(FeRoundZero);
|
||||
__asm__ __volatile__("" : "=m" (FpOp1) : "m" (FpOp1));
|
||||
FpDest_sw = vfpFpSToFixed(FpOp1, true, false, 0);
|
||||
FpDest_sw = vfpFpToFixed<float>(FpOp1, true, 32, 0);
|
||||
__asm__ __volatile__("" :: "m" (FpDest_sw));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpscrExc = fpscr;
|
||||
|
@@ -967,7 +1047,7 @@ let {{
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
fesetround(FeRoundZero);
|
||||
__asm__ __volatile__("" : "=m" (cOp1) : "m" (cOp1));
|
||||
int64_t result = vfpFpDToFixed(cOp1, true, false, 0);
|
||||
int64_t result = vfpFpToFixed<double>(cOp1, true, 32, 0);
|
||||
__asm__ __volatile__("" :: "m" (result));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpDestP0_uw = result;
|
||||
|
@@ -1333,7 +1413,7 @@ let {{
vfpFlushToZero(fpscr, FpOp1);
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (FpOp1) : "m" (FpOp1));
|
||||
FpDest_sw = vfpFpSToFixed(FpOp1, true, false, imm);
|
||||
FpDest_sw = vfpFpToFixed<float>(FpOp1, true, 32, imm);
|
||||
__asm__ __volatile__("" :: "m" (FpDest_sw));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpscrExc = fpscr;
|
||||
|
@@ -1352,7 +1432,7 @@ let {{
vfpFlushToZero(fpscr, cOp1);
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (cOp1) : "m" (cOp1));
|
||||
uint64_t mid = vfpFpDToFixed(cOp1, true, false, imm);
|
||||
uint64_t mid = vfpFpToFixed<double>(cOp1, true, 32, imm);
|
||||
__asm__ __volatile__("" :: "m" (mid));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpDestP0_uw = mid;
|
||||
|
@@ -1372,7 +1452,7 @@ let {{
vfpFlushToZero(fpscr, FpOp1);
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (FpOp1) : "m" (FpOp1));
|
||||
FpDest_uw = vfpFpSToFixed(FpOp1, false, false, imm);
|
||||
FpDest_uw = vfpFpToFixed<float>(FpOp1, false, 32, imm);
|
||||
__asm__ __volatile__("" :: "m" (FpDest_uw));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpscrExc = fpscr;
|
||||
|
@@ -1391,7 +1471,7 @@ let {{
vfpFlushToZero(fpscr, cOp1);
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (cOp1) : "m" (cOp1));
|
||||
uint64_t mid = vfpFpDToFixed(cOp1, false, false, imm);
|
||||
uint64_t mid = vfpFpToFixed<double>(cOp1, false, 32, imm);
|
||||
__asm__ __volatile__("" :: "m" (mid));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpDestP0_uw = mid;
|
||||
|
@@ -1410,7 +1490,7 @@ let {{
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (FpOp1_sw) : "m" (FpOp1_sw));
|
||||
FpDest = vfpSFixedToFpS(fpscr.fz, fpscr.dn, FpOp1_sw, false, imm);
|
||||
FpDest = vfpSFixedToFpS(fpscr.fz, fpscr.dn, FpOp1_sw, 32, imm);
|
||||
__asm__ __volatile__("" :: "m" (FpDest));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpscrExc = fpscr;
|
||||
|
@@ -1428,7 +1508,7 @@ let {{
uint64_t mid = ((uint64_t)FpOp1P0_uw | ((uint64_t)FpOp1P1_uw << 32));
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (mid) : "m" (mid));
|
||||
double cDest = vfpSFixedToFpD(fpscr.fz, fpscr.dn, mid, false, imm);
|
||||
double cDest = vfpSFixedToFpD(fpscr.fz, fpscr.dn, mid, 32, imm);
|
||||
__asm__ __volatile__("" :: "m" (cDest));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpDestP0_uw = dblLow(cDest);
|
||||
|
@@ -1447,7 +1527,7 @@ let {{
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (FpOp1_uw) : "m" (FpOp1_uw));
|
||||
FpDest = vfpUFixedToFpS(fpscr.fz, fpscr.dn, FpOp1_uw, false, imm);
|
||||
FpDest = vfpUFixedToFpS(fpscr.fz, fpscr.dn, FpOp1_uw, 32, imm);
|
||||
__asm__ __volatile__("" :: "m" (FpDest));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpscrExc = fpscr;
|
||||
|
@@ -1465,7 +1545,7 @@ let {{
uint64_t mid = ((uint64_t)FpOp1P0_uw | ((uint64_t)FpOp1P1_uw << 32));
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (mid) : "m" (mid));
|
||||
double cDest = vfpUFixedToFpD(fpscr.fz, fpscr.dn, mid, false, imm);
|
||||
double cDest = vfpUFixedToFpD(fpscr.fz, fpscr.dn, mid, 32, imm);
|
||||
__asm__ __volatile__("" :: "m" (cDest));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpDestP0_uw = dblLow(cDest);
|
||||
|
@@ -1485,7 +1565,7 @@ let {{
vfpFlushToZero(fpscr, FpOp1);
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (FpOp1) : "m" (FpOp1));
|
||||
FpDest_sh = vfpFpSToFixed(FpOp1, true, true, imm);
|
||||
FpDest_sh = vfpFpToFixed<float>(FpOp1, true, 16, imm);
|
||||
__asm__ __volatile__("" :: "m" (FpDest_sh));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpscrExc = fpscr;
|
||||
|
@@ -1505,7 +1585,7 @@ let {{
vfpFlushToZero(fpscr, cOp1);
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (cOp1) : "m" (cOp1));
|
||||
uint64_t result = vfpFpDToFixed(cOp1, true, true, imm);
|
||||
uint64_t result = vfpFpToFixed<double>(cOp1, true, 16, imm);
|
||||
__asm__ __volatile__("" :: "m" (result));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpDestP0_uw = result;
|
||||
|
@@ -1526,7 +1606,7 @@ let {{
vfpFlushToZero(fpscr, FpOp1);
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (FpOp1) : "m" (FpOp1));
|
||||
FpDest_uh = vfpFpSToFixed(FpOp1, false, true, imm);
|
||||
FpDest_uh = vfpFpToFixed<float>(FpOp1, false, 16, imm);
|
||||
__asm__ __volatile__("" :: "m" (FpDest_uh));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpscrExc = fpscr;
|
||||
|
@@ -1546,7 +1626,7 @@ let {{
vfpFlushToZero(fpscr, cOp1);
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (cOp1) : "m" (cOp1));
|
||||
uint64_t mid = vfpFpDToFixed(cOp1, false, true, imm);
|
||||
uint64_t mid = vfpFpToFixed<double>(cOp1, false, 16, imm);
|
||||
__asm__ __volatile__("" :: "m" (mid));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpDestP0_uw = mid;
|
||||
|
@@ -1566,7 +1646,7 @@ let {{
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (FpOp1_sh) : "m" (FpOp1_sh));
|
||||
FpDest = vfpSFixedToFpS(fpscr.fz, fpscr.dn, FpOp1_sh, true, imm);
|
||||
FpDest = vfpSFixedToFpS(fpscr.fz, fpscr.dn, FpOp1_sh, 16, imm);
|
||||
__asm__ __volatile__("" :: "m" (FpDest));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpscrExc = fpscr;
|
||||
|
@@ -1585,7 +1665,7 @@ let {{
uint64_t mid = ((uint64_t)FpOp1P0_uw | ((uint64_t)FpOp1P1_uw << 32));
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (mid) : "m" (mid));
|
||||
double cDest = vfpSFixedToFpD(fpscr.fz, fpscr.dn, mid, true, imm);
|
||||
double cDest = vfpSFixedToFpD(fpscr.fz, fpscr.dn, mid, 16, imm);
|
||||
__asm__ __volatile__("" :: "m" (cDest));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpDestP0_uw = dblLow(cDest);
|
||||
|
@@ -1605,7 +1685,7 @@ let {{
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (FpOp1_uh) : "m" (FpOp1_uh));
|
||||
FpDest = vfpUFixedToFpS(fpscr.fz, fpscr.dn, FpOp1_uh, true, imm);
|
||||
FpDest = vfpUFixedToFpS(fpscr.fz, fpscr.dn, FpOp1_uh, 16, imm);
|
||||
__asm__ __volatile__("" :: "m" (FpDest));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpscrExc = fpscr;
|
||||
|
@@ -1624,7 +1704,7 @@ let {{
uint64_t mid = ((uint64_t)FpOp1P0_uw | ((uint64_t)FpOp1P1_uw << 32));
|
||||
VfpSavedState state = prepFpState(fpscr.rMode);
|
||||
__asm__ __volatile__("" : "=m" (mid) : "m" (mid));
|
||||
double cDest = vfpUFixedToFpD(fpscr.fz, fpscr.dn, mid, true, imm);
|
||||
double cDest = vfpUFixedToFpD(fpscr.fz, fpscr.dn, mid, 16, imm);
|
||||
__asm__ __volatile__("" :: "m" (cDest));
|
||||
finishVfp(fpscr, state, fpscr.fz);
|
||||
FpDestP0_uw = dblLow(cDest);
|
||||
|
|
811 src/arch/arm/isa/insts/fp64.isa (new file)
@@ -0,0 +1,811 @@
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2012-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
// not be construed as granting a license to any other intellectual
|
||||
// property including but not limited to intellectual property relating
|
||||
// to a hardware implementation of the functionality of the software
|
||||
// licensed hereunder. You may use the software subject to the license
|
||||
// terms below provided that you ensure that this notice is replicated
|
||||
// unmodified and in its entirety in all distributions of the software,
|
||||
// modified or unmodified, in source code or in binary form.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met: redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer;
|
||||
// redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution;
|
||||
// neither the name of the copyright holders nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Authors: Thomas Grocutt
|
||||
// Edmund Grimley Evans
|
||||
|
||||
let {{
|
||||
|
||||
header_output = ""
|
||||
decoder_output = ""
|
||||
exec_output = ""
|
||||
|
||||
fmovImmSCode = vfp64EnabledCheckCode + '''
|
||||
AA64FpDestP0_uw = bits(imm, 31, 0);
|
||||
AA64FpDestP1_uw = 0;
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
'''
|
||||
fmovImmSIop = InstObjParams("fmov", "FmovImmS", "FpRegImmOp",
|
||||
{ "code": fmovImmSCode,
|
||||
"op_class": "SimdFloatMiscOp" }, [])
|
||||
header_output += FpRegImmOpDeclare.subst(fmovImmSIop);
|
||||
decoder_output += FpRegImmOpConstructor.subst(fmovImmSIop);
|
||||
exec_output += BasicExecute.subst(fmovImmSIop);
|
||||
|
||||
fmovImmDCode = vfp64EnabledCheckCode + '''
|
||||
AA64FpDestP0_uw = bits(imm, 31, 0);
|
||||
AA64FpDestP1_uw = bits(imm, 63, 32);
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
'''
|
||||
fmovImmDIop = InstObjParams("fmov", "FmovImmD", "FpRegImmOp",
|
||||
{ "code": fmovImmDCode,
|
||||
"op_class": "SimdFloatMiscOp" }, [])
|
||||
header_output += FpRegImmOpDeclare.subst(fmovImmDIop);
|
||||
decoder_output += AA64FpRegImmOpConstructor.subst(fmovImmDIop);
|
||||
exec_output += BasicExecute.subst(fmovImmDIop);
|
||||
|
||||
fmovRegSCode = vfp64EnabledCheckCode + '''
|
||||
AA64FpDestP0_uw = AA64FpOp1P0_uw;
|
||||
AA64FpDestP1_uw = 0;
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
'''
|
||||
fmovRegSIop = InstObjParams("fmov", "FmovRegS", "FpRegRegOp",
|
||||
{ "code": fmovRegSCode,
|
||||
"op_class": "SimdFloatMiscOp" }, [])
|
||||
header_output += FpRegRegOpDeclare.subst(fmovRegSIop);
|
||||
decoder_output += AA64FpRegRegOpConstructor.subst(fmovRegSIop);
|
||||
exec_output += BasicExecute.subst(fmovRegSIop);
|
||||
|
||||
fmovRegDCode = vfp64EnabledCheckCode + '''
|
||||
AA64FpDestP0_uw = AA64FpOp1P0_uw;
|
||||
AA64FpDestP1_uw = AA64FpOp1P1_uw;
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
'''
|
||||
fmovRegDIop = InstObjParams("fmov", "FmovRegD", "FpRegRegOp",
|
||||
{ "code": fmovRegDCode,
|
||||
"op_class": "SimdFloatMiscOp" }, [])
|
||||
header_output += FpRegRegOpDeclare.subst(fmovRegDIop);
|
||||
decoder_output += AA64FpRegRegOpConstructor.subst(fmovRegDIop);
|
||||
exec_output += BasicExecute.subst(fmovRegDIop);
|
||||
|
||||
fmovCoreRegWCode = vfp64EnabledCheckCode + '''
|
||||
AA64FpDestP0_uw = WOp1_uw;
|
||||
AA64FpDestP1_uw = 0;
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
'''
|
||||
fmovCoreRegWIop = InstObjParams("fmov", "FmovCoreRegW", "FpRegRegOp",
|
||||
{ "code": fmovCoreRegWCode,
|
||||
"op_class": "SimdFloatMiscOp" }, [])
|
||||
header_output += FpRegRegOpDeclare.subst(fmovCoreRegWIop);
|
||||
decoder_output += AA64FpRegRegOpConstructor.subst(fmovCoreRegWIop);
|
||||
exec_output += BasicExecute.subst(fmovCoreRegWIop);
|
||||
|
||||
fmovCoreRegXCode = vfp64EnabledCheckCode + '''
|
||||
AA64FpDestP0_uw = XOp1_ud;
|
||||
AA64FpDestP1_uw = XOp1_ud >> 32;
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
'''
|
||||
fmovCoreRegXIop = InstObjParams("fmov", "FmovCoreRegX", "FpRegRegOp",
|
||||
{ "code": fmovCoreRegXCode,
|
||||
"op_class": "SimdFloatMiscOp" }, [])
|
||||
header_output += FpRegRegOpDeclare.subst(fmovCoreRegXIop);
|
||||
decoder_output += AA64FpRegRegOpConstructor.subst(fmovCoreRegXIop);
|
||||
exec_output += BasicExecute.subst(fmovCoreRegXIop);
|
||||
|
||||
fmovUCoreRegXCode = vfp64EnabledCheckCode + '''
|
||||
AA64FpDestP2_uw = XOp1_ud;
|
||||
AA64FpDestP3_uw = XOp1_ud >> 32;
|
||||
'''
|
||||
fmovUCoreRegXIop = InstObjParams("fmov", "FmovUCoreRegX", "FpRegRegOp",
|
||||
{ "code": fmovUCoreRegXCode,
|
||||
"op_class": "SimdFloatMiscOp" }, [])
|
||||
header_output += FpRegRegOpDeclare.subst(fmovUCoreRegXIop);
|
||||
decoder_output += AA64FpRegRegOpConstructor.subst(fmovUCoreRegXIop);
|
||||
exec_output += BasicExecute.subst(fmovUCoreRegXIop);
|
||||
|
||||
fmovRegCoreWCode = vfp64EnabledCheckCode + '''
|
||||
WDest = AA64FpOp1P0_uw;
|
||||
'''
|
||||
fmovRegCoreWIop = InstObjParams("fmov", "FmovRegCoreW", "FpRegRegOp",
|
||||
{ "code": fmovRegCoreWCode,
|
||||
"op_class": "SimdFloatMiscOp" }, [])
|
||||
header_output += FpRegRegOpDeclare.subst(fmovRegCoreWIop);
|
||||
decoder_output += AA64FpRegRegOpConstructor.subst(fmovRegCoreWIop);
|
||||
exec_output += BasicExecute.subst(fmovRegCoreWIop);
|
||||
|
||||
fmovRegCoreXCode = vfp64EnabledCheckCode + '''
|
||||
XDest = ( ((uint64_t) AA64FpOp1P1_uw) << 32) | AA64FpOp1P0_uw;
|
||||
'''
|
||||
fmovRegCoreXIop = InstObjParams("fmov", "FmovRegCoreX", "FpRegRegOp",
|
||||
{ "code": fmovRegCoreXCode,
|
||||
"op_class": "SimdFloatMiscOp" }, [])
|
||||
header_output += FpRegRegOpDeclare.subst(fmovRegCoreXIop);
|
||||
decoder_output += AA64FpRegRegOpConstructor.subst(fmovRegCoreXIop);
|
||||
exec_output += BasicExecute.subst(fmovRegCoreXIop);
|
||||
|
||||
fmovURegCoreXCode = vfp64EnabledCheckCode + '''
|
||||
XDest = ( ((uint64_t) AA64FpOp1P3_uw) << 32) | AA64FpOp1P2_uw;
|
||||
'''
|
||||
fmovURegCoreXIop = InstObjParams("fmov", "FmovURegCoreX", "FpRegRegOp",
|
||||
{ "code": fmovURegCoreXCode,
|
||||
"op_class": "SimdFloatMiscOp" }, [])
|
||||
header_output += FpRegRegOpDeclare.subst(fmovURegCoreXIop);
|
||||
decoder_output += AA64FpRegRegOpConstructor.subst(fmovURegCoreXIop);
|
||||
exec_output += BasicExecute.subst(fmovURegCoreXIop);
|
||||
}};
|
||||
|
||||
let {{
|
||||
|
||||
header_output = ""
|
||||
decoder_output = ""
|
||||
exec_output = ""
|
||||
|
||||
singleIntConvCode = vfp64EnabledCheckCode + '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
uint32_t cOp1 = AA64FpOp1P0_uw;
|
||||
uint32_t cDest = %(op)s;
|
||||
AA64FpDestP0_uw = cDest;
|
||||
AA64FpDestP1_uw = 0;
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
FpscrExc = fpscr;
|
||||
'''
|
||||
|
||||
singleIntConvCode2 = vfp64EnabledCheckCode + '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
uint32_t cOp1 = AA64FpOp1P0_uw;
|
||||
uint32_t cOp2 = AA64FpOp2P0_uw;
|
||||
uint32_t cDest = %(op)s;
|
||||
AA64FpDestP0_uw = cDest;
|
||||
AA64FpDestP1_uw = 0;
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
FpscrExc = fpscr;
|
||||
'''
|
||||
|
||||
singleBinOp = "binaryOp(fpscr, AA64FpOp1P0, AA64FpOp2P0," + \
|
||||
"%(func)s, fpscr.fz, fpscr.dn, fpscr.rMode)"
|
||||
singleUnaryOp = "unaryOp(fpscr, AA64FpOp1P0, %(func)s, fpscr.fz, fpscr.rMode)"
|
||||
|
||||
doubleIntConvCode = vfp64EnabledCheckCode + '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
uint64_t cOp1 = ((uint64_t) AA64FpOp1P1_uw) << 32 | AA64FpOp1P0_uw;
|
||||
uint64_t cDest = %(op)s;
|
||||
AA64FpDestP0_uw = cDest & 0xFFFFFFFF;
|
||||
AA64FpDestP1_uw = cDest >> 32;
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
FpscrExc = fpscr;
|
||||
'''
|
||||
|
||||
doubleIntConvCode2 = vfp64EnabledCheckCode + '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
uint64_t cOp1 = ((uint64_t) AA64FpOp1P1_uw) << 32 | AA64FpOp1P0_uw;
|
||||
uint64_t cOp2 = ((uint64_t) AA64FpOp2P1_uw) << 32 | AA64FpOp2P0_uw;
|
||||
uint64_t cDest = %(op)s;
|
||||
AA64FpDestP0_uw = cDest & 0xFFFFFFFF;
|
||||
AA64FpDestP1_uw = cDest >> 32;
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
FpscrExc = fpscr;
|
||||
'''
|
||||
|
||||
doubleBinOp = '''
|
||||
binaryOp(fpscr, dbl(AA64FpOp1P0_uw, AA64FpOp1P1_uw),
|
||||
dbl(AA64FpOp2P0_uw, AA64FpOp2P1_uw),
|
||||
%(func)s, fpscr.fz, fpscr.dn, fpscr.rMode);
|
||||
'''
|
||||
doubleUnaryOp = '''
|
||||
unaryOp(fpscr, dbl(AA64FpOp1P0_uw, AA64FpOp1P1_uw), %(func)s,
|
||||
fpscr.fz, fpscr.rMode)
|
||||
'''
|
||||
|
||||
def buildTernaryFpOp(name, opClass, sOp, dOp):
|
||||
global header_output, decoder_output, exec_output
|
||||
for isDouble in True, False:
|
||||
code = vfp64EnabledCheckCode + '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
'''
|
||||
if isDouble:
|
||||
code += '''
|
||||
uint64_t cOp1 = AA64FpOp1P0_uw | (uint64_t)AA64FpOp1P1_uw << 32;
|
||||
uint64_t cOp2 = AA64FpOp2P0_uw | (uint64_t)AA64FpOp2P1_uw << 32;
|
||||
uint64_t cOp3 = AA64FpOp3P0_uw | (uint64_t)AA64FpOp3P1_uw << 32;
|
||||
uint64_t cDest;
|
||||
''' "cDest = " + dOp + ";" + '''
|
||||
AA64FpDestP0_uw = cDest;
|
||||
AA64FpDestP1_uw = cDest >> 32;
|
||||
'''
|
||||
else:
|
||||
code += '''
|
||||
uint32_t cOp1 = AA64FpOp1P0_uw;
|
||||
uint32_t cOp2 = AA64FpOp2P0_uw;
|
||||
uint32_t cOp3 = AA64FpOp3P0_uw;
|
||||
uint32_t cDest;
|
||||
''' "cDest = " + sOp + ";" + '''
|
||||
AA64FpDestP0_uw = cDest;
|
||||
AA64FpDestP1_uw = 0;
|
||||
'''
|
||||
code += '''
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
FpscrExc = fpscr;
|
||||
'''
|
||||
|
||||
iop = InstObjParams(name.lower(), name + ("D" if isDouble else "S"),
|
||||
"FpRegRegRegRegOp",
|
||||
{ "code": code, "op_class": opClass }, [])
|
||||
|
||||
header_output += AA64FpRegRegRegRegOpDeclare.subst(iop)
|
||||
decoder_output += AA64FpRegRegRegRegOpConstructor.subst(iop)
|
||||
exec_output += BasicExecute.subst(iop)
|
||||
|
||||
buildTernaryFpOp("FMAdd", "SimdFloatMultAccOp",
|
||||
"fplibMulAdd<uint32_t>(cOp3, cOp1, cOp2, fpscr)",
|
||||
"fplibMulAdd<uint64_t>(cOp3, cOp1, cOp2, fpscr)" )
|
||||
buildTernaryFpOp("FMSub", "SimdFloatMultAccOp",
|
||||
"fplibMulAdd<uint32_t>(cOp3, fplibNeg<uint32_t>(cOp1), cOp2, fpscr)",
|
||||
"fplibMulAdd<uint64_t>(cOp3, fplibNeg<uint64_t>(cOp1), cOp2, fpscr)" )
|
||||
buildTernaryFpOp("FNMAdd", "SimdFloatMultAccOp",
|
||||
"fplibMulAdd<uint32_t>(fplibNeg<uint32_t>(cOp3), fplibNeg<uint32_t>(cOp1), cOp2, fpscr)",
|
||||
"fplibMulAdd<uint64_t>(fplibNeg<uint64_t>(cOp3), fplibNeg<uint64_t>(cOp1), cOp2, fpscr)" )
|
||||
buildTernaryFpOp("FNMSub", "SimdFloatMultAccOp",
|
||||
"fplibMulAdd<uint32_t>(fplibNeg<uint32_t>(cOp3), cOp1, cOp2, fpscr)",
|
||||
"fplibMulAdd<uint64_t>(fplibNeg<uint64_t>(cOp3), cOp1, cOp2, fpscr)" )
|
||||
|
||||
def buildBinFpOp(name, Name, base, opClass, singleOp, doubleOp):
|
||||
global header_output, decoder_output, exec_output
|
||||
|
||||
code = singleIntConvCode2 % { "op": singleOp }
|
||||
sIop = InstObjParams(name, Name + "S", base,
|
||||
{ "code": code,
|
||||
"op_class": opClass }, [])
|
||||
|
||||
code = doubleIntConvCode2 % { "op": doubleOp }
|
||||
dIop = InstObjParams(name, Name + "D", base,
|
||||
{ "code": code,
|
||||
"op_class": opClass }, [])
|
||||
|
||||
declareTempl = eval( base + "Declare");
|
||||
constructorTempl = eval("AA64" + base + "Constructor");
|
||||
|
||||
for iop in sIop, dIop:
|
||||
header_output += declareTempl.subst(iop)
|
||||
decoder_output += constructorTempl.subst(iop)
|
||||
exec_output += BasicExecute.subst(iop)
|
||||
|
||||
buildBinFpOp("fadd", "FAdd", "FpRegRegRegOp", "SimdFloatAddOp",
|
||||
"fplibAdd<uint32_t>(cOp1, cOp2, fpscr)",
|
||||
"fplibAdd<uint64_t>(cOp1, cOp2, fpscr)")
|
||||
buildBinFpOp("fsub", "FSub", "FpRegRegRegOp", "SimdFloatAddOp",
|
||||
"fplibSub<uint32_t>(cOp1, cOp2, fpscr)",
|
||||
"fplibSub<uint64_t>(cOp1, cOp2, fpscr)")
|
||||
buildBinFpOp("fdiv", "FDiv", "FpRegRegRegOp", "SimdFloatDivOp",
|
||||
"fplibDiv<uint32_t>(cOp1, cOp2, fpscr)",
|
||||
"fplibDiv<uint64_t>(cOp1, cOp2, fpscr)")
|
||||
buildBinFpOp("fmul", "FMul", "FpRegRegRegOp", "SimdFloatMultOp",
|
||||
"fplibMul<uint32_t>(cOp1, cOp2, fpscr)",
|
||||
"fplibMul<uint64_t>(cOp1, cOp2, fpscr)")
|
||||
buildBinFpOp("fnmul", "FNMul", "FpRegRegRegOp", "SimdFloatMultOp",
|
||||
"fplibNeg<uint32_t>(fplibMul<uint32_t>(cOp1, cOp2, fpscr))",
|
||||
"fplibNeg<uint64_t>(fplibMul<uint64_t>(cOp1, cOp2, fpscr))")
|
||||
buildBinFpOp("fmin", "FMin", "FpRegRegRegOp", "SimdFloatCmpOp",
|
||||
"fplibMin<uint32_t>(cOp1, cOp2, fpscr)",
|
||||
"fplibMin<uint64_t>(cOp1, cOp2, fpscr)")
|
||||
buildBinFpOp("fmax", "FMax", "FpRegRegRegOp", "SimdFloatCmpOp",
|
||||
"fplibMax<uint32_t>(cOp1, cOp2, fpscr)",
|
||||
"fplibMax<uint64_t>(cOp1, cOp2, fpscr)")
|
||||
buildBinFpOp("fminnm", "FMinNM", "FpRegRegRegOp", "SimdFloatCmpOp",
|
||||
"fplibMinNum<uint32_t>(cOp1, cOp2, fpscr)",
|
||||
"fplibMinNum<uint64_t>(cOp1, cOp2, fpscr)")
|
||||
buildBinFpOp("fmaxnm", "FMaxNM", "FpRegRegRegOp", "SimdFloatCmpOp",
|
||||
"fplibMaxNum<uint32_t>(cOp1, cOp2, fpscr)",
|
||||
"fplibMaxNum<uint64_t>(cOp1, cOp2, fpscr)")
|
||||
|
||||
def buildUnaryFpOp(name, Name, base, opClass, singleOp, doubleOp = None):
|
||||
if doubleOp is None:
|
||||
doubleOp = singleOp
|
||||
global header_output, decoder_output, exec_output
|
||||
|
||||
code = singleIntConvCode % { "op": singleOp }
|
||||
sIop = InstObjParams(name, Name + "S", base,
|
||||
{ "code": code,
|
||||
"op_class": opClass }, [])
|
||||
code = doubleIntConvCode % { "op": doubleOp }
|
||||
dIop = InstObjParams(name, Name + "D", base,
|
||||
{ "code": code,
|
||||
"op_class": opClass }, [])
|
||||
|
||||
declareTempl = eval( base + "Declare");
|
||||
constructorTempl = eval("AA64" + base + "Constructor");
|
||||
|
||||
for iop in sIop, dIop:
|
||||
header_output += declareTempl.subst(iop)
|
||||
decoder_output += constructorTempl.subst(iop)
|
||||
exec_output += BasicExecute.subst(iop)
|
||||
|
||||
buildUnaryFpOp("fsqrt", "FSqrt", "FpRegRegOp", "SimdFloatSqrtOp",
|
||||
"fplibSqrt<uint32_t>(cOp1, fpscr)", "fplibSqrt<uint64_t>(cOp1, fpscr)")
|
||||
|
||||
def buildSimpleUnaryFpOp(name, Name, base, opClass, singleOp,
|
||||
doubleOp = None, isIntConv = True):
|
||||
if doubleOp is None:
|
||||
doubleOp = singleOp
|
||||
global header_output, decoder_output, exec_output
|
||||
|
||||
if isIntConv:
|
||||
sCode = singleIntConvCode
|
||||
dCode = doubleIntConvCode
|
||||
else:
|
||||
sCode = singleCode
|
||||
dCode = doubleCode
|
||||
|
||||
for code, op, suffix in [[sCode, singleOp, "S"],
|
||||
[dCode, doubleOp, "D"]]:
|
||||
iop = InstObjParams(name, Name + suffix, base,
|
||||
{ "code": code % { "op": op },
|
||||
"op_class": opClass }, [])
|
||||
|
||||
declareTempl = eval( base + "Declare");
|
||||
constructorTempl = eval("AA64" + base + "Constructor");
|
||||
|
||||
header_output += declareTempl.subst(iop)
|
||||
decoder_output += constructorTempl.subst(iop)
|
||||
exec_output += BasicExecute.subst(iop)
|
||||
|
||||
buildSimpleUnaryFpOp("fneg", "FNeg", "FpRegRegOp", "SimdFloatMiscOp",
|
||||
"fplibNeg<uint32_t>(cOp1)", "fplibNeg<uint64_t>(cOp1)")
|
||||
buildSimpleUnaryFpOp("fabs", "FAbs", "FpRegRegOp", "SimdFloatMiscOp",
|
||||
"fplibAbs<uint32_t>(cOp1)", "fplibAbs<uint64_t>(cOp1)")
|
||||
buildSimpleUnaryFpOp("frintn", "FRIntN", "FpRegRegOp", "SimdFloatMiscOp",
|
||||
"fplibRoundInt<uint32_t>(cOp1, FPRounding_TIEEVEN, false, fpscr)",
|
||||
"fplibRoundInt<uint64_t>(cOp1, FPRounding_TIEEVEN, false, fpscr)")
|
||||
buildSimpleUnaryFpOp("frintp", "FRIntP", "FpRegRegOp", "SimdFloatMiscOp",
|
||||
"fplibRoundInt<uint32_t>(cOp1, FPRounding_POSINF, false, fpscr)",
|
||||
"fplibRoundInt<uint64_t>(cOp1, FPRounding_POSINF, false, fpscr)")
|
||||
buildSimpleUnaryFpOp("frintm", "FRIntM", "FpRegRegOp", "SimdFloatMiscOp",
|
||||
"fplibRoundInt<uint32_t>(cOp1, FPRounding_NEGINF, false, fpscr)",
|
||||
"fplibRoundInt<uint64_t>(cOp1, FPRounding_NEGINF, false, fpscr)")
|
||||
buildSimpleUnaryFpOp("frintz", "FRIntZ", "FpRegRegOp", "SimdFloatMiscOp",
|
||||
"fplibRoundInt<uint32_t>(cOp1, FPRounding_ZERO, false, fpscr)",
|
||||
"fplibRoundInt<uint64_t>(cOp1, FPRounding_ZERO, false, fpscr)")
|
||||
buildSimpleUnaryFpOp("frinta", "FRIntA", "FpRegRegOp", "SimdFloatMiscOp",
|
||||
"fplibRoundInt<uint32_t>(cOp1, FPRounding_TIEAWAY, false, fpscr)",
|
||||
"fplibRoundInt<uint64_t>(cOp1, FPRounding_TIEAWAY, false, fpscr)")
|
||||
buildSimpleUnaryFpOp("frinti", "FRIntI", "FpRegRegOp", "SimdFloatMiscOp",
|
||||
"fplibRoundInt<uint32_t>(cOp1, FPCRRounding(fpscr), false, fpscr)",
|
||||
"fplibRoundInt<uint64_t>(cOp1, FPCRRounding(fpscr), false, fpscr)")
|
||||
buildSimpleUnaryFpOp("frintx", "FRIntX", "FpRegRegOp", "SimdFloatMiscOp",
|
||||
"fplibRoundInt<uint32_t>(cOp1, FPCRRounding(fpscr), true, fpscr)",
|
||||
"fplibRoundInt<uint64_t>(cOp1, FPCRRounding(fpscr), true, fpscr)")
|
||||
}};
|
||||
|
||||
let {{
|
||||
|
||||
header_output = ""
|
||||
decoder_output = ""
|
||||
exec_output = ""
|
||||
|
||||
# Creates the integer to floating point instructions, including variants for
|
||||
# signed/unsigned, float/double, etc
|
||||
for regL, regOpL, width in [["W", "w", 32],
|
||||
["X", "d", 64]]:
|
||||
for isDouble in True, False:
|
||||
for us, usCode in [["U", "uint%d_t cSrc = %sOp1_u%s;" %(width, regL, regOpL)],
|
||||
["S", "int%d_t cSrc = %sOp1_u%s;" %(width, regL, regOpL)]]:
|
||||
fcvtIntFpDCode = vfp64EnabledCheckCode + '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
%s
|
||||
''' %(usCode)
|
||||
|
||||
if isDouble:
|
||||
fcvtIntFpDCode += '''
|
||||
uint64_t cDest = fplibFixedToFP<uint64_t>(cSrc, 0,
|
||||
%s, FPCRRounding(fpscr), fpscr);
|
||||
AA64FpDestP0_uw = cDest;
|
||||
AA64FpDestP1_uw = cDest >> 32;
|
||||
''' % ("true" if us == "U" else "false")
|
||||
else:
|
||||
fcvtIntFpDCode += '''
|
||||
uint32_t cDest = fplibFixedToFP<uint32_t>(cSrc, 0,
|
||||
%s, FPCRRounding(fpscr), fpscr);
|
||||
AA64FpDestP0_uw = cDest;
|
||||
AA64FpDestP1_uw = 0;
|
||||
''' % ("true" if us == "U" else "false")
|
||||
fcvtIntFpDCode += '''
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
FpscrExc = fpscr;
|
||||
'''
|
||||
|
||||
instName = "Fcvt%s%sIntFp%s" %(regL, us, "D" if isDouble else "S")
|
||||
mnem = "%scvtf" %(us.lower())
|
||||
fcvtIntFpDIop = InstObjParams(mnem, instName, "FpRegRegOp",
|
||||
{ "code": fcvtIntFpDCode,
|
||||
"op_class": "SimdFloatCvtOp" }, [])
|
||||
header_output += FpRegRegOpDeclare.subst(fcvtIntFpDIop);
|
||||
decoder_output += AA64FpRegRegOpConstructor.subst(fcvtIntFpDIop);
|
||||
exec_output += BasicExecute.subst(fcvtIntFpDIop);
|
||||
|
||||
# Generates the floating point to integer conversion instructions in various
|
||||
# variants, eg signed/unsigned
|
||||
def buildFpCvtIntOp(isDouble, isSigned, isXReg):
|
||||
global header_output, decoder_output, exec_output
|
||||
|
||||
for rmode, roundingMode in [["N", "FPRounding_TIEEVEN"],
|
||||
["P", "FPRounding_POSINF"],
|
||||
["M", "FPRounding_NEGINF"],
|
||||
["Z", "FPRounding_ZERO"],
|
||||
["A", "FPRounding_TIEAWAY"]]:
|
||||
fcvtFpIntCode = vfp64EnabledCheckCode + '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;'''
|
||||
if isDouble:
|
||||
fcvtFpIntCode += '''
|
||||
uint64_t cOp1 = AA64FpOp1P0_uw | (uint64_t)AA64FpOp1P1_uw << 32;
|
||||
'''
|
||||
else:
|
||||
fcvtFpIntCode += "uint32_t cOp1 = AA64FpOp1P0_uw;"
|
||||
|
||||
fcvtFpIntCode += '''
|
||||
%sDest = fplibFPToFixed<uint%s_t, uint%s_t>(cOp1, 0, %s, %s, fpscr);
|
||||
FpscrExc = fpscr;
|
||||
''' %("X" if isXReg else "W",
|
||||
"64" if isDouble else "32",
|
||||
"64" if isXReg else "32",
|
||||
"false" if isSigned else "true",
|
||||
roundingMode)
|
||||
|
||||
instName = "FcvtFp%sInt%s%s%s" %("S" if isSigned else "U",
|
||||
"X" if isXReg else "W",
|
||||
"D" if isDouble else "S", rmode)
|
||||
mnem = "fcvt%s%s" %(rmode, "s" if isSigned else "u")
|
||||
fcvtFpIntIop = InstObjParams(mnem, instName, "FpRegRegOp",
|
||||
{ "code": fcvtFpIntCode,
|
||||
"op_class": "SimdFloatCvtOp" }, [])
|
||||
header_output += FpRegRegOpDeclare.subst(fcvtFpIntIop);
|
||||
decoder_output += FpRegRegOpConstructor.subst(fcvtFpIntIop);
|
||||
exec_output += BasicExecute.subst(fcvtFpIntIop);
|
||||
|
||||
# Now actually do the building with the different variants
|
||||
for isDouble in True, False:
|
||||
for isSigned in True, False:
|
||||
for isXReg in True, False:
|
||||
buildFpCvtIntOp(isDouble, isSigned, isXReg)
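The nested loops above generate the whole fcvt{n,p,m,z,a}{s,u} family by delegating rounding, saturation, and NaN handling to fplibFPToFixed. As a rough reference for the expected results, here is a toy model of only the round-toward-zero ("Z") variant with saturation to the destination width; fcvtz_ref is an illustrative name and is not how fplib implements it.
import math

def fcvtz_ref(value, bits, signed):
    if math.isnan(value):
        return 0
    lo = -(1 << (bits - 1)) if signed else 0
    hi = (1 << (bits - 1)) - 1 if signed else (1 << bits) - 1
    if not math.isfinite(value):
        return hi if value > 0 else lo
    return min(max(math.trunc(value), lo), hi)

assert fcvtz_ref(-1.5, 32, signed=False) == 0           # clamps below zero
assert fcvtz_ref(1e20, 32, signed=True) == 2**31 - 1    # saturates
assert fcvtz_ref(3.9, 64, signed=True) == 3             # truncates toward zero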
|
||||
|
||||
fcvtFpSFpDCode = vfp64EnabledCheckCode + '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
uint64_t cDest = fplibConvert<uint32_t, uint64_t>(AA64FpOp1P0_uw,
|
||||
FPCRRounding(fpscr), fpscr);
|
||||
AA64FpDestP0_uw = cDest;
|
||||
AA64FpDestP1_uw = cDest >> 32;
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
FpscrExc = fpscr;
|
||||
'''
|
||||
fcvtFpSFpDIop = InstObjParams("fcvt", "FCvtFpSFpD", "FpRegRegOp",
|
||||
{ "code": fcvtFpSFpDCode,
|
||||
"op_class": "SimdFloatCvtOp" }, [])
|
||||
header_output += FpRegRegOpDeclare.subst(fcvtFpSFpDIop);
|
||||
decoder_output += AA64FpRegRegOpConstructor.subst(fcvtFpSFpDIop);
|
||||
exec_output += BasicExecute.subst(fcvtFpSFpDIop);
|
||||
|
||||
fcvtFpDFpSCode = vfp64EnabledCheckCode + '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
uint64_t cOp1 = AA64FpOp1P0_uw | (uint64_t)AA64FpOp1P1_uw << 32;
|
||||
AA64FpDestP0_uw = fplibConvert<uint64_t, uint32_t>(cOp1,
|
||||
FPCRRounding(fpscr), fpscr);
|
||||
AA64FpDestP1_uw = 0;
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
FpscrExc = fpscr;
|
||||
'''
|
||||
fcvtFpDFpSIop = InstObjParams("fcvt", "FcvtFpDFpS", "FpRegRegOp",
|
||||
{"code": fcvtFpDFpSCode,
|
||||
"op_class": "SimdFloatCvtOp" }, [])
|
||||
header_output += FpRegRegOpDeclare.subst(fcvtFpDFpSIop);
|
||||
decoder_output += AA64FpRegRegOpConstructor.subst(fcvtFpDFpSIop);
|
||||
exec_output += BasicExecute.subst(fcvtFpDFpSIop);
|
||||
|
||||
# Half precision to single or double precision conversion
|
||||
for isDouble in True, False:
|
||||
code = vfp64EnabledCheckCode + '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
%s cDest = fplibConvert<uint16_t, uint%s_t>(AA64FpOp1P0_uw,
|
||||
FPCRRounding(fpscr), fpscr);
|
||||
''' % ("uint64_t" if isDouble else "uint32_t",
|
||||
"64" if isDouble else "32")
|
||||
if isDouble:
|
||||
code += '''
|
||||
AA64FpDestP0_uw = cDest;
|
||||
AA64FpDestP1_uw = cDest >> 32;
|
||||
'''
|
||||
else:
|
||||
code += '''
|
||||
AA64FpDestP0_uw = cDest;
|
||||
AA64FpDestP1_uw = 0;
|
||||
'''
|
||||
code += '''
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
FpscrExc = fpscr;
|
||||
'''
|
||||
|
||||
instName = "FcvtFpHFp%s" %("D" if isDouble else "S")
|
||||
fcvtFpHFpIop = InstObjParams("fcvt", instName, "FpRegRegOp",
|
||||
{ "code": code,
|
||||
"op_class": "SimdFloatCvtOp" }, [])
|
||||
header_output += FpRegRegOpDeclare.subst(fcvtFpHFpIop);
|
||||
decoder_output += AA64FpRegRegOpConstructor.subst(fcvtFpHFpIop);
|
||||
exec_output += BasicExecute.subst(fcvtFpHFpIop);
|
||||
|
||||
# single or double precision to Half precision conversion
|
||||
for isDouble in True, False:
|
||||
code = vfp64EnabledCheckCode + '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
%s;
|
||||
AA64FpDestP0_uw = fplibConvert<uint%s_t, uint16_t>(cOp1,
|
||||
FPCRRounding(fpscr), fpscr);
|
||||
AA64FpDestP1_uw = 0;
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
FpscrExc = fpscr;
|
||||
''' % ("uint64_t cOp1 = AA64FpOp1P0_uw | (uint64_t)AA64FpOp1P1_uw << 32"
|
||||
if isDouble else "uint32_t cOp1 = AA64FpOp1P0_uw",
|
||||
"64" if isDouble else "32")
|
||||
|
||||
instName = "FcvtFp%sFpH" %("D" if isDouble else "S")
|
||||
fcvtFpFpHIop = InstObjParams("fcvt", instName, "FpRegRegOp",
|
||||
{ "code": code,
|
||||
"op_class": "SimdFloatCvtOp" }, [])
|
||||
header_output += FpRegRegOpDeclare.subst(fcvtFpFpHIop);
|
||||
decoder_output += AA64FpRegRegOpConstructor.subst(fcvtFpFpHIop);
|
||||
exec_output += BasicExecute.subst(fcvtFpFpHIop);
|
||||
|
||||
# Build the various versions of the floating point compare instructions
|
||||
def buildFCmpOp(isQuiet, isDouble, isImm):
|
||||
global header_output, decoder_output, exec_output
|
||||
|
||||
fcmpCode = vfp64EnabledCheckCode + '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
%s cOp1 = %s;
|
||||
''' % ("uint64_t" if isDouble else "uint32_t",
|
||||
"AA64FpDestP0_uw | (uint64_t)AA64FpDestP1_uw << 32"
|
||||
if isDouble else "AA64FpDestP0_uw")
|
||||
if isImm:
|
||||
fcmpCode += '''
|
||||
%s cOp2 = imm;
|
||||
''' % ("uint64_t" if isDouble else "uint32_t")
|
||||
else:
|
||||
fcmpCode += '''
|
||||
%s cOp2 = %s;
|
||||
''' % ("uint64_t" if isDouble else "uint32_t",
|
||||
"AA64FpOp1P0_uw | (uint64_t)AA64FpOp1P1_uw << 32"
|
||||
if isDouble else "AA64FpOp1P0_uw")
|
||||
fcmpCode += '''
|
||||
int cc = fplibCompare<uint%s_t>(cOp1, cOp2, %s, fpscr);
|
||||
CondCodesNZ = cc >> 2 & 3;
|
||||
CondCodesC = cc >> 1 & 1;
|
||||
CondCodesV = cc & 1;
|
||||
FpCondCodes = fpscr & FpCondCodesMask;
|
||||
FpscrExc = fpscr;
|
||||
''' % ("64" if isDouble else "32", "false" if isQuiet else "true")
|
||||
|
||||
typeName = "Imm" if isImm else "Reg"
|
||||
instName = "FCmp%s%s%s" %("" if isQuiet else "E", typeName,
|
||||
"D" if isDouble else "S")
|
||||
fcmpIop = InstObjParams("fcmp%s" %("" if isQuiet else "e"), instName,
|
||||
"FpReg%sOp" %(typeName),
|
||||
{"code": fcmpCode,
|
||||
"op_class": "SimdFloatCmpOp"}, [])
|
||||
|
||||
declareTemp = eval("FpReg%sOpDeclare" %(typeName));
|
||||
constructorTemp = eval("AA64FpReg%sOpConstructor" %(typeName));
|
||||
header_output += declareTemp.subst(fcmpIop);
|
||||
decoder_output += constructorTemp.subst(fcmpIop);
|
||||
exec_output += BasicExecute.subst(fcmpIop);
|
||||
|
||||
for isQuiet in True, False:
|
||||
for isDouble in True, False:
|
||||
for isImm in True, False:
|
||||
buildFCmpOp(isQuiet, isDouble, isImm)
|
||||
|
||||
# Build the various versions of the conditional floating point compare
|
||||
# instructions
|
||||
def buildFCCmpOp(isQuiet, isDouble):
|
||||
global header_output, decoder_output, exec_output
|
||||
|
||||
fccmpCode = vfp64EnabledCheckCode + '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
if (testPredicate(CondCodesNZ, CondCodesC, CondCodesV, condCode)) {
|
||||
%s cOp1 = %s;
|
||||
%s cOp2 = %s;
|
||||
int cc = fplibCompare<uint%s_t>(cOp1, cOp2, %s, fpscr);
|
||||
CondCodesNZ = cc >> 2 & 3;
|
||||
CondCodesC = cc >> 1 & 1;
|
||||
CondCodesV = cc & 1;
|
||||
} else {
|
||||
CondCodesNZ = (defCc >> 2) & 0x3;
|
||||
CondCodesC = (defCc >> 1) & 0x1;
|
||||
CondCodesV = defCc & 0x1;
|
||||
}
|
||||
FpCondCodes = fpscr & FpCondCodesMask;
|
||||
FpscrExc = fpscr;
|
||||
''' % ("uint64_t" if isDouble else "uint32_t",
|
||||
"AA64FpOp1P0_uw | (uint64_t)AA64FpOp1P1_uw << 32"
|
||||
if isDouble else "AA64FpOp1P0_uw",
|
||||
"uint64_t" if isDouble else "uint32_t",
|
||||
"AA64FpOp2P0_uw | (uint64_t)AA64FpOp2P1_uw << 32"
|
||||
if isDouble else "AA64FpOp2P0_uw",
|
||||
"64" if isDouble else "32", "false" if isQuiet else "true")
|
||||
|
||||
instName = "FCCmp%sReg%s" %("" if isQuiet else "E",
|
||||
"D" if isDouble else "S")
|
||||
fccmpIop = InstObjParams("fccmp%s" %("" if isQuiet else "e"),
|
||||
instName, "FpCondCompRegOp",
|
||||
{"code": fccmpCode,
|
||||
"op_class": "SimdFloatCmpOp"}, [])
|
||||
header_output += DataXCondCompRegDeclare.subst(fccmpIop);
|
||||
decoder_output += DataXCondCompRegConstructor.subst(fccmpIop);
|
||||
exec_output += BasicExecute.subst(fccmpIop);
|
||||
|
||||
for isQuiet in True, False:
|
||||
for isDouble in True, False:
|
||||
buildFCCmpOp(isQuiet, isDouble)
|
||||
|
||||
}};

let {{

    header_output = ""
    decoder_output = ""
    exec_output = ""

    # Generates the variants of the floating to fixed point instructions
    def buildFpCvtFixedOp(isSigned, isDouble, isXReg):
        global header_output, decoder_output, exec_output

        fcvtFpFixedCode = vfp64EnabledCheckCode + '''
        FPSCR fpscr = (FPSCR) FpscrExc;
        '''
        if isDouble:
            fcvtFpFixedCode += '''
            uint64_t cOp1 = AA64FpOp1P0_uw | (uint64_t)AA64FpOp1P1_uw << 32;
            '''
        else:
            fcvtFpFixedCode += "uint32_t cOp1 = AA64FpOp1P0_uw;"
        fcvtFpFixedCode += '''
        %sDest = fplibFPToFixed<uint%s_t, uint%s_t>(cOp1, 64 - imm, %s,
                                                    FPRounding_ZERO, fpscr);
        FpscrExc = fpscr;
        ''' %("X" if isXReg else "W",
              "64" if isDouble else "32",
              "64" if isXReg else "32",
              "false" if isSigned else "true")

        instName = "FcvtFp%sFixed%s%s" %("S" if isSigned else "U",
                                         "D" if isDouble else "S",
                                         "X" if isXReg else "W")
        mnem = "fcvtz%s" %("s" if isSigned else "u")
        fcvtFpFixedIop = InstObjParams(mnem, instName, "FpRegRegImmOp",
                                       { "code": fcvtFpFixedCode,
                                         "op_class": "SimdFloatCvtOp" }, [])
        header_output += FpRegRegImmOpDeclare.subst(fcvtFpFixedIop);
        decoder_output += AA64FpRegRegImmOpConstructor.subst(fcvtFpFixedIop);
        exec_output += BasicExecute.subst(fcvtFpFixedIop);

    # Generates the variants of the fixed to floating point instructions
    def buildFixedCvtFpOp(isSigned, isDouble, isXReg):
        global header_output, decoder_output, exec_output

        srcRegType = "X" if isXReg else "W"
        fcvtFixedFpCode = vfp64EnabledCheckCode + '''
        FPSCR fpscr = (FPSCR) FpscrExc;
        %s result = fplibFixedToFP<uint%s_t>((%s%s_t)%sOp1, 64 - imm,
                                             %s, FPCRRounding(fpscr), fpscr);
        ''' %("uint64_t" if isDouble else "uint32_t",
              "64" if isDouble else "32",
              "int" if isSigned else "uint", "64" if isXReg else "32",
              srcRegType,
              "false" if isSigned else "true")
        if isDouble:
            fcvtFixedFpCode += '''
            AA64FpDestP0_uw = result;
            AA64FpDestP1_uw = result >> 32;
            '''
        else:
            fcvtFixedFpCode += '''
            AA64FpDestP0_uw = result;
            AA64FpDestP1_uw = 0;
            '''
        fcvtFixedFpCode += '''
        AA64FpDestP2_uw = 0;
        AA64FpDestP3_uw = 0;
        FpscrExc = fpscr;
        '''

        instName = "Fcvt%sFixedFp%s%s" %("S" if isSigned else "U",
                                         "D" if isDouble else "S",
                                         srcRegType)
        mnem = "%scvtf" %("s" if isSigned else "u")
        fcvtFixedFpIop = InstObjParams(mnem, instName, "FpRegRegImmOp",
                                       { "code": fcvtFixedFpCode,
                                         "op_class": "SimdFloatCvtOp" }, [])
        header_output += FpRegRegImmOpDeclare.subst(fcvtFixedFpIop);
        decoder_output += FpRegRegImmOpConstructor.subst(fcvtFixedFpIop);
        exec_output += BasicExecute.subst(fcvtFixedFpIop);

    # loop over the variants building the instructions for each
    for isXReg in True, False:
        for isDouble in True, False:
            for isSigned in True, False:
                buildFpCvtFixedOp(isSigned, isDouble, isXReg)
                buildFixedCvtFpOp(isSigned, isDouble, isXReg)
}};
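The two builders above are driven by the nested isXReg/isDouble/isSigned loops, so eight FCVTZS/FCVTZU and eight SCVTF/UCVTF decoder classes fall out of this block. A small sketch (not part of the patch) that mirrors the instName construction makes the naming scheme explicit:

# Sketch only: reproduces the instName pattern used by buildFpCvtFixedOp to
# list the eight float-to-fixed variants the loop nest emits.
for isXReg in (True, False):
    for isDouble in (True, False):
        for isSigned in (True, False):
            print("FcvtFp%sFixed%s%s" % ("S" if isSigned else "U",
                                         "D" if isDouble else "S",
                                         "X" if isXReg else "W"))
# -> FcvtFpSFixedDX, FcvtFpUFixedDX, ..., FcvtFpUFixedSW, each paired with a
#    Fcvt{S,U}FixedFp* class produced by buildFixedCvtFpOp.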

let {{

    header_output = ""
    decoder_output = ""
    exec_output = ""

    for isDouble in True, False:
        code = '''
        if (testPredicate(CondCodesNZ, CondCodesC, CondCodesV, condCode)) {
            AA64FpDestP0_uw = AA64FpOp1P0_uw;
        '''
        if isDouble:
            code += '''
            AA64FpDestP1_uw = AA64FpOp1P1_uw;
        } else {
            AA64FpDestP0_uw = AA64FpOp2P0_uw;
            AA64FpDestP1_uw = AA64FpOp2P1_uw;
        }
        '''
        else:
            code += '''
        } else {
            AA64FpDestP0_uw = AA64FpOp2P0_uw;
        }
        AA64FpDestP1_uw = 0;
        '''
        code += '''
        AA64FpDestP2_uw = 0;
        AA64FpDestP3_uw = 0;
        '''

        iop = InstObjParams("fcsel", "FCSel%s" %("D" if isDouble else "S"),
                            "FpCondSelOp", code)
        header_output += DataXCondSelDeclare.subst(iop)
        decoder_output += DataXCondSelConstructor.subst(iop)
        exec_output += BasicExecute.subst(iop)
}};

@ -1,6 +1,6 @@
// -*- mode:c++ -*-

// Copyright (c) 2010 ARM Limited
// Copyright (c) 2010-2012 ARM Limited
// All rights reserved
//
// The license below extends only to copyright in the software and shall

@ -37,6 +37,9 @@
//
// Authors: Gabe Black

//AArch64 instructions
##include "aarch64.isa"

//Basic forms of various templates
##include "basic.isa"

@ -46,8 +49,15 @@
//Loads of a single item
##include "ldr.isa"

//Loads of a single item, AArch64
##include "ldr64.isa"

//Miscellaneous instructions that don't fit elsewhere
##include "misc.isa"
##include "misc64.isa"

//Stores of a single item, AArch64
##include "str64.isa"

//Stores of a single item
##include "str.isa"

@ -61,8 +71,12 @@
//Data processing instructions
##include "data.isa"

//AArch64 data processing instructions
##include "data64.isa"

//Branches
##include "branch.isa"
##include "branch64.isa"

//Multiply
##include "mult.isa"

@ -72,9 +86,14 @@

//VFP
##include "fp.isa"
##include "fp64.isa"

//Neon
##include "neon.isa"

//AArch64 Neon
##include "neon64.isa"
##include "neon64_mem.isa"

//m5 Psuedo-ops
##include "m5ops.isa"
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010-2011 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -38,6 +38,7 @@
|
|||
// Authors: Gabe Black
|
||||
|
||||
let {{
|
||||
import math
|
||||
|
||||
header_output = ""
|
||||
decoder_output = ""
|
||||
|
@ -78,7 +79,8 @@ let {{
|
|||
newDecoder,
|
||||
newExec) = self.fillTemplates(self.name, self.Name, codeBlobs,
|
||||
self.memFlags, instFlags, base,
|
||||
wbDecl, pcDecl, self.rasPop)
|
||||
wbDecl, pcDecl, self.rasPop,
|
||||
self.size, self.sign)
|
||||
|
||||
header_output += newHeader
|
||||
decoder_output += newDecoder
|
||||
|
@ -160,7 +162,7 @@ let {{
|
|||
self.size, self.sign, self.user)
|
||||
|
||||
# Add memory request flags where necessary
|
||||
self.memFlags.append("%d" % (self.size - 1))
|
||||
self.memFlags.append("%d" % int(math.log(self.size, 2)))
|
||||
if self.user:
|
||||
self.memFlags.append("ArmISA::TLB::UserMode")
|
||||
|
||||
|
|
446  src/arch/arm/isa/insts/ldr64.isa  Normal file
|
@ -0,0 +1,446 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2011-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
// not be construed as granting a license to any other intellectual
|
||||
// property including but not limited to intellectual property relating
|
||||
// to a hardware implementation of the functionality of the software
|
||||
// licensed hereunder. You may use the software subject to the license
|
||||
// terms below provided that you ensure that this notice is replicated
|
||||
// unmodified and in its entirety in all distributions of the software,
|
||||
// modified or unmodified, in source code or in binary form.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met: redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer;
|
||||
// redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution;
|
||||
// neither the name of the copyright holders nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Authors: Gabe Black
|
||||
|
||||
let {{
|
||||
|
||||
header_output = ""
|
||||
decoder_output = ""
|
||||
exec_output = ""
|
||||
|
||||
class LoadInst64(LoadStoreInst):
|
||||
execBase = 'Load64'
|
||||
micro = False
|
||||
|
||||
def __init__(self, mnem, Name, size=4, sign=False, user=False,
|
||||
literal=False, flavor="normal", top=False):
|
||||
super(LoadInst64, self).__init__()
|
||||
|
||||
self.name = mnem
|
||||
self.Name = Name
|
||||
self.size = size
|
||||
self.sign = sign
|
||||
self.user = user
|
||||
self.literal = literal
|
||||
self.flavor = flavor
|
||||
self.top = top
|
||||
|
||||
self.memFlags = ["ArmISA::TLB::MustBeOne"]
|
||||
self.instFlags = []
|
||||
self.codeBlobs = {"postacc_code" : ""}
|
||||
|
||||
# Add memory request flags where necessary
|
||||
if self.user:
|
||||
self.memFlags.append("ArmISA::TLB::UserMode")
|
||||
|
||||
if self.flavor == "dprefetch":
|
||||
self.memFlags.append("Request::PREFETCH")
|
||||
self.instFlags = ['IsDataPrefetch']
|
||||
elif self.flavor == "iprefetch":
|
||||
self.memFlags.append("Request::PREFETCH")
|
||||
self.instFlags = ['IsInstPrefetch']
|
||||
if self.micro:
|
||||
self.instFlags.append("IsMicroop")
|
||||
|
||||
if self.flavor in ("acexp", "exp"):
|
||||
# For exclusive pair ops alignment check is based on total size
|
||||
self.memFlags.append("%d" % int(math.log(self.size, 2) + 1))
|
||||
elif not (self.size == 16 and self.top):
|
||||
# Only the first microop should perform alignment checking.
|
||||
self.memFlags.append("%d" % int(math.log(self.size, 2)))
|
||||
|
||||
if self.flavor not in ("acquire", "acex", "exclusive",
|
||||
"acexp", "exp"):
|
||||
self.memFlags.append("ArmISA::TLB::AllowUnaligned")
|
||||
|
||||
if self.flavor in ("acquire", "acex", "acexp"):
|
||||
self.instFlags.extend(["IsMemBarrier",
|
||||
"IsWriteBarrier",
|
||||
"IsReadBarrier"])
|
||||
if self.flavor in ("acex", "exclusive", "exp", "acexp"):
|
||||
self.memFlags.append("Request::LLSC")
|
||||
|
||||
def buildEACode(self):
|
||||
# Address computation code
|
||||
eaCode = ""
|
||||
if self.flavor == "fp":
|
||||
eaCode += vfp64EnabledCheckCode
|
||||
|
||||
if self.literal:
|
||||
eaCode += "EA = RawPC"
|
||||
else:
|
||||
eaCode += SPAlignmentCheckCode + "EA = XBase"
|
||||
|
||||
if self.size == 16:
|
||||
if self.top:
|
||||
eaCode += " + (isBigEndian64(xc->tcBase()) ? 0 : 8)"
|
||||
else:
|
||||
eaCode += " + (isBigEndian64(xc->tcBase()) ? 8 : 0)"
|
||||
if not self.post:
|
||||
eaCode += self.offset
|
||||
eaCode += ";"
|
||||
|
||||
self.codeBlobs["ea_code"] = eaCode
|
||||
|
||||
def emitHelper(self, base='Memory64', wbDecl=None):
|
||||
global header_output, decoder_output, exec_output
|
||||
|
||||
# If this is a microop itself, don't allow anything that would
|
||||
# require further microcoding.
|
||||
if self.micro:
|
||||
assert not wbDecl
|
||||
|
||||
fa_code = None
|
||||
if not self.micro and self.flavor in ("normal", "widen", "acquire"):
|
||||
fa_code = '''
|
||||
fault->annotate(ArmFault::SAS, %s);
|
||||
fault->annotate(ArmFault::SSE, %s);
|
||||
fault->annotate(ArmFault::SRT, dest);
|
||||
fault->annotate(ArmFault::SF, %s);
|
||||
fault->annotate(ArmFault::AR, %s);
|
||||
''' % ("0" if self.size == 1 else
|
||||
"1" if self.size == 2 else
|
||||
"2" if self.size == 4 else "3",
|
||||
"true" if self.sign else "false",
|
||||
"true" if (self.size == 8 or
|
||||
self.flavor == "widen") else "false",
|
||||
"true" if self.flavor == "acquire" else "false")
|
||||
|
||||
(newHeader, newDecoder, newExec) = \
|
||||
self.fillTemplates(self.name, self.Name, self.codeBlobs,
|
||||
self.memFlags, self.instFlags,
|
||||
base, wbDecl, faCode=fa_code)
|
||||
|
||||
header_output += newHeader
|
||||
decoder_output += newDecoder
|
||||
exec_output += newExec
|
||||
|
||||
class LoadImmInst64(LoadInst64):
|
||||
def __init__(self, *args, **kargs):
|
||||
super(LoadImmInst64, self).__init__(*args, **kargs)
|
||||
self.offset = " + imm"
|
||||
|
||||
self.wbDecl = "MicroAddXiUop(machInst, base, base, imm);"
|
||||
|
||||
class LoadRegInst64(LoadInst64):
|
||||
def __init__(self, *args, **kargs):
|
||||
super(LoadRegInst64, self).__init__(*args, **kargs)
|
||||
self.offset = " + extendReg64(XOffset, type, shiftAmt, 64)"
|
||||
|
||||
self.wbDecl = \
|
||||
"MicroAddXERegUop(machInst, base, base, " + \
|
||||
" offset, type, shiftAmt);"
|
||||
|
||||
class LoadRawRegInst64(LoadInst64):
|
||||
def __init__(self, *args, **kargs):
|
||||
super(LoadRawRegInst64, self).__init__(*args, **kargs)
|
||||
self.offset = ""
|
||||
|
||||
class LoadSingle64(LoadInst64):
|
||||
def emit(self):
|
||||
self.buildEACode()
|
||||
|
||||
# Code that actually handles the access
|
||||
if self.flavor in ("dprefetch", "iprefetch"):
|
||||
accCode = 'uint64_t temp M5_VAR_USED = Mem%s;'
|
||||
elif self.flavor == "fp":
|
||||
if self.size in (1, 2, 4):
|
||||
accCode = '''
|
||||
AA64FpDestP0_uw = cSwap(Mem%s,
|
||||
isBigEndian64(xc->tcBase()));
|
||||
AA64FpDestP1_uw = 0;
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
'''
|
||||
elif self.size == 8 or (self.size == 16 and not self.top):
|
||||
accCode = '''
|
||||
uint64_t data = cSwap(Mem%s,
|
||||
isBigEndian64(xc->tcBase()));
|
||||
AA64FpDestP0_uw = (uint32_t)data;
|
||||
AA64FpDestP1_uw = (data >> 32);
|
||||
'''
|
||||
# Only zero out the other half if this isn't part of a
|
||||
# pair of 8 byte loads implementing a 16 byte load.
|
||||
if self.size == 8:
|
||||
accCode += '''
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
'''
|
||||
elif self.size == 16 and self.top:
|
||||
accCode = '''
|
||||
uint64_t data = cSwap(Mem%s,
|
||||
isBigEndian64(xc->tcBase()));
|
||||
AA64FpDestP2_uw = (uint32_t)data;
|
||||
AA64FpDestP3_uw = (data >> 32);
|
||||
'''
|
||||
elif self.flavor == "widen" or self.size == 8:
|
||||
accCode = "XDest = cSwap(Mem%s, isBigEndian64(xc->tcBase()));"
|
||||
else:
|
||||
accCode = "WDest = cSwap(Mem%s, isBigEndian64(xc->tcBase()));"
|
||||
if self.size == 16:
|
||||
accCode = accCode % buildMemSuffix(self.sign, 8)
|
||||
else:
|
||||
accCode = accCode % buildMemSuffix(self.sign, self.size)
|
||||
|
||||
self.codeBlobs["memacc_code"] = accCode
|
||||
|
||||
# Push it out to the output files
|
||||
wbDecl = None
|
||||
if self.writeback and not self.micro:
|
||||
wbDecl = self.wbDecl
|
||||
self.emitHelper(self.base, wbDecl)
|
||||
|
||||
class LoadDouble64(LoadInst64):
|
||||
def emit(self):
|
||||
self.buildEACode()
|
||||
|
||||
# Code that actually handles the access
|
||||
if self.flavor == "fp":
|
||||
accCode = '''
|
||||
uint64_t data = cSwap(Mem_ud, isBigEndian64(xc->tcBase()));
|
||||
AA64FpDestP0_uw = (uint32_t)data;
|
||||
AA64FpDestP1_uw = 0;
|
||||
AA64FpDestP2_uw = 0;
|
||||
AA64FpDestP3_uw = 0;
|
||||
AA64FpDest2P0_uw = (data >> 32);
|
||||
AA64FpDest2P1_uw = 0;
|
||||
AA64FpDest2P2_uw = 0;
|
||||
AA64FpDest2P3_uw = 0;
|
||||
'''
|
||||
else:
|
||||
if self.sign:
|
||||
if self.size == 4:
|
||||
accCode = '''
|
||||
uint64_t data = cSwap(Mem_ud,
|
||||
isBigEndian64(xc->tcBase()));
|
||||
XDest = sext<32>((uint32_t)data);
|
||||
XDest2 = sext<32>(data >> 32);
|
||||
'''
|
||||
elif self.size == 8:
|
||||
accCode = '''
|
||||
XDest = sext<64>(Mem_tud.a);
|
||||
XDest2 = sext<64>(Mem_tud.b);
|
||||
'''
|
||||
else:
|
||||
if self.size == 4:
|
||||
accCode = '''
|
||||
uint64_t data = cSwap(Mem_ud,
|
||||
isBigEndian64(xc->tcBase()));
|
||||
XDest = (uint32_t)data;
|
||||
XDest2 = data >> 32;
|
||||
'''
|
||||
elif self.size == 8:
|
||||
accCode = '''
|
||||
XDest = Mem_tud.a;
|
||||
XDest2 = Mem_tud.b;
|
||||
'''
|
||||
self.codeBlobs["memacc_code"] = accCode
|
||||
|
||||
# Push it out to the output files
|
||||
wbDecl = None
|
||||
if self.writeback and not self.micro:
|
||||
wbDecl = self.wbDecl
|
||||
self.emitHelper(self.base, wbDecl)
|
||||
|
||||
class LoadImm64(LoadImmInst64, LoadSingle64):
|
||||
decConstBase = 'LoadStoreImm64'
|
||||
base = 'ArmISA::MemoryImm64'
|
||||
writeback = False
|
||||
post = False
|
||||
|
||||
class LoadPre64(LoadImmInst64, LoadSingle64):
|
||||
decConstBase = 'LoadStoreImm64'
|
||||
base = 'ArmISA::MemoryPreIndex64'
|
||||
writeback = True
|
||||
post = False
|
||||
|
||||
class LoadPost64(LoadImmInst64, LoadSingle64):
|
||||
decConstBase = 'LoadStoreImm64'
|
||||
base = 'ArmISA::MemoryPostIndex64'
|
||||
writeback = True
|
||||
post = True
|
||||
|
||||
class LoadReg64(LoadRegInst64, LoadSingle64):
|
||||
decConstBase = 'LoadStoreReg64'
|
||||
base = 'ArmISA::MemoryReg64'
|
||||
writeback = False
|
||||
post = False
|
||||
|
||||
class LoadRaw64(LoadRawRegInst64, LoadSingle64):
|
||||
decConstBase = 'LoadStoreRaw64'
|
||||
base = 'ArmISA::MemoryRaw64'
|
||||
writeback = False
|
||||
post = False
|
||||
|
||||
class LoadEx64(LoadRawRegInst64, LoadSingle64):
|
||||
decConstBase = 'LoadStoreEx64'
|
||||
base = 'ArmISA::MemoryEx64'
|
||||
writeback = False
|
||||
post = False
|
||||
|
||||
class LoadLit64(LoadImmInst64, LoadSingle64):
|
||||
decConstBase = 'LoadStoreLit64'
|
||||
base = 'ArmISA::MemoryLiteral64'
|
||||
writeback = False
|
||||
post = False
|
||||
|
||||
def buildLoads64(mnem, NameBase, size, sign, flavor="normal"):
|
||||
LoadImm64(mnem, NameBase + "_IMM", size, sign, flavor=flavor).emit()
|
||||
LoadPre64(mnem, NameBase + "_PRE", size, sign, flavor=flavor).emit()
|
||||
LoadPost64(mnem, NameBase + "_POST", size, sign, flavor=flavor).emit()
|
||||
LoadReg64(mnem, NameBase + "_REG", size, sign, flavor=flavor).emit()
|
||||
|
||||
buildLoads64("ldrb", "LDRB64", 1, False)
|
||||
buildLoads64("ldrsb", "LDRSBW64", 1, True)
|
||||
buildLoads64("ldrsb", "LDRSBX64", 1, True, flavor="widen")
|
||||
buildLoads64("ldrh", "LDRH64", 2, False)
|
||||
buildLoads64("ldrsh", "LDRSHW64", 2, True)
|
||||
buildLoads64("ldrsh", "LDRSHX64", 2, True, flavor="widen")
|
||||
buildLoads64("ldrsw", "LDRSW64", 4, True, flavor="widen")
|
||||
buildLoads64("ldr", "LDRW64", 4, False)
|
||||
buildLoads64("ldr", "LDRX64", 8, False)
|
||||
buildLoads64("ldr", "LDRBFP64", 1, False, flavor="fp")
|
||||
buildLoads64("ldr", "LDRHFP64", 2, False, flavor="fp")
|
||||
buildLoads64("ldr", "LDRSFP64", 4, False, flavor="fp")
|
||||
buildLoads64("ldr", "LDRDFP64", 8, False, flavor="fp")
|
||||
|
||||
LoadImm64("prfm", "PRFM64_IMM", 8, flavor="dprefetch").emit()
|
||||
LoadReg64("prfm", "PRFM64_REG", 8, flavor="dprefetch").emit()
|
||||
LoadLit64("prfm", "PRFM64_LIT", 8, literal=True, flavor="dprefetch").emit()
|
||||
LoadImm64("prfum", "PRFUM64_IMM", 8, flavor="dprefetch").emit()
|
||||
|
||||
LoadImm64("ldurb", "LDURB64_IMM", 1, False).emit()
|
||||
LoadImm64("ldursb", "LDURSBW64_IMM", 1, True).emit()
|
||||
LoadImm64("ldursb", "LDURSBX64_IMM", 1, True, flavor="widen").emit()
|
||||
LoadImm64("ldurh", "LDURH64_IMM", 2, False).emit()
|
||||
LoadImm64("ldursh", "LDURSHW64_IMM", 2, True).emit()
|
||||
LoadImm64("ldursh", "LDURSHX64_IMM", 2, True, flavor="widen").emit()
|
||||
LoadImm64("ldursw", "LDURSW64_IMM", 4, True, flavor="widen").emit()
|
||||
LoadImm64("ldur", "LDURW64_IMM", 4, False).emit()
|
||||
LoadImm64("ldur", "LDURX64_IMM", 8, False).emit()
|
||||
LoadImm64("ldur", "LDURBFP64_IMM", 1, flavor="fp").emit()
|
||||
LoadImm64("ldur", "LDURHFP64_IMM", 2, flavor="fp").emit()
|
||||
LoadImm64("ldur", "LDURSFP64_IMM", 4, flavor="fp").emit()
|
||||
LoadImm64("ldur", "LDURDFP64_IMM", 8, flavor="fp").emit()
|
||||
|
||||
LoadImm64("ldtrb", "LDTRB64_IMM", 1, False, True).emit()
|
||||
LoadImm64("ldtrsb", "LDTRSBW64_IMM", 1, True, True).emit()
|
||||
LoadImm64("ldtrsb", "LDTRSBX64_IMM", 1, True, True, flavor="widen").emit()
|
||||
LoadImm64("ldtrh", "LDTRH64_IMM", 2, False, True).emit()
|
||||
LoadImm64("ldtrsh", "LDTRSHW64_IMM", 2, True, True).emit()
|
||||
LoadImm64("ldtrsh", "LDTRSHX64_IMM", 2, True, True, flavor="widen").emit()
|
||||
LoadImm64("ldtrsw", "LDTRSW64_IMM", 4, True, flavor="widen").emit()
|
||||
LoadImm64("ldtr", "LDTRW64_IMM", 4, False, True).emit()
|
||||
LoadImm64("ldtr", "LDTRX64_IMM", 8, False, True).emit()
|
||||
|
||||
LoadLit64("ldrsw", "LDRSWL64_LIT", 4, True, \
|
||||
literal=True, flavor="widen").emit()
|
||||
LoadLit64("ldr", "LDRWL64_LIT", 4, False, literal=True).emit()
|
||||
LoadLit64("ldr", "LDRXL64_LIT", 8, False, literal=True).emit()
|
||||
LoadLit64("ldr", "LDRSFP64_LIT", 4, literal=True, flavor="fp").emit()
|
||||
LoadLit64("ldr", "LDRDFP64_LIT", 8, literal=True, flavor="fp").emit()
|
||||
|
||||
LoadRaw64("ldar", "LDARX64", 8, flavor="acquire").emit()
|
||||
LoadRaw64("ldar", "LDARW64", 4, flavor="acquire").emit()
|
||||
LoadRaw64("ldarh", "LDARH64", 2, flavor="acquire").emit()
|
||||
LoadRaw64("ldarb", "LDARB64", 1, flavor="acquire").emit()
|
||||
|
||||
LoadEx64("ldaxr", "LDAXRX64", 8, flavor="acex").emit()
|
||||
LoadEx64("ldaxr", "LDAXRW64", 4, flavor="acex").emit()
|
||||
LoadEx64("ldaxrh", "LDAXRH64", 2, flavor="acex").emit()
|
||||
LoadEx64("ldaxrb", "LDAXRB64", 1, flavor="acex").emit()
|
||||
|
||||
LoadEx64("ldxr", "LDXRX64", 8, flavor="exclusive").emit()
|
||||
LoadEx64("ldxr", "LDXRW64", 4, flavor="exclusive").emit()
|
||||
LoadEx64("ldxrh", "LDXRH64", 2, flavor="exclusive").emit()
|
||||
LoadEx64("ldxrb", "LDXRB64", 1, flavor="exclusive").emit()
|
||||
|
||||
class LoadImmU64(LoadImm64):
|
||||
decConstBase = 'LoadStoreImmU64'
|
||||
micro = True
|
||||
|
||||
class LoadImmDU64(LoadImmInst64, LoadDouble64):
|
||||
decConstBase = 'LoadStoreImmDU64'
|
||||
base = 'ArmISA::MemoryDImm64'
|
||||
micro = True
|
||||
post = False
|
||||
writeback = False
|
||||
|
||||
class LoadImmDouble64(LoadImmInst64, LoadDouble64):
|
||||
decConstBase = 'LoadStoreImmDU64'
|
||||
base = 'ArmISA::MemoryDImm64'
|
||||
micro = False
|
||||
post = False
|
||||
writeback = False
|
||||
|
||||
class LoadRegU64(LoadReg64):
|
||||
decConstBase = 'LoadStoreRegU64'
|
||||
micro = True
|
||||
|
||||
class LoadLitU64(LoadLit64):
|
||||
decConstBase = 'LoadStoreLitU64'
|
||||
micro = True
|
||||
|
||||
LoadImmDouble64("ldaxp", "LDAXPW64", 4, flavor="acexp").emit()
|
||||
LoadImmDouble64("ldaxp", "LDAXPX64", 8, flavor="acexp").emit()
|
||||
LoadImmDouble64("ldxp", "LDXPW64", 4, flavor="exp").emit()
|
||||
LoadImmDouble64("ldxp", "LDXPX64", 8, flavor="exp").emit()
|
||||
|
||||
LoadImmU64("ldrxi_uop", "MicroLdrXImmUop", 8).emit()
|
||||
LoadRegU64("ldrxr_uop", "MicroLdrXRegUop", 8).emit()
|
||||
LoadLitU64("ldrxl_uop", "MicroLdrXLitUop", 8, literal=True).emit()
|
||||
LoadImmU64("ldrfpxi_uop", "MicroLdrFpXImmUop", 8, flavor="fp").emit()
|
||||
LoadRegU64("ldrfpxr_uop", "MicroLdrFpXRegUop", 8, flavor="fp").emit()
|
||||
LoadLitU64("ldrfpxl_uop", "MicroLdrFpXLitUop", 8, literal=True,
|
||||
flavor="fp").emit()
|
||||
LoadImmU64("ldrqbfpxi_uop", "MicroLdrQBFpXImmUop",
|
||||
16, flavor="fp", top = False).emit()
|
||||
LoadRegU64("ldrqbfpxr_uop", "MicroLdrQBFpXRegUop",
|
||||
16, flavor="fp", top = False).emit()
|
||||
LoadLitU64("ldrqbfpxl_uop", "MicroLdrQBFpXLitUop",
|
||||
16, literal=True, flavor="fp", top = False).emit()
|
||||
LoadImmU64("ldrqtfpxi_uop", "MicroLdrQTFpXImmUop",
|
||||
16, flavor="fp", top = True).emit()
|
||||
LoadRegU64("ldrqtfpxr_uop", "MicroLdrQTFpXRegUop",
|
||||
16, flavor="fp", top = True).emit()
|
||||
LoadLitU64("ldrqtfpxl_uop", "MicroLdrQTFpXLitUop",
|
||||
16, literal=True, flavor="fp", top = True).emit()
|
||||
LoadImmDU64("ldrduxi_uop", "MicroLdrDUXImmUop", 4, sign=False).emit()
|
||||
LoadImmDU64("ldrdsxi_uop", "MicroLdrDSXImmUop", 4, sign=True).emit()
|
||||
LoadImmDU64("ldrdfpxi_uop", "MicroLdrDFpXImmUop", 4, flavor="fp").emit()
|
||||
}};
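buildLoads64 is the main fan-out point of this file: each call emits the immediate, pre-indexed, post-indexed and register-offset forms of one load. A usage sketch, spelled out from the definition above (nothing here is new machinery):

# Sketch: what a single buildLoads64 call expands to.
#   buildLoads64("ldrb", "LDRB64", 1, False)
# is equivalent to writing the four variants by hand:
#   LoadImm64("ldrb",  "LDRB64_IMM",  1, False).emit()
#   LoadPre64("ldrb",  "LDRB64_PRE",  1, False).emit()
#   LoadPost64("ldrb", "LDRB64_POST", 1, False).emit()
#   LoadReg64("ldrb",  "LDRB64_REG",  1, False).emit()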
|
|
@ -1,5 +1,5 @@
|
|||
//
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010, 2012-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -58,6 +58,7 @@ let {{
|
|||
armCode = '''
|
||||
PseudoInst::arm(xc->tcBase());
|
||||
'''
|
||||
|
||||
armIop = InstObjParams("arm", "Arm", "PredOp",
|
||||
{ "code": armCode,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -69,6 +70,7 @@ let {{
|
|||
quiesceCode = '''
|
||||
PseudoInst::quiesce(xc->tcBase());
|
||||
'''
|
||||
|
||||
quiesceIop = InstObjParams("quiesce", "Quiesce", "PredOp",
|
||||
{ "code": quiesceCode,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -81,6 +83,10 @@ let {{
|
|||
PseudoInst::quiesceNs(xc->tcBase(), join32to64(R1, R0));
|
||||
'''
|
||||
|
||||
quiesceNsCode64 = '''
|
||||
PseudoInst::quiesceNs(xc->tcBase(), X0);
|
||||
'''
|
||||
|
||||
quiesceNsIop = InstObjParams("quiesceNs", "QuiesceNs", "PredOp",
|
||||
{ "code": quiesceNsCode,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -89,10 +95,22 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(quiesceNsIop)
|
||||
exec_output += QuiescePredOpExecute.subst(quiesceNsIop)
|
||||
|
||||
quiesceNsIop = InstObjParams("quiesceNs", "QuiesceNs64", "PredOp",
|
||||
{ "code": quiesceNsCode64,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative", "IsQuiesce"])
|
||||
header_output += BasicDeclare.subst(quiesceNsIop)
|
||||
decoder_output += BasicConstructor.subst(quiesceNsIop)
|
||||
exec_output += QuiescePredOpExecute.subst(quiesceNsIop)
|
||||
|
||||
quiesceCyclesCode = '''
|
||||
PseudoInst::quiesceCycles(xc->tcBase(), join32to64(R1, R0));
|
||||
'''
|
||||
|
||||
quiesceCyclesCode64 = '''
|
||||
PseudoInst::quiesceCycles(xc->tcBase(), X0);
|
||||
'''
|
||||
|
||||
quiesceCyclesIop = InstObjParams("quiesceCycles", "QuiesceCycles", "PredOp",
|
||||
{ "code": quiesceCyclesCode,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -101,12 +119,23 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(quiesceCyclesIop)
|
||||
exec_output += QuiescePredOpExecute.subst(quiesceCyclesIop)
|
||||
|
||||
quiesceCyclesIop = InstObjParams("quiesceCycles", "QuiesceCycles64", "PredOp",
|
||||
{ "code": quiesceCyclesCode64,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative", "IsQuiesce", "IsUnverifiable"])
|
||||
header_output += BasicDeclare.subst(quiesceCyclesIop)
|
||||
decoder_output += BasicConstructor.subst(quiesceCyclesIop)
|
||||
exec_output += QuiescePredOpExecute.subst(quiesceCyclesIop)
|
||||
|
||||
quiesceTimeCode = '''
|
||||
uint64_t qt_val = PseudoInst::quiesceTime(xc->tcBase());
|
||||
R0 = bits(qt_val, 31, 0);
|
||||
R1 = bits(qt_val, 63, 32);
|
||||
'''
|
||||
|
||||
quiesceTimeCode64 = '''
|
||||
X0 = PseudoInst::quiesceTime(xc->tcBase());
|
||||
'''
|
||||
quiesceTimeIop = InstObjParams("quiesceTime", "QuiesceTime", "PredOp",
|
||||
{ "code": quiesceTimeCode,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -115,12 +144,23 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(quiesceTimeIop)
|
||||
exec_output += PredOpExecute.subst(quiesceTimeIop)
|
||||
|
||||
quiesceTimeIop = InstObjParams("quiesceTime", "QuiesceTime64", "PredOp",
|
||||
{ "code": quiesceTimeCode64,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative", "IsUnverifiable"])
|
||||
header_output += BasicDeclare.subst(quiesceTimeIop)
|
||||
decoder_output += BasicConstructor.subst(quiesceTimeIop)
|
||||
exec_output += PredOpExecute.subst(quiesceTimeIop)
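The pattern in this hunk repeats for every M5 pseudo-op: the AArch32 encodings pass 64-bit arguments split across register pairs, while the new 64-bit variants read them directly from X0/X1. A minimal illustrative model of that convention (join32to64 is the existing helper used above; the Python is only a sketch, not part of the patch):

# Illustrative model of the argument-passing difference:
def join32to64(hi, lo):
    # AArch32 pseudo-ops rebuild a 64-bit argument from R1:R0 (or R3:R2).
    return (hi << 32) | lo

aarch32_arg = join32to64(0x12345678, 0x9abcdef0)   # from R1, R0
aarch64_arg = 0x123456789abcdef0                   # read straight from X0
assert aarch32_arg == aarch64_arg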
|
||||
|
||||
rpnsCode = '''
|
||||
uint64_t rpns_val = PseudoInst::rpns(xc->tcBase());
|
||||
R0 = bits(rpns_val, 31, 0);
|
||||
R1 = bits(rpns_val, 63, 32);
|
||||
'''
|
||||
|
||||
rpnsCode64 = '''
|
||||
X0 = PseudoInst::rpns(xc->tcBase());
|
||||
'''
|
||||
rpnsIop = InstObjParams("rpns", "Rpns", "PredOp",
|
||||
{ "code": rpnsCode,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -129,10 +169,22 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(rpnsIop)
|
||||
exec_output += PredOpExecute.subst(rpnsIop)
|
||||
|
||||
rpnsIop = InstObjParams("rpns", "Rpns64", "PredOp",
|
||||
{ "code": rpnsCode64,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative", "IsUnverifiable"])
|
||||
header_output += BasicDeclare.subst(rpnsIop)
|
||||
decoder_output += BasicConstructor.subst(rpnsIop)
|
||||
exec_output += PredOpExecute.subst(rpnsIop)
|
||||
|
||||
wakeCpuCode = '''
|
||||
PseudoInst::wakeCPU(xc->tcBase(), join32to64(R1,R0));
|
||||
'''
|
||||
|
||||
wakeCpuCode64 = '''
|
||||
PseudoInst::wakeCPU(xc->tcBase(), X0);
|
||||
'''
|
||||
|
||||
wakeCPUIop = InstObjParams("wakeCPU", "WakeCPU", "PredOp",
|
||||
{ "code": wakeCpuCode,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -141,6 +193,14 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(wakeCPUIop)
|
||||
exec_output += PredOpExecute.subst(wakeCPUIop)
|
||||
|
||||
wakeCPUIop = InstObjParams("wakeCPU", "WakeCPU64", "PredOp",
|
||||
{ "code": wakeCpuCode64,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative", "IsUnverifiable"])
|
||||
header_output += BasicDeclare.subst(wakeCPUIop)
|
||||
decoder_output += BasicConstructor.subst(wakeCPUIop)
|
||||
exec_output += PredOpExecute.subst(wakeCPUIop)
|
||||
|
||||
deprecated_ivlbIop = InstObjParams("deprecated_ivlb", "Deprecated_ivlb", "PredOp",
|
||||
{ "code": '''warn_once("Obsolete M5 ivlb instruction encountered.\\n");''',
|
||||
"predicate_test": predicateTest })
|
||||
|
@ -171,6 +231,11 @@ let {{
|
|||
m5exit_code = '''
|
||||
PseudoInst::m5exit(xc->tcBase(), join32to64(R1, R0));
|
||||
'''
|
||||
|
||||
m5exit_code64 = '''
|
||||
PseudoInst::m5exit(xc->tcBase(), X0);
|
||||
'''
|
||||
|
||||
m5exitIop = InstObjParams("m5exit", "M5exit", "PredOp",
|
||||
{ "code": m5exit_code,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -190,6 +255,14 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(m5failIop)
|
||||
exec_output += PredOpExecute.subst(m5failIop)
|
||||
|
||||
m5exitIop = InstObjParams("m5exit", "M5exit64", "PredOp",
|
||||
{ "code": m5exit_code64,
|
||||
"predicate_test": predicateTest },
|
||||
["No_OpClass", "IsNonSpeculative"])
|
||||
header_output += BasicDeclare.subst(m5exitIop)
|
||||
decoder_output += BasicConstructor.subst(m5exitIop)
|
||||
exec_output += PredOpExecute.subst(m5exitIop)
|
||||
|
||||
loadsymbolCode = '''
|
||||
PseudoInst::loadsymbol(xc->tcBase());
|
||||
'''
|
||||
|
@ -208,6 +281,10 @@ let {{
|
|||
R1 = bits(ip_val, 63, 32);
|
||||
'''
|
||||
|
||||
initparamCode64 = '''
|
||||
X0 = PseudoInst::initParam(xc->tcBase());
|
||||
'''
|
||||
|
||||
initparamIop = InstObjParams("initparam", "Initparam", "PredOp",
|
||||
{ "code": initparamCode,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -216,10 +293,21 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(initparamIop)
|
||||
exec_output += PredOpExecute.subst(initparamIop)
|
||||
|
||||
initparamIop = InstObjParams("initparam", "Initparam64", "PredOp",
|
||||
{ "code": initparamCode64,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative"])
|
||||
header_output += BasicDeclare.subst(initparamIop)
|
||||
decoder_output += BasicConstructor.subst(initparamIop)
|
||||
exec_output += PredOpExecute.subst(initparamIop)
|
||||
|
||||
resetstats_code = '''
|
||||
PseudoInst::resetstats(xc->tcBase(), join32to64(R1, R0), join32to64(R3, R2));
|
||||
'''
|
||||
|
||||
resetstats_code64 = '''
|
||||
PseudoInst::resetstats(xc->tcBase(), X0, X1);
|
||||
'''
|
||||
resetstatsIop = InstObjParams("resetstats", "Resetstats", "PredOp",
|
||||
{ "code": resetstats_code,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -228,9 +316,22 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(resetstatsIop)
|
||||
exec_output += PredOpExecute.subst(resetstatsIop)
|
||||
|
||||
resetstatsIop = InstObjParams("resetstats", "Resetstats64", "PredOp",
|
||||
{ "code": resetstats_code64,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative"])
|
||||
header_output += BasicDeclare.subst(resetstatsIop)
|
||||
decoder_output += BasicConstructor.subst(resetstatsIop)
|
||||
exec_output += PredOpExecute.subst(resetstatsIop)
|
||||
|
||||
dumpstats_code = '''
|
||||
PseudoInst::dumpstats(xc->tcBase(), join32to64(R1, R0), join32to64(R3, R2));
|
||||
'''
|
||||
|
||||
dumpstats_code64 = '''
|
||||
PseudoInst::dumpstats(xc->tcBase(), X0, X1);
|
||||
'''
|
||||
|
||||
dumpstatsIop = InstObjParams("dumpstats", "Dumpstats", "PredOp",
|
||||
{ "code": dumpstats_code,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -239,9 +340,22 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(dumpstatsIop)
|
||||
exec_output += PredOpExecute.subst(dumpstatsIop)
|
||||
|
||||
dumpstatsIop = InstObjParams("dumpstats", "Dumpstats64", "PredOp",
|
||||
{ "code": dumpstats_code64,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative"])
|
||||
header_output += BasicDeclare.subst(dumpstatsIop)
|
||||
decoder_output += BasicConstructor.subst(dumpstatsIop)
|
||||
exec_output += PredOpExecute.subst(dumpstatsIop)
|
||||
|
||||
dumpresetstats_code = '''
|
||||
PseudoInst::dumpresetstats(xc->tcBase(), join32to64(R1, R0), join32to64(R3, R2));
|
||||
'''
|
||||
|
||||
dumpresetstats_code64 = '''
|
||||
PseudoInst::dumpresetstats(xc->tcBase(), X0, X1);
|
||||
'''
|
||||
|
||||
dumpresetstatsIop = InstObjParams("dumpresetstats", "Dumpresetstats", "PredOp",
|
||||
{ "code": dumpresetstats_code,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -250,9 +364,22 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(dumpresetstatsIop)
|
||||
exec_output += PredOpExecute.subst(dumpresetstatsIop)
|
||||
|
||||
dumpresetstatsIop = InstObjParams("dumpresetstats", "Dumpresetstats64", "PredOp",
|
||||
{ "code": dumpresetstats_code64,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative"])
|
||||
header_output += BasicDeclare.subst(dumpresetstatsIop)
|
||||
decoder_output += BasicConstructor.subst(dumpresetstatsIop)
|
||||
exec_output += PredOpExecute.subst(dumpresetstatsIop)
|
||||
|
||||
m5checkpoint_code = '''
|
||||
PseudoInst::m5checkpoint(xc->tcBase(), join32to64(R1, R0), join32to64(R3, R2));
|
||||
'''
|
||||
|
||||
m5checkpoint_code64 = '''
|
||||
PseudoInst::m5checkpoint(xc->tcBase(), X0, X1);
|
||||
'''
|
||||
|
||||
m5checkpointIop = InstObjParams("m5checkpoint", "M5checkpoint", "PredOp",
|
||||
{ "code": m5checkpoint_code,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -261,11 +388,27 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(m5checkpointIop)
|
||||
exec_output += PredOpExecute.subst(m5checkpointIop)
|
||||
|
||||
m5checkpointIop = InstObjParams("m5checkpoint", "M5checkpoint64", "PredOp",
|
||||
{ "code": m5checkpoint_code64,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative", "IsUnverifiable"])
|
||||
header_output += BasicDeclare.subst(m5checkpointIop)
|
||||
decoder_output += BasicConstructor.subst(m5checkpointIop)
|
||||
exec_output += PredOpExecute.subst(m5checkpointIop)
|
||||
|
||||
m5readfileCode = '''
|
||||
int n = 4;
|
||||
uint64_t offset = getArgument(xc->tcBase(), n, sizeof(uint64_t), false);
|
||||
R0 = PseudoInst::readfile(xc->tcBase(), R0, join32to64(R3,R2), offset);
|
||||
'''
|
||||
|
||||
m5readfileCode64 = '''
|
||||
int n = 4;
|
||||
uint64_t offset = getArgument(xc->tcBase(), n, sizeof(uint64_t), false);
|
||||
n = 6;
|
||||
X0 = PseudoInst::readfile(xc->tcBase(), (uint32_t)X0, X1, offset);
|
||||
'''
|
||||
|
||||
m5readfileIop = InstObjParams("m5readfile", "M5readfile", "PredOp",
|
||||
{ "code": m5readfileCode,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -274,6 +417,14 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(m5readfileIop)
|
||||
exec_output += PredOpExecute.subst(m5readfileIop)
|
||||
|
||||
m5readfileIop = InstObjParams("m5readfile", "M5readfile64", "PredOp",
|
||||
{ "code": m5readfileCode64,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative", "IsUnverifiable"])
|
||||
header_output += BasicDeclare.subst(m5readfileIop)
|
||||
decoder_output += BasicConstructor.subst(m5readfileIop)
|
||||
exec_output += PredOpExecute.subst(m5readfileIop)
|
||||
|
||||
m5writefileCode = '''
|
||||
int n = 4;
|
||||
uint64_t offset = getArgument(xc->tcBase(), n, sizeof(uint64_t), false);
|
||||
|
@ -282,6 +433,16 @@ let {{
|
|||
R0 = PseudoInst::writefile(xc->tcBase(), R0, join32to64(R3,R2), offset,
|
||||
filenameAddr);
|
||||
'''
|
||||
|
||||
m5writefileCode64 = '''
|
||||
int n = 4;
|
||||
uint64_t offset = getArgument(xc->tcBase(), n, sizeof(uint64_t), false);
|
||||
n = 6;
|
||||
Addr filenameAddr = getArgument(xc->tcBase(), n, sizeof(Addr), false);
|
||||
X0 = PseudoInst::writefile(xc->tcBase(), (uint32_t)X0, X1, offset,
|
||||
filenameAddr);
|
||||
'''
|
||||
|
||||
m5writefileIop = InstObjParams("m5writefile", "M5writefile", "PredOp",
|
||||
{ "code": m5writefileCode,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -290,6 +451,14 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(m5writefileIop)
|
||||
exec_output += PredOpExecute.subst(m5writefileIop)
|
||||
|
||||
m5writefileIop = InstObjParams("m5writefile", "M5writefile64", "PredOp",
|
||||
{ "code": m5writefileCode64,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative"])
|
||||
header_output += BasicDeclare.subst(m5writefileIop)
|
||||
decoder_output += BasicConstructor.subst(m5writefileIop)
|
||||
exec_output += PredOpExecute.subst(m5writefileIop)
|
||||
|
||||
m5breakIop = InstObjParams("m5break", "M5break", "PredOp",
|
||||
{ "code": "PseudoInst::debugbreak(xc->tcBase());",
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -309,6 +478,9 @@ let {{
|
|||
m5addsymbolCode = '''
|
||||
PseudoInst::addsymbol(xc->tcBase(), join32to64(R1, R0), R2);
|
||||
'''
|
||||
m5addsymbolCode64 = '''
|
||||
PseudoInst::addsymbol(xc->tcBase(), X0, (uint32_t)X1);
|
||||
'''
|
||||
m5addsymbolIop = InstObjParams("m5addsymbol", "M5addsymbol", "PredOp",
|
||||
{ "code": m5addsymbolCode,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -317,8 +489,17 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(m5addsymbolIop)
|
||||
exec_output += PredOpExecute.subst(m5addsymbolIop)
|
||||
|
||||
m5addsymbolIop = InstObjParams("m5addsymbol", "M5addsymbol64", "PredOp",
|
||||
{ "code": m5addsymbolCode64,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative"])
|
||||
header_output += BasicDeclare.subst(m5addsymbolIop)
|
||||
decoder_output += BasicConstructor.subst(m5addsymbolIop)
|
||||
exec_output += PredOpExecute.subst(m5addsymbolIop)
|
||||
|
||||
m5panicCode = '''panic("M5 panic instruction called at pc=%#x.",
|
||||
xc->pcState().pc());'''
|
||||
|
||||
m5panicIop = InstObjParams("m5panic", "M5panic", "PredOp",
|
||||
{ "code": m5panicCode,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -332,6 +513,13 @@ let {{
|
|||
join32to64(R1, R0),
|
||||
join32to64(R3, R2)
|
||||
);'''
|
||||
|
||||
m5workbeginCode64 = '''PseudoInst::workbegin(
|
||||
xc->tcBase(),
|
||||
X0,
|
||||
X1
|
||||
);'''
|
||||
|
||||
m5workbeginIop = InstObjParams("m5workbegin", "M5workbegin", "PredOp",
|
||||
{ "code": m5workbeginCode,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -340,11 +528,26 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(m5workbeginIop)
|
||||
exec_output += PredOpExecute.subst(m5workbeginIop)
|
||||
|
||||
m5workbeginIop = InstObjParams("m5workbegin", "M5workbegin64", "PredOp",
|
||||
{ "code": m5workbeginCode64,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative"])
|
||||
header_output += BasicDeclare.subst(m5workbeginIop)
|
||||
decoder_output += BasicConstructor.subst(m5workbeginIop)
|
||||
exec_output += PredOpExecute.subst(m5workbeginIop)
|
||||
|
||||
m5workendCode = '''PseudoInst::workend(
|
||||
xc->tcBase(),
|
||||
join32to64(R1, R0),
|
||||
join32to64(R3, R2)
|
||||
);'''
|
||||
|
||||
m5workendCode64 = '''PseudoInst::workend(
|
||||
xc->tcBase(),
|
||||
X0,
|
||||
X1
|
||||
);'''
|
||||
|
||||
m5workendIop = InstObjParams("m5workend", "M5workend", "PredOp",
|
||||
{ "code": m5workendCode,
|
||||
"predicate_test": predicateTest },
|
||||
|
@ -353,4 +556,11 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(m5workendIop)
|
||||
exec_output += PredOpExecute.subst(m5workendIop)
|
||||
|
||||
m5workendIop = InstObjParams("m5workend", "M5workend64", "PredOp",
|
||||
{ "code": m5workendCode64,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative"])
|
||||
header_output += BasicDeclare.subst(m5workendIop)
|
||||
decoder_output += BasicConstructor.subst(m5workendIop)
|
||||
exec_output += PredOpExecute.subst(m5workendIop)
|
||||
}};
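Every 64-bit pseudo-op in this file is registered with the same four lines: build an InstObjParams, then substitute it into the declare, constructor and execute templates. A hypothetical helper, sketched below, shows how that boilerplate could be factored; build64PseudoOp is not part of the patch, only an illustration of the repeated pattern.

# Hypothetical sketch (not in the patch): one helper covering the repeated
# declare/construct/execute registration of a 64-bit pseudo-op variant.
def build64PseudoOp(mnem, Name, code, extraFlags):
    global header_output, decoder_output, exec_output
    iop = InstObjParams(mnem, Name, "PredOp",
                        {"code": code, "predicate_test": predicateTest},
                        extraFlags)
    header_output += BasicDeclare.subst(iop)
    decoder_output += BasicConstructor.subst(iop)
    exec_output += PredOpExecute.subst(iop)

# e.g. build64PseudoOp("dumpstats", "Dumpstats64", dumpstats_code64,
#                      ["IsNonSpeculative"])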
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -91,7 +91,8 @@ let {{
|
|||
SCTLR sctlr = Sctlr;
|
||||
|
||||
CPSR new_cpsr =
|
||||
cpsrWriteByInstr(old_cpsr, Spsr, 0xF, true, sctlr.nmfi);
|
||||
cpsrWriteByInstr(old_cpsr, Spsr, Scr, Nsacr, 0xF, true,
|
||||
sctlr.nmfi, xc->tcBase());
|
||||
Cpsr = ~CondCodesMask & new_cpsr;
|
||||
CondCodesNZ = new_cpsr.nz;
|
||||
CondCodesC = new_cpsr.c;
|
||||
|
@ -158,8 +159,8 @@ let {{
|
|||
|
||||
header_output = decoder_output = exec_output = ''
|
||||
|
||||
loadIops = (microLdrUopIop, microLdrRetUopIop,
|
||||
microLdrFpUopIop, microLdrDBFpUopIop, microLdrDTFpUopIop)
|
||||
loadIops = (microLdrUopIop, microLdrRetUopIop, microLdrFpUopIop,
|
||||
microLdrDBFpUopIop, microLdrDTFpUopIop)
|
||||
storeIops = (microStrUopIop, microStrFpUopIop,
|
||||
microStrDBFpUopIop, microStrDTFpUopIop)
|
||||
for iop in loadIops + storeIops:
|
||||
|
@ -178,7 +179,7 @@ let {{
|
|||
let {{
|
||||
exec_output = header_output = ''
|
||||
|
||||
eaCode = 'EA = URa + imm;'
|
||||
eaCode = 'EA = XURa + imm;'
|
||||
|
||||
for size in (1, 2, 3, 4, 6, 8, 12, 16):
|
||||
# Set up the memory access.
|
||||
|
@ -592,6 +593,26 @@ let {{
|
|||
URa = URb + shift_rm_imm(URc, shiftAmt, shiftType, OptShiftRmCondCodesC);
|
||||
'''
|
||||
|
||||
microAddXiUopIop = InstObjParams('addxi_uop', 'MicroAddXiUop',
|
||||
'MicroIntImmXOp',
|
||||
'XURa = XURb + imm;',
|
||||
['IsMicroop'])
|
||||
|
||||
microAddXiSpAlignUopIop = InstObjParams('addxi_uop', 'MicroAddXiSpAlignUop',
|
||||
'MicroIntImmXOp', '''
|
||||
if (isSP((IntRegIndex) urb) && bits(XURb, 3, 0) &&
|
||||
SPAlignmentCheckEnabled(xc->tcBase())) {
|
||||
return new SPAlignmentFault();
|
||||
}
|
||||
XURa = XURb + imm;
|
||||
''', ['IsMicroop'])
|
||||
|
||||
microAddXERegUopIop = InstObjParams('addxr_uop', 'MicroAddXERegUop',
|
||||
'MicroIntRegXOp',
|
||||
'XURa = XURb + ' + \
|
||||
'extendReg64(XURc, type, shiftAmt, 64);',
|
||||
['IsMicroop'])
|
||||
|
||||
microAddUopIop = InstObjParams('add_uop', 'MicroAddUop',
|
||||
'MicroIntRegOp',
|
||||
{'code': microAddUopCode,
|
||||
|
@ -604,6 +625,11 @@ let {{
|
|||
'predicate_test': predicateTest},
|
||||
['IsMicroop'])
|
||||
|
||||
microSubXiUopIop = InstObjParams('subxi_uop', 'MicroSubXiUop',
|
||||
'MicroIntImmXOp',
|
||||
'XURa = XURb - imm;',
|
||||
['IsMicroop'])
|
||||
|
||||
microSubUopCode = '''
|
||||
URa = URb - shift_rm_imm(URc, shiftAmt, shiftType, OptShiftRmCondCodesC);
|
||||
'''
|
||||
|
@ -631,8 +657,8 @@ let {{
|
|||
SCTLR sctlr = Sctlr;
|
||||
pNPC = URa;
|
||||
CPSR new_cpsr =
|
||||
cpsrWriteByInstr(cpsrOrCondCodes, URb,
|
||||
0xF, true, sctlr.nmfi);
|
||||
cpsrWriteByInstr(cpsrOrCondCodes, URb, Scr, Nsacr,
|
||||
0xF, true, sctlr.nmfi, xc->tcBase());
|
||||
Cpsr = ~CondCodesMask & new_cpsr;
|
||||
NextThumb = new_cpsr.t;
|
||||
NextJazelle = new_cpsr.j;
|
||||
|
@ -651,25 +677,37 @@ let {{
|
|||
['IsMicroop'])
|
||||
|
||||
header_output = MicroIntImmDeclare.subst(microAddiUopIop) + \
|
||||
MicroIntImmDeclare.subst(microAddXiUopIop) + \
|
||||
MicroIntImmDeclare.subst(microAddXiSpAlignUopIop) + \
|
||||
MicroIntImmDeclare.subst(microSubiUopIop) + \
|
||||
MicroIntImmDeclare.subst(microSubXiUopIop) + \
|
||||
MicroIntRegDeclare.subst(microAddUopIop) + \
|
||||
MicroIntRegDeclare.subst(microSubUopIop) + \
|
||||
MicroIntXERegDeclare.subst(microAddXERegUopIop) + \
|
||||
MicroIntMovDeclare.subst(microUopRegMovIop) + \
|
||||
MicroIntMovDeclare.subst(microUopRegMovRetIop) + \
|
||||
MicroSetPCCPSRDeclare.subst(microUopSetPCCPSRIop)
|
||||
|
||||
decoder_output = MicroIntImmConstructor.subst(microAddiUopIop) + \
|
||||
MicroIntImmXConstructor.subst(microAddXiUopIop) + \
|
||||
MicroIntImmXConstructor.subst(microAddXiSpAlignUopIop) + \
|
||||
MicroIntImmConstructor.subst(microSubiUopIop) + \
|
||||
MicroIntImmXConstructor.subst(microSubXiUopIop) + \
|
||||
MicroIntRegConstructor.subst(microAddUopIop) + \
|
||||
MicroIntRegConstructor.subst(microSubUopIop) + \
|
||||
MicroIntXERegConstructor.subst(microAddXERegUopIop) + \
|
||||
MicroIntMovConstructor.subst(microUopRegMovIop) + \
|
||||
MicroIntMovConstructor.subst(microUopRegMovRetIop) + \
|
||||
MicroSetPCCPSRConstructor.subst(microUopSetPCCPSRIop)
|
||||
|
||||
exec_output = PredOpExecute.subst(microAddiUopIop) + \
|
||||
BasicExecute.subst(microAddXiUopIop) + \
|
||||
BasicExecute.subst(microAddXiSpAlignUopIop) + \
|
||||
PredOpExecute.subst(microSubiUopIop) + \
|
||||
BasicExecute.subst(microSubXiUopIop) + \
|
||||
PredOpExecute.subst(microAddUopIop) + \
|
||||
PredOpExecute.subst(microSubUopIop) + \
|
||||
BasicExecute.subst(microAddXERegUopIop) + \
|
||||
PredOpExecute.subst(microUopRegMovIop) + \
|
||||
PredOpExecute.subst(microUopRegMovRetIop) + \
|
||||
PredOpExecute.subst(microUopSetPCCPSRIop)
|
||||
|
@ -681,6 +719,25 @@ let {{
|
|||
header_output = MacroMemDeclare.subst(iop)
|
||||
decoder_output = MacroMemConstructor.subst(iop)
|
||||
|
||||
iop = InstObjParams("ldpstp", "LdpStp", 'PairMemOp', "", [])
|
||||
header_output += PairMemDeclare.subst(iop)
|
||||
decoder_output += PairMemConstructor.subst(iop)
|
||||
|
||||
iopImm = InstObjParams("bigfpmemimm", "BigFpMemImm", "BigFpMemImmOp", "")
|
||||
iopPre = InstObjParams("bigfpmempre", "BigFpMemPre", "BigFpMemPreOp", "")
|
||||
iopPost = InstObjParams("bigfpmempost", "BigFpMemPost", "BigFpMemPostOp", "")
|
||||
for iop in (iopImm, iopPre, iopPost):
|
||||
header_output += BigFpMemImmDeclare.subst(iop)
|
||||
decoder_output += BigFpMemImmConstructor.subst(iop)
|
||||
|
||||
iop = InstObjParams("bigfpmemreg", "BigFpMemReg", "BigFpMemRegOp", "")
|
||||
header_output += BigFpMemRegDeclare.subst(iop)
|
||||
decoder_output += BigFpMemRegConstructor.subst(iop)
|
||||
|
||||
iop = InstObjParams("bigfpmemlit", "BigFpMemLit", "BigFpMemLitOp", "")
|
||||
header_output += BigFpMemLitDeclare.subst(iop)
|
||||
decoder_output += BigFpMemLitConstructor.subst(iop)
|
||||
|
||||
iop = InstObjParams("vldmult", "VldMult", 'VldMultOp', "", [])
|
||||
header_output += VMemMultDeclare.subst(iop)
|
||||
decoder_output += VMemMultConstructor.subst(iop)
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010-2012 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -48,8 +48,8 @@ let {{
|
|||
self.constructTemplate = eval(self.decConstBase + 'Constructor')
|
||||
|
||||
def fillTemplates(self, name, Name, codeBlobs, memFlags, instFlags,
|
||||
base = 'Memory', wbDecl = None, pcDecl = None,
|
||||
rasPop = False):
|
||||
base='Memory', wbDecl=None, pcDecl=None,
|
||||
rasPop=False, size=4, sign=False, faCode=None):
|
||||
# Make sure flags are in lists (convert to lists if not).
|
||||
memFlags = makeList(memFlags)
|
||||
instFlags = makeList(instFlags)
|
||||
|
@ -63,6 +63,22 @@ let {{
|
|||
|
||||
codeBlobs["ea_code"] = eaCode
|
||||
|
||||
if faCode:
|
||||
# For AArch64 the fa_code snippet comes already assembled here
|
||||
codeBlobs["fa_code"] = faCode
|
||||
elif wbDecl == None:
|
||||
codeBlobs["fa_code"] = '''
|
||||
if (dest != INTREG_PC) {
|
||||
fault->annotate(ArmFault::SAS, %s);
|
||||
fault->annotate(ArmFault::SSE, %s);
|
||||
fault->annotate(ArmFault::SRT, dest);
|
||||
}
|
||||
''' %("0" if size == 1 else
|
||||
"1" if size == 2 else "2",
|
||||
"true" if sign else "false")
|
||||
else:
|
||||
codeBlobs["fa_code"] = ''
|
||||
|
||||
macroName = Name
|
||||
instFlagsCopy = list(instFlags)
|
||||
codeBlobsCopy = dict(codeBlobs)
|
||||
|
@ -108,6 +124,7 @@ let {{
|
|||
"use_uops" : use_uops,
|
||||
"use_pc" : use_pc,
|
||||
"use_wb" : use_wb,
|
||||
"fa_code" : '',
|
||||
"is_ras_pop" : is_ras_pop },
|
||||
['IsMacroop'])
|
||||
header_output += self.declareTemplate.subst(iop)
|
||||
|
@ -176,8 +193,13 @@ let {{
|
|||
return Name
|
||||
|
||||
def buildMemSuffix(sign, size):
|
||||
if size == 4:
|
||||
memSuffix = ''
|
||||
if size == 8:
|
||||
memSuffix = '_ud'
|
||||
elif size == 4:
|
||||
if sign:
|
||||
memSuffix = '_sw'
|
||||
else:
|
||||
memSuffix = '_uw'
|
||||
elif size == 2:
|
||||
if sign:
|
||||
memSuffix = '_sh'
|
||||
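With the new size == 8 case, buildMemSuffix now covers doubleword accesses as well. A quick-reference sketch of the selection visible in this hunk (illustrative only; the unsigned halfword and byte cases continue in the unchanged tail of the function):

#   size 8          -> '_ud'   (unsigned doubleword, added by this patch)
#   size 4, signed  -> '_sw'   unsigned -> '_uw'
#   size 2, signed  -> '_sh'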
|
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010-2012 ARM Limited
|
||||
// Copyright (c) 2010-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -40,21 +40,102 @@
|
|||
let {{
|
||||
|
||||
svcCode = '''
|
||||
if (FullSystem) {
|
||||
fault = new SupervisorCall;
|
||||
} else {
|
||||
fault = new SupervisorCall(machInst);
|
||||
}
|
||||
fault = new SupervisorCall(machInst, imm);
|
||||
'''
|
||||
|
||||
svcIop = InstObjParams("svc", "Svc", "PredOp",
|
||||
svcIop = InstObjParams("svc", "Svc", "ImmOp",
|
||||
{ "code": svcCode,
|
||||
"predicate_test": predicateTest },
|
||||
["IsSyscall", "IsNonSpeculative", "IsSerializeAfter"])
|
||||
header_output = BasicDeclare.subst(svcIop)
|
||||
decoder_output = BasicConstructor.subst(svcIop)
|
||||
header_output = ImmOpDeclare.subst(svcIop)
|
||||
decoder_output = ImmOpConstructor.subst(svcIop)
|
||||
exec_output = PredOpExecute.subst(svcIop)
|
||||
|
||||
smcCode = '''
|
||||
HCR hcr = Hcr;
|
||||
CPSR cpsr = Cpsr;
|
||||
SCR scr = Scr;
|
||||
|
||||
if ((cpsr.mode != MODE_USER) && FullSystem) {
|
||||
if (ArmSystem::haveVirtualization(xc->tcBase()) &&
|
||||
!inSecureState(scr, cpsr) && (cpsr.mode != MODE_HYP) && hcr.tsc) {
|
||||
fault = new HypervisorTrap(machInst, 0, EC_SMC_TO_HYP);
|
||||
} else {
|
||||
if (scr.scd) {
|
||||
fault = disabledFault();
|
||||
} else {
|
||||
fault = new SecureMonitorCall(machInst);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
fault = disabledFault();
|
||||
}
|
||||
'''
|
||||
|
||||
smcIop = InstObjParams("smc", "Smc", "PredOp",
|
||||
{ "code": smcCode,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative", "IsSerializeAfter"])
|
||||
header_output += BasicDeclare.subst(smcIop)
|
||||
decoder_output += BasicConstructor.subst(smcIop)
|
||||
exec_output += PredOpExecute.subst(smcIop)
|
||||
|
||||
hvcCode = '''
|
||||
CPSR cpsr = Cpsr;
|
||||
SCR scr = Scr;
|
||||
|
||||
// Filter out the various cases where this instruction isn't defined
|
||||
if (!FullSystem || !ArmSystem::haveVirtualization(xc->tcBase()) ||
|
||||
(cpsr.mode == MODE_USER) ||
|
||||
(ArmSystem::haveSecurity(xc->tcBase()) && (!scr.ns || !scr.hce))) {
|
||||
fault = disabledFault();
|
||||
} else {
|
||||
fault = new HypervisorCall(machInst, imm);
|
||||
}
|
||||
'''
|
||||
|
||||
hvcIop = InstObjParams("hvc", "Hvc", "ImmOp",
|
||||
{ "code": hvcCode,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative", "IsSerializeAfter"])
|
||||
header_output += ImmOpDeclare.subst(hvcIop)
|
||||
decoder_output += ImmOpConstructor.subst(hvcIop)
|
||||
exec_output += PredOpExecute.subst(hvcIop)
|
||||
|
||||
eretCode = '''
|
||||
SCTLR sctlr = Sctlr;
|
||||
CPSR old_cpsr = Cpsr;
|
||||
old_cpsr.nz = CondCodesNZ;
|
||||
old_cpsr.c = CondCodesC;
|
||||
old_cpsr.v = CondCodesV;
|
||||
old_cpsr.ge = CondCodesGE;
|
||||
|
||||
CPSR new_cpsr = cpsrWriteByInstr(old_cpsr, Spsr, Scr, Nsacr, 0xF,
|
||||
true, sctlr.nmfi, xc->tcBase());
|
||||
Cpsr = ~CondCodesMask & new_cpsr;
|
||||
CondCodesNZ = new_cpsr.nz;
|
||||
CondCodesC = new_cpsr.c;
|
||||
CondCodesV = new_cpsr.v;
|
||||
CondCodesGE = new_cpsr.ge;
|
||||
|
||||
NextThumb = (new_cpsr).t;
|
||||
NextJazelle = (new_cpsr).j;
|
||||
NextItState = (((new_cpsr).it2 << 2) & 0xFC)
|
||||
| ((new_cpsr).it1 & 0x3);
|
||||
|
||||
NPC = (old_cpsr.mode == MODE_HYP) ? ElrHyp : LR;
|
||||
'''
|
||||
|
||||
eretIop = InstObjParams("eret", "Eret", "PredOp",
|
||||
{ "code": eretCode,
|
||||
"predicate_test": predicateTest },
|
||||
["IsNonSpeculative", "IsSerializeAfter"])
|
||||
header_output += BasicDeclare.subst(eretIop)
|
||||
decoder_output += BasicConstructor.subst(eretIop)
|
||||
exec_output += PredOpExecute.subst(eretIop)
|
||||
|
||||
|
||||
|
||||
}};
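The hvc path above only raises HypervisorCall when the instruction is architecturally available; every other case falls through to disabledFault(). A compact restatement of that predicate as an illustrative Python sketch (mirroring the C++ condition; not part of the patch):

# Sketch: the availability check implemented by hvcCode above.
def hvc_is_defined(full_system, have_virt, have_security, in_user_mode,
                   scr_ns, scr_hce):
    if not full_system or not have_virt:
        return False
    if in_user_mode:
        return False
    if have_security and (not scr_ns or not scr_hce):
        return False
    return True   # otherwise HVC raises HypervisorCall(machInst, imm)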
|
||||
|
||||
let {{
|
||||
|
@ -87,6 +168,59 @@ let {{
|
|||
decoder_output += MrsConstructor.subst(mrsSpsrIop)
|
||||
exec_output += PredOpExecute.subst(mrsSpsrIop)
|
||||
|
||||
mrsBankedRegCode = '''
|
||||
bool isIntReg;
|
||||
int regIdx;
|
||||
|
||||
if (decodeMrsMsrBankedReg(byteMask, r, isIntReg, regIdx, Cpsr, Scr, Nsacr)) {
|
||||
if (isIntReg) {
|
||||
Dest = DecodedBankedIntReg;
|
||||
} else {
|
||||
Dest = xc->readMiscReg(regIdx);
|
||||
}
|
||||
} else {
|
||||
return new UndefinedInstruction(machInst, false, mnemonic);
|
||||
}
|
||||
'''
|
||||
mrsBankedRegIop = InstObjParams("mrs", "MrsBankedReg", "MrsOp",
|
||||
{ "code": mrsBankedRegCode,
|
||||
"predicate_test": predicateTest },
|
||||
["IsSerializeBefore"])
|
||||
header_output += MrsBankedRegDeclare.subst(mrsBankedRegIop)
|
||||
decoder_output += MrsBankedRegConstructor.subst(mrsBankedRegIop)
|
||||
exec_output += PredOpExecute.subst(mrsBankedRegIop)
|
||||
|
||||
msrBankedRegCode = '''
|
||||
bool isIntReg;
|
||||
int regIdx;
|
||||
|
||||
if (decodeMrsMsrBankedReg(byteMask, r, isIntReg, regIdx, Cpsr, Scr, Nsacr)) {
|
||||
if (isIntReg) {
|
||||
// This is a bit nasty, you would have thought that
|
||||
// DecodedBankedIntReg wouldn't be written to unless the
|
||||
// conditions on the IF statements above are met, however if
|
||||
// you look at the generated C code you'll find that they are.
|
||||
// However this is safe as DecodedBankedIntReg (which is used
|
||||
// in operands.isa to get the index of DecodedBankedIntReg)
|
||||
// will return INTREG_DUMMY if its not a valid integer
|
||||
// register, so redirecting the write to somewhere we don't
|
||||
// care about.
|
||||
DecodedBankedIntReg = Op1;
|
||||
} else {
|
||||
xc->setMiscReg(regIdx, Op1);
|
||||
}
|
||||
} else {
|
||||
return new UndefinedInstruction(machInst, false, mnemonic);
|
||||
}
|
||||
'''
|
||||
msrBankedRegIop = InstObjParams("msr", "MsrBankedReg", "MsrRegOp",
|
||||
{ "code": msrBankedRegCode,
|
||||
"predicate_test": predicateTest },
|
||||
["IsSerializeAfter"])
|
||||
header_output += MsrBankedRegDeclare.subst(msrBankedRegIop)
|
||||
decoder_output += MsrBankedRegConstructor.subst(msrBankedRegIop)
|
||||
exec_output += PredOpExecute.subst(msrBankedRegIop)
|
||||
|
||||
msrCpsrRegCode = '''
|
||||
SCTLR sctlr = Sctlr;
|
||||
CPSR old_cpsr = Cpsr;
|
||||
|
@ -96,7 +230,8 @@ let {{
|
|||
old_cpsr.ge = CondCodesGE;
|
||||
|
||||
CPSR new_cpsr =
|
||||
cpsrWriteByInstr(old_cpsr, Op1, byteMask, false, sctlr.nmfi);
|
||||
cpsrWriteByInstr(old_cpsr, Op1, Scr, Nsacr, byteMask, false,
|
||||
sctlr.nmfi, xc->tcBase());
|
||||
Cpsr = ~CondCodesMask & new_cpsr;
|
||||
CondCodesNZ = new_cpsr.nz;
|
||||
CondCodesC = new_cpsr.c;
|
||||
|
@ -128,7 +263,8 @@ let {{
|
|||
old_cpsr.v = CondCodesV;
|
||||
old_cpsr.ge = CondCodesGE;
|
||||
CPSR new_cpsr =
|
||||
cpsrWriteByInstr(old_cpsr, imm, byteMask, false, sctlr.nmfi);
|
||||
cpsrWriteByInstr(old_cpsr, imm, Scr, Nsacr, byteMask, false,
|
||||
sctlr.nmfi, xc->tcBase());
|
||||
Cpsr = ~CondCodesMask & new_cpsr;
|
||||
CondCodesNZ = new_cpsr.nz;
|
||||
CondCodesC = new_cpsr.c;
|
||||
|
@ -488,12 +624,10 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(bkptIop)
|
||||
exec_output += BasicExecute.subst(bkptIop)
|
||||
|
||||
nopIop = InstObjParams("nop", "NopInst", "PredOp", \
|
||||
{ "code" : "", "predicate_test" : predicateTest },
|
||||
['IsNop'])
|
||||
nopIop = InstObjParams("nop", "NopInst", "ArmStaticInst", "", ['IsNop'])
|
||||
header_output += BasicDeclare.subst(nopIop)
|
||||
decoder_output += BasicConstructor.subst(nopIop)
|
||||
exec_output += PredOpExecute.subst(nopIop)
|
||||
decoder_output += BasicConstructor64.subst(nopIop)
|
||||
exec_output += BasicExecute.subst(nopIop)
|
||||
|
||||
yieldIop = InstObjParams("yield", "YieldInst", "PredOp", \
|
||||
{ "code" : "", "predicate_test" : predicateTest })
|
||||
|
@ -502,14 +636,31 @@ let {{
|
|||
exec_output += PredOpExecute.subst(yieldIop)
|
||||
|
||||
wfeCode = '''
|
||||
// WFE Sleeps if SevMailbox==0 and no unmasked interrupts are pending
|
||||
HCR hcr = Hcr;
|
||||
CPSR cpsr = Cpsr;
|
||||
SCR scr = Scr64;
|
||||
SCTLR sctlr = Sctlr;
|
||||
|
||||
// WFE Sleeps if SevMailbox==0 and no unmasked interrupts are pending,
|
||||
ThreadContext *tc = xc->tcBase();
|
||||
if (SevMailbox == 1) {
|
||||
SevMailbox = 0;
|
||||
PseudoInst::quiesceSkip(xc->tcBase());
|
||||
} else if (xc->tcBase()->getCpuPtr()->getInterruptController()->checkInterrupts(xc->tcBase())) {
|
||||
PseudoInst::quiesceSkip(xc->tcBase());
|
||||
PseudoInst::quiesceSkip(tc);
|
||||
} else if (tc->getCpuPtr()->getInterruptController()->checkInterrupts(tc)) {
|
||||
PseudoInst::quiesceSkip(tc);
|
||||
} else if (cpsr.el == EL0 && !sctlr.ntwe) {
|
||||
PseudoInst::quiesceSkip(tc);
|
||||
fault = new SupervisorTrap(machInst, 0x1E00001, EC_TRAPPED_WFI_WFE);
|
||||
} else if (ArmSystem::haveVirtualization(tc) &&
|
||||
!inSecureState(scr, cpsr) && (cpsr.mode != MODE_HYP) &&
|
||||
hcr.twe) {
|
||||
PseudoInst::quiesceSkip(tc);
|
||||
fault = new HypervisorTrap(machInst, 0x1E00001, EC_TRAPPED_WFI_WFE);
|
||||
} else if (ArmSystem::haveSecurity(tc) && cpsr.el != EL3 && scr.twe) {
|
||||
PseudoInst::quiesceSkip(tc);
|
||||
fault = new SecureMonitorTrap(machInst, 0x1E00001, EC_TRAPPED_WFI_WFE);
|
||||
} else {
|
||||
PseudoInst::quiesce(xc->tcBase());
|
||||
PseudoInst::quiesce(tc);
|
||||
}
|
||||
'''
|
||||
wfePredFixUpCode = '''
|
||||
|
@@ -528,12 +679,30 @@ let {{
|
|||
exec_output += QuiescePredOpExecuteWithFixup.subst(wfeIop)
|
||||
|
||||
wfiCode = '''
|
||||
HCR hcr = Hcr;
|
||||
CPSR cpsr = Cpsr;
|
||||
SCR scr = Scr64;
|
||||
SCTLR sctlr = Sctlr;
|
||||
|
||||
// WFI doesn't sleep if interrupts are pending (masked or not)
|
||||
if (xc->tcBase()->getCpuPtr()->getInterruptController()->checkRaw()) {
|
||||
PseudoInst::quiesceSkip(xc->tcBase());
|
||||
ThreadContext *tc = xc->tcBase();
|
||||
if (tc->getCpuPtr()->getInterruptController()->checkWfiWake(hcr, cpsr,
|
||||
scr)) {
|
||||
PseudoInst::quiesceSkip(tc);
|
||||
} else if (cpsr.el == EL0 && !sctlr.ntwi) {
|
||||
PseudoInst::quiesceSkip(tc);
|
||||
fault = new SupervisorTrap(machInst, 0x1E00000, EC_TRAPPED_WFI_WFE);
|
||||
} else if (ArmSystem::haveVirtualization(tc) && hcr.twi &&
|
||||
(cpsr.mode != MODE_HYP) && !inSecureState(scr, cpsr)) {
|
||||
PseudoInst::quiesceSkip(tc);
|
||||
fault = new HypervisorTrap(machInst, 0x1E00000, EC_TRAPPED_WFI_WFE);
|
||||
} else if (ArmSystem::haveSecurity(tc) && cpsr.el != EL3 && scr.twi) {
|
||||
PseudoInst::quiesceSkip(tc);
|
||||
fault = new SecureMonitorTrap(machInst, 0x1E00000, EC_TRAPPED_WFI_WFE);
|
||||
} else {
|
||||
PseudoInst::quiesce(xc->tcBase());
|
||||
PseudoInst::quiesce(tc);
|
||||
}
|
||||
tc->getCpuPtr()->clearInterrupt(INT_ABT, 0);
|
||||
'''
|
||||
wfiIop = InstObjParams("wfi", "WfiInst", "PredOp", \
|
||||
{ "code" : wfiCode, "predicate_test" : predicateTest },
|
||||
|
@@ -564,6 +733,16 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(sevIop)
|
||||
exec_output += PredOpExecute.subst(sevIop)
|
||||
|
||||
sevlCode = '''
|
||||
SevMailbox = 1;
|
||||
'''
|
||||
sevlIop = InstObjParams("sevl", "SevlInst", "PredOp", \
|
||||
{ "code" : sevlCode, "predicate_test" : predicateTest },
|
||||
["IsNonSpeculative", "IsSquashAfter", "IsUnverifiable"])
|
||||
header_output += BasicDeclare.subst(sevlIop)
|
||||
decoder_output += BasicConstructor.subst(sevlIop)
|
||||
exec_output += BasicExecute.subst(sevlIop)
|
||||
|
||||
itIop = InstObjParams("it", "ItInst", "PredOp", \
|
||||
{ "code" : ";",
|
||||
"predicate_test" : predicateTest }, [])
|
||||
|
@@ -571,9 +750,6 @@ let {{
|
|||
decoder_output += BasicConstructor.subst(itIop)
|
||||
exec_output += PredOpExecute.subst(itIop)
|
||||
unknownCode = '''
|
||||
if (FullSystem)
|
||||
return new UndefinedInstruction;
|
||||
else
|
||||
return new UndefinedInstruction(machInst, true);
|
||||
'''
|
||||
unknownIop = InstObjParams("unknown", "Unknown", "UnknownOp", \
|
||||
|
@ -626,108 +802,152 @@ let {{
|
|||
exec_output += PredOpExecute.subst(bfiIop)
|
||||
|
||||
mrc14code = '''
|
||||
CPSR cpsr = Cpsr;
|
||||
if (cpsr.mode == MODE_USER) {
|
||||
if (FullSystem)
|
||||
return new UndefinedInstruction;
|
||||
else
|
||||
return new UndefinedInstruction(false, mnemonic);
|
||||
MiscRegIndex miscReg = (MiscRegIndex) xc->tcBase()->flattenMiscIndex(op1);
|
||||
if (!canReadCoprocReg(miscReg, Scr, Cpsr, xc->tcBase())) {
|
||||
return new UndefinedInstruction(machInst, false, mnemonic);
|
||||
}
|
||||
if (mcrMrc14TrapToHyp((const MiscRegIndex) op1, Hcr, Cpsr, Scr, Hdcr,
|
||||
Hstr, Hcptr, imm)) {
|
||||
return new HypervisorTrap(machInst, imm, EC_TRAPPED_CP14_MCR_MRC);
|
||||
}
|
||||
Dest = MiscOp1;
|
||||
'''
|
||||
|
||||
mrc14Iop = InstObjParams("mrc", "Mrc14", "RegRegOp",
|
||||
mrc14Iop = InstObjParams("mrc", "Mrc14", "RegRegImmOp",
|
||||
{ "code": mrc14code,
|
||||
"predicate_test": predicateTest }, [])
|
||||
header_output += RegRegOpDeclare.subst(mrc14Iop)
|
||||
decoder_output += RegRegOpConstructor.subst(mrc14Iop)
|
||||
header_output += RegRegImmOpDeclare.subst(mrc14Iop)
|
||||
decoder_output += RegRegImmOpConstructor.subst(mrc14Iop)
|
||||
exec_output += PredOpExecute.subst(mrc14Iop)
|
||||
|
||||
|
||||
mcr14code = '''
|
||||
CPSR cpsr = Cpsr;
|
||||
if (cpsr.mode == MODE_USER) {
|
||||
if (FullSystem)
|
||||
return new UndefinedInstruction;
|
||||
else
|
||||
return new UndefinedInstruction(false, mnemonic);
|
||||
MiscRegIndex miscReg = (MiscRegIndex) xc->tcBase()->flattenMiscIndex(dest);
|
||||
if (!canWriteCoprocReg(miscReg, Scr, Cpsr, xc->tcBase())) {
|
||||
return new UndefinedInstruction(machInst, false, mnemonic);
|
||||
}
|
||||
if (mcrMrc14TrapToHyp(miscReg, Hcr, Cpsr, Scr, Hdcr,
|
||||
Hstr, Hcptr, imm)) {
|
||||
return new HypervisorTrap(machInst, imm, EC_TRAPPED_CP14_MCR_MRC);
|
||||
}
|
||||
MiscDest = Op1;
|
||||
'''
|
||||
mcr14Iop = InstObjParams("mcr", "Mcr14", "RegRegOp",
|
||||
mcr14Iop = InstObjParams("mcr", "Mcr14", "RegRegImmOp",
|
||||
{ "code": mcr14code,
|
||||
"predicate_test": predicateTest },
|
||||
["IsSerializeAfter","IsNonSpeculative"])
|
||||
header_output += RegRegOpDeclare.subst(mcr14Iop)
|
||||
decoder_output += RegRegOpConstructor.subst(mcr14Iop)
|
||||
header_output += RegRegImmOpDeclare.subst(mcr14Iop)
|
||||
decoder_output += RegRegImmOpConstructor.subst(mcr14Iop)
|
||||
exec_output += PredOpExecute.subst(mcr14Iop)
|
||||
|
||||
mrc14UserIop = InstObjParams("mrc", "Mrc14User", "RegRegOp",
|
||||
{ "code": "Dest = MiscOp1;",
|
||||
"predicate_test": predicateTest }, [])
|
||||
header_output += RegRegOpDeclare.subst(mrc14UserIop)
|
||||
decoder_output += RegRegOpConstructor.subst(mrc14UserIop)
|
||||
exec_output += PredOpExecute.subst(mrc14UserIop)
|
||||
|
||||
mcr14UserIop = InstObjParams("mcr", "Mcr14User", "RegRegOp",
|
||||
{ "code": "MiscDest = Op1",
|
||||
"predicate_test": predicateTest },
|
||||
["IsSerializeAfter","IsNonSpeculative"])
|
||||
header_output += RegRegOpDeclare.subst(mcr14UserIop)
|
||||
decoder_output += RegRegOpConstructor.subst(mcr14UserIop)
|
||||
exec_output += PredOpExecute.subst(mcr14UserIop)
|
||||
|
||||
mrc15code = '''
|
||||
CPSR cpsr = Cpsr;
|
||||
if (cpsr.mode == MODE_USER) {
|
||||
if (FullSystem)
|
||||
return new UndefinedInstruction;
|
||||
else
|
||||
return new UndefinedInstruction(false, mnemonic);
|
||||
int preFlatOp1 = flattenMiscRegNsBanked(op1, xc->tcBase());
|
||||
MiscRegIndex miscReg = (MiscRegIndex)
|
||||
xc->tcBase()->flattenMiscIndex(preFlatOp1);
|
||||
bool hypTrap = mcrMrc15TrapToHyp(miscReg, Hcr, Cpsr, Scr, Hdcr, Hstr,
|
||||
Hcptr, imm);
|
||||
bool canRead = canReadCoprocReg(miscReg, Scr, Cpsr, xc->tcBase());
|
||||
|
||||
// If we're in non-secure PL1 mode then we can trap regardless of whether
// the register is accessible; in other modes we only trap if the register
// IS accessible.
|
||||
if (!canRead & !(hypTrap & !inUserMode(Cpsr) & !inSecureState(Scr, Cpsr))) {
|
||||
return new UndefinedInstruction(machInst, false, mnemonic);
|
||||
}
|
||||
Dest = MiscOp1;
|
||||
if (hypTrap) {
|
||||
return new HypervisorTrap(machInst, imm, EC_TRAPPED_CP15_MCR_MRC);
|
||||
}
|
||||
Dest = MiscNsBankedOp1;
|
||||
'''
|
||||
|
||||
mrc15Iop = InstObjParams("mrc", "Mrc15", "RegRegOp",
|
||||
mrc15Iop = InstObjParams("mrc", "Mrc15", "RegRegImmOp",
|
||||
{ "code": mrc15code,
|
||||
"predicate_test": predicateTest }, [])
|
||||
header_output += RegRegOpDeclare.subst(mrc15Iop)
|
||||
decoder_output += RegRegOpConstructor.subst(mrc15Iop)
|
||||
header_output += RegRegImmOpDeclare.subst(mrc15Iop)
|
||||
decoder_output += RegRegImmOpConstructor.subst(mrc15Iop)
|
||||
exec_output += PredOpExecute.subst(mrc15Iop)
|
||||
|
||||
|
||||
mcr15code = '''
|
||||
CPSR cpsr = Cpsr;
|
||||
if (cpsr.mode == MODE_USER) {
|
||||
if (FullSystem)
|
||||
return new UndefinedInstruction;
|
||||
else
|
||||
return new UndefinedInstruction(false, mnemonic);
|
||||
int preFlatDest = flattenMiscRegNsBanked(dest, xc->tcBase());
|
||||
MiscRegIndex miscReg = (MiscRegIndex)
|
||||
xc->tcBase()->flattenMiscIndex(preFlatDest);
|
||||
bool hypTrap = mcrMrc15TrapToHyp(miscReg, Hcr, Cpsr, Scr, Hdcr, Hstr,
|
||||
Hcptr, imm);
|
||||
bool canWrite = canWriteCoprocReg(miscReg, Scr, Cpsr, xc->tcBase());
|
||||
|
||||
// If we're in non-secure PL1 mode then we can trap regardless of whether
// the register is accessible; in other modes we only trap if the register
// IS accessible.
|
||||
if (!canWrite & !(hypTrap & !inUserMode(Cpsr) & !inSecureState(Scr, Cpsr))) {
|
||||
return new UndefinedInstruction(machInst, false, mnemonic);
|
||||
}
|
||||
MiscDest = Op1;
|
||||
if (hypTrap) {
|
||||
return new HypervisorTrap(machInst, imm, EC_TRAPPED_CP15_MCR_MRC);
|
||||
}
|
||||
MiscNsBankedDest = Op1;
|
||||
'''
|
||||
mcr15Iop = InstObjParams("mcr", "Mcr15", "RegRegOp",
|
||||
mcr15Iop = InstObjParams("mcr", "Mcr15", "RegRegImmOp",
|
||||
{ "code": mcr15code,
|
||||
"predicate_test": predicateTest },
|
||||
["IsSerializeAfter","IsNonSpeculative"])
|
||||
header_output += RegRegOpDeclare.subst(mcr15Iop)
|
||||
decoder_output += RegRegOpConstructor.subst(mcr15Iop)
|
||||
header_output += RegRegImmOpDeclare.subst(mcr15Iop)
|
||||
decoder_output += RegRegImmOpConstructor.subst(mcr15Iop)
|
||||
exec_output += PredOpExecute.subst(mcr15Iop)
|
||||
|
||||
mrc15UserIop = InstObjParams("mrc", "Mrc15User", "RegRegOp",
|
||||
{ "code": "Dest = MiscOp1;",
|
||||
"predicate_test": predicateTest }, [])
|
||||
header_output += RegRegOpDeclare.subst(mrc15UserIop)
|
||||
decoder_output += RegRegOpConstructor.subst(mrc15UserIop)
|
||||
exec_output += PredOpExecute.subst(mrc15UserIop)
|
||||
|
||||
mcr15UserIop = InstObjParams("mcr", "Mcr15User", "RegRegOp",
|
||||
{ "code": "MiscDest = Op1",
|
||||
"predicate_test": predicateTest },
|
||||
["IsSerializeAfter","IsNonSpeculative"])
|
||||
header_output += RegRegOpDeclare.subst(mcr15UserIop)
|
||||
decoder_output += RegRegOpConstructor.subst(mcr15UserIop)
|
||||
exec_output += PredOpExecute.subst(mcr15UserIop)
|
||||
mrrc15code = '''
|
||||
int preFlatOp1 = flattenMiscRegNsBanked(op1, xc->tcBase());
|
||||
MiscRegIndex miscReg = (MiscRegIndex)
|
||||
xc->tcBase()->flattenMiscIndex(preFlatOp1);
|
||||
bool hypTrap = mcrrMrrc15TrapToHyp(miscReg, Cpsr, Scr, Hstr, Hcr, imm);
|
||||
bool canRead = canReadCoprocReg(miscReg, Scr, Cpsr, xc->tcBase());
|
||||
|
||||
// If we're in non-secure PL1 mode then we can trap regardless of whether
// the register is accessible; in other modes we only trap if the register
// IS accessible.
|
||||
if (!canRead & !(hypTrap & !inUserMode(Cpsr) & !inSecureState(Scr, Cpsr))) {
|
||||
return new UndefinedInstruction(machInst, false, mnemonic);
|
||||
}
|
||||
if (hypTrap) {
|
||||
return new HypervisorTrap(machInst, imm, EC_TRAPPED_CP15_MCRR_MRRC);
|
||||
}
|
||||
Dest = bits(MiscNsBankedOp164, 63, 32);
|
||||
Dest2 = bits(MiscNsBankedOp164, 31, 0);
|
||||
'''
|
||||
mrrc15Iop = InstObjParams("mrrc", "Mrrc15", "MrrcOp",
|
||||
{ "code": mrrc15code,
|
||||
"predicate_test": predicateTest }, [])
|
||||
header_output += MrrcOpDeclare.subst(mrrc15Iop)
|
||||
decoder_output += MrrcOpConstructor.subst(mrrc15Iop)
|
||||
exec_output += PredOpExecute.subst(mrrc15Iop)
|
||||
|
||||
|
||||
mcrr15code = '''
|
||||
int preFlatDest = flattenMiscRegNsBanked(dest, xc->tcBase());
|
||||
MiscRegIndex miscReg = (MiscRegIndex)
|
||||
xc->tcBase()->flattenMiscIndex(preFlatDest);
|
||||
bool hypTrap = mcrrMrrc15TrapToHyp(miscReg, Cpsr, Scr, Hstr, Hcr, imm);
|
||||
bool canWrite = canWriteCoprocReg(miscReg, Scr, Cpsr, xc->tcBase());
|
||||
|
||||
// If we're in non-secure PL1 mode then we can trap regardless of whether
// the register is accessible; in other modes we only trap if the register
// IS accessible.
|
||||
if (!canWrite & !(hypTrap & !inUserMode(Cpsr) & !inSecureState(Scr, Cpsr))) {
|
||||
return new UndefinedInstruction(machInst, false, mnemonic);
|
||||
}
|
||||
if (hypTrap) {
|
||||
return new HypervisorTrap(machInst, imm, EC_TRAPPED_CP15_MCRR_MRRC);
|
||||
}
|
||||
MiscNsBankedDest64 = ((uint64_t) Op1 << 32) | Op2;
|
||||
'''
|
||||
mcrr15Iop = InstObjParams("mcrr", "Mcrr15", "McrrOp",
|
||||
{ "code": mcrr15code,
|
||||
"predicate_test": predicateTest }, [])
|
||||
header_output += McrrOpDeclare.subst(mcrr15Iop)
|
||||
decoder_output += McrrOpConstructor.subst(mcrr15Iop)
|
||||
exec_output += PredOpExecute.subst(mcrr15Iop)
|
||||
|
||||
|
||||
enterxCode = '''
|
||||
NextThumb = true;
|
||||
|
@@ -775,35 +995,53 @@ let {{
|
|||
exec_output += PredOpExecute.subst(clrexIop)
|
||||
|
||||
isbCode = '''
|
||||
// If the barrier is due to a CP15 access check for hyp traps
|
||||
if ((imm != 0) && mcrMrc15TrapToHyp(MISCREG_CP15ISB, Hcr, Cpsr, Scr,
|
||||
Hdcr, Hstr, Hcptr, imm)) {
|
||||
return new HypervisorTrap(machInst, imm,
|
||||
EC_TRAPPED_CP15_MCR_MRC);
|
||||
}
|
||||
fault = new FlushPipe;
|
||||
'''
|
||||
isbIop = InstObjParams("isb", "Isb", "PredOp",
|
||||
isbIop = InstObjParams("isb", "Isb", "ImmOp",
|
||||
{"code": isbCode,
|
||||
"predicate_test": predicateTest},
|
||||
['IsSerializeAfter'])
|
||||
header_output += BasicDeclare.subst(isbIop)
|
||||
decoder_output += BasicConstructor.subst(isbIop)
|
||||
header_output += ImmOpDeclare.subst(isbIop)
|
||||
decoder_output += ImmOpConstructor.subst(isbIop)
|
||||
exec_output += PredOpExecute.subst(isbIop)
|
||||
|
||||
dsbCode = '''
|
||||
// If the barrier is due to a CP15 access check for hyp traps
|
||||
if ((imm != 0) && mcrMrc15TrapToHyp(MISCREG_CP15DSB, Hcr, Cpsr, Scr,
|
||||
Hdcr, Hstr, Hcptr, imm)) {
|
||||
return new HypervisorTrap(machInst, imm,
|
||||
EC_TRAPPED_CP15_MCR_MRC);
|
||||
}
|
||||
fault = new FlushPipe;
|
||||
'''
|
||||
dsbIop = InstObjParams("dsb", "Dsb", "PredOp",
|
||||
dsbIop = InstObjParams("dsb", "Dsb", "ImmOp",
|
||||
{"code": dsbCode,
|
||||
"predicate_test": predicateTest},
|
||||
['IsMemBarrier', 'IsSerializeAfter'])
|
||||
header_output += BasicDeclare.subst(dsbIop)
|
||||
decoder_output += BasicConstructor.subst(dsbIop)
|
||||
header_output += ImmOpDeclare.subst(dsbIop)
|
||||
decoder_output += ImmOpConstructor.subst(dsbIop)
|
||||
exec_output += PredOpExecute.subst(dsbIop)
|
||||
|
||||
dmbCode = '''
|
||||
// If the barrier is due to a CP15 access check for hyp traps
|
||||
if ((imm != 0) && mcrMrc15TrapToHyp(MISCREG_CP15DMB, Hcr, Cpsr, Scr,
|
||||
Hdcr, Hstr, Hcptr, imm)) {
|
||||
return new HypervisorTrap(machInst, imm,
|
||||
EC_TRAPPED_CP15_MCR_MRC);
|
||||
}
|
||||
'''
|
||||
dmbIop = InstObjParams("dmb", "Dmb", "PredOp",
|
||||
dmbIop = InstObjParams("dmb", "Dmb", "ImmOp",
|
||||
{"code": dmbCode,
|
||||
"predicate_test": predicateTest},
|
||||
['IsMemBarrier'])
|
||||
header_output += BasicDeclare.subst(dmbIop)
|
||||
decoder_output += BasicConstructor.subst(dmbIop)
|
||||
header_output += ImmOpDeclare.subst(dmbIop)
|
||||
decoder_output += ImmOpConstructor.subst(dmbIop)
|
||||
exec_output += PredOpExecute.subst(dmbIop)
|
||||
|
||||
dbgCode = '''
|
||||
|
|
147
src/arch/arm/isa/insts/misc64.isa
Normal file
|
@@ -0,0 +1,147 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2011-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
// not be construed as granting a license to any other intellectual
|
||||
// property including but not limited to intellectual property relating
|
||||
// to a hardware implementation of the functionality of the software
|
||||
// licensed hereunder. You may use the software subject to the license
|
||||
// terms below provided that you ensure that this notice is replicated
|
||||
// unmodified and in its entirety in all distributions of the software,
|
||||
// modified or unmodified, in source code or in binary form.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met: redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer;
|
||||
// redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution;
|
||||
// neither the name of the copyright holders nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Authors: Gabe Black
|
||||
|
||||
let {{
|
||||
svcCode = '''
|
||||
fault = new SupervisorCall(machInst, bits(machInst, 20, 5));
|
||||
'''
|
||||
|
||||
svcIop = InstObjParams("svc", "Svc64", "ArmStaticInst",
|
||||
svcCode, ["IsSyscall", "IsNonSpeculative",
|
||||
"IsSerializeAfter"])
|
||||
header_output = BasicDeclare.subst(svcIop)
|
||||
decoder_output = BasicConstructor64.subst(svcIop)
|
||||
exec_output = BasicExecute.subst(svcIop)
|
||||
|
||||
# @todo: extend to take into account Virtualization.
|
||||
smcCode = '''
|
||||
SCR scr = Scr64;
|
||||
CPSR cpsr = Cpsr;
|
||||
|
||||
if (!ArmSystem::haveSecurity(xc->tcBase()) || inUserMode(cpsr) || scr.smd) {
|
||||
fault = disabledFault();
|
||||
} else {
|
||||
fault = new SecureMonitorCall(machInst);
|
||||
}
|
||||
'''
|
||||
|
||||
smcIop = InstObjParams("smc", "Smc64", "ArmStaticInst",
|
||||
smcCode, ["IsNonSpeculative", "IsSerializeAfter"])
|
||||
header_output += BasicDeclare.subst(smcIop)
|
||||
decoder_output += BasicConstructor64.subst(smcIop)
|
||||
exec_output += BasicExecute.subst(smcIop)
|
||||
|
||||
def subst(templateBase, iop):
|
||||
global header_output, decoder_output, exec_output
|
||||
header_output += eval(templateBase + "Declare").subst(iop)
|
||||
decoder_output += eval(templateBase + "Constructor").subst(iop)
|
||||
exec_output += BasicExecute.subst(iop)
|
||||
|
||||
bfmMaskCode = '''
|
||||
uint64_t bitMask;
|
||||
int diff = imm2 - imm1;
|
||||
if (imm1 <= imm2) {
|
||||
bitMask = mask(diff + 1);
|
||||
} else {
|
||||
bitMask = mask(imm2 + 1);
|
||||
bitMask = (bitMask >> imm1) | (bitMask << (intWidth - imm1));
|
||||
diff += intWidth;
|
||||
}
|
||||
uint64_t topBits M5_VAR_USED = ~mask(diff+1);
|
||||
uint64_t result = (Op164 >> imm1) | (Op164 << (intWidth - imm1));
|
||||
result &= bitMask;
|
||||
'''
|
||||
|
||||
bfmCode = bfmMaskCode + 'Dest64 = result | (Dest64 & ~bitMask);'
|
||||
bfmIop = InstObjParams("bfm", "Bfm64", "RegRegImmImmOp64", bfmCode);
|
||||
subst("RegRegImmImmOp64", bfmIop)
|
||||
|
||||
ubfmCode = bfmMaskCode + 'Dest64 = result;'
|
||||
ubfmIop = InstObjParams("ubfm", "Ubfm64", "RegRegImmImmOp64", ubfmCode);
|
||||
subst("RegRegImmImmOp64", ubfmIop)
|
||||
|
||||
sbfmCode = bfmMaskCode + \
|
||||
'Dest64 = result | (bits(Op164, imm2) ? topBits : 0);'
|
||||
sbfmIop = InstObjParams("sbfm", "Sbfm64", "RegRegImmImmOp64", sbfmCode);
|
||||
subst("RegRegImmImmOp64", sbfmIop)
|
||||
|
||||
extrCode = '''
|
||||
if (imm == 0) {
|
||||
Dest64 = Op264;
|
||||
} else {
|
||||
Dest64 = (Op164 << (intWidth - imm)) | (Op264 >> imm);
|
||||
}
|
||||
'''
|
||||
extrIop = InstObjParams("extr", "Extr64", "RegRegRegImmOp64", extrCode);
|
||||
subst("RegRegRegImmOp64", extrIop);
|
||||
|
||||
unknownCode = '''
|
||||
return new UndefinedInstruction(machInst, true);
|
||||
'''
|
||||
unknown64Iop = InstObjParams("unknown", "Unknown64", "UnknownOp64",
|
||||
unknownCode)
|
||||
header_output += BasicDeclare.subst(unknown64Iop)
|
||||
decoder_output += BasicConstructor64.subst(unknown64Iop)
|
||||
exec_output += BasicExecute.subst(unknown64Iop)
|
||||
|
||||
isbIop = InstObjParams("isb", "Isb64", "ArmStaticInst",
|
||||
"fault = new FlushPipe;", ['IsSerializeAfter'])
|
||||
header_output += BasicDeclare.subst(isbIop)
|
||||
decoder_output += BasicConstructor64.subst(isbIop)
|
||||
exec_output += BasicExecute.subst(isbIop)
|
||||
|
||||
dsbIop = InstObjParams("dsb", "Dsb64", "ArmStaticInst",
|
||||
"fault = new FlushPipe;",
|
||||
['IsMemBarrier', 'IsSerializeAfter'])
|
||||
header_output += BasicDeclare.subst(dsbIop)
|
||||
decoder_output += BasicConstructor64.subst(dsbIop)
|
||||
exec_output += BasicExecute.subst(dsbIop)
|
||||
|
||||
dmbIop = InstObjParams("dmb", "Dmb64", "ArmStaticInst", "",
|
||||
['IsMemBarrier'])
|
||||
header_output += BasicDeclare.subst(dmbIop)
|
||||
decoder_output += BasicConstructor64.subst(dmbIop)
|
||||
exec_output += BasicExecute.subst(dmbIop)
|
||||
|
||||
clrexIop = InstObjParams("clrex", "Clrex64", "ArmStaticInst",
|
||||
"LLSCLock = 0;")
|
||||
header_output += BasicDeclare.subst(clrexIop)
|
||||
decoder_output += BasicConstructor64.subst(clrexIop)
|
||||
exec_output += BasicExecute.subst(clrexIop)
|
||||
}};
|
|
@@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010-2011 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@@ -127,6 +127,38 @@ output header {{
|
|||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class Base>
|
||||
StaticInstPtr
|
||||
decodeNeonSThreeHAndWReg(unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1,
|
||||
IntRegIndex op2)
|
||||
{
|
||||
switch (size) {
|
||||
case 1:
|
||||
return new Base<int16_t>(machInst, dest, op1, op2);
|
||||
case 2:
|
||||
return new Base<int32_t>(machInst, dest, op1, op2);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class Base>
|
||||
StaticInstPtr
|
||||
decodeNeonSThreeImmHAndWReg(unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1,
|
||||
IntRegIndex op2, uint64_t imm)
|
||||
{
|
||||
switch (size) {
|
||||
case 1:
|
||||
return new Base<int16_t>(machInst, dest, op1, op2, imm);
|
||||
case 2:
|
||||
return new Base<int32_t>(machInst, dest, op1, op2, imm);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class Base>
|
||||
StaticInstPtr
|
||||
decodeNeonUSThreeUSReg(bool notSigned, unsigned size,
|
||||
|
@@ -174,6 +206,38 @@ output header {{
|
|||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
decodeNeonSThreeXReg(bool q, unsigned size,
|
||||
ExtMachInst machInst, IntRegIndex dest,
|
||||
IntRegIndex op1, IntRegIndex op2)
|
||||
{
|
||||
if (q) {
|
||||
return decodeNeonSThreeUReg<BaseQ>(
|
||||
size, machInst, dest, op1, op2);
|
||||
} else {
|
||||
return decodeNeonSThreeUSReg<BaseD>(
|
||||
size, machInst, dest, op1, op2);
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
decodeNeonUThreeXReg(bool q, unsigned size,
|
||||
ExtMachInst machInst, IntRegIndex dest,
|
||||
IntRegIndex op1, IntRegIndex op2)
|
||||
{
|
||||
if (q) {
|
||||
return decodeNeonUThreeUReg<BaseQ>(
|
||||
size, machInst, dest, op1, op2);
|
||||
} else {
|
||||
return decodeNeonUThreeUSReg<BaseD>(
|
||||
size, machInst, dest, op1, op2);
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
|
@@ -238,6 +302,124 @@ output header {{
|
|||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
decodeNeonUThreeFpReg(bool q, unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1, IntRegIndex op2)
|
||||
{
|
||||
if (q) {
|
||||
if (size)
|
||||
return new BaseQ<uint64_t>(machInst, dest, op1, op2);
|
||||
else
|
||||
return new BaseQ<uint32_t>(machInst, dest, op1, op2);
|
||||
} else {
|
||||
if (size)
|
||||
return new Unknown(machInst);
|
||||
else
|
||||
return new BaseD<uint32_t>(machInst, dest, op1, op2);
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class Base>
|
||||
StaticInstPtr
|
||||
decodeNeonUThreeScFpReg(bool size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1, IntRegIndex op2)
|
||||
{
|
||||
if (size)
|
||||
return new Base<uint64_t>(machInst, dest, op1, op2);
|
||||
else
|
||||
return new Base<uint32_t>(machInst, dest, op1, op2);
|
||||
}
|
||||
|
||||
template <template <typename T> class Base>
|
||||
StaticInstPtr
|
||||
decodeNeonUThreeImmScFpReg(bool size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1,
|
||||
IntRegIndex op2, uint64_t imm)
|
||||
{
|
||||
if (size)
|
||||
return new Base<uint64_t>(machInst, dest, op1, op2, imm);
|
||||
else
|
||||
return new Base<uint32_t>(machInst, dest, op1, op2, imm);
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
decodeNeonUThreeImmHAndWReg(bool q, unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1,
|
||||
IntRegIndex op2, uint64_t imm)
|
||||
{
|
||||
if (q) {
|
||||
switch (size) {
|
||||
case 1:
|
||||
return new BaseQ<uint16_t>(machInst, dest, op1, op2, imm);
|
||||
case 2:
|
||||
return new BaseQ<uint32_t>(machInst, dest, op1, op2, imm);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
} else {
|
||||
switch (size) {
|
||||
case 1:
|
||||
return new BaseD<uint16_t>(machInst, dest, op1, op2, imm);
|
||||
case 2:
|
||||
return new BaseD<uint32_t>(machInst, dest, op1, op2, imm);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
decodeNeonSThreeImmHAndWReg(bool q, unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1,
|
||||
IntRegIndex op2, uint64_t imm)
|
||||
{
|
||||
if (q) {
|
||||
switch (size) {
|
||||
case 1:
|
||||
return new BaseQ<int16_t>(machInst, dest, op1, op2, imm);
|
||||
case 2:
|
||||
return new BaseQ<int32_t>(machInst, dest, op1, op2, imm);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
} else {
|
||||
switch (size) {
|
||||
case 1:
|
||||
return new BaseD<int16_t>(machInst, dest, op1, op2, imm);
|
||||
case 2:
|
||||
return new BaseD<int32_t>(machInst, dest, op1, op2, imm);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
decodeNeonUThreeImmFpReg(bool q, unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1,
|
||||
IntRegIndex op2, uint64_t imm)
|
||||
{
|
||||
if (q) {
|
||||
if (size)
|
||||
return new BaseQ<uint64_t>(machInst, dest, op1, op2, imm);
|
||||
else
|
||||
return new BaseQ<uint32_t>(machInst, dest, op1, op2, imm);
|
||||
} else {
|
||||
if (size)
|
||||
return new Unknown(machInst);
|
||||
else
|
||||
return new BaseD<uint32_t>(machInst, dest, op1, op2, imm);
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
|
@@ -345,6 +527,46 @@ output header {{
|
|||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class Base>
|
||||
StaticInstPtr
|
||||
decodeNeonUTwoShiftUReg(unsigned size,
|
||||
ExtMachInst machInst, IntRegIndex dest,
|
||||
IntRegIndex op1, uint64_t imm)
|
||||
{
|
||||
switch (size) {
|
||||
case 0:
|
||||
return new Base<uint8_t>(machInst, dest, op1, imm);
|
||||
case 1:
|
||||
return new Base<uint16_t>(machInst, dest, op1, imm);
|
||||
case 2:
|
||||
return new Base<uint32_t>(machInst, dest, op1, imm);
|
||||
case 3:
|
||||
return new Base<uint64_t>(machInst, dest, op1, imm);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class Base>
|
||||
StaticInstPtr
|
||||
decodeNeonSTwoShiftUReg(unsigned size,
|
||||
ExtMachInst machInst, IntRegIndex dest,
|
||||
IntRegIndex op1, uint64_t imm)
|
||||
{
|
||||
switch (size) {
|
||||
case 0:
|
||||
return new Base<int8_t>(machInst, dest, op1, imm);
|
||||
case 1:
|
||||
return new Base<int16_t>(machInst, dest, op1, imm);
|
||||
case 2:
|
||||
return new Base<int32_t>(machInst, dest, op1, imm);
|
||||
case 3:
|
||||
return new Base<int64_t>(machInst, dest, op1, imm);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
|
@@ -411,6 +633,66 @@ output header {{
|
|||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
decodeNeonUTwoShiftXReg(bool q, unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1, uint64_t imm)
|
||||
{
|
||||
if (q) {
|
||||
return decodeNeonUTwoShiftUReg<BaseQ>(
|
||||
size, machInst, dest, op1, imm);
|
||||
} else {
|
||||
return decodeNeonUTwoShiftUSReg<BaseD>(
|
||||
size, machInst, dest, op1, imm);
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
decodeNeonSTwoShiftXReg(bool q, unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1, uint64_t imm)
|
||||
{
|
||||
if (q) {
|
||||
return decodeNeonSTwoShiftUReg<BaseQ>(
|
||||
size, machInst, dest, op1, imm);
|
||||
} else {
|
||||
return decodeNeonSTwoShiftUSReg<BaseD>(
|
||||
size, machInst, dest, op1, imm);
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class Base>
|
||||
StaticInstPtr
|
||||
decodeNeonUTwoShiftUFpReg(unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1, uint64_t imm)
|
||||
{
|
||||
if (size)
|
||||
return new Base<uint64_t>(machInst, dest, op1, imm);
|
||||
else
|
||||
return new Base<uint32_t>(machInst, dest, op1, imm);
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
decodeNeonUTwoShiftFpReg(bool q, unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1, uint64_t imm)
|
||||
{
|
||||
if (q) {
|
||||
if (size)
|
||||
return new BaseQ<uint64_t>(machInst, dest, op1, imm);
|
||||
else
|
||||
return new BaseQ<uint32_t>(machInst, dest, op1, imm);
|
||||
} else {
|
||||
if (size)
|
||||
return new Unknown(machInst);
|
||||
else
|
||||
return new BaseD<uint32_t>(machInst, dest, op1, imm);
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class Base>
|
||||
StaticInstPtr
|
||||
decodeNeonUTwoMiscUSReg(unsigned size,
|
||||
|
@@ -559,6 +841,221 @@ output header {{
|
|||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
decodeNeonUTwoMiscXReg(bool q, unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1)
|
||||
{
|
||||
if (q) {
|
||||
return decodeNeonUTwoMiscUReg<BaseQ>(size, machInst, dest, op1);
|
||||
} else {
|
||||
return decodeNeonUTwoMiscUSReg<BaseD>(size, machInst, dest, op1);
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
decodeNeonSTwoMiscXReg(bool q, unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1)
|
||||
{
|
||||
if (q) {
|
||||
return decodeNeonSTwoMiscUReg<BaseQ>(size, machInst, dest, op1);
|
||||
} else {
|
||||
return decodeNeonSTwoMiscUSReg<BaseD>(size, machInst, dest, op1);
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
decodeNeonUTwoMiscFpReg(bool q, unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1)
|
||||
{
|
||||
if (q) {
|
||||
if (size)
|
||||
return new BaseQ<uint64_t>(machInst, dest, op1);
|
||||
else
|
||||
return new BaseQ<uint32_t>(machInst, dest, op1);
|
||||
} else {
|
||||
if (size)
|
||||
return new Unknown(machInst);
|
||||
else
|
||||
return new BaseD<uint32_t>(machInst, dest, op1);
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
decodeNeonUTwoMiscPwiseScFpReg(unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1)
|
||||
{
|
||||
if (size)
|
||||
return new BaseQ<uint64_t>(machInst, dest, op1);
|
||||
else
|
||||
return new BaseD<uint32_t>(machInst, dest, op1);
|
||||
}
|
||||
|
||||
template <template <typename T> class Base>
|
||||
StaticInstPtr
|
||||
decodeNeonUTwoMiscScFpReg(unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1)
|
||||
{
|
||||
if (size)
|
||||
return new Base<uint64_t>(machInst, dest, op1);
|
||||
else
|
||||
return new Base<uint32_t>(machInst, dest, op1);
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
decodeNeonUAcrossLanesReg(bool q, unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1)
|
||||
{
|
||||
if (q) {
|
||||
switch (size) {
|
||||
case 0x0:
|
||||
return new BaseQ<uint8_t>(machInst, dest, op1);
|
||||
case 0x1:
|
||||
return new BaseQ<uint16_t>(machInst, dest, op1);
|
||||
case 0x2:
|
||||
return new BaseQ<uint32_t>(machInst, dest, op1);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
} else {
|
||||
switch (size) {
|
||||
case 0x0:
|
||||
return new BaseD<uint8_t>(machInst, dest, op1);
|
||||
case 0x1:
|
||||
return new BaseD<uint16_t>(machInst, dest, op1);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ,
|
||||
template <typename T> class BaseBQ>
|
||||
StaticInstPtr
|
||||
decodeNeonUAcrossLanesReg(bool q, unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1)
|
||||
{
|
||||
if (q) {
|
||||
switch (size) {
|
||||
case 0x0:
|
||||
return new BaseQ<uint8_t>(machInst, dest, op1);
|
||||
case 0x1:
|
||||
return new BaseQ<uint16_t>(machInst, dest, op1);
|
||||
case 0x2:
|
||||
return new BaseBQ<uint32_t>(machInst, dest, op1);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
} else {
|
||||
switch (size) {
|
||||
case 0x0:
|
||||
return new BaseD<uint8_t>(machInst, dest, op1);
|
||||
case 0x1:
|
||||
return new BaseD<uint16_t>(machInst, dest, op1);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ>
|
||||
StaticInstPtr
|
||||
decodeNeonSAcrossLanesReg(bool q, unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1)
|
||||
{
|
||||
if (q) {
|
||||
switch (size) {
|
||||
case 0x0:
|
||||
return new BaseQ<int8_t>(machInst, dest, op1);
|
||||
case 0x1:
|
||||
return new BaseQ<int16_t>(machInst, dest, op1);
|
||||
case 0x2:
|
||||
return new BaseQ<int32_t>(machInst, dest, op1);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
} else {
|
||||
switch (size) {
|
||||
case 0x0:
|
||||
return new BaseD<int8_t>(machInst, dest, op1);
|
||||
case 0x1:
|
||||
return new BaseD<int16_t>(machInst, dest, op1);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ,
|
||||
template <typename T> class BaseBQ>
|
||||
StaticInstPtr
|
||||
decodeNeonUAcrossLanesLongReg(bool q, unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1)
|
||||
{
|
||||
if (q) {
|
||||
switch (size) {
|
||||
case 0x0:
|
||||
return new BaseQ<uint8_t>(machInst, dest, op1);
|
||||
case 0x1:
|
||||
return new BaseQ<uint16_t>(machInst, dest, op1);
|
||||
case 0x2:
|
||||
return new BaseBQ<uint32_t>(machInst, dest, op1);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
} else {
|
||||
switch (size) {
|
||||
case 0x0:
|
||||
return new BaseD<uint8_t>(machInst, dest, op1);
|
||||
case 0x1:
|
||||
return new BaseD<uint16_t>(machInst, dest, op1);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename T> class BaseD,
|
||||
template <typename T> class BaseQ,
|
||||
template <typename T> class BaseBQ>
|
||||
StaticInstPtr
|
||||
decodeNeonSAcrossLanesLongReg(bool q, unsigned size, ExtMachInst machInst,
|
||||
IntRegIndex dest, IntRegIndex op1)
|
||||
{
|
||||
if (q) {
|
||||
switch (size) {
|
||||
case 0x0:
|
||||
return new BaseQ<int8_t>(machInst, dest, op1);
|
||||
case 0x1:
|
||||
return new BaseQ<int16_t>(machInst, dest, op1);
|
||||
case 0x2:
|
||||
return new BaseBQ<int32_t>(machInst, dest, op1);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
} else {
|
||||
switch (size) {
|
||||
case 0x0:
|
||||
return new BaseD<int8_t>(machInst, dest, op1);
|
||||
case 0x1:
|
||||
return new BaseD<int16_t>(machInst, dest, op1);
|
||||
default:
|
||||
return new Unknown(machInst);
|
||||
}
|
||||
}
|
||||
}
|
||||
}};
|
||||
|
||||
output exec {{
|
||||
|
@@ -872,10 +1369,7 @@ let {{
|
|||
readDestCode = 'destElem = gtoh(destReg.elements[i]);'
|
||||
eWalkCode += '''
|
||||
if (imm < 0 && imm >= eCount) {
|
||||
if (FullSystem)
|
||||
fault = new UndefinedInstruction;
|
||||
else
|
||||
fault = new UndefinedInstruction(false, mnemonic);
|
||||
fault = new UndefinedInstruction(machInst, false, mnemonic);
|
||||
} else {
|
||||
for (unsigned i = 0; i < eCount; i++) {
|
||||
Element srcElem1 = gtoh(srcReg1.elements[i]);
|
||||
|
@@ -926,10 +1420,7 @@ let {{
|
|||
readDestCode = 'destElem = gtoh(destReg.elements[i]);'
|
||||
eWalkCode += '''
|
||||
if (imm < 0 && imm >= eCount) {
|
||||
if (FullSystem)
|
||||
fault = new UndefinedInstruction;
|
||||
else
|
||||
fault = new UndefinedInstruction(false, mnemonic);
|
||||
fault = new UndefinedInstruction(machInst, false, mnemonic);
|
||||
} else {
|
||||
for (unsigned i = 0; i < eCount; i++) {
|
||||
Element srcElem1 = gtoh(srcReg1.elements[i]);
|
||||
|
@@ -978,10 +1469,7 @@ let {{
|
|||
readDestCode = 'destReg = destRegs[i];'
|
||||
eWalkCode += '''
|
||||
if (imm < 0 && imm >= eCount) {
|
||||
if (FullSystem)
|
||||
fault = new UndefinedInstruction;
|
||||
else
|
||||
fault = new UndefinedInstruction(false, mnemonic);
|
||||
fault = new UndefinedInstruction(machInst, false, mnemonic);
|
||||
} else {
|
||||
for (unsigned i = 0; i < rCount; i++) {
|
||||
FloatReg srcReg1 = srcRegs1[i];
|
||||
|
@@ -2156,7 +2644,7 @@ let {{
|
|||
bool done;
|
||||
destReg = processNans(fpscr, done, true, srcReg1, srcReg2);
|
||||
if (!done) {
|
||||
destReg = binaryOp(fpscr, srcReg1, srcReg2, fpMaxS,
|
||||
destReg = binaryOp(fpscr, srcReg1, srcReg2, fpMax<float>,
|
||||
true, true, VfpRoundNearest);
|
||||
} else if (flushToZero(srcReg1, srcReg2)) {
|
||||
fpscr.idc = 1;
|
||||
|
@@ -2171,7 +2659,7 @@ let {{
|
|||
bool done;
|
||||
destReg = processNans(fpscr, done, true, srcReg1, srcReg2);
|
||||
if (!done) {
|
||||
destReg = binaryOp(fpscr, srcReg1, srcReg2, fpMinS,
|
||||
destReg = binaryOp(fpscr, srcReg1, srcReg2, fpMin<float>,
|
||||
true, true, VfpRoundNearest);
|
||||
} else if (flushToZero(srcReg1, srcReg2)) {
|
||||
fpscr.idc = 1;
|
||||
|
@@ -2234,6 +2722,24 @@ let {{
|
|||
threeEqualRegInstFp("vmla", "NVmlaDFp", "SimdFloatMultAccOp", ("float",), 2, vmlafpCode, True)
|
||||
threeEqualRegInstFp("vmla", "NVmlaQFp", "SimdFloatMultAccOp", ("float",), 4, vmlafpCode, True)
|
||||
|
||||
vfmafpCode = '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
destReg = ternaryOp(fpscr, srcReg1, srcReg2, destReg, fpMulAdd<float>,
|
||||
true, true, VfpRoundNearest);
|
||||
FpscrExc = fpscr;
|
||||
'''
|
||||
threeEqualRegInstFp("vfma", "NVfmaDFp", "SimdFloatMultAccOp", ("float",), 2, vfmafpCode, True)
|
||||
threeEqualRegInstFp("vfma", "NVfmaQFp", "SimdFloatMultAccOp", ("float",), 4, vfmafpCode, True)
|
||||
|
||||
vfmsfpCode = '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
destReg = ternaryOp(fpscr, -srcReg1, srcReg2, destReg, fpMulAdd<float>,
|
||||
true, true, VfpRoundNearest);
|
||||
FpscrExc = fpscr;
|
||||
'''
|
||||
threeEqualRegInstFp("vfms", "NVfmsDFp", "SimdFloatMultAccOp", ("float",), 2, vfmsfpCode, True)
|
||||
threeEqualRegInstFp("vfms", "NVfmsQFp", "SimdFloatMultAccOp", ("float",), 4, vfmsfpCode, True)
|
||||
|
||||
vmlsfpCode = '''
|
||||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
float mid = binaryOp(fpscr, srcReg1, srcReg2, fpMulS,
|
||||
|
@@ -2765,7 +3271,7 @@ let {{
|
|||
fpscr.idc = 1;
|
||||
VfpSavedState state = prepFpState(VfpRoundNearest);
|
||||
__asm__ __volatile__("" : "=m" (srcElem1) : "m" (srcElem1));
|
||||
destReg = vfpFpSToFixed(srcElem1, false, false, imm);
|
||||
destReg = vfpFpToFixed<float>(srcElem1, false, 32, imm);
|
||||
__asm__ __volatile__("" :: "m" (destReg));
|
||||
finishVfp(fpscr, state, true);
|
||||
FpscrExc = fpscr;
|
||||
|
@@ -2781,7 +3287,7 @@ let {{
|
|||
fpscr.idc = 1;
|
||||
VfpSavedState state = prepFpState(VfpRoundNearest);
|
||||
__asm__ __volatile__("" : "=m" (srcElem1) : "m" (srcElem1));
|
||||
destReg = vfpFpSToFixed(srcElem1, true, false, imm);
|
||||
destReg = vfpFpToFixed<float>(srcElem1, true, 32, imm);
|
||||
__asm__ __volatile__("" :: "m" (destReg));
|
||||
finishVfp(fpscr, state, true);
|
||||
FpscrExc = fpscr;
|
||||
|
@@ -2795,7 +3301,7 @@ let {{
|
|||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
VfpSavedState state = prepFpState(VfpRoundNearest);
|
||||
__asm__ __volatile__("" : "=m" (srcReg1) : "m" (srcReg1));
|
||||
destElem = vfpUFixedToFpS(true, true, srcReg1, false, imm);
|
||||
destElem = vfpUFixedToFpS(true, true, srcReg1, 32, imm);
|
||||
__asm__ __volatile__("" :: "m" (destElem));
|
||||
finishVfp(fpscr, state, true);
|
||||
FpscrExc = fpscr;
|
||||
|
@@ -2809,7 +3315,7 @@ let {{
|
|||
FPSCR fpscr = (FPSCR) FpscrExc;
|
||||
VfpSavedState state = prepFpState(VfpRoundNearest);
|
||||
__asm__ __volatile__("" : "=m" (srcReg1) : "m" (srcReg1));
|
||||
destElem = vfpSFixedToFpS(true, true, srcReg1, false, imm);
|
||||
destElem = vfpSFixedToFpS(true, true, srcReg1, 32, imm);
|
||||
__asm__ __volatile__("" :: "m" (destElem));
|
||||
finishVfp(fpscr, state, true);
|
||||
FpscrExc = fpscr;
|
||||
|
@@ -3296,10 +3802,7 @@ let {{
|
|||
} else {
|
||||
index -= eCount;
|
||||
if (index >= eCount) {
|
||||
if (FullSystem)
|
||||
fault = new UndefinedInstruction;
|
||||
else
|
||||
fault = new UndefinedInstruction(false, mnemonic);
|
||||
fault = new UndefinedInstruction(machInst, false, mnemonic);
|
||||
} else {
|
||||
destReg.elements[i] = srcReg2.elements[index];
|
||||
}
|
||||
|
|
3355
src/arch/arm/isa/insts/neon64.isa
Normal file
File diff suppressed because it is too large
471
src/arch/arm/isa/insts/neon64_mem.isa
Normal file
|
@@ -0,0 +1,471 @@
|
|||
// -*- mode: c++ -*-
|
||||
|
||||
// Copyright (c) 2012-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
// not be construed as granting a license to any other intellectual
|
||||
// property including but not limited to intellectual property relating
|
||||
// to a hardware implementation of the functionality of the software
|
||||
// licensed hereunder. You may use the software subject to the license
|
||||
// terms below provided that you ensure that this notice is replicated
|
||||
// unmodified and in its entirety in all distributions of the software,
|
||||
// modified or unmodified, in source code or in binary form.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met: redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer;
|
||||
// redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution;
|
||||
// neither the name of the copyright holders nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Authors: Mbou Eyole
|
||||
// Giacomo Gabrielli
|
||||
|
||||
let {{
|
||||
|
||||
header_output = ''
|
||||
decoder_output = ''
|
||||
exec_output = ''
|
||||
|
||||
def mkMemAccMicroOp(name):
|
||||
global header_output, decoder_output, exec_output
|
||||
SPAlignmentCheckCodeNeon = '''
|
||||
if (baseIsSP && bits(XURa, 3, 0) &&
|
||||
SPAlignmentCheckEnabled(xc->tcBase())) {
|
||||
return new SPAlignmentFault();
|
||||
}
|
||||
'''
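# (Note, not part of the patch: bits(XURa, 3, 0) is non-zero whenever the
# base address is not 16-byte aligned, so the check above only faults when
# SP is the base register and SP alignment checking is enabled.)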
|
||||
eaCode = SPAlignmentCheckCodeNeon + '''
|
||||
EA = XURa + imm;
|
||||
'''
|
||||
memDecl = '''
|
||||
const int MaxNumBytes = 16;
|
||||
union MemUnion {
|
||||
uint8_t bytes[MaxNumBytes];
|
||||
uint32_t floatRegBits[MaxNumBytes / 4];
|
||||
};
|
||||
'''
|
||||
|
||||
# Do endian conversion for all the elements
|
||||
convCode = '''
|
||||
VReg x = {0, 0};
|
||||
|
||||
x.lo = (((XReg) memUnion.floatRegBits[1]) << 32) |
|
||||
(XReg) memUnion.floatRegBits[0];
|
||||
x.hi = (((XReg) memUnion.floatRegBits[3]) << 32) |
|
||||
(XReg) memUnion.floatRegBits[2];
|
||||
|
||||
const unsigned eCount = 16 / (1 << eSize);
|
||||
|
||||
if (isBigEndian64(xc->tcBase())) {
|
||||
for (unsigned i = 0; i < eCount; i++) {
|
||||
switch (eSize) {
|
||||
case 0x3: // 64-bit
|
||||
writeVecElem(&x, (XReg) gtobe(
|
||||
(uint64_t) readVecElem(x, i, eSize)), i, eSize);
|
||||
break;
|
||||
case 0x2: // 32-bit
|
||||
writeVecElem(&x, (XReg) gtobe(
|
||||
(uint32_t) readVecElem(x, i, eSize)), i, eSize);
|
||||
break;
|
||||
case 0x1: // 16-bit
|
||||
writeVecElem(&x, (XReg) gtobe(
|
||||
(uint16_t) readVecElem(x, i, eSize)), i, eSize);
|
||||
break;
|
||||
default: // 8-bit
|
||||
break; // Nothing to do here
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (unsigned i = 0; i < eCount; i++) {
|
||||
switch (eSize) {
|
||||
case 0x3: // 64-bit
|
||||
writeVecElem(&x, (XReg) gtole(
|
||||
(uint64_t) readVecElem(x, i, eSize)), i, eSize);
|
||||
break;
|
||||
case 0x2: // 32-bit
|
||||
writeVecElem(&x, (XReg) gtole(
|
||||
(uint32_t) readVecElem(x, i, eSize)), i, eSize);
|
||||
break;
|
||||
case 0x1: // 16-bit
|
||||
writeVecElem(&x, (XReg) gtole(
|
||||
(uint16_t) readVecElem(x, i, eSize)), i, eSize);
|
||||
break;
|
||||
default: // 8-bit
|
||||
break; // Nothing to do here
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
memUnion.floatRegBits[0] = (uint32_t) x.lo;
|
||||
memUnion.floatRegBits[1] = (uint32_t) (x.lo >> 32);
|
||||
memUnion.floatRegBits[2] = (uint32_t) x.hi;
|
||||
memUnion.floatRegBits[3] = (uint32_t) (x.hi >> 32);
|
||||
'''
|
||||
|
||||
# Offload everything into registers
|
||||
regSetCode = ''
|
||||
for reg in range(4):
|
||||
regSetCode += '''
|
||||
AA64FpDestP%(reg)d_uw = gtoh(memUnion.floatRegBits[%(reg)d]);
|
||||
''' % { 'reg' : reg }
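# (Illustrative, not part of the patch: for reg == 0 the template above
# expands to "AA64FpDestP0_uw = gtoh(memUnion.floatRegBits[0]);".)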
|
||||
|
||||
# Pull everything in from registers
|
||||
regGetCode = ''
|
||||
for reg in range(4):
|
||||
regGetCode += '''
|
||||
memUnion.floatRegBits[%(reg)d] = htog(AA64FpDestP%(reg)d_uw);
|
||||
''' % { 'reg' : reg }
|
||||
|
||||
loadMemAccCode = convCode + regSetCode
|
||||
storeMemAccCode = regGetCode + convCode
|
||||
|
||||
loadIop = InstObjParams(name + 'ld',
|
||||
'MicroNeonLoad64',
|
||||
'MicroNeonMemOp',
|
||||
{ 'mem_decl' : memDecl,
|
||||
'memacc_code' : loadMemAccCode,
|
||||
'ea_code' : simd64EnabledCheckCode + eaCode,
|
||||
},
|
||||
[ 'IsMicroop', 'IsMemRef', 'IsLoad' ])
|
||||
storeIop = InstObjParams(name + 'st',
|
||||
'MicroNeonStore64',
|
||||
'MicroNeonMemOp',
|
||||
{ 'mem_decl' : memDecl,
|
||||
'memacc_code' : storeMemAccCode,
|
||||
'ea_code' : simd64EnabledCheckCode + eaCode,
|
||||
},
|
||||
[ 'IsMicroop', 'IsMemRef', 'IsStore' ])
|
||||
|
||||
exec_output += NeonLoadExecute64.subst(loadIop) + \
|
||||
NeonLoadInitiateAcc64.subst(loadIop) + \
|
||||
NeonLoadCompleteAcc64.subst(loadIop) + \
|
||||
NeonStoreExecute64.subst(storeIop) + \
|
||||
NeonStoreInitiateAcc64.subst(storeIop) + \
|
||||
NeonStoreCompleteAcc64.subst(storeIop)
|
||||
header_output += MicroNeonMemDeclare64.subst(loadIop) + \
|
||||
MicroNeonMemDeclare64.subst(storeIop)
|
||||
|
||||
def mkMarshalMicroOp(name, Name):
|
||||
global header_output, decoder_output, exec_output
|
||||
|
||||
getInputCodeOp1L = ''
|
||||
for v in range(4):
|
||||
for p in range(4):
|
||||
getInputCodeOp1L += '''
|
||||
writeVecElem(&input[%(v)d], (XReg) AA64FpOp1P%(p)dV%(v)d_uw,
|
||||
%(p)d, 0x2);
|
||||
''' % { 'v' : v, 'p' : p }
|
||||
|
||||
getInputCodeOp1S = ''
|
||||
for v in range(4):
|
||||
for p in range(4):
|
||||
getInputCodeOp1S += '''
|
||||
writeVecElem(&input[%(v)d], (XReg) AA64FpOp1P%(p)dV%(v)dS_uw,
|
||||
%(p)d, 0x2);
|
||||
''' % { 'v' : v, 'p' : p }
|
||||
|
||||
if name == 'deint_neon_uop':
|
||||
|
||||
eCode = '''
|
||||
VReg input[4]; // input data from scratch area
|
||||
VReg output[2]; // output data to arch. SIMD regs
|
||||
VReg temp;
|
||||
temp.lo = 0;
|
||||
temp.hi = 0;
|
||||
'''
|
||||
for p in range(4):
|
||||
eCode += '''
|
||||
writeVecElem(&temp, (XReg) AA64FpDestP%(p)dV1L_uw, %(p)d, 0x2);
|
||||
''' % { 'p' : p }
|
||||
eCode += getInputCodeOp1L
|
||||
|
||||
# Note that numRegs is not always the same as numStructElems; in
|
||||
# particular, for LD1/ST1, numStructElems is 1 but numRegs can be
|
||||
# 1, 2, 3 or 4
|
||||
|
||||
eCode += '''
|
||||
output[0].lo = 0;
|
||||
output[0].hi = 0;
|
||||
output[1].lo = 0;
|
||||
output[1].hi = 0;
|
||||
|
||||
int eCount = dataSize / (8 << eSize);
|
||||
int eSizeBytes = 1 << eSize; // element size in bytes
|
||||
int numBytes = step * dataSize / 4;
|
||||
int totNumBytes = numRegs * dataSize / 8;
|
||||
|
||||
int structElemNo, pos, a, b;
|
||||
XReg data;
|
||||
|
||||
for (int r = 0; r < 2; ++r) {
|
||||
for (int i = 0; i < eCount; ++i) {
|
||||
if (numBytes < totNumBytes) {
|
||||
structElemNo = r + (step * 2);
|
||||
if (numStructElems == 1) {
|
||||
pos = (eSizeBytes * i) +
|
||||
(eCount * structElemNo * eSizeBytes);
|
||||
} else {
|
||||
pos = (numStructElems * eSizeBytes * i) +
|
||||
(structElemNo * eSizeBytes);
|
||||
}
|
||||
a = pos / 16;
|
||||
b = (pos % 16) / eSizeBytes;
|
||||
data = (XReg) readVecElem(input[a], (XReg) b,
|
||||
eSize);
|
||||
writeVecElem(&output[r], data, i, eSize);
|
||||
numBytes += eSizeBytes;
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
for p in range(4):
|
||||
eCode += '''
|
||||
AA64FpDestP%(p)dV0L_uw = (uint32_t) readVecElem(output[0],
|
||||
%(p)d, 0x2);
|
||||
''' % { 'p' : p }
|
||||
eCode += '''
|
||||
if ((numRegs % 2 == 0) || (numRegs == 3 && step == 0)) {
|
||||
'''
|
||||
for p in range(4):
|
||||
eCode += '''
|
||||
AA64FpDestP%(p)dV1L_uw = (uint32_t) readVecElem(
|
||||
output[1], %(p)d, 0x2);
|
||||
''' % { 'p' : p }
|
||||
eCode += '''
|
||||
} else {
|
||||
'''
|
||||
for p in range(4):
|
||||
eCode += '''
|
||||
AA64FpDestP%(p)dV1L_uw = (uint32_t) readVecElem(temp,
|
||||
%(p)d, 0x2);
|
||||
''' % { 'p' : p }
|
||||
eCode += '''
|
||||
}
|
||||
'''
|
||||
|
||||
iop = InstObjParams(name, Name, 'MicroNeonMixOp64',
|
||||
{ 'code' : eCode }, ['IsMicroop'])
|
||||
header_output += MicroNeonMixDeclare64.subst(iop)
|
||||
exec_output += MicroNeonMixExecute64.subst(iop)
|
||||
|
||||
elif name == 'int_neon_uop':
|
||||
|
||||
eCode = '''
|
||||
VReg input[4]; // input data from arch. SIMD regs
|
||||
VReg output[2]; // output data to scratch area
|
||||
'''
|
||||
|
||||
eCode += getInputCodeOp1S
|
||||
|
||||
# Note that numRegs is not always the same as numStructElems; in
|
||||
# particular, for LD1/ST1, numStructElems is 1 but numRegs can be
|
||||
# 1, 2, 3 or 4
|
||||
|
||||
eCode += '''
|
||||
int eCount = dataSize / (8 << eSize);
|
||||
int eSizeBytes = 1 << eSize;
|
||||
int totNumBytes = numRegs * dataSize / 8;
|
||||
int numOutputElems = 128 / (8 << eSize);
|
||||
int stepOffset = step * 32;
|
||||
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
output[i].lo = 0;
|
||||
output[i].hi = 0;
|
||||
}
|
||||
|
||||
int r = 0, k = 0, i, j;
|
||||
XReg data;
|
||||
|
||||
for (int pos = stepOffset; pos < 32 + stepOffset;
|
||||
pos += eSizeBytes) {
|
||||
if (pos < totNumBytes) {
|
||||
if (numStructElems == 1) {
|
||||
i = (pos / eSizeBytes) % eCount;
|
||||
j = pos / (eCount * eSizeBytes);
|
||||
} else {
|
||||
i = pos / (numStructElems * eSizeBytes);
|
||||
j = (pos % (numStructElems * eSizeBytes)) /
|
||||
eSizeBytes;
|
||||
}
|
||||
data = (XReg) readVecElem(input[j], (XReg) i, eSize);
|
||||
writeVecElem(&output[r], data, k, eSize);
|
||||
k++;
|
||||
if (k == numOutputElems){
|
||||
k = 0;
|
||||
++r;
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
for v in range(2):
|
||||
for p in range(4):
|
||||
eCode += '''
|
||||
AA64FpDestP%(p)dV%(v)d_uw = (uint32_t) readVecElem(
|
||||
output[%(v)d], %(p)d, 0x2);
|
||||
''' % { 'v': v, 'p': p}
|
||||
|
||||
iop = InstObjParams(name, Name, 'MicroNeonMixOp64',
|
||||
{ 'code' : eCode }, ['IsMicroop'])
|
||||
header_output += MicroNeonMixDeclare64.subst(iop)
|
||||
exec_output += MicroNeonMixExecute64.subst(iop)
|
||||
|
||||
elif name == 'unpack_neon_uop':
|
||||
|
||||
eCode = '''
|
||||
VReg input[4]; //input data from scratch area
|
||||
VReg output[2]; //output data to arch. SIMD regs
|
||||
'''
|
||||
|
||||
eCode += getInputCodeOp1L
|
||||
|
||||
# Fill output regs with register data initially. Note that
|
||||
# elements in output register outside indexed lanes are left
|
||||
# untouched
|
||||
for v in range(2):
|
||||
for p in range(4):
|
||||
eCode += '''
|
||||
writeVecElem(&output[%(v)d], (XReg) AA64FpDestP%(p)dV%(v)dL_uw,
|
||||
%(p)d, 0x2);
|
||||
''' % { 'v': v, 'p': p}
|
||||
eCode += '''
|
||||
int eCount = dataSize / (8 << eSize);
|
||||
int eCount128 = 128 / (8 << eSize);
|
||||
int eSizeBytes = 1 << eSize;
|
||||
int totNumBytes = numStructElems * eSizeBytes;
|
||||
int numInputElems = eCount128;
|
||||
int stepOffset = step * 2 * eSizeBytes;
|
||||
int stepLimit = 2 * eSizeBytes;
|
||||
|
||||
int r = 0, i, j;
|
||||
XReg data;
|
||||
|
||||
for (int pos = stepOffset; pos < stepLimit + stepOffset;
|
||||
pos += eSizeBytes) {
|
||||
if (pos < totNumBytes) {
|
||||
r = pos / eSizeBytes;
|
||||
j = r / numInputElems;
|
||||
i = r % numInputElems;
|
||||
data = (XReg) readVecElem(input[j], (XReg) i, eSize);
|
||||
|
||||
if (replicate) {
|
||||
for (int i = 0; i < eCount128; ++i) {
|
||||
if (i < eCount) {
|
||||
writeVecElem(&output[r % 2], data, i,
|
||||
eSize);
|
||||
} else { // zero extend if necessary
|
||||
writeVecElem(&output[r % 2], (XReg) 0, i,
|
||||
eSize);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
writeVecElem(&output[r % 2], data, lane, eSize);
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
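# Illustrative sketch (not part of the patch): the replicate path above
# (LD1R-style) broadcasts the loaded element to every lane inside the
# vector length and zeroes the lanes beyond it; the non-replicating path
# touches only the indexed lane.
def broadcast(data, eCount, eCount128):
    return [data if i < eCount else 0 for i in range(eCount128)]
assert broadcast(7, 2, 4) == [7, 7, 0, 0]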
|
||||
for v in range(2):
|
||||
for p in range(4):
|
||||
eCode += '''
|
||||
AA64FpDestP%(p)dV%(v)dL_uw = (uint32_t) readVecElem(
|
||||
output[%(v)d], %(p)d, 0x2);
|
||||
''' % { 'v' : v, 'p' : p }
|
||||
|
||||
iop = InstObjParams(name, Name, 'MicroNeonMixLaneOp64',
|
||||
{ 'code' : eCode }, ['IsMicroop'])
|
||||
header_output += MicroNeonMixLaneDeclare64.subst(iop)
|
||||
exec_output += MicroNeonMixExecute64.subst(iop)
|
||||
|
||||
elif name == 'pack_neon_uop':
|
||||
|
||||
eCode = '''
|
||||
VReg input[4]; // input data from arch. SIMD regs
|
||||
VReg output[2]; // output data to scratch area
|
||||
'''
|
||||
|
||||
eCode += getInputCodeOp1S
|
||||
|
||||
eCode += '''
|
||||
int eSizeBytes = 1 << eSize;
|
||||
int numOutputElems = 128 / (8 << eSize);
|
||||
int totNumBytes = numStructElems * eSizeBytes;
|
||||
int stepOffset = step * 32;
|
||||
int stepLimit = 32;
|
||||
|
||||
int r = 0, i, j;
|
||||
XReg data;
|
||||
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
output[i].lo = 0;
|
||||
output[i].hi = 0;
|
||||
}
|
||||
|
||||
for (int pos = stepOffset; pos < stepLimit + stepOffset;
|
||||
pos += eSizeBytes) {
|
||||
if (pos < totNumBytes) {
|
||||
r = pos / 16;
|
||||
j = pos / eSizeBytes;
|
||||
i = (pos / eSizeBytes) % numOutputElems;
|
||||
data = (XReg) readVecElem(input[j], lane, eSize);
|
||||
writeVecElem(&output[r % 2], data, i, eSize);
|
||||
}
|
||||
}
|
||||
'''
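# Illustrative sketch (hypothetical helper, not part of the patch): the pack
# micro-op above gathers the same lane from each source register into
# consecutive elements of the scratch area; r selects the 128-bit scratch
# half, j the source register, i the element within the half.
def pack_index(pos, eSizeBytes, numOutputElems):
    r = pos // 16
    j = pos // eSizeBytes
    i = (pos // eSizeBytes) % numOutputElems
    return r, j, i
assert pack_index(8, 4, 4) == (0, 2, 2)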
|
||||
|
||||
for v in range(2):
|
||||
for p in range(4):
|
||||
eCode += '''
|
||||
AA64FpDestP%(p)dV%(v)d_uw = (uint32_t) readVecElem(
|
||||
output[%(v)d], %(p)d, 0x2);
|
||||
''' % { 'v' : v, 'p' : p }
|
||||
|
||||
iop = InstObjParams(name, Name, 'MicroNeonMixLaneOp64',
|
||||
{ 'code' : eCode }, ['IsMicroop'])
|
||||
header_output += MicroNeonMixLaneDeclare64.subst(iop)
|
||||
exec_output += MicroNeonMixExecute64.subst(iop)
|
||||
|
||||
# Generate instructions
|
||||
mkMemAccMicroOp('mem_neon_uop')
|
||||
mkMarshalMicroOp('deint_neon_uop', 'MicroDeintNeon64')
|
||||
mkMarshalMicroOp('int_neon_uop', 'MicroIntNeon64')
|
||||
mkMarshalMicroOp('unpack_neon_uop', 'MicroUnpackNeon64')
|
||||
mkMarshalMicroOp('pack_neon_uop', 'MicroPackNeon64')
|
||||
|
||||
}};
|
||||
|
||||
let {{
|
||||
|
||||
iop = InstObjParams('vldmult64', 'VldMult64', 'VldMultOp64', '', [])
|
||||
header_output += VMemMultDeclare64.subst(iop)
|
||||
decoder_output += VMemMultConstructor64.subst(iop)
|
||||
|
||||
iop = InstObjParams('vstmult64', 'VstMult64', 'VstMultOp64', '', [])
|
||||
header_output += VMemMultDeclare64.subst(iop)
|
||||
decoder_output += VMemMultConstructor64.subst(iop)
|
||||
|
||||
iop = InstObjParams('vldsingle64', 'VldSingle64', 'VldSingleOp64', '', [])
|
||||
header_output += VMemSingleDeclare64.subst(iop)
|
||||
decoder_output += VMemSingleConstructor64.subst(iop)
|
||||
|
||||
iop = InstObjParams('vstsingle64', 'VstSingle64', 'VstSingleOp64', '', [])
|
||||
header_output += VMemSingleDeclare64.subst(iop)
|
||||
decoder_output += VMemSingleConstructor64.subst(iop)
|
||||
|
||||
}};
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010-2011 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -38,6 +38,7 @@
|
|||
// Authors: Gabe Black
|
||||
|
||||
let {{
|
||||
import math
|
||||
|
||||
header_output = ""
|
||||
decoder_output = ""
|
||||
|
@ -77,7 +78,9 @@ let {{
|
|||
(newHeader,
|
||||
newDecoder,
|
||||
newExec) = self.fillTemplates(self.name, self.Name, codeBlobs,
|
||||
self.memFlags, self.instFlags, base, wbDecl)
|
||||
self.memFlags, self.instFlags,
|
||||
base, wbDecl, None, False,
|
||||
self.size, self.sign)
|
||||
|
||||
header_output += newHeader
|
||||
decoder_output += newDecoder
|
||||
|
@ -171,7 +174,7 @@ let {{
|
|||
self.size, self.sign, self.user)
|
||||
|
||||
# Add memory request flags where necessary
|
||||
self.memFlags.append("%d" % (self.size - 1))
|
||||
self.memFlags.append("%d" % int(math.log(self.size, 2)))
|
||||
if self.user:
|
||||
self.memFlags.append("ArmISA::TLB::UserMode")
|
||||
|
||||
|
|
372 src/arch/arm/isa/insts/str64.isa Normal file
|
@ -0,0 +1,372 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2011-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
// not be construed as granting a license to any other intellectual
|
||||
// property including but not limited to intellectual property relating
|
||||
// to a hardware implementation of the functionality of the software
|
||||
// licensed hereunder. You may use the software subject to the license
|
||||
// terms below provided that you ensure that this notice is replicated
|
||||
// unmodified and in its entirety in all distributions of the software,
|
||||
// modified or unmodified, in source code or in binary form.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met: redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer;
|
||||
// redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution;
|
||||
// neither the name of the copyright holders nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Authors: Gabe Black
|
||||
|
||||
let {{
|
||||
|
||||
header_output = ""
|
||||
decoder_output = ""
|
||||
exec_output = ""
|
||||
|
||||
class StoreInst64(LoadStoreInst):
|
||||
execBase = 'Store64'
|
||||
micro = False
|
||||
|
||||
def __init__(self, mnem, Name, size=4, user=False, flavor="normal",
|
||||
top = False):
|
||||
super(StoreInst64, self).__init__()
|
||||
|
||||
self.name = mnem
|
||||
self.Name = Name
|
||||
self.size = size
|
||||
self.user = user
|
||||
self.flavor = flavor
|
||||
self.top = top
|
||||
|
||||
self.memFlags = ["ArmISA::TLB::MustBeOne"]
|
||||
self.instFlags = []
|
||||
self.codeBlobs = { "postacc_code" : "" }
|
||||
|
||||
# Add memory request flags where necessary
|
||||
if self.user:
|
||||
self.memFlags.append("ArmISA::TLB::UserMode")
|
||||
|
||||
if self.flavor in ("relexp", "exp"):
|
||||
# For exclusive pair ops alignment check is based on total size
|
||||
self.memFlags.append("%d" % int(math.log(self.size, 2) + 1))
|
||||
elif not (self.size == 16 and self.top):
|
||||
# Only the first microop should perform alignment checking.
|
||||
self.memFlags.append("%d" % int(math.log(self.size, 2)))
|
||||
|
||||
if self.flavor not in ("release", "relex", "exclusive",
|
||||
"relexp", "exp"):
|
||||
self.memFlags.append("ArmISA::TLB::AllowUnaligned")
|
||||
|
||||
if self.micro:
|
||||
self.instFlags.append("IsMicroop")
|
||||
|
||||
if self.flavor in ("release", "relex", "relexp"):
|
||||
self.instFlags.extend(["IsMemBarrier",
|
||||
"IsWriteBarrier",
|
||||
"IsReadBarrier"])
|
||||
if self.flavor in ("relex", "exclusive", "exp", "relexp"):
|
||||
self.instFlags.append("IsStoreConditional")
|
||||
self.memFlags.append("Request::LLSC")
|
||||
|
||||
def emitHelper(self, base = 'Memory64', wbDecl = None):
|
||||
global header_output, decoder_output, exec_output
|
||||
|
||||
# If this is a microop itself, don't allow anything that would
|
||||
# require further microcoding.
|
||||
if self.micro:
|
||||
assert not wbDecl
|
||||
|
||||
fa_code = None
|
||||
if not self.micro and self.flavor in ("normal", "release"):
|
||||
fa_code = '''
|
||||
fault->annotate(ArmFault::SAS, %s);
|
||||
fault->annotate(ArmFault::SSE, false);
|
||||
fault->annotate(ArmFault::SRT, dest);
|
||||
fault->annotate(ArmFault::SF, %s);
|
||||
fault->annotate(ArmFault::AR, %s);
|
||||
''' % ("0" if self.size == 1 else
|
||||
"1" if self.size == 2 else
|
||||
"2" if self.size == 4 else "3",
|
||||
"true" if self.size == 8 else "false",
|
||||
"true" if self.flavor == "release" else "false")
|
||||
|
||||
(newHeader, newDecoder, newExec) = \
|
||||
self.fillTemplates(self.name, self.Name, self.codeBlobs,
|
||||
self.memFlags, self.instFlags,
|
||||
base, wbDecl, faCode=fa_code)
|
||||
|
||||
header_output += newHeader
|
||||
decoder_output += newDecoder
|
||||
exec_output += newExec
|
||||
|
||||
def buildEACode(self):
|
||||
# Address computation
|
||||
eaCode = ""
|
||||
if self.flavor == "fp":
|
||||
eaCode += vfp64EnabledCheckCode
|
||||
|
||||
eaCode += SPAlignmentCheckCode + "EA = XBase"
|
||||
if self.size == 16:
|
||||
if self.top:
|
||||
eaCode += " + (isBigEndian64(xc->tcBase()) ? 0 : 8)"
|
||||
else:
|
||||
eaCode += " + (isBigEndian64(xc->tcBase()) ? 8 : 0)"
|
||||
if not self.post:
|
||||
eaCode += self.offset
|
||||
eaCode += ";"
|
||||
|
||||
self.codeBlobs["ea_code"] = eaCode
|
||||
|
||||
|
||||
class StoreImmInst64(StoreInst64):
|
||||
def __init__(self, *args, **kargs):
|
||||
super(StoreImmInst64, self).__init__(*args, **kargs)
|
||||
self.offset = "+ imm"
|
||||
|
||||
self.wbDecl = "MicroAddXiUop(machInst, base, base, imm);"
|
||||
|
||||
class StoreRegInst64(StoreInst64):
|
||||
def __init__(self, *args, **kargs):
|
||||
super(StoreRegInst64, self).__init__(*args, **kargs)
|
||||
self.offset = "+ extendReg64(XOffset, type, shiftAmt, 64)"
|
||||
|
||||
self.wbDecl = \
|
||||
"MicroAddXERegUop(machInst, base, base, " + \
|
||||
" offset, type, shiftAmt);"
|
||||
|
||||
class StoreRawRegInst64(StoreInst64):
|
||||
def __init__(self, *args, **kargs):
|
||||
super(StoreRawRegInst64, self).__init__(*args, **kargs)
|
||||
self.offset = ""
|
||||
|
||||
class StoreSingle64(StoreInst64):
|
||||
def emit(self):
|
||||
self.buildEACode()
|
||||
|
||||
# Code that actually handles the access
|
||||
if self.flavor == "fp":
|
||||
if self.size in (1, 2, 4):
|
||||
accCode = '''
|
||||
Mem%(suffix)s =
|
||||
cSwap(AA64FpDestP0%(suffix)s, isBigEndian64(xc->tcBase()));
|
||||
'''
|
||||
elif self.size == 8 or (self.size == 16 and not self.top):
|
||||
accCode = '''
|
||||
uint64_t data = AA64FpDestP1_uw;
|
||||
data = (data << 32) | AA64FpDestP0_uw;
|
||||
Mem%(suffix)s = cSwap(data, isBigEndian64(xc->tcBase()));
|
||||
'''
|
||||
elif self.size == 16 and self.top:
|
||||
accCode = '''
|
||||
uint64_t data = AA64FpDestP3_uw;
|
||||
data = (data << 32) | AA64FpDestP2_uw;
|
||||
Mem%(suffix)s = cSwap(data, isBigEndian64(xc->tcBase()));
|
||||
'''
|
||||
else:
|
||||
accCode = \
|
||||
'Mem%(suffix)s = cSwap(XDest%(suffix)s, isBigEndian64(xc->tcBase()));'
|
||||
if self.size == 16:
|
||||
accCode = accCode % \
|
||||
{ "suffix" : buildMemSuffix(False, 8) }
|
||||
else:
|
||||
accCode = accCode % \
|
||||
{ "suffix" : buildMemSuffix(False, self.size) }
|
||||
|
||||
self.codeBlobs["memacc_code"] = accCode
|
||||
|
||||
if self.flavor in ("relex", "exclusive"):
|
||||
self.instFlags.append("IsStoreConditional")
|
||||
self.memFlags.append("Request::LLSC")
|
||||
|
||||
# Push it out to the output files
|
||||
wbDecl = None
|
||||
if self.writeback and not self.micro:
|
||||
wbDecl = self.wbDecl
|
||||
self.emitHelper(self.base, wbDecl)
|
||||
|
||||
class StoreDouble64(StoreInst64):
|
||||
def emit(self):
|
||||
self.buildEACode()
|
||||
|
||||
# Code that actually handles the access
|
||||
if self.flavor == "fp":
|
||||
accCode = '''
|
||||
uint64_t data = AA64FpDest2P0_uw;
|
||||
data = (data << 32) | AA64FpDestP0_uw;
|
||||
Mem_ud = cSwap(data, isBigEndian64(xc->tcBase()));
|
||||
'''
|
||||
else:
|
||||
if self.size == 4:
|
||||
accCode = '''
|
||||
uint64_t data = XDest2_uw;
|
||||
data = (data << 32) | XDest_uw;
|
||||
Mem_ud = cSwap(data, isBigEndian64(xc->tcBase()));
|
||||
'''
|
||||
elif self.size == 8:
|
||||
accCode = '''
|
||||
// This temporary needs to be here so that the parser
|
||||
// will correctly identify this instruction as a store.
|
||||
Twin64_t temp;
|
||||
temp.a = XDest_ud;
|
||||
temp.b = XDest2_ud;
|
||||
Mem_tud = temp;
|
||||
'''
|
||||
self.codeBlobs["memacc_code"] = accCode
|
||||
|
||||
# Push it out to the output files
|
||||
wbDecl = None
|
||||
if self.writeback and not self.micro:
|
||||
wbDecl = self.wbDecl
|
||||
self.emitHelper(self.base, wbDecl)
|
||||
|
||||
class StoreImm64(StoreImmInst64, StoreSingle64):
|
||||
decConstBase = 'LoadStoreImm64'
|
||||
base = 'ArmISA::MemoryImm64'
|
||||
writeback = False
|
||||
post = False
|
||||
|
||||
class StorePre64(StoreImmInst64, StoreSingle64):
|
||||
decConstBase = 'LoadStoreImm64'
|
||||
base = 'ArmISA::MemoryPreIndex64'
|
||||
writeback = True
|
||||
post = False
|
||||
|
||||
class StorePost64(StoreImmInst64, StoreSingle64):
|
||||
decConstBase = 'LoadStoreImm64'
|
||||
base = 'ArmISA::MemoryPostIndex64'
|
||||
writeback = True
|
||||
post = True
|
||||
|
||||
class StoreReg64(StoreRegInst64, StoreSingle64):
|
||||
decConstBase = 'LoadStoreReg64'
|
||||
base = 'ArmISA::MemoryReg64'
|
||||
writeback = False
|
||||
post = False
|
||||
|
||||
class StoreRaw64(StoreRawRegInst64, StoreSingle64):
|
||||
decConstBase = 'LoadStoreRaw64'
|
||||
base = 'ArmISA::MemoryRaw64'
|
||||
writeback = False
|
||||
post = False
|
||||
|
||||
class StoreEx64(StoreRawRegInst64, StoreSingle64):
|
||||
decConstBase = 'LoadStoreEx64'
|
||||
base = 'ArmISA::MemoryEx64'
|
||||
writeback = False
|
||||
post = False
|
||||
execBase = 'StoreEx64'
|
||||
def __init__(self, *args, **kargs):
|
||||
super(StoreEx64, self).__init__(*args, **kargs)
|
||||
self.codeBlobs["postacc_code"] = "XResult = !writeResult;"
|
||||
|
||||
def buildStores64(mnem, NameBase, size, flavor="normal"):
|
||||
StoreImm64(mnem, NameBase + "_IMM", size, flavor=flavor).emit()
|
||||
StorePre64(mnem, NameBase + "_PRE", size, flavor=flavor).emit()
|
||||
StorePost64(mnem, NameBase + "_POST", size, flavor=flavor).emit()
|
||||
StoreReg64(mnem, NameBase + "_REG", size, flavor=flavor).emit()
|
||||
|
||||
buildStores64("strb", "STRB64", 1)
|
||||
buildStores64("strh", "STRH64", 2)
|
||||
buildStores64("str", "STRW64", 4)
|
||||
buildStores64("str", "STRX64", 8)
|
||||
buildStores64("str", "STRBFP64", 1, flavor="fp")
|
||||
buildStores64("str", "STRHFP64", 2, flavor="fp")
|
||||
buildStores64("str", "STRSFP64", 4, flavor="fp")
|
||||
buildStores64("str", "STRDFP64", 8, flavor="fp")
|
||||
|
||||
StoreImm64("sturb", "STURB64_IMM", 1).emit()
|
||||
StoreImm64("sturh", "STURH64_IMM", 2).emit()
|
||||
StoreImm64("stur", "STURW64_IMM", 4).emit()
|
||||
StoreImm64("stur", "STURX64_IMM", 8).emit()
|
||||
StoreImm64("stur", "STURBFP64_IMM", 1, flavor="fp").emit()
|
||||
StoreImm64("stur", "STURHFP64_IMM", 2, flavor="fp").emit()
|
||||
StoreImm64("stur", "STURSFP64_IMM", 4, flavor="fp").emit()
|
||||
StoreImm64("stur", "STURDFP64_IMM", 8, flavor="fp").emit()
|
||||
|
||||
StoreImm64("sttrb", "STTRB64_IMM", 1, user=True).emit()
|
||||
StoreImm64("sttrh", "STTRH64_IMM", 2, user=True).emit()
|
||||
StoreImm64("sttr", "STTRW64_IMM", 4, user=True).emit()
|
||||
StoreImm64("sttr", "STTRX64_IMM", 8, user=True).emit()
|
||||
|
||||
StoreRaw64("stlr", "STLRX64", 8, flavor="release").emit()
|
||||
StoreRaw64("stlr", "STLRW64", 4, flavor="release").emit()
|
||||
StoreRaw64("stlrh", "STLRH64", 2, flavor="release").emit()
|
||||
StoreRaw64("stlrb", "STLRB64", 1, flavor="release").emit()
|
||||
|
||||
StoreEx64("stlxr", "STLXRX64", 8, flavor="relex").emit()
|
||||
StoreEx64("stlxr", "STLXRW64", 4, flavor="relex").emit()
|
||||
StoreEx64("stlxrh", "STLXRH64", 2, flavor="relex").emit()
|
||||
StoreEx64("stlxrb", "STLXRB64", 1, flavor="relex").emit()
|
||||
|
||||
StoreEx64("stxr", "STXRX64", 8, flavor="exclusive").emit()
|
||||
StoreEx64("stxr", "STXRW64", 4, flavor="exclusive").emit()
|
||||
StoreEx64("stxrh", "STXRH64", 2, flavor="exclusive").emit()
|
||||
StoreEx64("stxrb", "STXRB64", 1, flavor="exclusive").emit()
|
||||
|
||||
class StoreImmU64(StoreImm64):
|
||||
decConstBase = 'LoadStoreImmU64'
|
||||
micro = True
|
||||
|
||||
class StoreImmDU64(StoreImmInst64, StoreDouble64):
|
||||
decConstBase = 'LoadStoreImmDU64'
|
||||
base = 'ArmISA::MemoryDImm64'
|
||||
micro = True
|
||||
post = False
|
||||
writeback = False
|
||||
|
||||
class StoreImmDEx64(StoreImmInst64, StoreDouble64):
|
||||
execBase = 'StoreEx64'
|
||||
decConstBase = 'StoreImmDEx64'
|
||||
base = 'ArmISA::MemoryDImmEx64'
|
||||
micro = False
|
||||
post = False
|
||||
writeback = False
|
||||
def __init__(self, *args, **kargs):
|
||||
super(StoreImmDEx64, self).__init__(*args, **kargs)
|
||||
self.codeBlobs["postacc_code"] = "XResult = !writeResult;"
|
||||
|
||||
class StoreRegU64(StoreReg64):
|
||||
decConstBase = 'LoadStoreRegU64'
|
||||
micro = True
|
||||
|
||||
StoreImmDEx64("stlxp", "STLXPW64", 4, flavor="relexp").emit()
|
||||
StoreImmDEx64("stlxp", "STLXPX64", 8, flavor="relexp").emit()
|
||||
StoreImmDEx64("stxp", "STXPW64", 4, flavor="exp").emit()
|
||||
StoreImmDEx64("stxp", "STXPX64", 8, flavor="exp").emit()
|
||||
|
||||
StoreImmU64("strxi_uop", "MicroStrXImmUop", 8).emit()
|
||||
StoreRegU64("strxr_uop", "MicroStrXRegUop", 8).emit()
|
||||
StoreImmU64("strfpxi_uop", "MicroStrFpXImmUop", 8, flavor="fp").emit()
|
||||
StoreRegU64("strfpxr_uop", "MicroStrFpXRegUop", 8, flavor="fp").emit()
|
||||
StoreImmU64("strqbfpxi_uop", "MicroStrQBFpXImmUop",
|
||||
16, flavor="fp", top=False).emit()
|
||||
StoreRegU64("strqbfpxr_uop", "MicroStrQBFpXRegUop",
|
||||
16, flavor="fp", top=False).emit()
|
||||
StoreImmU64("strqtfpxi_uop", "MicroStrQTFpXImmUop",
|
||||
16, flavor="fp", top=True).emit()
|
||||
StoreRegU64("strqtfpxr_uop", "MicroStrQTFpXRegUop",
|
||||
16, flavor="fp", top=True).emit()
|
||||
StoreImmDU64("strdxi_uop", "MicroStrDXImmUop", 4).emit()
|
||||
StoreImmDU64("strdfpxi_uop", "MicroStrDFpXImmUop", 4, flavor="fp").emit()
|
||||
|
||||
}};
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010-2011 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -73,10 +73,7 @@ let {{
|
|||
|
||||
swpPreAccCode = '''
|
||||
if (!((SCTLR)Sctlr).sw) {
|
||||
if (FullSystem)
|
||||
return new UndefinedInstruction;
|
||||
else
|
||||
return new UndefinedInstruction(false, mnemonic);
|
||||
return new UndefinedInstruction(machInst, false, mnemonic);
|
||||
}
|
||||
'''
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
// -*- mode:c++ -*-
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -80,6 +80,31 @@ let {{
|
|||
xc->%(func)s(this, %(op_idx)s, %(final_val)s);
|
||||
}
|
||||
'''
|
||||
aarch64Read = '''
|
||||
((xc->%(func)s(this, %(op_idx)s)) & mask(intWidth))
|
||||
'''
|
||||
aarch64Write = '''
|
||||
xc->%(func)s(this, %(op_idx)s, (%(final_val)s) & mask(intWidth))
|
||||
'''
|
||||
aarchX64Read = '''
|
||||
((xc->%(func)s(this, %(op_idx)s)) & mask(aarch64 ? 64 : 32))
|
||||
'''
|
||||
aarchX64Write = '''
|
||||
xc->%(func)s(this, %(op_idx)s, (%(final_val)s) & mask(aarch64 ? 64 : 32))
|
||||
'''
|
||||
aarchW64Read = '''
|
||||
((xc->%(func)s(this, %(op_idx)s)) & mask(32))
|
||||
'''
|
||||
aarchW64Write = '''
|
||||
xc->%(func)s(this, %(op_idx)s, (%(final_val)s) & mask(32))
|
||||
'''
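# Illustrative sketch (not part of the patch): the X/W operand templates
# above differ only in the mask applied on read and write; a W view keeps
# the low 32 bits, an X view keeps all 64.
def mask(nbits):
    return (1 << nbits) - 1
val = 0x1122334455667788
assert (val & mask(32)) == 0x55667788   # W-register semantics
assert (val & mask(64)) == val          # X-register semantics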
|
||||
cntrlNsBankedWrite = '''
|
||||
xc->setMiscReg(flattenMiscRegNsBanked(dest, xc->tcBase()), %(final_val)s)
|
||||
'''
|
||||
|
||||
cntrlNsBankedRead = '''
|
||||
xc->readMiscReg(flattenMiscRegNsBanked(op1, xc->tcBase()))
|
||||
'''
|
||||
|
||||
#PCState operands need to have a sorting index (the number at the end)
|
||||
#less than all the integer registers which might update the PC. That way
|
||||
|
@ -99,6 +124,18 @@ let {{
|
|||
return ('IntReg', 'uw', idx, 'IsInteger', srtNormal,
|
||||
maybePCRead, maybePCWrite)
|
||||
|
||||
def intReg64(idx):
|
||||
return ('IntReg', 'ud', idx, 'IsInteger', srtNormal,
|
||||
aarch64Read, aarch64Write)
|
||||
|
||||
def intRegX64(idx, id = srtNormal):
|
||||
return ('IntReg', 'ud', idx, 'IsInteger', id,
|
||||
aarchX64Read, aarchX64Write)
|
||||
|
||||
def intRegW64(idx, id = srtNormal):
|
||||
return ('IntReg', 'ud', idx, 'IsInteger', id,
|
||||
aarchW64Read, aarchW64Write)
|
||||
|
||||
def intRegNPC(idx):
|
||||
return ('IntReg', 'uw', idx, 'IsInteger', srtNormal)
|
||||
|
||||
|
@ -120,26 +157,49 @@ let {{
|
|||
def cntrlReg(idx, id = srtNormal, type = 'uw'):
|
||||
return ('ControlReg', type, idx, None, id)
|
||||
|
||||
def cntrlNsBankedReg(idx, id = srtNormal, type = 'uw'):
|
||||
return ('ControlReg', type, idx, (None, None, 'IsControl'), id, cntrlNsBankedRead, cntrlNsBankedWrite)
|
||||
|
||||
def cntrlNsBankedReg64(idx, id = srtNormal, type = 'ud'):
|
||||
return ('ControlReg', type, idx, (None, None, 'IsControl'), id, cntrlNsBankedRead, cntrlNsBankedWrite)
|
||||
|
||||
def cntrlRegNC(idx, id = srtNormal, type = 'uw'):
|
||||
return ('ControlReg', type, idx, None, id)
|
||||
|
||||
def pcStateReg(idx, id):
|
||||
return ('PCState', 'uw', idx, (None, None, 'IsControl'), id)
|
||||
return ('PCState', 'ud', idx, (None, None, 'IsControl'), id)
|
||||
}};
|
||||
|
||||
def operands {{
|
||||
#Abstracted integer reg operands
|
||||
'Dest': intReg('dest'),
|
||||
'Dest64': intReg64('dest'),
|
||||
'XDest': intRegX64('dest'),
|
||||
'WDest': intRegW64('dest'),
|
||||
'IWDest': intRegIWPC('dest'),
|
||||
'AIWDest': intRegAIWPC('dest'),
|
||||
'Dest2': intReg('dest2'),
|
||||
'XDest2': intRegX64('dest2'),
|
||||
'FDest2': floatReg('dest2'),
|
||||
'Result': intReg('result'),
|
||||
'XResult': intRegX64('result'),
|
||||
'XBase': intRegX64('base', id = srtBase),
|
||||
'Base': intRegAPC('base', id = srtBase),
|
||||
'XOffset': intRegX64('offset'),
|
||||
'Index': intReg('index'),
|
||||
'Shift': intReg('shift'),
|
||||
'Op1': intReg('op1'),
|
||||
'Op2': intReg('op2'),
|
||||
'Op3': intReg('op3'),
|
||||
'Op164': intReg64('op1'),
|
||||
'Op264': intReg64('op2'),
|
||||
'Op364': intReg64('op3'),
|
||||
'XOp1': intRegX64('op1'),
|
||||
'XOp2': intRegX64('op2'),
|
||||
'XOp3': intRegX64('op3'),
|
||||
'WOp1': intRegW64('op1'),
|
||||
'WOp2': intRegW64('op2'),
|
||||
'WOp3': intRegW64('op3'),
|
||||
'Reg0': intReg('reg0'),
|
||||
'Reg1': intReg('reg1'),
|
||||
'Reg2': intReg('reg2'),
|
||||
|
@ -147,13 +207,19 @@ def operands {{
|
|||
|
||||
#Fixed index integer reg operands
|
||||
'SpMode': intRegNPC('intRegInMode((OperatingMode)regMode, INTREG_SP)'),
|
||||
'DecodedBankedIntReg': intRegNPC('decodeMrsMsrBankedIntRegIndex(byteMask, r)'),
|
||||
'LR': intRegNPC('INTREG_LR'),
|
||||
'XLR': intRegX64('INTREG_X30'),
|
||||
'R7': intRegNPC('7'),
|
||||
# First four arguments are passed in registers
|
||||
'R0': intRegNPC('0'),
|
||||
'R1': intRegNPC('1'),
|
||||
'R2': intRegNPC('2'),
|
||||
'R3': intRegNPC('3'),
|
||||
'X0': intRegX64('0'),
|
||||
'X1': intRegX64('1'),
|
||||
'X2': intRegX64('2'),
|
||||
'X3': intRegX64('3'),
|
||||
|
||||
#Pseudo integer condition code registers
|
||||
'CondCodesNZ': intRegCC('INTREG_CONDCODES_NZ'),
|
||||
|
@ -230,9 +296,95 @@ def operands {{
|
|||
'FpOp2P2': floatReg('(op2 + 2)'),
|
||||
'FpOp2P3': floatReg('(op2 + 3)'),
|
||||
|
||||
# Create AArch64 unpacked view of the FP registers
|
||||
'AA64FpOp1P0': floatReg('((op1 * 4) + 0)'),
|
||||
'AA64FpOp1P1': floatReg('((op1 * 4) + 1)'),
|
||||
'AA64FpOp1P2': floatReg('((op1 * 4) + 2)'),
|
||||
'AA64FpOp1P3': floatReg('((op1 * 4) + 3)'),
|
||||
'AA64FpOp2P0': floatReg('((op2 * 4) + 0)'),
|
||||
'AA64FpOp2P1': floatReg('((op2 * 4) + 1)'),
|
||||
'AA64FpOp2P2': floatReg('((op2 * 4) + 2)'),
|
||||
'AA64FpOp2P3': floatReg('((op2 * 4) + 3)'),
|
||||
'AA64FpOp3P0': floatReg('((op3 * 4) + 0)'),
|
||||
'AA64FpOp3P1': floatReg('((op3 * 4) + 1)'),
|
||||
'AA64FpOp3P2': floatReg('((op3 * 4) + 2)'),
|
||||
'AA64FpOp3P3': floatReg('((op3 * 4) + 3)'),
|
||||
'AA64FpDestP0': floatReg('((dest * 4) + 0)'),
|
||||
'AA64FpDestP1': floatReg('((dest * 4) + 1)'),
|
||||
'AA64FpDestP2': floatReg('((dest * 4) + 2)'),
|
||||
'AA64FpDestP3': floatReg('((dest * 4) + 3)'),
|
||||
'AA64FpDest2P0': floatReg('((dest2 * 4) + 0)'),
|
||||
'AA64FpDest2P1': floatReg('((dest2 * 4) + 1)'),
|
||||
'AA64FpDest2P2': floatReg('((dest2 * 4) + 2)'),
|
||||
'AA64FpDest2P3': floatReg('((dest2 * 4) + 3)'),
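# Illustrative sketch (hypothetical helper, not part of the patch): each
# architectural SIMD register Vn is backed by four consecutive 32-bit float
# registers, so 32-bit chunk p of Vn lives at physical index n*4 + p; the
# V0S/V0L variants below additionally wrap the register number modulo 32.
def aa64_fp_phys_index(v_reg, chunk, wrap=False):
    reg = v_reg % 32 if wrap else v_reg
    return reg * 4 + chunk
assert aa64_fp_phys_index(2, 1) == 9
assert aa64_fp_phys_index(32, 0, wrap=True) == 0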
|
||||
|
||||
'AA64FpOp1P0V0': floatReg('((((op1+0)) * 4) + 0)'),
|
||||
'AA64FpOp1P1V0': floatReg('((((op1+0)) * 4) + 1)'),
|
||||
'AA64FpOp1P2V0': floatReg('((((op1+0)) * 4) + 2)'),
|
||||
'AA64FpOp1P3V0': floatReg('((((op1+0)) * 4) + 3)'),
|
||||
|
||||
'AA64FpOp1P0V1': floatReg('((((op1+1)) * 4) + 0)'),
|
||||
'AA64FpOp1P1V1': floatReg('((((op1+1)) * 4) + 1)'),
|
||||
'AA64FpOp1P2V1': floatReg('((((op1+1)) * 4) + 2)'),
|
||||
'AA64FpOp1P3V1': floatReg('((((op1+1)) * 4) + 3)'),
|
||||
|
||||
'AA64FpOp1P0V2': floatReg('((((op1+2)) * 4) + 0)'),
|
||||
'AA64FpOp1P1V2': floatReg('((((op1+2)) * 4) + 1)'),
|
||||
'AA64FpOp1P2V2': floatReg('((((op1+2)) * 4) + 2)'),
|
||||
'AA64FpOp1P3V2': floatReg('((((op1+2)) * 4) + 3)'),
|
||||
|
||||
'AA64FpOp1P0V3': floatReg('((((op1+3)) * 4) + 0)'),
|
||||
'AA64FpOp1P1V3': floatReg('((((op1+3)) * 4) + 1)'),
|
||||
'AA64FpOp1P2V3': floatReg('((((op1+3)) * 4) + 2)'),
|
||||
'AA64FpOp1P3V3': floatReg('((((op1+3)) * 4) + 3)'),
|
||||
|
||||
'AA64FpOp1P0V0S': floatReg('((((op1+0)%32) * 4) + 0)'),
|
||||
'AA64FpOp1P1V0S': floatReg('((((op1+0)%32) * 4) + 1)'),
|
||||
'AA64FpOp1P2V0S': floatReg('((((op1+0)%32) * 4) + 2)'),
|
||||
'AA64FpOp1P3V0S': floatReg('((((op1+0)%32) * 4) + 3)'),
|
||||
|
||||
'AA64FpOp1P0V1S': floatReg('((((op1+1)%32) * 4) + 0)'),
|
||||
'AA64FpOp1P1V1S': floatReg('((((op1+1)%32) * 4) + 1)'),
|
||||
'AA64FpOp1P2V1S': floatReg('((((op1+1)%32) * 4) + 2)'),
|
||||
'AA64FpOp1P3V1S': floatReg('((((op1+1)%32) * 4) + 3)'),
|
||||
|
||||
'AA64FpOp1P0V2S': floatReg('((((op1+2)%32) * 4) + 0)'),
|
||||
'AA64FpOp1P1V2S': floatReg('((((op1+2)%32) * 4) + 1)'),
|
||||
'AA64FpOp1P2V2S': floatReg('((((op1+2)%32) * 4) + 2)'),
|
||||
'AA64FpOp1P3V2S': floatReg('((((op1+2)%32) * 4) + 3)'),
|
||||
|
||||
'AA64FpOp1P0V3S': floatReg('((((op1+3)%32) * 4) + 0)'),
|
||||
'AA64FpOp1P1V3S': floatReg('((((op1+3)%32) * 4) + 1)'),
|
||||
'AA64FpOp1P2V3S': floatReg('((((op1+3)%32) * 4) + 2)'),
|
||||
'AA64FpOp1P3V3S': floatReg('((((op1+3)%32) * 4) + 3)'),
|
||||
|
||||
'AA64FpDestP0V0': floatReg('((((dest+0)) * 4) + 0)'),
|
||||
'AA64FpDestP1V0': floatReg('((((dest+0)) * 4) + 1)'),
|
||||
'AA64FpDestP2V0': floatReg('((((dest+0)) * 4) + 2)'),
|
||||
'AA64FpDestP3V0': floatReg('((((dest+0)) * 4) + 3)'),
|
||||
|
||||
'AA64FpDestP0V1': floatReg('((((dest+1)) * 4) + 0)'),
|
||||
'AA64FpDestP1V1': floatReg('((((dest+1)) * 4) + 1)'),
|
||||
'AA64FpDestP2V1': floatReg('((((dest+1)) * 4) + 2)'),
|
||||
'AA64FpDestP3V1': floatReg('((((dest+1)) * 4) + 3)'),
|
||||
|
||||
'AA64FpDestP0V0L': floatReg('((((dest+0)%32) * 4) + 0)'),
|
||||
'AA64FpDestP1V0L': floatReg('((((dest+0)%32) * 4) + 1)'),
|
||||
'AA64FpDestP2V0L': floatReg('((((dest+0)%32) * 4) + 2)'),
|
||||
'AA64FpDestP3V0L': floatReg('((((dest+0)%32) * 4) + 3)'),
|
||||
|
||||
'AA64FpDestP0V1L': floatReg('((((dest+1)%32) * 4) + 0)'),
|
||||
'AA64FpDestP1V1L': floatReg('((((dest+1)%32) * 4) + 1)'),
|
||||
'AA64FpDestP2V1L': floatReg('((((dest+1)%32) * 4) + 2)'),
|
||||
'AA64FpDestP3V1L': floatReg('((((dest+1)%32) * 4) + 3)'),
|
||||
|
||||
#Abstracted control reg operands
|
||||
'MiscDest': cntrlReg('dest'),
|
||||
'MiscOp1': cntrlReg('op1'),
|
||||
'MiscNsBankedDest': cntrlNsBankedReg('dest'),
|
||||
'MiscNsBankedOp1': cntrlNsBankedReg('op1'),
|
||||
'MiscNsBankedDest64': cntrlNsBankedReg64('dest'),
|
||||
'MiscNsBankedOp164': cntrlNsBankedReg64('op1'),
|
||||
|
||||
#Fixed index control regs
|
||||
'Cpsr': cntrlReg('MISCREG_CPSR', srtCpsr),
|
||||
|
@ -244,22 +396,41 @@ def operands {{
|
|||
'FpscrQc': cntrlRegNC('MISCREG_FPSCR_QC'),
|
||||
'FpscrExc': cntrlRegNC('MISCREG_FPSCR_EXC'),
|
||||
'Cpacr': cntrlReg('MISCREG_CPACR'),
|
||||
'Cpacr64': cntrlReg('MISCREG_CPACR_EL1'),
|
||||
'Fpexc': cntrlRegNC('MISCREG_FPEXC'),
|
||||
'Nsacr': cntrlReg('MISCREG_NSACR'),
|
||||
'ElrHyp': cntrlRegNC('MISCREG_ELR_HYP'),
|
||||
'Hcr': cntrlReg('MISCREG_HCR'),
|
||||
'Hcr64': cntrlReg('MISCREG_HCR_EL2'),
|
||||
'Hdcr': cntrlReg('MISCREG_HDCR'),
|
||||
'Hcptr': cntrlReg('MISCREG_HCPTR'),
|
||||
'CptrEl264': cntrlReg('MISCREG_CPTR_EL2'),
|
||||
'CptrEl364': cntrlReg('MISCREG_CPTR_EL3'),
|
||||
'Hstr': cntrlReg('MISCREG_HSTR'),
|
||||
'Scr': cntrlReg('MISCREG_SCR'),
|
||||
'Scr64': cntrlReg('MISCREG_SCR_EL3'),
|
||||
'Sctlr': cntrlRegNC('MISCREG_SCTLR'),
|
||||
'SevMailbox': cntrlRegNC('MISCREG_SEV_MAILBOX'),
|
||||
'LLSCLock': cntrlRegNC('MISCREG_LOCKFLAG'),
|
||||
'Dczid' : cntrlRegNC('MISCREG_DCZID_EL0'),
|
||||
|
||||
#Register fields for microops
|
||||
'URa' : intReg('ura'),
|
||||
'XURa' : intRegX64('ura'),
|
||||
'WURa' : intRegW64('ura'),
|
||||
'IWRa' : intRegIWPC('ura'),
|
||||
'Fa' : floatReg('ura'),
|
||||
'FaP1' : floatReg('ura + 1'),
|
||||
'URb' : intReg('urb'),
|
||||
'XURb' : intRegX64('urb'),
|
||||
'URc' : intReg('urc'),
|
||||
'XURc' : intRegX64('urc'),
|
||||
|
||||
#Memory Operand
|
||||
'Mem': ('Mem', 'uw', None, ('IsMemRef', 'IsLoad', 'IsStore'), srtNormal),
|
||||
|
||||
#PCState fields
|
||||
'RawPC': pcStateReg('pc', srtPC),
|
||||
'PC': pcStateReg('instPC', srtPC),
|
||||
'NPC': pcStateReg('instNPC', srtPC),
|
||||
'pNPC': pcStateReg('instNPC', srtEPC),
|
||||
|
|
|
@ -1,5 +1,17 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2011 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
// not be construed as granting a license to any other intellectual
|
||||
// property including but not limited to intellectual property relating
|
||||
// to a hardware implementation of the functionality of the software
|
||||
// licensed hereunder. You may use the software subject to the license
|
||||
// terms below provided that you ensure that this notice is replicated
|
||||
// unmodified and in its entirety in all distributions of the software,
|
||||
// modified or unmodified, in source code or in binary form.
|
||||
//
|
||||
// Copyright (c) 2007-2008 The Florida State University
|
||||
// All rights reserved.
|
||||
//
|
||||
|
@ -60,6 +72,13 @@ def template BasicConstructor {{
|
|||
}
|
||||
}};
|
||||
|
||||
def template BasicConstructor64 {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst) : %(base_class)s("%(mnemonic)s", machInst, %(op_class)s)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
|
||||
// Basic instruction class execute method template.
|
||||
def template BasicExecute {{
|
||||
|
|
141 src/arch/arm/isa/templates/branch64.isa Normal file
|
@ -0,0 +1,141 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2011 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
// not be construed as granting a license to any other intellectual
|
||||
// property including but not limited to intellectual property relating
|
||||
// to a hardware implementation of the functionality of the software
|
||||
// licensed hereunder. You may use the software subject to the license
|
||||
// terms below provided that you ensure that this notice is replicated
|
||||
// unmodified and in its entirety in all distributions of the software,
|
||||
// modified or unmodified, in source code or in binary form.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met: redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer;
|
||||
// redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution;
|
||||
// neither the name of the copyright holders nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Authors: Gabe Black
|
||||
|
||||
def template BranchImm64Declare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, int64_t _imm);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template BranchImm64Constructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
int64_t _imm)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s, _imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template BranchImmCond64Declare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, int64_t _imm,
|
||||
ConditionCode _condCode);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template BranchImmCond64Constructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
int64_t _imm,
|
||||
ConditionCode _condCode)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_imm, _condCode)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template BranchReg64Declare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _op1);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template BranchReg64Constructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _op1)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s, _op1)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template BranchImmReg64Declare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
int64_t imm, IntRegIndex _op1);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template BranchImmReg64Constructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
int64_t _imm,
|
||||
IntRegIndex _op1)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s, _imm, _op1)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template BranchImmImmReg64Declare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, int64_t _imm1, int64_t _imm2,
|
||||
IntRegIndex _op1);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template BranchImmImmReg64Constructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
int64_t _imm1, int64_t _imm2,
|
||||
IntRegIndex _op1)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_imm1, _imm2, _op1)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
279 src/arch/arm/isa/templates/data64.isa Normal file
|
@ -0,0 +1,279 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2011 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
// not be construed as granting a license to any other intellectual
|
||||
// property including but not limited to intellectual property relating
|
||||
// to a hardware implementation of the functionality of the software
|
||||
// licensed hereunder. You may use the software subject to the license
|
||||
// terms below provided that you ensure that this notice is replicated
|
||||
// unmodified and in its entirety in all distributions of the software,
|
||||
// modified or unmodified, in source code or in binary form.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met: redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer;
|
||||
// redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution;
|
||||
// neither the name of the copyright holders nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Authors: Gabe Black
|
||||
|
||||
def template DataXImmDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _dest,
|
||||
IntRegIndex _op1, uint64_t _imm);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template DataXImmConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest,
|
||||
IntRegIndex _op1,
|
||||
uint64_t _imm)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template DataXSRegDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _dest,
|
||||
IntRegIndex _op1, IntRegIndex _op2,
|
||||
int32_t _shiftAmt, ArmShiftType _shiftType);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template DataXSRegConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest,
|
||||
IntRegIndex _op1,
|
||||
IntRegIndex _op2,
|
||||
int32_t _shiftAmt,
|
||||
ArmShiftType _shiftType)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _op2, _shiftAmt, _shiftType)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template DataXERegDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _dest,
|
||||
IntRegIndex _op1, IntRegIndex _op2,
|
||||
ArmExtendType _extendType, int32_t _shiftAmt);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template DataXERegConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest,
|
||||
IntRegIndex _op1,
|
||||
IntRegIndex _op2,
|
||||
ArmExtendType _extendType,
|
||||
int32_t _shiftAmt)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _op2, _extendType, _shiftAmt)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template DataX1RegDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _dest,
|
||||
IntRegIndex _op1);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template DataX1RegConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest,
|
||||
IntRegIndex _op1)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s, _dest, _op1)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template DataX2RegDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _dest,
|
||||
IntRegIndex _op1, IntRegIndex _op2);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template DataX2RegConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest,
|
||||
IntRegIndex _op1,
|
||||
IntRegIndex _op2)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _op2)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template DataX2RegImmDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _dest,
|
||||
IntRegIndex _op1, IntRegIndex _op2, uint64_t _imm);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template DataX2RegImmConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest,
|
||||
IntRegIndex _op1,
|
||||
IntRegIndex _op2,
|
||||
uint64_t _imm)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _op2, _imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template DataX3RegDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _dest,
|
||||
IntRegIndex _op1, IntRegIndex _op2, IntRegIndex _op3);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template DataX3RegConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest,
|
||||
IntRegIndex _op1,
|
||||
IntRegIndex _op2,
|
||||
IntRegIndex _op3)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _op2, _op3)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template DataXCondCompImmDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _op1,
|
||||
uint64_t _imm, ConditionCode _condCode, uint8_t _defCc);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template DataXCondCompImmConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _op1,
|
||||
uint64_t _imm,
|
||||
ConditionCode _condCode,
|
||||
uint8_t _defCc)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_op1, _imm, _condCode, _defCc)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template DataXCondCompRegDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _op1,
|
||||
IntRegIndex _op2, ConditionCode _condCode,
|
||||
uint8_t _defCc);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template DataXCondCompRegConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _op1,
|
||||
IntRegIndex _op2,
|
||||
ConditionCode _condCode,
|
||||
uint8_t _defCc)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_op1, _op2, _condCode, _defCc)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template DataXCondSelDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _dest,
|
||||
IntRegIndex _op1, IntRegIndex _op2,
|
||||
ConditionCode _condCode);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template DataXCondSelConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest,
|
||||
IntRegIndex _op1,
|
||||
IntRegIndex _op2,
|
||||
ConditionCode _condCode)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _op2, _condCode)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -338,6 +338,18 @@ def template MicroIntImmConstructor {{
|
|||
}
|
||||
}};
|
||||
|
||||
def template MicroIntImmXConstructor {{
|
||||
%(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
RegIndex _ura,
|
||||
RegIndex _urb,
|
||||
int32_t _imm)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_ura, _urb, _imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template MicroIntRegDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
|
@ -349,6 +361,28 @@ def template MicroIntRegDeclare {{
|
|||
};
|
||||
}};
|
||||
|
||||
def template MicroIntXERegConstructor {{
|
||||
%(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
RegIndex _ura, RegIndex _urb, RegIndex _urc,
|
||||
ArmExtendType _type, uint32_t _shiftAmt)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_ura, _urb, _urc, _type, _shiftAmt)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template MicroIntXERegDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
RegIndex _ura, RegIndex _urb, RegIndex _urc,
|
||||
ArmExtendType _type, uint32_t _shiftAmt);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template MicroIntRegConstructor {{
|
||||
%(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
RegIndex _ura, RegIndex _urb, RegIndex _urc,
|
||||
|
@ -402,6 +436,96 @@ def template MacroMemConstructor {{
|
|||
|
||||
}};
|
||||
|
||||
def template BigFpMemImmDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(const char *mnemonic, ExtMachInst machInst,
|
||||
bool load, IntRegIndex dest, IntRegIndex base, int64_t imm);
|
||||
%(BasicExecPanic)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template BigFpMemImmConstructor {{
|
||||
%(class_name)s::%(class_name)s(const char *mnemonic, ExtMachInst machInst,
|
||||
bool load, IntRegIndex dest, IntRegIndex base, int64_t imm)
|
||||
: %(base_class)s(mnemonic, machInst, %(op_class)s, load, dest, base, imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template BigFpMemRegDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(const char *mnemonic, ExtMachInst machInst,
|
||||
bool load, IntRegIndex dest, IntRegIndex base,
|
||||
IntRegIndex offset, ArmExtendType type, int64_t imm);
|
||||
%(BasicExecPanic)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template BigFpMemRegConstructor {{
|
||||
%(class_name)s::%(class_name)s(const char *mnemonic, ExtMachInst machInst,
|
||||
bool load, IntRegIndex dest, IntRegIndex base,
|
||||
IntRegIndex offset, ArmExtendType type, int64_t imm)
|
||||
: %(base_class)s(mnemonic, machInst, %(op_class)s, load, dest, base,
|
||||
offset, type, imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template BigFpMemLitDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(const char *mnemonic, ExtMachInst machInst,
|
||||
IntRegIndex dest, int64_t imm);
|
||||
%(BasicExecPanic)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template BigFpMemLitConstructor {{
|
||||
%(class_name)s::%(class_name)s(const char *mnemonic, ExtMachInst machInst,
|
||||
IntRegIndex dest, int64_t imm)
|
||||
: %(base_class)s(mnemonic, machInst, %(op_class)s, dest, imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template PairMemDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(const char *mnemonic, ExtMachInst machInst,
|
||||
uint32_t size, bool fp, bool load, bool noAlloc, bool signExt,
|
||||
bool exclusive, bool acrel, uint32_t imm,
|
||||
AddrMode mode, IntRegIndex rn, IntRegIndex rt,
|
||||
IntRegIndex rt2);
|
||||
%(BasicExecPanic)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template PairMemConstructor {{
|
||||
%(class_name)s::%(class_name)s(const char *mnemonic, ExtMachInst machInst,
|
||||
uint32_t size, bool fp, bool load, bool noAlloc, bool signExt,
|
||||
bool exclusive, bool acrel, uint32_t imm, AddrMode mode,
|
||||
IntRegIndex rn, IntRegIndex rt, IntRegIndex rt2)
|
||||
: %(base_class)s(mnemonic, machInst, %(op_class)s, size,
|
||||
fp, load, noAlloc, signExt, exclusive, acrel,
|
||||
imm, mode, rn, rt, rt2)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template VMemMultDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010, 2012 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -697,6 +697,11 @@ def template LoadStoreImmDeclare {{
|
|||
%(InitiateAccDeclare)s
|
||||
|
||||
%(CompleteAccDeclare)s
|
||||
|
||||
virtual void
|
||||
annotateFault(ArmFault *fault) {
|
||||
%(fa_code)s
|
||||
}
|
||||
};
|
||||
}};
|
||||
|
||||
|
@ -763,6 +768,11 @@ def template StoreRegDeclare {{
|
|||
%(InitiateAccDeclare)s
|
||||
|
||||
%(CompleteAccDeclare)s
|
||||
|
||||
virtual void
|
||||
annotateFault(ArmFault *fault) {
|
||||
%(fa_code)s
|
||||
}
|
||||
};
|
||||
}};
|
||||
|
||||
|
@ -808,6 +818,11 @@ def template LoadRegDeclare {{
|
|||
%(InitiateAccDeclare)s
|
||||
|
||||
%(CompleteAccDeclare)s
|
||||
|
||||
virtual void
|
||||
annotateFault(ArmFault *fault) {
|
||||
%(fa_code)s
|
||||
}
|
||||
};
|
||||
}};
|
||||
|
||||
|
@ -828,6 +843,11 @@ def template LoadImmDeclare {{
|
|||
%(InitiateAccDeclare)s
|
||||
|
||||
%(CompleteAccDeclare)s
|
||||
|
||||
virtual void
|
||||
annotateFault(ArmFault *fault) {
|
||||
%(fa_code)s
|
||||
}
|
||||
};
|
||||
}};
|
||||
|
||||
|
|
686 src/arch/arm/isa/templates/mem64.isa Normal file
|
@ -0,0 +1,686 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2011-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
// not be construed as granting a license to any other intellectual
|
||||
// property including but not limited to intellectual property relating
|
||||
// to a hardware implementation of the functionality of the software
|
||||
// licensed hereunder. You may use the software subject to the license
|
||||
// terms below provided that you ensure that this notice is replicated
|
||||
// unmodified and in its entirety in all distributions of the software,
|
||||
// modified or unmodified, in source code or in binary form.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met: redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer;
|
||||
// redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution;
|
||||
// neither the name of the copyright holders nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Authors: Gabe Black
|
||||
|
||||
let {{
|
||||
SPAlignmentCheckCode = '''
|
||||
if (baseIsSP && bits(XBase, 3, 0) &&
|
||||
SPAlignmentCheckEnabled(xc->tcBase())) {
|
||||
return new SPAlignmentFault();
|
||||
}
|
||||
'''
|
||||
}};
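# Illustrative sketch (not part of the patch): SPAlignmentCheckCode above
# faults when the base register is the stack pointer and the address is
# not 16-byte aligned, i.e. any of bits 3..0 is set.
def sp_misaligned(x_base):
    return (x_base & 0xf) != 0
assert not sp_misaligned(0x7ffffff0)
assert sp_misaligned(0x7ffffff8)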
|
||||
|
||||
def template Load64Execute {{
|
||||
Fault %(class_name)s::execute(%(CPU_exec_context)s *xc,
|
||||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
Addr EA;
|
||||
Fault fault = NoFault;
|
||||
|
||||
%(op_decl)s;
|
||||
%(op_rd)s;
|
||||
%(ea_code)s;
|
||||
|
||||
if (fault == NoFault) {
|
||||
fault = readMemAtomic(xc, traceData, EA, Mem, memAccessFlags);
|
||||
%(memacc_code)s;
|
||||
}
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(op_wb)s;
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
|
||||
|
||||
def template Store64Execute {{
|
||||
Fault %(class_name)s::execute(%(CPU_exec_context)s *xc,
|
||||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
Addr EA;
|
||||
Fault fault = NoFault;
|
||||
|
||||
%(op_decl)s;
|
||||
%(op_rd)s;
|
||||
%(ea_code)s;
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(memacc_code)s;
|
||||
}
|
||||
|
||||
if (fault == NoFault) {
|
||||
fault = writeMemAtomic(xc, traceData, Mem, EA,
|
||||
memAccessFlags, NULL);
|
||||
}
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(op_wb)s;
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
|
||||
|
||||
def template Store64InitiateAcc {{
|
||||
Fault %(class_name)s::initiateAcc(%(CPU_exec_context)s *xc,
|
||||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
Addr EA;
|
||||
Fault fault = NoFault;
|
||||
|
||||
%(op_decl)s;
|
||||
%(op_rd)s;
|
||||
%(ea_code)s;
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(memacc_code)s;
|
||||
}
|
||||
|
||||
if (fault == NoFault) {
|
||||
fault = writeMemTiming(xc, traceData, Mem, EA, memAccessFlags,
|
||||
NULL);
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
|
||||
|
||||
def template StoreEx64Execute {{
|
||||
Fault %(class_name)s::execute(%(CPU_exec_context)s *xc,
|
||||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
Addr EA;
|
||||
Fault fault = NoFault;
|
||||
|
||||
%(op_decl)s;
|
||||
%(op_rd)s;
|
||||
%(ea_code)s;
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(memacc_code)s;
|
||||
}
|
||||
|
||||
uint64_t writeResult = 0;
|
||||
if (fault == NoFault) {
|
||||
fault = writeMemAtomic(xc, traceData, Mem, EA, memAccessFlags,
|
||||
&writeResult);
|
||||
}
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(postacc_code)s;
|
||||
}
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(op_wb)s;
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
|
||||
|
||||
def template StoreEx64InitiateAcc {{
|
||||
Fault %(class_name)s::initiateAcc(%(CPU_exec_context)s *xc,
|
||||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
Addr EA;
|
||||
Fault fault = NoFault;
|
||||
|
||||
%(op_decl)s;
|
||||
%(op_rd)s;
|
||||
%(ea_code)s;
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(memacc_code)s;
|
||||
}
|
||||
|
||||
if (fault == NoFault) {
|
||||
fault = writeMemTiming(xc, traceData, Mem, EA, memAccessFlags,
|
||||
NULL);
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
|
||||
|
||||
def template Load64InitiateAcc {{
|
||||
Fault %(class_name)s::initiateAcc(%(CPU_exec_context)s *xc,
|
||||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
Addr EA;
|
||||
Fault fault = NoFault;
|
||||
|
||||
%(op_src_decl)s;
|
||||
%(op_rd)s;
|
||||
%(ea_code)s;
|
||||
|
||||
if (fault == NoFault) {
|
||||
fault = readMemTiming(xc, traceData, EA, Mem, memAccessFlags);
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
|
||||
|
||||
def template Load64CompleteAcc {{
|
||||
Fault %(class_name)s::completeAcc(PacketPtr pkt,
|
||||
%(CPU_exec_context)s *xc,
|
||||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
Fault fault = NoFault;
|
||||
|
||||
%(op_decl)s;
|
||||
%(op_rd)s;
|
||||
|
||||
// ARM instructions will not have a pkt if the predicate is false
|
||||
getMem(pkt, Mem, traceData);
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(memacc_code)s;
|
||||
}
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(op_wb)s;
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
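The Execute templates above cover the atomic-mode path; for timing CPUs the access is split, with initiateAcc computing the effective address and issuing the request and completeAcc later pulling the data out of the returned packet and performing the writeback. A much-simplified sketch of that two-phase shape (illustrative only; the types here are hypothetical stand-ins for gem5's packet and context classes):

#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical, simplified stand-in for the memory system's reply.
struct Packet { std::vector<uint8_t> data; };

struct TwoPhaseLoad {
    uint64_t ea = 0;    // effective address, computed in the initiate phase
    uint64_t dest = 0;  // destination value, written in the complete phase

    // Phase 1: compute the address and issue the request (no data yet).
    void initiateAcc(uint64_t base, int64_t imm) { ea = base + imm; }

    // Phase 2: the memory system answers; extract the data and write back.
    void completeAcc(const Packet &pkt) {
        if (pkt.data.size() >= sizeof(dest))
            std::memcpy(&dest, pkt.data.data(), sizeof(dest));
    }
};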
|
||||
|
||||
def template Store64CompleteAcc {{
|
||||
Fault %(class_name)s::completeAcc(PacketPtr pkt,
|
||||
%(CPU_exec_context)s *xc,
|
||||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
return NoFault;
|
||||
}
|
||||
}};
|
||||
|
||||
def template StoreEx64CompleteAcc {{
|
||||
Fault %(class_name)s::completeAcc(PacketPtr pkt,
|
||||
%(CPU_exec_context)s *xc,
|
||||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
Fault fault = NoFault;
|
||||
|
||||
%(op_decl)s;
|
||||
%(op_rd)s;
|
||||
|
||||
uint64_t writeResult = pkt->req->getExtraData();
|
||||
%(postacc_code)s;
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(op_wb)s;
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
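The StoreEx64 templates thread a write result back from the memory system (pkt->req->getExtraData()) into %(postacc_code)s; this is how a store-exclusive reports success or failure to its status register. Architecturally, STXR writes 0 to the status register when the exclusive store was performed and 1 when it was not. A toy exclusive-monitor sketch of that idea (editor's illustration, not the simulator's actual monitor logic):

#include <cstdint>

// Toy exclusive monitor: a single outstanding reservation.
struct ExclusiveMonitor {
    bool valid = false;
    uint64_t addr = 0;

    // LDXR side: record the reservation.
    void markExclusive(uint64_t a) { valid = true; addr = a; }

    // STXR side: returns the status-register value
    // (0 = store performed, 1 = store failed) and clears the monitor.
    uint64_t storeExclusive(uint64_t a) {
        bool ok = valid && addr == a;
        valid = false;
        return ok ? 0 : 1;
    }
};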
|
||||
|
||||
def template DCStore64Declare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
|
||||
/// Constructor.
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _base, IntRegIndex _dest, uint64_t _imm);
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
%(InitiateAccDeclare)s
|
||||
%(CompleteAccDeclare)s
|
||||
|
||||
virtual void
|
||||
annotateFault(ArmFault *fault) {
|
||||
%(fa_code)s
|
||||
}
|
||||
};
|
||||
}};
|
||||
|
||||
def template DCStore64Constructor {{
|
||||
%(class_name)s::%(class_name)s(ExtMachInst machInst, IntRegIndex _base, IntRegIndex _dest, uint64_t _imm)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
(IntRegIndex)_base, _dest, _imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
assert(!%(use_uops)d);
|
||||
}
|
||||
}};
|
||||
|
||||
def template DCStore64Execute {{
|
||||
Fault %(class_name)s::execute(%(CPU_exec_context)s *xc,
|
||||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
Addr EA;
|
||||
Fault fault = NoFault;
|
||||
|
||||
%(op_decl)s;
|
||||
%(op_rd)s;
|
||||
%(ea_code)s;
|
||||
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(memacc_code)s;
|
||||
}
|
||||
|
||||
if (fault == NoFault) {
|
||||
fault = xc->writeMem(NULL, op_size, EA, memAccessFlags, NULL);
|
||||
}
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(op_wb)s;
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
|
||||
|
||||
def template DCStore64InitiateAcc {{
|
||||
Fault %(class_name)s::initiateAcc(%(CPU_exec_context)s *xc,
|
||||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
Addr EA;
|
||||
Fault fault = NoFault;
|
||||
|
||||
%(op_decl)s;
|
||||
%(op_rd)s;
|
||||
%(ea_code)s;
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(memacc_code)s;
|
||||
}
|
||||
|
||||
if (fault == NoFault) {
|
||||
fault = xc->writeMem(NULL, op_size, EA, memAccessFlags, NULL);
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
|
||||
|
||||
|
||||
def template LoadStoreImm64Declare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
|
||||
/// Constructor.
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _base, int64_t _imm);
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
%(InitiateAccDeclare)s
|
||||
%(CompleteAccDeclare)s
|
||||
|
||||
virtual void
|
||||
annotateFault(ArmFault *fault) {
|
||||
%(fa_code)s
|
||||
}
|
||||
};
|
||||
}};
|
||||
|
||||
def template LoadStoreImmU64Declare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
|
||||
/// Constructor.
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _base, int64_t _imm,
|
||||
bool noAlloc = false, bool exclusive = false,
|
||||
bool acrel = false);
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
%(InitiateAccDeclare)s
|
||||
%(CompleteAccDeclare)s
|
||||
|
||||
virtual void
|
||||
annotateFault(ArmFault *fault) {
|
||||
%(fa_code)s
|
||||
}
|
||||
};
|
||||
}};
|
||||
|
||||
def template LoadStoreImmDU64Declare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
|
||||
/// Constructor.
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _dest2, IntRegIndex _base,
|
||||
int64_t _imm = 0, bool noAlloc = false, bool exclusive = false,
|
||||
bool acrel = false);
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
%(InitiateAccDeclare)s
|
||||
%(CompleteAccDeclare)s
|
||||
|
||||
virtual void
|
||||
annotateFault(ArmFault *fault) {
|
||||
%(fa_code)s
|
||||
}
|
||||
};
|
||||
}};
|
||||
|
||||
def template StoreImmDEx64Declare {{
|
||||
/**
|
||||
* Static instruction class for "%(mnemonic)s".
|
||||
*/
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
|
||||
/// Constructor.
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _result, IntRegIndex _dest, IntRegIndex _dest2,
|
||||
IntRegIndex _base, int64_t _imm = 0);
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
|
||||
%(InitiateAccDeclare)s
|
||||
|
||||
%(CompleteAccDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
|
||||
def template LoadStoreReg64Declare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
|
||||
/// Constructor.
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _base, IntRegIndex _offset,
|
||||
ArmExtendType _type, uint32_t _shiftAmt);
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
%(InitiateAccDeclare)s
|
||||
%(CompleteAccDeclare)s
|
||||
|
||||
virtual void
|
||||
annotateFault(ArmFault *fault) {
|
||||
%(fa_code)s
|
||||
}
|
||||
};
|
||||
}};
|
||||
|
||||
def template LoadStoreRegU64Declare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
|
||||
/// Constructor.
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _base, IntRegIndex _offset,
|
||||
ArmExtendType _type, uint32_t _shiftAmt,
|
||||
bool noAlloc = false, bool exclusive = false,
|
||||
bool acrel = false);
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
%(InitiateAccDeclare)s
|
||||
%(CompleteAccDeclare)s
|
||||
|
||||
virtual void
|
||||
annotateFault(ArmFault *fault) {
|
||||
%(fa_code)s
|
||||
}
|
||||
};
|
||||
}};
|
||||
|
||||
def template LoadStoreRaw64Declare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
|
||||
/// Constructor.
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _dest,
|
||||
IntRegIndex _base);
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
%(InitiateAccDeclare)s
|
||||
%(CompleteAccDeclare)s
|
||||
|
||||
virtual void
|
||||
annotateFault(ArmFault *fault) {
|
||||
%(fa_code)s
|
||||
}
|
||||
};
|
||||
}};
|
||||
|
||||
def template LoadStoreEx64Declare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
|
||||
/// Constructor.
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _dest,
|
||||
IntRegIndex _base, IntRegIndex _result);
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
%(InitiateAccDeclare)s
|
||||
%(CompleteAccDeclare)s
|
||||
|
||||
virtual void
|
||||
annotateFault(ArmFault *fault) {
|
||||
%(fa_code)s
|
||||
}
|
||||
};
|
||||
}};
|
||||
|
||||
def template LoadStoreLit64Declare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
|
||||
/// Constructor.
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _dest, int64_t _imm);
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
%(InitiateAccDeclare)s
|
||||
%(CompleteAccDeclare)s
|
||||
|
||||
virtual void
|
||||
annotateFault(ArmFault *fault) {
|
||||
%(fa_code)s
|
||||
}
|
||||
};
|
||||
}};
|
||||
|
||||
def template LoadStoreLitU64Declare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
|
||||
/// Constructor.
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _dest, int64_t _imm,
|
||||
bool noAlloc = false, bool exclusive = false,
|
||||
bool acrel = false);
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
%(InitiateAccDeclare)s
|
||||
%(CompleteAccDeclare)s
|
||||
|
||||
virtual void
|
||||
annotateFault(ArmFault *fault) {
|
||||
%(fa_code)s
|
||||
}
|
||||
};
|
||||
}};
|
||||
|
||||
def template LoadStoreImm64Constructor {{
|
||||
%(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _base, int64_t _imm)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
(IntRegIndex)_dest, (IntRegIndex)_base, _imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
#if %(use_uops)d
|
||||
assert(numMicroops >= 2);
|
||||
uops = new StaticInstPtr[numMicroops];
|
||||
uops[0] = new %(acc_name)s(machInst, _dest, _base, _imm);
|
||||
uops[0]->setDelayedCommit();
|
||||
uops[1] = new %(wb_decl)s;
|
||||
uops[1]->setLastMicroop();
|
||||
#endif
|
||||
}
|
||||
}};
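When %(use_uops)d is set, this constructor splits the instruction into at least two micro-ops: the memory access, marked for delayed commit, followed by a base-register writeback marked as the last micro-op. A small sketch of that arrangement with hypothetical types (in gem5 these would be StaticInstPtr objects):

#include <memory>
#include <vector>

// Hypothetical micro-op type carrying just the two flags used above.
struct MicroOp {
    bool delayedCommit = false;
    bool lastMicroop = false;
    void setDelayedCommit() { delayedCommit = true; }
    void setLastMicroop()   { lastMicroop = true; }
};

// Build the two-uop sequence used for loads/stores with base writeback:
// uop 0 performs the access, uop 1 updates the base register.
static std::vector<std::unique_ptr<MicroOp>> makeLoadWithWriteback()
{
    std::vector<std::unique_ptr<MicroOp>> uops;
    uops.push_back(std::make_unique<MicroOp>());  // memory access
    uops.push_back(std::make_unique<MicroOp>());  // base writeback
    uops[0]->setDelayedCommit();
    uops[1]->setLastMicroop();
    return uops;
}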
|
||||
|
||||
def template LoadStoreImmU64Constructor {{
|
||||
%(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _base, int64_t _imm,
|
||||
bool noAlloc, bool exclusive, bool acrel)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _base, _imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
assert(!%(use_uops)d);
|
||||
setExcAcRel(exclusive, acrel);
|
||||
}
|
||||
}};
|
||||
|
||||
def template LoadStoreImmDU64Constructor {{
|
||||
%(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _dest2, IntRegIndex _base,
|
||||
int64_t _imm, bool noAlloc, bool exclusive, bool acrel)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _dest2, _base, _imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
assert(!%(use_uops)d);
|
||||
setExcAcRel(exclusive, acrel);
|
||||
}
|
||||
}};
|
||||
|
||||
def template StoreImmDEx64Constructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _result, IntRegIndex _dest, IntRegIndex _dest2,
|
||||
IntRegIndex _base, int64_t _imm)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_result, _dest, _dest2, _base, _imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
assert(!%(use_uops)d);
|
||||
}
|
||||
}};
|
||||
|
||||
|
||||
def template LoadStoreReg64Constructor {{
|
||||
%(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _base, IntRegIndex _offset,
|
||||
ArmExtendType _type, uint32_t _shiftAmt)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _base, _offset, _type, _shiftAmt)
|
||||
{
|
||||
%(constructor)s;
|
||||
#if %(use_uops)d
|
||||
assert(numMicroops >= 2);
|
||||
uops = new StaticInstPtr[numMicroops];
|
||||
uops[0] = new %(acc_name)s(machInst, _dest, _base, _offset,
|
||||
_type, _shiftAmt);
|
||||
uops[0]->setDelayedCommit();
|
||||
uops[1] = new %(wb_decl)s;
|
||||
uops[1]->setLastMicroop();
|
||||
#endif
|
||||
}
|
||||
}};
|
||||
|
||||
def template LoadStoreRegU64Constructor {{
|
||||
%(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _base, IntRegIndex _offset,
|
||||
ArmExtendType _type, uint32_t _shiftAmt,
|
||||
bool noAlloc, bool exclusive, bool acrel)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _base, _offset, _type, _shiftAmt)
|
||||
{
|
||||
%(constructor)s;
|
||||
assert(!%(use_uops)d);
|
||||
setExcAcRel(exclusive, acrel);
|
||||
}
|
||||
}};
|
||||
|
||||
def template LoadStoreRaw64Constructor {{
|
||||
%(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _base)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s, _dest, _base)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template LoadStoreEx64Constructor {{
|
||||
%(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _base, IntRegIndex _result)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _base, _result)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template LoadStoreLit64Constructor {{
|
||||
%(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, int64_t _imm)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
(IntRegIndex)_dest, _imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
#if %(use_uops)d
|
||||
assert(numMicroops >= 2);
|
||||
uops = new StaticInstPtr[numMicroops];
|
||||
uops[0] = new %(acc_name)s(machInst, _dest, _imm);
|
||||
uops[0]->setDelayedCommit();
|
||||
uops[1] = new %(wb_decl)s;
|
||||
uops[1]->setLastMicroop();
|
||||
#endif
|
||||
}
|
||||
}};
|
||||
|
||||
def template LoadStoreLitU64Constructor {{
|
||||
%(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, int64_t _imm,
|
||||
bool noAlloc, bool exclusive, bool acrel)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
(IntRegIndex)_dest, _imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
assert(!%(use_uops)d);
|
||||
setExcAcRel(exclusive, acrel);
|
||||
}
|
||||
}};
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -62,6 +62,69 @@ def template MrsConstructor {{
|
|||
}
|
||||
}};
|
||||
|
||||
def template MrsBankedRegDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
protected:
|
||||
uint8_t byteMask;
|
||||
bool r;
|
||||
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _dest,
|
||||
uint8_t _sysM, bool _r);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template MrsBankedRegConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest,
|
||||
uint8_t _sysM,
|
||||
bool _r)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s, _dest),
|
||||
byteMask(_sysM), r(_r)
|
||||
{
|
||||
%(constructor)s;
|
||||
if (!(condCode == COND_AL || condCode == COND_UC)) {
|
||||
for (int x = 0; x < _numDestRegs; x++) {
|
||||
_srcRegIdx[_numSrcRegs++] = _destRegIdx[x];
|
||||
}
|
||||
}
|
||||
}
|
||||
}};
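The loop above recurs in most of the conditional-instruction constructors in this patch: when the condition code is not always-true (COND_AL) or unconditional (COND_UC), every destination register is also registered as a source, so a false predicate can carry the old value through instead of leaving the register undefined. A condensed sketch of why the old value is needed (illustrative, not gem5 code):

#include <cstdint>

// With the previous contents available as an extra source, a predicated
// write degenerates to a select between the new result and the old value.
static inline uint64_t
predicatedWrite(bool predicate, uint64_t newVal, uint64_t oldVal)
{
    return predicate ? newVal : oldVal;
}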
|
||||
|
||||
def template MsrBankedRegDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
protected:
|
||||
bool r;
|
||||
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _op1,
|
||||
uint8_t _sysM, bool _r);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template MsrBankedRegConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _op1,
|
||||
uint8_t _sysM,
|
||||
bool _r)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s, _op1, _sysM),
|
||||
r(_r)
|
||||
{
|
||||
%(constructor)s;
|
||||
if (!(condCode == COND_AL || condCode == COND_UC)) {
|
||||
for (int x = 0; x < _numDestRegs; x++) {
|
||||
_srcRegIdx[_numSrcRegs++] = _destRegIdx[x];
|
||||
}
|
||||
}
|
||||
}
|
||||
}};
|
||||
|
||||
def template MsrRegDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
|
@ -114,6 +177,66 @@ def template MsrImmConstructor {{
|
|||
}
|
||||
}};
|
||||
|
||||
def template MrrcOpDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
protected:
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _op1,
|
||||
IntRegIndex _dest, IntRegIndex _dest2, uint32_t imm);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template MrrcOpConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex op1,
|
||||
IntRegIndex dest,
|
||||
IntRegIndex dest2,
|
||||
uint32_t imm)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s, op1, dest,
|
||||
dest2, imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
if (!(condCode == COND_AL || condCode == COND_UC)) {
|
||||
for (int x = 0; x < _numDestRegs; x++) {
|
||||
_srcRegIdx[_numSrcRegs++] = _destRegIdx[x];
|
||||
}
|
||||
}
|
||||
}
|
||||
}};
|
||||
|
||||
def template McrrOpDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
protected:
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, IntRegIndex _op1, IntRegIndex _op2,
|
||||
IntRegIndex _dest, uint32_t imm);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template McrrOpConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex op1,
|
||||
IntRegIndex op2,
|
||||
IntRegIndex dest,
|
||||
uint32_t imm)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s, op1, op2,
|
||||
dest, imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
if (!(condCode == COND_AL || condCode == COND_UC)) {
|
||||
for (int x = 0; x < _numDestRegs; x++) {
|
||||
_srcRegIdx[_numSrcRegs++] = _destRegIdx[x];
|
||||
}
|
||||
}
|
||||
}
|
||||
}};
|
||||
|
||||
def template ImmOpDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
|
@ -310,6 +433,35 @@ def template RegRegImmOpConstructor {{
|
|||
}
|
||||
}};
|
||||
|
||||
def template RegImmImmOpDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
protected:
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, uint64_t _imm1, uint64_t _imm2);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template RegImmImmOpConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest,
|
||||
uint64_t _imm1,
|
||||
uint64_t _imm2)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _imm1, _imm2)
|
||||
{
|
||||
%(constructor)s;
|
||||
if (!(condCode == COND_AL || condCode == COND_UC)) {
|
||||
for (int x = 0; x < _numDestRegs; x++) {
|
||||
_srcRegIdx[_numSrcRegs++] = _destRegIdx[x];
|
||||
}
|
||||
}
|
||||
}
|
||||
}};
|
||||
|
||||
def template RegRegImmImmOpDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
|
|
91  src/arch/arm/isa/templates/misc64.isa  (new file)
|
@ -0,0 +1,91 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2011 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
// not be construed as granting a license to any other intellectual
|
||||
// property including but not limited to intellectual property relating
|
||||
// to a hardware implementation of the functionality of the software
|
||||
// licensed hereunder. You may use the software subject to the license
|
||||
// terms below provided that you ensure that this notice is replicated
|
||||
// unmodified and in its entirety in all distributions of the software,
|
||||
// modified or unmodified, in source code or in binary form.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met: redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer;
|
||||
// redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution;
|
||||
// neither the name of the copyright holders nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Authors: Gabe Black
|
||||
|
||||
def template RegRegImmImmOp64Declare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
protected:
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _op1,
|
||||
uint64_t _imm1, uint64_t _imm2);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template RegRegImmImmOp64Constructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest,
|
||||
IntRegIndex _op1,
|
||||
uint64_t _imm1,
|
||||
uint64_t _imm2)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _imm1, _imm2)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template RegRegRegImmOp64Declare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
protected:
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _op1,
|
||||
IntRegIndex _op2, uint64_t _imm);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template RegRegRegImmOp64Constructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest,
|
||||
IntRegIndex _op1,
|
||||
IntRegIndex _op2,
|
||||
uint64_t _imm)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _op2, _imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010-2012 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -39,8 +39,26 @@
|
|||
|
||||
let {{
|
||||
simdEnabledCheckCode = '''
|
||||
if (!neonEnabled(Cpacr, Cpsr, Fpexc))
|
||||
return disabledFault();
|
||||
{
|
||||
uint32_t issEnCheck;
|
||||
bool trapEnCheck;
|
||||
uint32_t seq;
|
||||
if (!vfpNeonEnabled(seq, Hcptr, Nsacr, Cpacr, Cpsr, issEnCheck,
|
||||
trapEnCheck, xc->tcBase(), Fpexc, true))
|
||||
{return disabledFault();}
|
||||
if (trapEnCheck) {
|
||||
CPSR cpsrEnCheck = Cpsr;
|
||||
if (cpsrEnCheck.mode == MODE_HYP) {
|
||||
return new UndefinedInstruction(machInst, issEnCheck,
|
||||
EC_TRAPPED_HCPTR);
|
||||
} else {
|
||||
if (!inSecureState(Scr, Cpsr)) {
|
||||
return new HypervisorTrap(machInst, issEnCheck,
|
||||
EC_TRAPPED_HCPTR);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
}};
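The reworked check keeps the original CPACR/FPEXC enable test and then routes trapped accesses: already in Hyp mode they raise an Undefined exception, while non-secure lower modes take a Hypervisor trap, both with the EC_TRAPPED_HCPTR syndrome. A compact decision sketch of that routing (illustrative; the enum and function are made up):

enum class NeonTrapRoute { None, UndefinedInHyp, TrapToHyp };

// trapToHyp corresponds to trapEnCheck, inHypMode to CPSR.mode == MODE_HYP,
// and secure to inSecureState(Scr, Cpsr) in the template code above.
static NeonTrapRoute
routeNeonAccess(bool trapToHyp, bool inHypMode, bool secure)
{
    if (!trapToHyp)
        return NeonTrapRoute::None;
    if (inHypMode)
        return NeonTrapRoute::UndefinedInHyp;
    return secure ? NeonTrapRoute::None : NeonTrapRoute::TrapToHyp;
}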
|
||||
|
||||
|
|
527  src/arch/arm/isa/templates/neon64.isa  (new file)
|
@ -0,0 +1,527 @@
|
|||
// -*- mode: c++ -*-
|
||||
|
||||
// Copyright (c) 2012-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
// not be construed as granting a license to any other intellectual
|
||||
// property including but not limited to intellectual property relating
|
||||
// to a hardware implementation of the functionality of the software
|
||||
// licensed hereunder. You may use the software subject to the license
|
||||
// terms below provided that you ensure that this notice is replicated
|
||||
// unmodified and in its entirety in all distributions of the software,
|
||||
// modified or unmodified, in source code or in binary form.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met: redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer;
|
||||
// redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution;
|
||||
// neither the name of the copyright holders nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Authors: Mbou Eyole
|
||||
// Giacomo Gabrielli
|
||||
|
||||
let {{
|
||||
simd64EnabledCheckCode = vfp64EnabledCheckCode
|
||||
}};
|
||||
|
||||
def template NeonX2RegOpDeclare {{
|
||||
template <class _Element>
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
protected:
|
||||
typedef _Element Element;
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _op1, IntRegIndex _op2)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _op2)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template NeonX2RegImmOpDeclare {{
|
||||
template <class _Element>
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
protected:
|
||||
typedef _Element Element;
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _op1, IntRegIndex _op2,
|
||||
uint64_t _imm)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _op2, _imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template NeonX1RegOpDeclare {{
|
||||
template <class _Element>
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
protected:
|
||||
typedef _Element Element;
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _op1)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template NeonX1RegImmOpDeclare {{
|
||||
template <class _Element>
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
protected:
|
||||
typedef _Element Element;
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _op1, uint64_t _imm)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template NeonX1Reg2ImmOpDeclare {{
|
||||
template <class _Element>
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
protected:
|
||||
typedef _Element Element;
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _op1, uint64_t _imm1,
|
||||
uint64_t _imm2)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _imm1, _imm2)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template NeonX1RegImmOnlyOpDeclare {{
|
||||
template <class _Element>
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
protected:
|
||||
typedef _Element Element;
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, uint64_t _imm)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _imm)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template NeonXExecDeclare {{
|
||||
template
|
||||
Fault %(class_name)s<%(targs)s>::execute(
|
||||
%(CPU_exec_context)s *, Trace::InstRecord *) const;
|
||||
}};
|
||||
|
||||
def template NeonXEqualRegOpExecute {{
|
||||
template <class Element>
|
||||
Fault %(class_name)s<Element>::execute(%(CPU_exec_context)s *xc,
|
||||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
Fault fault = NoFault;
|
||||
%(op_decl)s;
|
||||
%(op_rd)s;
|
||||
|
||||
const unsigned rCount = %(r_count)d;
|
||||
const unsigned eCount = rCount * sizeof(FloatRegBits) / sizeof(Element);
|
||||
const unsigned eCountFull = 4 * sizeof(FloatRegBits) / sizeof(Element);
|
||||
|
||||
union RegVect {
|
||||
FloatRegBits regs[rCount];
|
||||
Element elements[eCount];
|
||||
};
|
||||
|
||||
union FullRegVect {
|
||||
FloatRegBits regs[4];
|
||||
Element elements[eCountFull];
|
||||
};
|
||||
|
||||
%(code)s;
|
||||
if (fault == NoFault)
|
||||
{
|
||||
%(op_wb)s;
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
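The RegVect/FullRegVect unions above let the generated code view a group of 32-bit FP registers either as raw register words or as an array of vector elements of the instantiated Element type. A minimal standalone illustration of the same layout trick, assuming 32-bit register chunks as FloatRegBits is here (the union idiom mirrors the template; element order within a word follows host byte order):

#include <cstdint>

// Two 32-bit register words viewed as four 16-bit vector elements.
union RegVect16 {
    uint32_t regs[2];
    uint16_t elements[4];
};

// Example: fill the first register word lane by lane.
static inline uint32_t lowRegFromElements(uint16_t e0, uint16_t e1)
{
    RegVect16 v;
    v.elements[0] = e0;
    v.elements[1] = e1;
    return v.regs[0];
}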
|
||||
|
||||
def template NeonXUnequalRegOpExecute {{
|
||||
template <class Element>
|
||||
Fault %(class_name)s<Element>::execute(%(CPU_exec_context)s *xc,
|
||||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
typedef typename bigger_type_t<Element>::type BigElement;
|
||||
Fault fault = NoFault;
|
||||
%(op_decl)s;
|
||||
%(op_rd)s;
|
||||
|
||||
const unsigned rCount = %(r_count)d;
|
||||
const unsigned eCount = rCount * sizeof(FloatRegBits) / sizeof(Element);
|
||||
const unsigned eCountFull = 4 * sizeof(FloatRegBits) / sizeof(Element);
|
||||
|
||||
union RegVect {
|
||||
FloatRegBits regs[rCount];
|
||||
Element elements[eCount];
|
||||
BigElement bigElements[eCount / 2];
|
||||
};
|
||||
|
||||
union BigRegVect {
|
||||
FloatRegBits regs[2 * rCount];
|
||||
BigElement elements[eCount];
|
||||
};
|
||||
|
||||
union FullRegVect {
|
||||
FloatRegBits regs[4];
|
||||
Element elements[eCountFull];
|
||||
};
|
||||
|
||||
%(code)s;
|
||||
if (fault == NoFault)
|
||||
{
|
||||
%(op_wb)s;
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
|
||||
|
||||
def template MicroNeonMemDeclare64 {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
protected:
|
||||
// True if the base register is SP (used for SP alignment checking)
|
||||
bool baseIsSP;
|
||||
// Access size in bytes
|
||||
uint8_t accSize;
|
||||
// Vector element size (0 -> 8-bit, 1 -> 16-bit, 2 -> 32-bit,
|
||||
// 3 -> 64-bit)
|
||||
uint8_t eSize;
|
||||
|
||||
public:
|
||||
%(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _ura,
|
||||
uint32_t _imm, unsigned extraMemFlags, bool _baseIsSP,
|
||||
uint8_t _accSize, uint8_t _eSize)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s, _dest,
|
||||
_ura, _imm),
|
||||
baseIsSP(_baseIsSP), accSize(_accSize), eSize(_eSize)
|
||||
{
|
||||
memAccessFlags |= extraMemFlags;
|
||||
%(constructor)s;
|
||||
}
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
%(InitiateAccDeclare)s
|
||||
%(CompleteAccDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template NeonLoadExecute64 {{
|
||||
Fault %(class_name)s::execute(
|
||||
%(CPU_exec_context)s *xc, Trace::InstRecord *traceData) const
|
||||
{
|
||||
Addr EA;
|
||||
Fault fault = NoFault;
|
||||
|
||||
%(op_decl)s;
|
||||
%(mem_decl)s;
|
||||
%(op_rd)s;
|
||||
%(ea_code)s;
|
||||
|
||||
MemUnion memUnion;
|
||||
uint8_t *dataPtr = memUnion.bytes;
|
||||
|
||||
if (fault == NoFault) {
|
||||
fault = xc->readMem(EA, dataPtr, accSize, memAccessFlags);
|
||||
%(memacc_code)s;
|
||||
}
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(op_wb)s;
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
|
||||
|
||||
def template NeonLoadInitiateAcc64 {{
|
||||
Fault %(class_name)s::initiateAcc(
|
||||
%(CPU_exec_context)s *xc, Trace::InstRecord *traceData) const
|
||||
{
|
||||
Addr EA;
|
||||
Fault fault = NoFault;
|
||||
|
||||
%(op_decl)s;
|
||||
%(mem_decl)s;
|
||||
%(op_rd)s;
|
||||
%(ea_code)s;
|
||||
|
||||
MemUnion memUnion;
|
||||
uint8_t *dataPtr = memUnion.bytes;
|
||||
|
||||
if (fault == NoFault) {
|
||||
fault = xc->readMem(EA, dataPtr, accSize, memAccessFlags);
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
|
||||
|
||||
def template NeonLoadCompleteAcc64 {{
|
||||
Fault %(class_name)s::completeAcc(
|
||||
PacketPtr pkt, %(CPU_exec_context)s *xc,
|
||||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
Fault fault = NoFault;
|
||||
|
||||
%(mem_decl)s;
|
||||
%(op_decl)s;
|
||||
%(op_rd)s;
|
||||
|
||||
MemUnion &memUnion = *(MemUnion *)pkt->getPtr<uint8_t>();
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(memacc_code)s;
|
||||
}
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(op_wb)s;
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
|
||||
|
||||
def template NeonStoreExecute64 {{
|
||||
Fault %(class_name)s::execute(
|
||||
%(CPU_exec_context)s *xc, Trace::InstRecord *traceData) const
|
||||
{
|
||||
Addr EA;
|
||||
Fault fault = NoFault;
|
||||
|
||||
%(op_decl)s;
|
||||
%(mem_decl)s;
|
||||
%(op_rd)s;
|
||||
%(ea_code)s;
|
||||
|
||||
MemUnion memUnion;
|
||||
uint8_t *dataPtr = memUnion.bytes;
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(memacc_code)s;
|
||||
}
|
||||
|
||||
if (fault == NoFault) {
|
||||
fault = xc->writeMem(dataPtr, accSize, EA, memAccessFlags,
|
||||
NULL);
|
||||
}
|
||||
|
||||
if (fault == NoFault) {
|
||||
%(op_wb)s;
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
|
||||
|
||||
def template NeonStoreInitiateAcc64 {{
|
||||
Fault %(class_name)s::initiateAcc(
|
||||
%(CPU_exec_context)s *xc, Trace::InstRecord *traceData) const
|
||||
{
|
||||
Addr EA;
|
||||
Fault fault = NoFault;
|
||||
|
||||
%(op_decl)s;
|
||||
%(mem_decl)s;
|
||||
%(op_rd)s;
|
||||
%(ea_code)s;
|
||||
|
||||
MemUnion memUnion;
|
||||
if (fault == NoFault) {
|
||||
%(memacc_code)s;
|
||||
}
|
||||
|
||||
if (fault == NoFault) {
|
||||
fault = xc->writeMem(memUnion.bytes, accSize, EA, memAccessFlags,
|
||||
NULL);
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
|
||||
|
||||
def template NeonStoreCompleteAcc64 {{
|
||||
Fault %(class_name)s::completeAcc(
|
||||
PacketPtr pkt, %(CPU_exec_context)s *xc,
|
||||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
return NoFault;
|
||||
}
|
||||
}};
|
||||
|
||||
def template VMemMultDeclare64 {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, RegIndex rn, RegIndex vd,
|
||||
RegIndex rm, uint8_t eSize, uint8_t dataSize,
|
||||
uint8_t numStructElems, uint8_t numRegs, bool wb);
|
||||
%(BasicExecPanic)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template VMemSingleDeclare64 {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst, RegIndex rn, RegIndex vd,
|
||||
RegIndex rm, uint8_t eSize, uint8_t dataSize,
|
||||
uint8_t numStructElems, uint8_t index, bool wb,
|
||||
bool replicate = false);
|
||||
%(BasicExecPanic)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template VMemMultConstructor64 {{
|
||||
%(class_name)s::%(class_name)s(
|
||||
ExtMachInst machInst, RegIndex rn, RegIndex vd, RegIndex rm,
|
||||
uint8_t _eSize, uint8_t _dataSize, uint8_t _numStructElems,
|
||||
uint8_t _numRegs, bool _wb) :
|
||||
%(base_class)s(
|
||||
"%(mnemonic)s", machInst, %(op_class)s, rn, vd, rm,
|
||||
_eSize, _dataSize, _numStructElems, _numRegs, _wb)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template VMemSingleConstructor64 {{
|
||||
%(class_name)s::%(class_name)s(
|
||||
ExtMachInst machInst, RegIndex rn, RegIndex vd, RegIndex rm,
|
||||
uint8_t _eSize, uint8_t _dataSize, uint8_t _numStructElems,
|
||||
uint8_t _index, bool _wb, bool _replicate) :
|
||||
%(base_class)s(
|
||||
"%(mnemonic)s", machInst, %(op_class)s, rn, vd, rm,
|
||||
_eSize, _dataSize, _numStructElems, _index, _wb,
|
||||
_replicate)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
}};
|
||||
|
||||
def template MicroNeonMixDeclare64 {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
%(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1,
|
||||
uint8_t _eSize, uint8_t _dataSize,
|
||||
uint8_t _numStructElems, uint8_t _numRegs,
|
||||
uint8_t _step) :
|
||||
%(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _eSize, _dataSize, _numStructElems,
|
||||
_numRegs, _step)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template MicroNeonMixLaneDeclare64 {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
%(class_name)s(ExtMachInst machInst, RegIndex _dest, RegIndex _op1,
|
||||
uint8_t _eSize, uint8_t _dataSize,
|
||||
uint8_t _numStructElems, uint8_t _lane, uint8_t _step,
|
||||
bool _replicate = false) :
|
||||
%(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _eSize, _dataSize, _numStructElems,
|
||||
_lane, _step, _replicate)
|
||||
{
|
||||
%(constructor)s;
|
||||
}
|
||||
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template MicroNeonMixExecute64 {{
|
||||
Fault %(class_name)s::execute(%(CPU_exec_context)s *xc,
|
||||
Trace::InstRecord *traceData) const
|
||||
{
|
||||
Fault fault = NoFault;
|
||||
uint64_t resTemp = 0;
|
||||
resTemp = resTemp;
|
||||
%(op_decl)s;
|
||||
%(op_rd)s;
|
||||
|
||||
%(code)s;
|
||||
if (fault == NoFault)
|
||||
{
|
||||
%(op_wb)s;
|
||||
}
|
||||
|
||||
return fault;
|
||||
}
|
||||
}};
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010-2011 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -40,26 +40,37 @@
|
|||
//Basic instruction templates
|
||||
##include "basic.isa"
|
||||
|
||||
//Templates for AArch64 bit data instructions.
|
||||
##include "data64.isa"
|
||||
|
||||
//Templates for predicated instructions
|
||||
##include "pred.isa"
|
||||
|
||||
//Templates for memory instructions
|
||||
##include "mem.isa"
|
||||
|
||||
//Templates for AArch64 memory instructions
|
||||
##include "mem64.isa"
|
||||
|
||||
//Miscellaneous instructions that don't fit elsewhere
|
||||
##include "misc.isa"
|
||||
##include "misc64.isa"
|
||||
|
||||
//Templates for microcoded memory instructions
|
||||
##include "macromem.isa"
|
||||
|
||||
//Templates for branches
|
||||
##include "branch.isa"
|
||||
##include "branch64.isa"
|
||||
|
||||
//Templates for multiplies
|
||||
##include "mult.isa"
|
||||
|
||||
//Templates for VFP instructions
|
||||
##include "vfp.isa"
|
||||
##include "vfp64.isa"
|
||||
|
||||
//Templates for Neon instructions
|
||||
##include "neon.isa"
|
||||
|
||||
##include "neon64.isa"
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2010 ARM Limited
|
||||
// Copyright (c) 2010-2013 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
|
@ -39,32 +39,117 @@
|
|||
|
||||
let {{
|
||||
vfpEnabledCheckCode = '''
|
||||
if (!vfpEnabled(Cpacr, Cpsr, Fpexc))
|
||||
return disabledFault();
|
||||
uint32_t issEnCheck;
|
||||
bool trapEnCheck;
|
||||
uint32_t seq;
|
||||
if (!vfpNeonEnabled(seq,Hcptr, Nsacr, Cpacr, Cpsr, issEnCheck,
|
||||
trapEnCheck, xc->tcBase(), Fpexc))
|
||||
{return disabledFault();}
|
||||
if (trapEnCheck) {
|
||||
CPSR cpsrEnCheck = Cpsr;
|
||||
if (cpsrEnCheck.mode == MODE_HYP) {
|
||||
return new UndefinedInstruction(machInst, issEnCheck,
|
||||
EC_TRAPPED_HCPTR);
|
||||
} else {
|
||||
if (!inSecureState(Scr, Cpsr)) {
|
||||
return new HypervisorTrap(machInst, issEnCheck,
|
||||
EC_TRAPPED_HCPTR);
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
vfp64EnabledCheckCode = '''
|
||||
CPSR cpsrEnCheck = Cpsr;
|
||||
ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsrEnCheck.el;
|
||||
if (!vfpNeon64Enabled(Cpacr64, el))
|
||||
return new SupervisorTrap(machInst, 0x1E00000,
|
||||
EC_TRAPPED_SIMD_FP);
|
||||
|
||||
if (ArmSystem::haveVirtualization(xc->tcBase()) && el <= EL2) {
|
||||
HCPTR cptrEnCheck = xc->tcBase()->readMiscReg(MISCREG_CPTR_EL2);
|
||||
if (cptrEnCheck.tfp)
|
||||
return new HypervisorTrap(machInst, 0x1E00000,
|
||||
EC_TRAPPED_SIMD_FP);
|
||||
}
|
||||
|
||||
if (ArmSystem::haveSecurity(xc->tcBase())) {
|
||||
HCPTR cptrEnCheck = xc->tcBase()->readMiscReg(MISCREG_CPTR_EL3);
|
||||
if (cptrEnCheck.tfp)
|
||||
return new SecureMonitorTrap(machInst, 0x1E00000,
|
||||
EC_TRAPPED_SIMD_FP);
|
||||
}
|
||||
'''
|
||||
|
||||
vmsrEnabledCheckCode = '''
|
||||
if (!vfpEnabled(Cpacr, Cpsr))
|
||||
uint32_t issEnCheck;
|
||||
bool trapEnCheck;
|
||||
uint32_t seq;
|
||||
if (!vfpNeonEnabled(seq,Hcptr, Nsacr, Cpacr, Cpsr, issEnCheck,
|
||||
trapEnCheck, xc->tcBase()))
|
||||
if (dest != (int)MISCREG_FPEXC && dest != (int)MISCREG_FPSID)
|
||||
return disabledFault();
|
||||
{return disabledFault();}
|
||||
if (!inPrivilegedMode(Cpsr))
|
||||
if (dest != (int)MISCREG_FPSCR)
|
||||
return disabledFault();
|
||||
|
||||
if (trapEnCheck) {
|
||||
CPSR cpsrEnCheck = Cpsr;
|
||||
if (cpsrEnCheck.mode == MODE_HYP) {
|
||||
return new UndefinedInstruction(machInst, issEnCheck,
|
||||
EC_TRAPPED_HCPTR);
|
||||
} else {
|
||||
if (!inSecureState(Scr, Cpsr)) {
|
||||
return new HypervisorTrap(machInst, issEnCheck,
|
||||
EC_TRAPPED_HCPTR);
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
vmrsEnabledCheckCode = '''
|
||||
if (!vfpEnabled(Cpacr, Cpsr))
|
||||
uint32_t issEnCheck;
|
||||
bool trapEnCheck;
|
||||
uint32_t seq;
|
||||
if (!vfpNeonEnabled(seq,Hcptr, Nsacr, Cpacr, Cpsr, issEnCheck,
|
||||
trapEnCheck, xc->tcBase()))
|
||||
if (op1 != (int)MISCREG_FPEXC && op1 != (int)MISCREG_FPSID &&
|
||||
op1 != (int)MISCREG_MVFR0 && op1 != (int)MISCREG_MVFR1)
|
||||
return disabledFault();
|
||||
{return disabledFault();}
|
||||
if (!inPrivilegedMode(Cpsr))
|
||||
if (op1 != (int)MISCREG_FPSCR)
|
||||
return disabledFault();
|
||||
if (trapEnCheck) {
|
||||
CPSR cpsrEnCheck = Cpsr;
|
||||
if (cpsrEnCheck.mode == MODE_HYP) {
|
||||
return new UndefinedInstruction(machInst, issEnCheck,
|
||||
EC_TRAPPED_HCPTR);
|
||||
} else {
|
||||
if (!inSecureState(Scr, Cpsr)) {
|
||||
return new HypervisorTrap(machInst, issEnCheck,
|
||||
EC_TRAPPED_HCPTR);
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
vmrsApsrEnabledCheckCode = '''
|
||||
if (!vfpEnabled(Cpacr, Cpsr))
|
||||
return disabledFault();
|
||||
uint32_t issEnCheck;
|
||||
bool trapEnCheck;
|
||||
uint32_t seq;
|
||||
if (!vfpNeonEnabled(seq,Hcptr, Nsacr, Cpacr, Cpsr, issEnCheck,
|
||||
trapEnCheck, xc->tcBase()))
|
||||
{return disabledFault();}
|
||||
if (trapEnCheck) {
|
||||
CPSR cpsrEnCheck = Cpsr;
|
||||
if (cpsrEnCheck.mode == MODE_HYP) {
|
||||
return new UndefinedInstruction(machInst, issEnCheck,
|
||||
EC_TRAPPED_HCPTR);
|
||||
} else {
|
||||
if (!inSecureState(Scr, Cpsr)) {
|
||||
return new HypervisorTrap(machInst, issEnCheck,
|
||||
EC_TRAPPED_HCPTR);
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
}};
|
||||
|
||||
|
|
140  src/arch/arm/isa/templates/vfp64.isa  (new file)
|
@ -0,0 +1,140 @@
|
|||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2012 ARM Limited
|
||||
// All rights reserved
|
||||
//
|
||||
// The license below extends only to copyright in the software and shall
|
||||
// not be construed as granting a license to any other intellectual
|
||||
// property including but not limited to intellectual property relating
|
||||
// to a hardware implementation of the functionality of the software
|
||||
// licensed hereunder. You may use the software subject to the license
|
||||
// terms below provided that you ensure that this notice is replicated
|
||||
// unmodified and in its entirety in all distributions of the software,
|
||||
// modified or unmodified, in source code or in binary form.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met: redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer;
|
||||
// redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution;
|
||||
// neither the name of the copyright holders nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Authors: Thomas Grocutt
|
||||
|
||||
def template AA64FpRegRegOpConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _op1,
|
||||
VfpMicroMode mode)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, mode)
|
||||
{
|
||||
%(constructor)s;
|
||||
for (int x = 0; x < _numDestRegs; x++) {
|
||||
_srcRegIdx[_numSrcRegs++] = _destRegIdx[x];
|
||||
}
|
||||
}
|
||||
}};
|
||||
|
||||
def template AA64FpRegRegOpConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _op1,
|
||||
VfpMicroMode mode)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, mode)
|
||||
{
|
||||
%(constructor)s;
|
||||
for (int x = 0; x < _numDestRegs; x++) {
|
||||
_srcRegIdx[_numSrcRegs++] = _destRegIdx[x];
|
||||
}
|
||||
}
|
||||
}};
|
||||
|
||||
def template AA64FpRegImmOpConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, uint64_t _imm, VfpMicroMode mode)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _imm, mode)
|
||||
{
|
||||
%(constructor)s;
|
||||
for (int x = 0; x < _numDestRegs; x++) {
|
||||
_srcRegIdx[_numSrcRegs++] = _destRegIdx[x];
|
||||
}
|
||||
}
|
||||
}};
|
||||
|
||||
def template AA64FpRegRegImmOpConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest,
|
||||
IntRegIndex _op1,
|
||||
uint64_t _imm,
|
||||
VfpMicroMode mode)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _imm, mode)
|
||||
{
|
||||
%(constructor)s;
|
||||
for (int x = 0; x < _numDestRegs; x++) {
|
||||
_srcRegIdx[_numSrcRegs++] = _destRegIdx[x];
|
||||
}
|
||||
}
|
||||
}};
|
||||
|
||||
def template AA64FpRegRegRegOpConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest,
|
||||
IntRegIndex _op1,
|
||||
IntRegIndex _op2,
|
||||
VfpMicroMode mode)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _op2, mode)
|
||||
{
|
||||
%(constructor)s;
|
||||
for (int x = 0; x < _numDestRegs; x++) {
|
||||
_srcRegIdx[_numSrcRegs++] = _destRegIdx[x];
|
||||
}
|
||||
}
|
||||
}};
|
||||
|
||||
def template AA64FpRegRegRegRegOpDeclare {{
|
||||
class %(class_name)s : public %(base_class)s
|
||||
{
|
||||
public:
|
||||
// Constructor
|
||||
%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest, IntRegIndex _op1, IntRegIndex _op2,
|
||||
IntRegIndex _op3, VfpMicroMode mode = VfpNotAMicroop);
|
||||
%(BasicExecDeclare)s
|
||||
};
|
||||
}};
|
||||
|
||||
def template AA64FpRegRegRegRegOpConstructor {{
|
||||
inline %(class_name)s::%(class_name)s(ExtMachInst machInst,
|
||||
IntRegIndex _dest,
|
||||
IntRegIndex _op1,
|
||||
IntRegIndex _op2,
|
||||
IntRegIndex _op3,
|
||||
VfpMicroMode mode)
|
||||
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s,
|
||||
_dest, _op1, _op2, _op3, mode)
|
||||
{
|
||||
%(constructor)s;
|
||||
for (int x = 0; x < _numDestRegs; x++) {
|
||||
_srcRegIdx[_numSrcRegs++] = _destRegIdx[x];
|
||||
}
|
||||
}
|
||||
}};
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2010 ARM Limited
|
||||
* Copyright (c) 2010, 2012 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -95,6 +95,9 @@ namespace ArmISA
|
|||
|
||||
const Addr PAddrImplMask = (ULL(1) << PABits) - 1;
|
||||
|
||||
// Max. physical address range in bits supported by the architecture
|
||||
const unsigned MaxPhysAddrRange = 48;
|
||||
|
||||
// return a no-op instruction... used for instruction fetch faults
|
||||
const ExtMachInst NoopMachInst = 0x01E320F000ULL;
|
||||
|
||||
|
@ -124,6 +127,8 @@ namespace ArmISA
|
|||
INT_IRQ,
|
||||
INT_FIQ,
|
||||
INT_SEV, // Special interrupt for receiving SEV's
|
||||
INT_VIRT_IRQ,
|
||||
INT_VIRT_FIQ,
|
||||
NumInterruptTypes
|
||||
};
|
||||
} // namespace ArmISA
|
||||
|
|
|
@ -1,4 +1,16 @@
|
|||
/*
|
||||
* Copyright (c) 2011 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Copyright (c) 2003-2005 The Regents of The University of Michigan
|
||||
* Copyright (c) 2007-2008 The Florida State University
|
||||
* All rights reserved.
|
||||
|
@ -34,55 +46,108 @@
|
|||
#include "arch/arm/linux/linux.hh"
|
||||
|
||||
// open(2) flags translation table
|
||||
OpenFlagTransTable ArmLinux::openFlagTable[] = {
|
||||
OpenFlagTransTable ArmLinux32::openFlagTable[] = {
|
||||
#ifdef _MSC_VER
|
||||
{ ArmLinux::TGT_O_RDONLY, _O_RDONLY },
|
||||
{ ArmLinux::TGT_O_WRONLY, _O_WRONLY },
|
||||
{ ArmLinux::TGT_O_RDWR, _O_RDWR },
|
||||
{ ArmLinux::TGT_O_APPEND, _O_APPEND },
|
||||
{ ArmLinux::TGT_O_CREAT, _O_CREAT },
|
||||
{ ArmLinux::TGT_O_TRUNC, _O_TRUNC },
|
||||
{ ArmLinux::TGT_O_EXCL, _O_EXCL },
|
||||
{ ArmLinux32::TGT_O_RDONLY, _O_RDONLY },
|
||||
{ ArmLinux32::TGT_O_WRONLY, _O_WRONLY },
|
||||
{ ArmLinux32::TGT_O_RDWR, _O_RDWR },
|
||||
{ ArmLinux32::TGT_O_APPEND, _O_APPEND },
|
||||
{ ArmLinux32::TGT_O_CREAT, _O_CREAT },
|
||||
{ ArmLinux32::TGT_O_TRUNC, _O_TRUNC },
|
||||
{ ArmLinux32::TGT_O_EXCL, _O_EXCL },
|
||||
#ifdef _O_NONBLOCK
|
||||
{ ArmLinux::TGT_O_NONBLOCK, _O_NONBLOCK },
|
||||
{ ArmLinux32::TGT_O_NONBLOCK, _O_NONBLOCK },
|
||||
#endif
|
||||
#ifdef _O_NOCTTY
|
||||
{ ArmLinux::TGT_O_NOCTTY, _O_NOCTTY },
|
||||
{ ArmLinux32::TGT_O_NOCTTY, _O_NOCTTY },
|
||||
#endif
|
||||
#ifdef _O_SYNC
|
||||
{ ArmLinux::TGT_O_SYNC, _O_SYNC },
|
||||
{ ArmLinux32::TGT_O_SYNC, _O_SYNC },
|
||||
#endif
|
||||
#else /* !_MSC_VER */
|
||||
{ ArmLinux::TGT_O_RDONLY, O_RDONLY },
|
||||
{ ArmLinux::TGT_O_WRONLY, O_WRONLY },
|
||||
{ ArmLinux::TGT_O_RDWR, O_RDWR },
|
||||
{ ArmLinux::TGT_O_CREAT, O_CREAT },
|
||||
{ ArmLinux::TGT_O_EXCL, O_EXCL },
|
||||
{ ArmLinux::TGT_O_NOCTTY, O_NOCTTY },
|
||||
{ ArmLinux::TGT_O_TRUNC, O_TRUNC },
|
||||
{ ArmLinux::TGT_O_APPEND, O_APPEND },
|
||||
{ ArmLinux::TGT_O_NONBLOCK, O_NONBLOCK },
|
||||
{ ArmLinux32::TGT_O_RDONLY, O_RDONLY },
|
||||
{ ArmLinux32::TGT_O_WRONLY, O_WRONLY },
|
||||
{ ArmLinux32::TGT_O_RDWR, O_RDWR },
|
||||
{ ArmLinux32::TGT_O_CREAT, O_CREAT },
|
||||
{ ArmLinux32::TGT_O_EXCL, O_EXCL },
|
||||
{ ArmLinux32::TGT_O_NOCTTY, O_NOCTTY },
|
||||
{ ArmLinux32::TGT_O_TRUNC, O_TRUNC },
|
||||
{ ArmLinux32::TGT_O_APPEND, O_APPEND },
|
||||
{ ArmLinux32::TGT_O_NONBLOCK, O_NONBLOCK },
|
||||
#ifdef O_SYNC
|
||||
{ ArmLinux::TGT_O_SYNC, O_SYNC },
|
||||
{ ArmLinux32::TGT_O_SYNC, O_SYNC },
|
||||
#endif
|
||||
#ifdef FASYNC
|
||||
{ ArmLinux::TGT_FASYNC, FASYNC },
|
||||
{ ArmLinux32::TGT_FASYNC, FASYNC },
|
||||
#endif
|
||||
#ifdef O_DIRECT
|
||||
{ ArmLinux::TGT_O_DIRECT, O_DIRECT },
|
||||
{ ArmLinux32::TGT_O_DIRECT, O_DIRECT },
|
||||
#endif
|
||||
#ifdef O_LARGEFILE
|
||||
{ ArmLinux::TGT_O_LARGEFILE, O_LARGEFILE },
|
||||
{ ArmLinux32::TGT_O_LARGEFILE, O_LARGEFILE },
|
||||
#endif
|
||||
#ifdef O_DIRECTORY
|
||||
{ ArmLinux::TGT_O_DIRECTORY, O_DIRECTORY },
|
||||
{ ArmLinux32::TGT_O_DIRECTORY, O_DIRECTORY },
|
||||
#endif
|
||||
#ifdef O_NOFOLLOW
|
||||
{ ArmLinux::TGT_O_NOFOLLOW, O_NOFOLLOW },
|
||||
{ ArmLinux32::TGT_O_NOFOLLOW, O_NOFOLLOW },
|
||||
#endif
|
||||
#endif /* _MSC_VER */
|
||||
};
|
||||
|
||||
const int ArmLinux::NUM_OPEN_FLAGS =
|
||||
(sizeof(ArmLinux::openFlagTable)/sizeof(ArmLinux::openFlagTable[0]));
|
||||
const int ArmLinux32::NUM_OPEN_FLAGS = sizeof(ArmLinux32::openFlagTable) /
|
||||
sizeof(ArmLinux32::openFlagTable[0]);
|
||||
|
||||
// open(2) flags translation table
|
||||
OpenFlagTransTable ArmLinux64::openFlagTable[] = {
|
||||
#ifdef _MSC_VER
|
||||
{ ArmLinux64::TGT_O_RDONLY, _O_RDONLY },
|
||||
{ ArmLinux64::TGT_O_WRONLY, _O_WRONLY },
|
||||
{ ArmLinux64::TGT_O_RDWR, _O_RDWR },
|
||||
{ ArmLinux64::TGT_O_APPEND, _O_APPEND },
|
||||
{ ArmLinux64::TGT_O_CREAT, _O_CREAT },
|
||||
{ ArmLinux64::TGT_O_TRUNC, _O_TRUNC },
|
||||
{ ArmLinux64::TGT_O_EXCL, _O_EXCL },
|
||||
#ifdef _O_NONBLOCK
|
||||
{ ArmLinux64::TGT_O_NONBLOCK, _O_NONBLOCK },
|
||||
#endif
|
||||
#ifdef _O_NOCTTY
|
||||
{ ArmLinux64::TGT_O_NOCTTY, _O_NOCTTY },
|
||||
#endif
|
||||
#ifdef _O_SYNC
|
||||
{ ArmLinux64::TGT_O_SYNC, _O_SYNC },
|
||||
#endif
|
||||
#else /* !_MSC_VER */
|
||||
{ ArmLinux64::TGT_O_RDONLY, O_RDONLY },
|
||||
{ ArmLinux64::TGT_O_WRONLY, O_WRONLY },
|
||||
{ ArmLinux64::TGT_O_RDWR, O_RDWR },
|
||||
{ ArmLinux64::TGT_O_CREAT, O_CREAT },
|
||||
{ ArmLinux64::TGT_O_EXCL, O_EXCL },
|
||||
{ ArmLinux64::TGT_O_NOCTTY, O_NOCTTY },
|
||||
{ ArmLinux64::TGT_O_TRUNC, O_TRUNC },
|
||||
{ ArmLinux64::TGT_O_APPEND, O_APPEND },
|
||||
{ ArmLinux64::TGT_O_NONBLOCK, O_NONBLOCK },
|
||||
#ifdef O_SYNC
|
||||
{ ArmLinux64::TGT_O_SYNC, O_SYNC },
|
||||
#endif
|
||||
#ifdef FASYNC
|
||||
{ ArmLinux64::TGT_FASYNC, FASYNC },
|
||||
#endif
|
||||
#ifdef O_DIRECT
|
||||
{ ArmLinux64::TGT_O_DIRECT, O_DIRECT },
|
||||
#endif
|
||||
#ifdef O_LARGEFILE
|
||||
{ ArmLinux64::TGT_O_LARGEFILE, O_LARGEFILE },
|
||||
#endif
|
||||
#ifdef O_DIRECTORY
|
||||
{ ArmLinux64::TGT_O_DIRECTORY, O_DIRECTORY },
|
||||
#endif
|
||||
#ifdef O_NOFOLLOW
|
||||
{ ArmLinux64::TGT_O_NOFOLLOW, O_NOFOLLOW },
|
||||
#endif
|
||||
#endif /* _MSC_VER */
|
||||
};
|
||||
|
||||
const int ArmLinux64::NUM_OPEN_FLAGS = sizeof(ArmLinux64::openFlagTable) /
|
||||
sizeof(ArmLinux64::openFlagTable[0]);
|
||||
|
||||
|
|
|
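For reference, a table of this shape is typically consumed by the open(2) emulation path roughly as sketched below. translateOpenFlags is a hypothetical helper, not something defined in this patch, and real code treats the O_RDONLY/O_WRONLY/O_RDWR access-mode field (which is not a set of independent bits) more carefully.

#include <fcntl.h>
#include <cstdio>

struct OpenFlagTransTable { int tgtFlag; int hostFlag; };  // same shape as the tables above

static int
translateOpenFlags(const OpenFlagTransTable *table, int numEntries, int tgtFlags)
{
    int hostFlags = 0;
    for (int i = 0; i < numEntries; ++i) {
        if (tgtFlags & table[i].tgtFlag) {
            tgtFlags &= ~table[i].tgtFlag;   // consume the bit once translated
            hostFlags |= table[i].hostFlag;
        }
    }
    if (tgtFlags)                            // leftover bits have no host equivalent
        std::fprintf(stderr, "open: untranslated target flags %#x\n", tgtFlags);
    return hostFlags;
}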
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2010 ARM Limited
|
||||
* Copyright (c) 2010, 2011-2012 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -47,7 +47,7 @@
|
|||
|
||||
#include "kern/linux/linux.hh"
|
||||
|
||||
class ArmLinux : public Linux
|
||||
class ArmLinux32 : public Linux
|
||||
{
|
||||
public:
|
||||
|
||||
|
@ -123,8 +123,10 @@ class ArmLinux : public Linux
|
|||
uint16_t st_uid;
|
||||
uint16_t st_gid;
|
||||
uint32_t st_rdev;
|
||||
uint32_t __pad1;
|
||||
uint32_t st_size;
|
||||
uint32_t st_blksize;
|
||||
uint32_t __pad2;
|
||||
uint32_t st_blocks;
|
||||
uint32_t st_atimeX;
|
||||
uint32_t st_atime_nsec;
|
||||
|
@ -198,8 +200,192 @@ class ArmLinux : public Linux
|
|||
int32_t tms_cutime; //!< user time of children
|
||||
int32_t tms_cstime; //!< system time of children
|
||||
};
|
||||
};
|
||||
|
||||
class ArmLinux64 : public Linux
|
||||
{
|
||||
public:
|
||||
|
||||
/// This table maps the target open() flags to the corresponding
|
||||
/// host open() flags.
|
||||
static OpenFlagTransTable openFlagTable[];
|
||||
|
||||
/// Number of entries in openFlagTable[].
|
||||
static const int NUM_OPEN_FLAGS;
|
||||
|
||||
//@{
|
||||
/// Basic ARM Linux types
|
||||
typedef uint64_t size_t;
|
||||
typedef uint64_t off_t;
|
||||
typedef int64_t time_t;
|
||||
typedef int64_t clock_t;
|
||||
//@}
|
||||
|
||||
//@{
|
||||
/// open(2) flag values.
|
||||
static const int TGT_O_RDONLY = 00000000; //!< O_RDONLY
|
||||
static const int TGT_O_WRONLY = 00000001; //!< O_WRONLY
|
||||
static const int TGT_O_RDWR = 00000002; //!< O_RDWR
|
||||
static const int TGT_O_CREAT = 00000100; //!< O_CREAT
|
||||
static const int TGT_O_EXCL = 00000200; //!< O_EXCL
|
||||
static const int TGT_O_NOCTTY = 00000400; //!< O_NOCTTY
|
||||
static const int TGT_O_TRUNC = 00001000; //!< O_TRUNC
|
||||
static const int TGT_O_APPEND = 00002000; //!< O_APPEND
|
||||
static const int TGT_O_NONBLOCK = 00004000; //!< O_NONBLOCK
|
||||
static const int TGT_O_SYNC = 00010000; //!< O_SYNC
|
||||
static const int TGT_FASYNC = 00020000; //!< FASYNC
|
||||
static const int TGT_O_DIRECT = 00040000; //!< O_DIRECT
|
||||
static const int TGT_O_LARGEFILE = 00100000; //!< O_LARGEFILE
|
||||
static const int TGT_O_DIRECTORY = 00200000; //!< O_DIRECTORY
|
||||
static const int TGT_O_NOFOLLOW = 00400000; //!< O_NOFOLLOW
|
||||
static const int TGT_O_NOATIME = 01000000; //!< O_NOATIME
|
||||
static const int TGT_O_CLOEXEC = 02000000; //!< O_CLOEXEC
|
||||
//@}
|
||||
|
||||
/// For mmap().
|
||||
static const unsigned TGT_MAP_ANONYMOUS = 0x20;
|
||||
static const unsigned TGT_MAP_FIXED = 0x10;
|
||||
|
||||
//@{
|
||||
/// For getrusage().
|
||||
static const int TGT_RUSAGE_SELF = 0;
|
||||
static const int TGT_RUSAGE_CHILDREN = -1;
|
||||
static const int TGT_RUSAGE_BOTH = -2;
|
||||
//@}
|
||||
|
||||
//@{
|
||||
/// ioctl() command codes.
|
||||
static const unsigned TIOCGETP_ = 0x5401;
|
||||
static const unsigned TIOCSETP_ = 0x80067409;
|
||||
static const unsigned TIOCSETN_ = 0x8006740a;
|
||||
static const unsigned TIOCSETC_ = 0x80067411;
|
||||
static const unsigned TIOCGETC_ = 0x40067412;
|
||||
static const unsigned FIONREAD_ = 0x4004667f;
|
||||
static const unsigned TIOCISATTY_ = 0x2000745e;
|
||||
static const unsigned TIOCGETS_ = 0x402c7413;
|
||||
static const unsigned TIOCGETA_ = 0x5405;
|
||||
static const unsigned TCSETAW_ = 0x5407; // 2.6.15 kernel
|
||||
//@}
|
||||
|
||||
/// For table().
|
||||
static const int TBL_SYSINFO = 12;
|
||||
|
||||
/// Resource enumeration for getrlimit().
|
||||
enum rlimit_resources {
|
||||
TGT_RLIMIT_CPU = 0,
|
||||
TGT_RLIMIT_FSIZE = 1,
|
||||
TGT_RLIMIT_DATA = 2,
|
||||
TGT_RLIMIT_STACK = 3,
|
||||
TGT_RLIMIT_CORE = 4,
|
||||
TGT_RLIMIT_RSS = 5,
|
||||
TGT_RLIMIT_NPROC = 6,
|
||||
TGT_RLIMIT_NOFILE = 7,
|
||||
TGT_RLIMIT_MEMLOCK = 8,
|
||||
TGT_RLIMIT_AS = 9,
|
||||
TGT_RLIMIT_LOCKS = 10
|
||||
};
|
||||
|
||||
/// Limit struct for getrlimit/setrlimit.
|
||||
struct rlimit {
|
||||
uint64_t rlim_cur; //!< soft limit
|
||||
uint64_t rlim_max; //!< hard limit
|
||||
};
|
||||
|
||||
/// For gettimeofday().
|
||||
struct timeval {
|
||||
int64_t tv_sec; //!< seconds
|
||||
int64_t tv_usec; //!< microseconds
|
||||
};
|
||||
|
||||
// For writev/readv
|
||||
struct tgt_iovec {
|
||||
uint64_t iov_base; // void *
|
||||
uint64_t iov_len;
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
uint64_t st_dev;
|
||||
uint64_t st_ino;
|
||||
uint64_t st_nlink;
|
||||
uint32_t st_mode;
|
||||
uint32_t st_uid;
|
||||
uint32_t st_gid;
|
||||
uint32_t __pad0;
|
||||
uint64_t st_rdev;
|
||||
uint64_t st_size;
|
||||
uint64_t st_blksize;
|
||||
uint64_t st_blocks;
|
||||
uint64_t st_atimeX;
|
||||
uint64_t st_atime_nsec;
|
||||
uint64_t st_mtimeX;
|
||||
uint64_t st_mtime_nsec;
|
||||
uint64_t st_ctimeX;
|
||||
uint64_t st_ctime_nsec;
|
||||
} tgt_stat;
|
||||
|
||||
typedef struct {
|
||||
uint64_t st_dev;
|
||||
uint64_t st_ino;
|
||||
uint32_t st_mode;
|
||||
uint32_t st_nlink;
|
||||
uint32_t st_uid;
|
||||
uint32_t st_gid;
|
||||
uint32_t __pad0;
|
||||
uint64_t st_rdev;
|
||||
uint64_t st_size;
|
||||
uint64_t st_blksize;
|
||||
uint64_t st_blocks;
|
||||
uint64_t st_atimeX;
|
||||
uint64_t st_atime_nsec;
|
||||
uint64_t st_mtimeX;
|
||||
uint64_t st_mtime_nsec;
|
||||
uint64_t st_ctimeX;
|
||||
uint64_t st_ctime_nsec;
|
||||
} tgt_stat64;
|
||||
|
||||
typedef struct {
|
||||
int64_t uptime; /* Seconds since boot */
|
||||
uint64_t loads[3]; /* 1, 5, and 15 minute load averages */
|
||||
uint64_t totalram; /* Total usable main memory size */
|
||||
uint64_t freeram; /* Available memory size */
|
||||
uint64_t sharedram; /* Amount of shared memory */
|
||||
uint64_t bufferram; /* Memory used by buffers */
|
||||
uint64_t totalswap; /* Total swap space size */
|
||||
uint64_t freeswap; /* swap space still available */
|
||||
uint16_t procs; /* Number of current processes */
|
||||
uint16_t pad;
|
||||
uint64_t totalhigh; /* Total high memory size */
|
||||
uint64_t freehigh; /* Available high memory size */
|
||||
uint32_t mem_unit; /* Memory unit size in bytes */
|
||||
} tgt_sysinfo;
|
||||
|
||||
/// For getrusage().
|
||||
struct rusage {
|
||||
struct timeval ru_utime; //!< user time used
|
||||
struct timeval ru_stime; //!< system time used
|
||||
int64_t ru_maxrss; //!< max rss
|
||||
int64_t ru_ixrss; //!< integral shared memory size
|
||||
int64_t ru_idrss; //!< integral unshared data "
|
||||
int64_t ru_isrss; //!< integral unshared stack "
|
||||
int64_t ru_minflt; //!< page reclaims - total vmfaults
|
||||
int64_t ru_majflt; //!< page faults
|
||||
int64_t ru_nswap; //!< swaps
|
||||
int64_t ru_inblock; //!< block input operations
|
||||
int64_t ru_oublock; //!< block output operations
|
||||
int64_t ru_msgsnd; //!< messages sent
|
||||
int64_t ru_msgrcv; //!< messages received
|
||||
int64_t ru_nsignals; //!< signals received
|
||||
int64_t ru_nvcsw; //!< voluntary context switches
|
||||
int64_t ru_nivcsw; //!< involuntary "
|
||||
};
|
||||
|
||||
/// For times().
|
||||
struct tms {
|
||||
int64_t tms_utime; //!< user time
|
||||
int64_t tms_stime; //!< system time
|
||||
int64_t tms_cutime; //!< user time of children
|
||||
int64_t tms_cstime; //!< system time of children
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
||||
|
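The tgt_stat/tgt_stat64 layouts above are what the syscall layer fills in from the host's struct stat. The following is only a sketch of that packing, written under the assumption that both target layouts share these member names; the trailing X in st_atimeX and friends presumably sidesteps glibc's st_atime macros.

#include <sys/stat.h>
#include <cstring>

template <typename TgtStat>
void
fillTargetStat(TgtStat &tgt, const struct stat &host)
{
    std::memset(&tgt, 0, sizeof(tgt));
    tgt.st_dev      = host.st_dev;
    tgt.st_ino      = host.st_ino;
    tgt.st_mode     = host.st_mode;
    tgt.st_nlink    = host.st_nlink;
    tgt.st_uid      = host.st_uid;
    tgt.st_gid      = host.st_gid;
    tgt.st_rdev     = host.st_rdev;
    tgt.st_size     = host.st_size;
    tgt.st_blksize  = host.st_blksize;
    tgt.st_blocks   = host.st_blocks;
    tgt.st_atimeX   = host.st_atime;   // trailing X keeps clear of glibc's st_atime macro
    tgt.st_mtimeX   = host.st_mtime;
    tgt.st_ctimeX   = host.st_ctime;
    // a real implementation would also byte-swap fields for a big-endian target
}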
|
File diff suppressed because it is too large
|
@ -1,4 +1,16 @@
|
|||
/*
|
||||
* Copyright (c) 2011-2012 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Copyright (c) 2007-2008 The Florida State University
|
||||
* All rights reserved.
|
||||
*
|
||||
|
@ -31,39 +43,54 @@
|
|||
#ifndef __ARM_LINUX_PROCESS_HH__
|
||||
#define __ARM_LINUX_PROCESS_HH__
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "arch/arm/process.hh"
|
||||
|
||||
class ArmLinuxProcessBits
|
||||
{
|
||||
protected:
|
||||
SyscallDesc* getLinuxDesc(int callnum);
|
||||
|
||||
struct SyscallTable
|
||||
{
|
||||
int base;
|
||||
SyscallDesc *descs;
|
||||
int size;
|
||||
|
||||
SyscallDesc *getDesc(int offset) const;
|
||||
};
|
||||
|
||||
std::vector<SyscallTable> syscallTables;
|
||||
};
|
||||
|
||||
/// A process with emulated Arm/Linux syscalls.
|
||||
class ArmLinuxProcess : public ArmLiveProcess
|
||||
class ArmLinuxProcess32 : public ArmLiveProcess32, public ArmLinuxProcessBits
|
||||
{
|
||||
public:
|
||||
ArmLinuxProcess(LiveProcessParams * params, ObjectFile *objFile,
|
||||
ArmLinuxProcess32(LiveProcessParams * params, ObjectFile *objFile,
|
||||
ObjectFile::Arch _arch);
|
||||
|
||||
virtual SyscallDesc* getDesc(int callnum);
|
||||
|
||||
void initState();
|
||||
|
||||
ArmISA::IntReg getSyscallArg(ThreadContext *tc, int &i);
|
||||
/// Explicitly import the otherwise hidden getSyscallArg
|
||||
using ArmLiveProcess::getSyscallArg;
|
||||
void setSyscallArg(ThreadContext *tc, int i, ArmISA::IntReg val);
|
||||
|
||||
/// The target system's hostname.
|
||||
static const char *hostname;
|
||||
|
||||
/// A page to hold "kernel" provided functions. The name might be wrong.
|
||||
static const Addr commPage;
|
||||
|
||||
/// Array of syscall descriptors, indexed by call number.
|
||||
static SyscallDesc syscallDescs[];
|
||||
SyscallDesc* getDesc(int callnum);
|
||||
};
|
||||
|
||||
/// Array of "arm private" syscall descriptors.
|
||||
static SyscallDesc privSyscallDescs[];
|
||||
/// A process with emulated Arm/Linux syscalls.
|
||||
class ArmLinuxProcess64 : public ArmLiveProcess64, public ArmLinuxProcessBits
|
||||
{
|
||||
public:
|
||||
ArmLinuxProcess64(LiveProcessParams * params, ObjectFile *objFile,
|
||||
ObjectFile::Arch _arch);
|
||||
|
||||
const int Num_Syscall_Descs;
|
||||
|
||||
const int Num_Priv_Syscall_Descs;
|
||||
void initState();
|
||||
SyscallDesc* getDesc(int callnum);
|
||||
};
|
||||
|
||||
#endif // __ARM_LINUX_PROCESS_HH__
|
||||
|
|
|
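Given the SyscallTable declaration above, the lookup that getLinuxDesc performs can be imagined roughly as follows; this is an assumed implementation written against the declarations in this header, not code taken from the patch.

// Assumed implementation sketch; SyscallDesc and the class declarations come
// from the header above and are not redefined here.
SyscallDesc *
ArmLinuxProcessBits::SyscallTable::getDesc(int offset) const
{
    return (offset >= 0 && offset < size) ? &descs[offset] : nullptr;
}

SyscallDesc *
ArmLinuxProcessBits::getLinuxDesc(int callnum)
{
    for (const SyscallTable &table : syscallTables) {
        if (SyscallDesc *desc = table.getDesc(callnum - table.base))
            return desc;
    }
    return nullptr;
}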
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2010-2012 ARM Limited
|
||||
* Copyright (c) 2010-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -63,7 +63,8 @@ using namespace Linux;
|
|||
LinuxArmSystem::LinuxArmSystem(Params *p)
|
||||
: ArmSystem(p),
|
||||
enableContextSwitchStatsDump(p->enable_context_switch_stats_dump),
|
||||
kernelPanicEvent(NULL), kernelOopsEvent(NULL)
|
||||
kernelPanicEvent(NULL), kernelOopsEvent(NULL),
|
||||
bootReleaseAddr(p->boot_release_addr)
|
||||
{
|
||||
if (p->panic_on_panic) {
|
||||
kernelPanicEvent = addKernelFuncEventOrPanic<PanicPCEvent>(
|
||||
|
@ -98,22 +99,30 @@ LinuxArmSystem::LinuxArmSystem(Params *p)
|
|||
secDataPtrAddr = 0;
|
||||
secDataAddr = 0;
|
||||
penReleaseAddr = 0;
|
||||
|
||||
kernelSymtab->findAddress("__secondary_data", secDataPtrAddr);
|
||||
kernelSymtab->findAddress("secondary_data", secDataAddr);
|
||||
kernelSymtab->findAddress("pen_release", penReleaseAddr);
|
||||
kernelSymtab->findAddress("secondary_holding_pen_release", pen64ReleaseAddr);
|
||||
|
||||
secDataPtrAddr &= ~ULL(0x7F);
|
||||
secDataAddr &= ~ULL(0x7F);
|
||||
penReleaseAddr &= ~ULL(0x7F);
|
||||
pen64ReleaseAddr &= ~ULL(0x7F);
|
||||
bootReleaseAddr = (bootReleaseAddr & ~ULL(0x7F)) + loadAddrOffset;
|
||||
|
||||
}
|
||||
|
||||
bool
|
||||
LinuxArmSystem::adderBootUncacheable(Addr a)
|
||||
{
|
||||
Addr block = a & ~ULL(0x7F);
|
||||
|
||||
if (block == secDataPtrAddr || block == secDataAddr ||
|
||||
block == penReleaseAddr)
|
||||
block == penReleaseAddr || pen64ReleaseAddr == block ||
|
||||
block == bootReleaseAddr)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -145,7 +154,8 @@ LinuxArmSystem::initState()
|
|||
if (kernel_has_fdt_support && dtb_file_specified) {
|
||||
// Kernel supports flattened device tree and dtb file specified.
|
||||
// Using Device Tree Blob to describe system configuration.
|
||||
inform("Loading DTB file: %s\n", params()->dtb_filename);
|
||||
inform("Loading DTB file: %s at address %#x\n", params()->dtb_filename,
|
||||
params()->atags_addr + loadAddrOffset);
|
||||
|
||||
ObjectFile *dtb_file = createObjectFile(params()->dtb_filename, true);
|
||||
if (!dtb_file) {
|
||||
|
@ -165,7 +175,7 @@ LinuxArmSystem::initState()
|
|||
"to DTB file: %s\n", params()->dtb_filename);
|
||||
}
|
||||
|
||||
dtb_file->setTextBase(params()->atags_addr);
|
||||
dtb_file->setTextBase(params()->atags_addr + loadAddrOffset);
|
||||
dtb_file->loadSections(physProxy);
|
||||
delete dtb_file;
|
||||
} else {
|
||||
|
@ -215,15 +225,17 @@ LinuxArmSystem::initState()
|
|||
DPRINTF(Loader, "Boot atags was %d bytes in total\n", size << 2);
|
||||
DDUMP(Loader, boot_data, size << 2);
|
||||
|
||||
physProxy.writeBlob(params()->atags_addr, boot_data, size << 2);
|
||||
physProxy.writeBlob(params()->atags_addr + loadAddrOffset, boot_data,
|
||||
size << 2);
|
||||
|
||||
delete[] boot_data;
|
||||
}
|
||||
|
||||
// Kernel boot requirements to set up r0, r1 and r2 in ARMv7
|
||||
for (int i = 0; i < threadContexts.size(); i++) {
|
||||
threadContexts[i]->setIntReg(0, 0);
|
||||
threadContexts[i]->setIntReg(1, params()->machine_type);
|
||||
threadContexts[i]->setIntReg(2, params()->atags_addr);
|
||||
threadContexts[i]->setIntReg(2, params()->atags_addr + loadAddrOffset);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2010-2012 ARM Limited
|
||||
* Copyright (c) 2010-2013 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
|
@ -126,6 +126,8 @@ class LinuxArmSystem : public ArmSystem
|
|||
Addr secDataPtrAddr;
|
||||
Addr secDataAddr;
|
||||
Addr penReleaseAddr;
|
||||
Addr pen64ReleaseAddr;
|
||||
Addr bootReleaseAddr;
|
||||
};
|
||||
|
||||
class DumpStatsPCEvent : public PCEvent
|
||||
|
|
|
@ -53,6 +53,8 @@
|
|||
*/
|
||||
|
||||
#include "arch/arm/miscregs.hh"
|
||||
#include "arch/arm/isa_traits.hh"
|
||||
#include "debug/LLSC.hh"
|
||||
#include "mem/packet.hh"
|
||||
#include "mem/request.hh"
|
||||
|
||||
|
@ -62,20 +64,26 @@ template <class XC>
|
|||
inline void
|
||||
handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
|
||||
{
|
||||
DPRINTF(LLSC,"%s: handleing snoop for address: %#x locked: %d\n",
|
||||
xc->getCpuPtr()->name(),pkt->getAddr(),
|
||||
xc->readMiscReg(MISCREG_LOCKFLAG));
|
||||
if (!xc->readMiscReg(MISCREG_LOCKFLAG))
|
||||
return;
|
||||
|
||||
Addr locked_addr = xc->readMiscReg(MISCREG_LOCKADDR) & cacheBlockMask;
|
||||
// If no caches are attached, the snoop address always needs to be masked
|
||||
Addr snoop_addr = pkt->getAddr() & cacheBlockMask;
|
||||
|
||||
if (locked_addr == snoop_addr)
|
||||
DPRINTF(LLSC,"%s: handleing snoop for address: %#x locked addr: %#x\n",
|
||||
xc->getCpuPtr()->name(),snoop_addr, locked_addr);
|
||||
if (locked_addr == snoop_addr) {
|
||||
DPRINTF(LLSC,"%s: address match, clearing lock and signaling sev\n",
|
||||
xc->getCpuPtr()->name());
|
||||
xc->setMiscReg(MISCREG_LOCKFLAG, false);
|
||||
}
|
||||
|
||||
template <class XC>
|
||||
inline void
|
||||
handleLockedSnoopHit(XC *xc)
|
||||
{
|
||||
// Implement ARMv8 WFE/SEV semantics
|
||||
xc->setMiscReg(MISCREG_SEV_MAILBOX, true);
|
||||
xc->getCpuPtr()->wakeup();
|
||||
}
|
||||
}
|
||||
|
||||
template <class XC>
|
||||
|
@ -84,8 +92,19 @@ handleLockedRead(XC *xc, Request *req)
|
|||
{
|
||||
xc->setMiscReg(MISCREG_LOCKADDR, req->getPaddr());
|
||||
xc->setMiscReg(MISCREG_LOCKFLAG, true);
|
||||
DPRINTF(LLSC,"%s: Placing address %#x in monitor\n", xc->getCpuPtr()->name(),
|
||||
req->getPaddr());
|
||||
}
|
||||
|
||||
template <class XC>
|
||||
inline void
|
||||
handleLockedSnoopHit(XC *xc)
|
||||
{
|
||||
DPRINTF(LLSC,"%s: handling snoop lock hit address: %#x\n",
|
||||
xc->getCpuPtr()->name(), xc->readMiscReg(MISCREG_LOCKADDR));
|
||||
xc->setMiscReg(MISCREG_LOCKFLAG, false);
|
||||
xc->setMiscReg(MISCREG_SEV_MAILBOX, true);
|
||||
}
|
||||
|
||||
template <class XC>
|
||||
inline bool
|
||||
|
@ -94,6 +113,8 @@ handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
|
|||
if (req->isSwap())
|
||||
return true;
|
||||
|
||||
DPRINTF(LLSC,"%s: handling locked write for address %#x in monitor\n",
|
||||
xc->getCpuPtr()->name(), req->getPaddr());
|
||||
// Verify that the lock flag is still set and the address
|
||||
// is correct
|
||||
bool lock_flag = xc->readMiscReg(MISCREG_LOCKFLAG);
|
||||
|
@ -103,6 +124,8 @@ handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
|
|||
// don't even bother sending to memory system
|
||||
req->setExtraData(0);
|
||||
xc->setMiscReg(MISCREG_LOCKFLAG, false);
|
||||
DPRINTF(LLSC,"%s: clearing lock flag in handle locked write\n",
|
||||
xc->getCpuPtr()->name());
|
||||
// the rest of this code is not architectural;
|
||||
// it's just a debugging aid to help detect
|
||||
// livelock by warning on long sequences of failed
|
||||
|
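Taken together, handleLockedRead, handleLockedSnoop and handleLockedWrite maintain a per-context exclusive monitor through MISCREG_LOCKADDR and MISCREG_LOCKFLAG. The standalone sketch below models the same protocol without the gem5 plumbing, purely as an illustration of the LDREX/STREX handshake these helpers emulate.

#include <cstdint>

struct ExclusiveMonitor {
    uint64_t addr = 0;
    bool valid = false;

    // LDREX / load-exclusive arms the monitor for the given physical address.
    void loadExclusive(uint64_t paddr) { addr = paddr; valid = true; }

    // A snooped write to the monitored cache block clears the monitor.
    void snoop(uint64_t paddr, uint64_t blockMask) {
        if (valid && (addr & blockMask) == (paddr & blockMask))
            valid = false;
    }

    // STREX / store-exclusive succeeds only if the monitor is still armed for
    // the same address; the monitor is consumed either way.
    bool storeExclusive(uint64_t paddr) {
        bool ok = valid && addr == paddr;
        valid = false;
        return ok;
    }
};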
|
Some files were not shown because too many files have changed in this diff.