From 0aaf8ec6b83c65a60932fd9f9f4b81eca1bcce74 Mon Sep 17 00:00:00 2001 From: Steve Reinhardt Date: Thu, 3 Feb 2005 20:47:11 -0500 Subject: [PATCH] Add support for CPU models to execute the effective address calculation and memory access portions separately. Not currently used by any CPU models, but Kevin says he needs this. Also clean up handling of execution tracing for memory accesses (move it all into isa_desc and out of CPU models). Got rid of some ancient unused code too. arch/alpha/isa_desc: Add execute() methods to EAComp and MemAcc portions of memory access instructions, to allow CPU models to execute the effective address calculation and memory access portions separately. Requires the execution context to remember the effective address across the two invocations. Added setEA() and getEA() methods to execution context to support this. A model that does not use the split execution model can panic if these methods are called. Also added hook to call traceData->setAddr() after EA computation on any load or store operation. arch/isa_parser.py: Call traceData->setData() on memory writes (stores). cpu/simple_cpu/simple_cpu.cc: Get rid of unused code. cpu/simple_cpu/simple_cpu.hh: Add (non-functional) setEA() and getEA() methods for new split memory access execution support. --HG-- extra : convert_revision : bc2d2c758c4ca753812b9fa81f21038e55929ff0 --- arch/alpha/isa_desc | 171 +++++++++++++++++++++-------------- arch/isa_parser.py | 6 +- cpu/simple_cpu/simple_cpu.cc | 8 -- cpu/simple_cpu/simple_cpu.hh | 8 +- 4 files changed, 113 insertions(+), 80 deletions(-) diff --git a/arch/alpha/isa_desc b/arch/alpha/isa_desc index 5154d78d1..5602a6ba6 100644 --- a/arch/alpha/isa_desc +++ b/arch/alpha/isa_desc @@ -790,48 +790,14 @@ output header {{ protected: /// Constructor MemoryNoDisp(const char *mnem, MachInst _machInst, OpClass __opClass, - StaticInstPtr _eaCompPtr, - StaticInstPtr _memAccPtr) + StaticInstPtr _eaCompPtr = nullStaticInstPtr, + StaticInstPtr _memAccPtr = nullStaticInstPtr) : Memory(mnem, _machInst, __opClass, _eaCompPtr, _memAccPtr) { } std::string generateDisassembly(Addr pc, const SymbolTable *symtab); }; - - - /** - * Base class for "fake" effective-address computation - * instructions returnded by eaCompInst(). - */ - class EACompBase : public AlphaStaticInst - { - public: - /// Constructor - EACompBase(MachInst machInst) - : AlphaStaticInst("(eacomp)", machInst, IntAluOp) - { - } - - %(BasicExecDeclare)s - }; - - /** - * Base class for "fake" memory-access instructions returnded by - * memAccInst(). - */ - class MemAccBase : public AlphaStaticInst - { - public: - /// Constructor - MemAccBase(MachInst machInst, OpClass __opClass) - : AlphaStaticInst("(memacc)", machInst, __opClass) - { - } - - %(BasicExecDeclare)s - }; - }}; @@ -850,21 +816,6 @@ output decoder {{ } }}; -output exec {{ - Fault - EACompBase::execute(%(CPU_exec_context)s *, Trace::InstRecord *) - { - panic("attempt to execute eacomp"); - } - - Fault - MemAccBase::execute(%(CPU_exec_context)s *, Trace::InstRecord *) - { - panic("attempt to execute memacc"); - } -}}; - - def format LoadAddress(code) {{ iop = InstObjParams(name, Name, 'MemoryDisp32', CodeBlock(code)) header_output = BasicDeclare.subst(iop) @@ -885,21 +836,25 @@ def template LoadStoreDeclare {{ /** * "Fake" effective address computation class for "%(mnemonic)s". 
*/ - class EAComp : public EACompBase + class EAComp : public %(base_class)s { public: /// Constructor EAComp(MachInst machInst); + + %(BasicExecDeclare)s }; /** * "Fake" memory access instruction class for "%(mnemonic)s". */ - class MemAcc : public MemAccBase + class MemAcc : public %(base_class)s { public: /// Constructor MemAcc(MachInst machInst); + + %(BasicExecDeclare)s }; public: @@ -912,14 +867,17 @@ def template LoadStoreDeclare {{ }}; def template LoadStoreConstructor {{ + /** TODO: change op_class to AddrGenOp or something (requires + * creating new member of OpClass enum in op_class.hh, updating + * config files, etc.). */ inline %(class_name)s::EAComp::EAComp(MachInst machInst) - : EACompBase(machInst) + : %(base_class)s("%(mnemonic)s (EAComp)", machInst, IntAluOp) { %(ea_constructor)s; } inline %(class_name)s::MemAcc::MemAcc(MachInst machInst) - : MemAccBase(machInst, %(op_class)s) + : %(base_class)s("%(mnemonic)s (MemAcc)", machInst, %(op_class)s) { %(memacc_constructor)s; } @@ -932,6 +890,64 @@ def template LoadStoreConstructor {{ } }}; + +def template EACompExecute {{ + Fault + %(class_name)s::EAComp::execute(%(CPU_exec_context)s *xc, + Trace::InstRecord *traceData) + { + Addr EA; + Fault fault = No_Fault; + + %(fp_enable_check)s; + %(op_decl)s; + %(op_rd)s; + %(code)s; + + if (fault == No_Fault) { + %(op_wb)s; + xc->setEA(EA); + } + + return fault; + } +}}; + +def template MemAccExecute {{ + Fault + %(class_name)s::MemAcc::execute(%(CPU_exec_context)s *xc, + Trace::InstRecord *traceData) + { + Addr EA; + Fault fault = No_Fault; + + %(fp_enable_check)s; + %(op_decl)s; + %(op_nonmem_rd)s; + EA = xc->getEA(); + + if (fault == No_Fault) { + %(op_mem_rd)s; + %(code)s; + } + + if (fault == No_Fault) { + %(op_mem_wb)s; + } + + if (fault == No_Fault) { + %(postacc_code)s; + } + + if (fault == No_Fault) { + %(op_nonmem_wb)s; + } + + return fault; + } +}}; + + def template LoadStoreExecute {{ Fault %(class_name)s::execute(%(CPU_exec_context)s *xc, Trace::InstRecord *traceData) @@ -1022,18 +1038,33 @@ def LoadStoreBase(name, Name, ea_code, memacc_code, postacc_code = '', # Would be nice to autogenerate this list, but oh well. valid_mem_flags = ['LOCKED', 'NO_FAULT', 'EVICT_NEXT', 'PF_EXCLUSIVE'] - inst_flags = [] - mem_flags = [] - for f in flags: - if f in valid_mem_flags: - mem_flags.append(f) - else: - inst_flags.append(f) + mem_flags = [f for f in flags if f in valid_mem_flags] + inst_flags = [f for f in flags if f not in valid_mem_flags] + # add hook to get effective addresses into execution trace output. + ea_code += '\nif (traceData) { traceData->setAddr(EA); }\n' + + # generate code block objects ea_cblk = CodeBlock(ea_code) memacc_cblk = CodeBlock(memacc_code) postacc_cblk = CodeBlock(postacc_code) + # Some CPU models execute the memory operation as an atomic unit, + # while others want to separate them into an effective address + # computation and a memory access operation. As a result, we need + # to generate three StaticInst objects. Note that the latter two + # are nested inside the larger "atomic" one. + + # generate InstObjParams for EAComp object + ea_iop = InstObjParams(name, Name, base_class, ea_cblk, inst_flags) + + # generate InstObjParams for MemAcc object + memacc_iop = InstObjParams(name, Name, base_class, memacc_cblk, inst_flags) + # in the split execution model, the MemAcc portion is responsible + # for the post-access code. 
+ memacc_iop.postacc_code = postacc_cblk.code + + # generate InstObjParams for unified execution cblk = CodeBlock(ea_code + memacc_code + postacc_code) iop = InstObjParams(name, Name, base_class, cblk, inst_flags) @@ -1043,13 +1074,17 @@ def LoadStoreBase(name, Name, ea_code, memacc_code, postacc_code = '', iop.memacc_code = memacc_cblk.code iop.postacc_code = postacc_cblk.code - mem_flags = string.join(mem_flags, '|') - if mem_flags != '': - iop.constructor += '\n\tmemAccessFlags = ' + mem_flags + ';' + if mem_flags: + s = '\n\tmemAccessFlags = ' + string.join(mem_flags, '|') + ';' + iop.constructor += s + memacc_iop.constructor += s # (header_output, decoder_output, decode_block, exec_output) return (LoadStoreDeclare.subst(iop), LoadStoreConstructor.subst(iop), - decode_template.subst(iop), exec_template.subst(iop)) + decode_template.subst(iop), + EACompExecute.subst(ea_iop) + + MemAccExecute.subst(memacc_iop) + + exec_template.subst(iop)) }}; @@ -1460,8 +1495,8 @@ output header {{ /// Constructor HwLoadStore(const char *mnem, MachInst _machInst, OpClass __opClass, - StaticInstPtr _eaCompPtr, - StaticInstPtr _memAccPtr); + StaticInstPtr _eaCompPtr = nullStaticInstPtr, + StaticInstPtr _memAccPtr = nullStaticInstPtr); std::string generateDisassembly(Addr pc, const SymbolTable *symtab); }; diff --git a/arch/isa_parser.py b/arch/isa_parser.py index c0b5131de..18e4b0a45 100755 --- a/arch/isa_parser.py +++ b/arch/isa_parser.py @@ -1210,10 +1210,12 @@ class MemOperandTraits(OperandTraits): def makeWrite(self, op_desc): (size, type, is_signed) = operandSizeMap[op_desc.eff_ext] eff_type = 'uint%d_t' % size - return 'fault = xc->write((%s&)%s, EA, %s_flags,' \ - ' &%s_write_result);\n' \ + wb = 'fault = xc->write((%s&)%s, EA, %s_flags, &%s_write_result);\n' \ % (eff_type, op_desc.munged_name, op_desc.base_name, op_desc.base_name) + wb += 'if (traceData) { traceData->setData(%s); }' % \ + op_desc.munged_name + return wb class NPCOperandTraits(OperandTraits): def makeConstructor(self, op_desc): diff --git a/cpu/simple_cpu/simple_cpu.cc b/cpu/simple_cpu/simple_cpu.cc index e3cce28ae..f292b25a5 100644 --- a/cpu/simple_cpu/simple_cpu.cc +++ b/cpu/simple_cpu/simple_cpu.cc @@ -74,14 +74,6 @@ using namespace std; -template -void -SimpleCPU::trace_data(T data) { - if (traceData) { - traceData->setData(data); - } -} - SimpleCPU::TickEvent::TickEvent(SimpleCPU *c) : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c), multiplier(1) diff --git a/cpu/simple_cpu/simple_cpu.hh b/cpu/simple_cpu/simple_cpu.hh index 9b6e2423b..a81d6365b 100644 --- a/cpu/simple_cpu/simple_cpu.hh +++ b/cpu/simple_cpu/simple_cpu.hh @@ -102,8 +102,7 @@ class SimpleCPU : public BaseCPU private: Trace::InstRecord *traceData; - template - void trace_data(T data); + public: // enum Status { @@ -244,6 +243,11 @@ class SimpleCPU : public BaseCPU template Fault write(T data, Addr addr, unsigned flags, uint64_t *res); + // These functions are only used in CPU models that split + // effective address computation from the actual memory access. + void setEA(Addr EA) { panic("SimpleCPU::setEA() not implemented\n"); } + Addr getEA() { panic("SimpleCPU::getEA() not implemented\n"); } + void prefetch(Addr addr, unsigned flags) { // need to do this...
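
Note for reviewers (not part of the patch itself): the fragment below is a minimal sketch of how a CPU model that adopts the split execution model is expected to drive the two halves. It assumes a hypothetical SplitCPU class whose exec-context hooks actually store the address (SimpleCPU, above, just panics in setEA()/getEA()). The eaCompInst()/memAccInst() accessors, the execute(xc, traceData) signature, Fault/No_Fault, Addr, and the setEA()/getEA() contract all come from the existing code and this patch; the class and member names in the sketch are illustrative only and would build against the existing StaticInst and Trace headers.

    // Hypothetical split-execution CPU model -- the names SplitCPU,
    // instEA, and executeMemInst() are assumptions, not part of the tree.
    class SplitCPU
    {
      private:
        Addr instEA;   // effective address carried between the two halves

      public:
        // Exec-context hooks added by this patch; a split model stores
        // the address instead of panicking the way SimpleCPU does.
        void setEA(Addr EA) { instEA = EA; }
        Addr getEA() { return instEA; }

        // Drive a memory instruction in two steps.  The two execute()
        // calls may be separated by any number of cycles, as long as the
        // same execution context (and hence instEA) is used for both.
        Fault executeMemInst(StaticInstPtr inst, Trace::InstRecord *trace)
        {
            // First half: EAComp::execute() computes EA, writes back any
            // non-memory results, and calls xc->setEA(EA).
            Fault fault = inst->eaCompInst()->execute(this, trace);

            // Second half: MemAcc::execute() calls xc->getEA() to recover
            // the address, performs the access, and runs any post-access
            // code for the instruction.
            if (fault == No_Fault)
                fault = inst->memAccInst()->execute(this, trace);

            return fault;
        }
    };

A model that executes memory operations atomically keeps calling the unified execute() on the parent instruction and never touches setEA()/getEA(), which is why SimpleCPU can leave both as panics.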