gpu-compute: add gpu_isa.hh to switch hdrs, add GPUISA to WF

the GPUISA class is meant to encapsulate any ISA-specific behavior - special
register accesses, ISA-specific WF/kernel state, etc. - in a generic enough
way that it can be used from ISA-agnostic code.
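
(as a purely illustrative sketch, not code from this commit: ISA-agnostic
gpu-compute code is expected to reach this object only through the switching
headers, so it never names HsailISA directly. the helper below and its name
are hypothetical.)

    // same two includes gpu_exec_context.hh uses to get at TheGpuISA
    #include "arch/gpu_isa.hh"
    #include "config/the_gpu_isa.hh"

    // hypothetical helper: compiles against whichever GPU ISA the build
    // selected, without naming HsailISA anywhere
    TheGpuISA::MiscReg
    readSpecialReg(const TheGpuISA::GPUISA &isa, int opIdx)
    {
        return isa.readMiscReg(opIdx);
    }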

gpu-compute: use the GPUISA object to advance the PC

the GPU model treats the PC as a pointer to individual instruction objects -
which are stored in a contiguous array - and not as a byte address to be
fetched from the real memory system. this is fine for HSAIL because the model
considers all instructions to be the same size.

in a machine ISA, however, instructions may be 32b or 64b, and branches are
calculated by advancing the PC by the number of words (4-byte chunks) it
needs to move in the real instruction stream. because of this there is a
mismatch between the PC we use to index into the instruction array and the
actual byte-address PC the ISA expects. here we move the PC advance
calculation into the ISA so that differences in instruction size can be
accounted for in a generic way.
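
(for illustration only - no machine ISA is added in this commit - an
advancePC that folds in the size difference could look roughly like the
hypothetical sketch below, given that the fetch unit change further down
makes an 8-byte instruction occupy two consecutive entries in the
instruction array:)

    // hypothetical machine-ISA variant; the HSAIL version added below just
    // returns old_pc + 1 since every HSAIL inst is modeled as the same size
    uint32_t
    advancePC(uint32_t old_pc, GPUDynInstPtr gpuDynInst)
    {
        // move one array entry per 32b word of the underlying instruction
        return old_pc +
            gpuDynInst->staticInstruction()->instSize() / sizeof(uint32_t);
    }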
Tony Gutierrez 2016-10-26 22:47:38 -04:00
parent 98d8a7051d
commit d327cdba07
7 changed files with 139 additions and 4 deletions

src/arch/SConscript

@@ -71,6 +71,7 @@ make_switching_dir('arch', isa_switch_hdrs, env)
if env['BUILD_GPU']:
gpu_isa_switch_hdrs = Split('''
gpu_decoder.hh
gpu_isa.hh
gpu_types.hh
''')

src/arch/hsail/gpu_isa.hh (new file, 82 lines)

@@ -0,0 +1,82 @@
/*
* Copyright (c) 2016 Advanced Micro Devices, Inc.
* All rights reserved.
*
* For use for simulation and test purposes only
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Anthony Gutierrez
*/

#ifndef __ARCH_HSAIL_GPU_ISA_HH__
#define __ARCH_HSAIL_GPU_ISA_HH__

#include <cstdint>

#include "base/misc.hh"
#include "gpu-compute/misc.hh"

class Wavefront;

namespace HsailISA
{
    typedef uint64_t MiscReg;

    class GPUISA
    {
      public:
        GPUISA(Wavefront &wf) : wavefront(wf)
        {
        }

        void
        writeMiscReg(int opIdx, MiscReg operandVal)
        {
            fatal("HSAIL does not implement misc registers yet\n");
        }

        MiscReg
        readMiscReg(int opIdx) const
        {
            fatal("HSAIL does not implement misc registers yet\n");
        }

        bool hasScalarUnit() const { return false; }

        uint32_t
        advancePC(uint32_t old_pc, GPUDynInstPtr gpuDynInst)
        {
            return old_pc + 1;
        }

      private:
        Wavefront &wavefront;
    };
}

#endif // __ARCH_HSAIL_GPU_ISA_HH__

src/gpu-compute/fetch_unit.cc

@@ -114,7 +114,18 @@ void
FetchUnit::initiateFetch(Wavefront *wavefront)
{
// calculate the virtual address to fetch from the SQC
-  Addr vaddr = wavefront->pc() + wavefront->instructionBuffer.size();
+  Addr vaddr = wavefront->pc();
/**
* the instruction buffer holds one instruction per entry, regardless
* of the underlying instruction's size. the PC, however, addresses
* instructions on a 32b granularity, so we must account for that here.
*/
for (int i = 0; i < wavefront->instructionBuffer.size(); ++i) {
int current_inst_size =
wavefront->instructionBuffer.at(i)->staticInstruction()->instSize();
vaddr += current_inst_size / sizeof(uint32_t);
}
vaddr = wavefront->basePtr + vaddr * sizeof(GPUStaticInst*);
DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Initiating fetch translation: %#x\n",
@@ -267,6 +278,18 @@ FetchUnit::processFetchReturn(PacketPtr pkt)
GPUStaticInst *inst_ptr = decoder.decode(inst_index_ptr[i]);
assert(inst_ptr);
if (inst_ptr->instSize() == 8) {
/**
* this instruction occupies two consecutive
* entries in the instruction array, the
* second of which contains a nullptr, so if
* this inst is 8 bytes we advance two entries
* instead of one
*/
++i;
}
DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: added %s\n",
computeUnit->cu_id, wavefront->simdId,
wavefront->wfSlotId, inst_ptr->disassemble());

src/gpu-compute/gpu_exec_context.cc

@@ -34,9 +34,10 @@
*/
#include "gpu-compute/gpu_exec_context.hh"
#include "gpu-compute/wavefront.hh"
GPUExecContext::GPUExecContext(ComputeUnit *_cu, Wavefront *_wf)
-  : cu(_cu), wf(_wf)
+  : cu(_cu), wf(_wf), gpuISA(_wf->gpuISA())
{
}
@@ -51,3 +52,15 @@ GPUExecContext::wavefront()
{
return wf;
}
TheGpuISA::MiscReg
GPUExecContext::readMiscReg(int opIdx) const
{
return gpuISA.readMiscReg(opIdx);
}
void
GPUExecContext::writeMiscReg(int opIdx, TheGpuISA::MiscReg operandVal)
{
gpuISA.writeMiscReg(opIdx, operandVal);
}

src/gpu-compute/gpu_exec_context.hh

@@ -36,6 +36,9 @@
#ifndef __GPU_EXEC_CONTEXT_HH__
#define __GPU_EXEC_CONTEXT_HH__
#include "arch/gpu_isa.hh"
#include "config/the_gpu_isa.hh"
class ComputeUnit;
class Wavefront;
@@ -46,9 +49,13 @@ class GPUExecContext
Wavefront* wavefront();
ComputeUnit* computeUnit();
TheGpuISA::MiscReg readMiscReg(int opIdx) const;
void writeMiscReg(int opIdx, TheGpuISA::MiscReg operandVal);
protected:
ComputeUnit *cu;
Wavefront *wf;
TheGpuISA::GPUISA &gpuISA;
};
#endif // __GPU_EXEC_CONTEXT_HH__

src/gpu-compute/wavefront.cc

@@ -49,7 +49,7 @@ WavefrontParams::create()
}
Wavefront::Wavefront(const Params *p)
-  : SimObject(p), callArgMem(nullptr)
+  : SimObject(p), callArgMem(nullptr), _gpuISA(*this)
{
lastTrace = 0;
simdId = p->simdId;
@@ -670,7 +670,7 @@ Wavefront::exec()
computeUnit->lastExecCycle[simdId]);
computeUnit->lastExecCycle[simdId] = computeUnit->totalCycles.value();
if (pc() == old_pc) {
-  uint32_t new_pc = old_pc + 1;
+  uint32_t new_pc = _gpuISA.advancePC(old_pc, ii);
// PC not modified by instruction, proceed to next or pop frame
pc(new_pc);
if (new_pc == rpc()) {

src/gpu-compute/wavefront.hh

@@ -42,8 +42,10 @@
#include <stack>
#include <vector>
#include "arch/gpu_isa.hh"
#include "base/misc.hh"
#include "base/types.hh"
#include "config/the_gpu_isa.hh"
#include "gpu-compute/condition_register_state.hh"
#include "gpu-compute/lds_state.hh"
#include "gpu-compute/misc.hh"
@@ -372,7 +374,14 @@ class Wavefront : public SimObject
*/
void setContext(const void *in);
TheGpuISA::GPUISA&
gpuISA()
{
return _gpuISA;
}
private:
TheGpuISA::GPUISA _gpuISA;
/**
* Stack containing Control Flow Graph nodes (i.e., kernel instructions)
* to be visited by the wavefront, and the associated execution masks. The