2009-02-11 00:49:29 +01:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2007 MIPS Technologies, Inc.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions are
|
|
|
|
* met: redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer;
|
|
|
|
* redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution;
|
|
|
|
* neither the name of the copyright holders nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived from
|
|
|
|
* this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* Authors: Korey Sewell
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __CPU_INORDER_CPU_HH__
|
|
|
|
#define __CPU_INORDER_CPU_HH__
|
|
|
|
|
|
|
|
#include <iostream>
|
|
|
|
#include <list>
|
|
|
|
#include <queue>
|
|
|
|
#include <set>
|
|
|
|
#include <vector>
|
|
|
|
|
|
|
|
#include "arch/isa_traits.hh"
|
2009-07-09 08:02:20 +02:00
|
|
|
#include "arch/types.hh"
|
2009-09-15 07:44:48 +02:00
|
|
|
#include "arch/registers.hh"
|
2009-02-11 00:49:29 +01:00
|
|
|
#include "base/statistics.hh"
|
2011-01-03 23:35:47 +01:00
|
|
|
#include "cpu/timebuf.hh"
|
2009-05-26 18:23:13 +02:00
|
|
|
#include "base/types.hh"
|
2009-02-11 00:49:29 +01:00
|
|
|
#include "config/full_system.hh"
|
2009-09-23 17:34:21 +02:00
|
|
|
#include "config/the_isa.hh"
|
2009-02-11 00:49:29 +01:00
|
|
|
#include "cpu/activity.hh"
|
|
|
|
#include "cpu/base.hh"
|
|
|
|
#include "cpu/simple_thread.hh"
|
|
|
|
#include "cpu/inorder/inorder_dyn_inst.hh"
|
|
|
|
#include "cpu/inorder/pipeline_traits.hh"
|
|
|
|
#include "cpu/inorder/pipeline_stage.hh"
|
|
|
|
#include "cpu/inorder/thread_state.hh"
|
|
|
|
#include "cpu/inorder/reg_dep_map.hh"
|
|
|
|
#include "cpu/o3/dep_graph.hh"
|
|
|
|
#include "cpu/o3/rename_map.hh"
|
|
|
|
#include "mem/packet.hh"
|
|
|
|
#include "mem/port.hh"
|
|
|
|
#include "mem/request.hh"
|
|
|
|
#include "sim/eventq.hh"
|
|
|
|
#include "sim/process.hh"
|
|
|
|
|
|
|
|
class ThreadContext;
|
|
|
|
class MemInterface;
|
|
|
|
class MemObject;
|
|
|
|
class Process;
|
|
|
|
class ResourcePool;
|
|
|
|
|
|
|
|
class InOrderCPU : public BaseCPU
|
|
|
|
{
|
|
|
|
|
|
|
|
protected:
|
|
|
|
typedef ThePipeline::Params Params;
|
|
|
|
typedef InOrderThreadState Thread;
|
|
|
|
|
|
|
|
//ISA TypeDefs
|
|
|
|
typedef TheISA::IntReg IntReg;
|
|
|
|
typedef TheISA::FloatReg FloatReg;
|
|
|
|
typedef TheISA::FloatRegBits FloatRegBits;
|
2009-07-09 08:02:20 +02:00
|
|
|
typedef TheISA::MiscReg MiscReg;
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
//DynInstPtr TypeDefs
|
|
|
|
typedef ThePipeline::DynInstPtr DynInstPtr;
|
|
|
|
typedef std::list<DynInstPtr>::iterator ListIt;
|
|
|
|
|
|
|
|
//TimeBuffer TypeDefs
|
|
|
|
typedef TimeBuffer<InterStageStruct> StageQueue;
|
|
|
|
|
|
|
|
friend class Resource;
|
2010-02-01 00:26:32 +01:00
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
public:
|
|
|
|
/** Constructs a CPU with the given parameters. */
|
|
|
|
InOrderCPU(Params *params);
|
2010-02-01 00:30:08 +01:00
|
|
|
/* Destructor */
|
|
|
|
~InOrderCPU();
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
/** CPU ID */
|
|
|
|
int cpu_id;
|
|
|
|
|
2010-02-01 00:29:59 +01:00
|
|
|
// SE Mode ASIDs
|
|
|
|
ThreadID asid[ThePipeline::MaxThreads];
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
/** Type of core that this is */
|
|
|
|
std::string coreType;
|
|
|
|
|
2010-02-01 00:25:13 +01:00
|
|
|
// Only need for SE MODE
|
|
|
|
enum ThreadModel {
|
|
|
|
Single,
|
|
|
|
SMT,
|
|
|
|
SwitchOnCacheMiss
|
|
|
|
};
|
|
|
|
|
|
|
|
ThreadModel threadModel;
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
/** Returns this CPU's id. */
int readCpuId() { return cpu_id; }

/** Sets this CPU's id. */
void setCpuId(int val) { cpu_id = val; }

/** Parameter structure this CPU was constructed from. */
Params *cpu_params;
|
|
|
|
|
|
|
|
public:
|
|
|
|
enum Status {
|
|
|
|
Running,
|
|
|
|
Idle,
|
|
|
|
Halted,
|
|
|
|
Blocked,
|
|
|
|
SwitchedOut
|
|
|
|
};
|
|
|
|
|
|
|
|
/** Overall CPU status. */
|
|
|
|
Status _status;
|
|
|
|
private:
|
|
|
|
/** Define TickEvent for the CPU */
class TickEvent : public Event
{
  private:
    /** Pointer to the CPU. */
    InOrderCPU *cpu;

  public:
    /** Constructs a tick event. */
    TickEvent(InOrderCPU *c);

    /** Processes a tick event, calling tick() on the CPU. */
    void process();

    /** Returns the description of the tick event. */
    const char *description();
};
|
|
|
|
|
|
|
|
/** The tick event used for scheduling CPU ticks. */
|
|
|
|
TickEvent tickEvent;
|
|
|
|
|
|
|
|
/** Schedule tick event, regardless of its current state. */
void scheduleTickEvent(int delay)
{
    // A new tick may only be scheduled when no tick is pending:
    // either nothing is scheduled or the pending event was squashed.
    assert(!tickEvent.scheduled() || tickEvent.squashed());
    // reschedule() with 'true' schedules the event even if it was
    // previously squashed; fire on the next CPU clock edge after
    // 'delay' cycles.
    reschedule(&tickEvent, nextCycle(curTick() + ticks(delay)), true);
}
|
|
|
|
|
|
|
|
/** Unschedule tick event, regardless of its current state. */
|
|
|
|
void unscheduleTickEvent()
|
|
|
|
{
|
|
|
|
if (tickEvent.scheduled())
|
|
|
|
tickEvent.squash();
|
|
|
|
}
|
|
|
|
|
|
|
|
public:
|
|
|
|
// List of Events That can be scheduled from
|
|
|
|
// within the CPU.
|
|
|
|
// NOTE(1): The Resource Pool also uses this event list
|
|
|
|
// to schedule events broadcast to all resources interfaces
|
|
|
|
// NOTE(2): CPU Events usually need to schedule a corresponding resource
|
|
|
|
// pool event.
|
|
|
|
enum CPUEventType {
|
|
|
|
ActivateThread,
|
2010-02-01 00:26:32 +01:00
|
|
|
ActivateNextReadyThread,
|
|
|
|
DeactivateThread,
|
2010-02-01 00:28:05 +01:00
|
|
|
HaltThread,
|
2009-02-11 00:49:29 +01:00
|
|
|
SuspendThread,
|
|
|
|
Trap,
|
|
|
|
InstGraduated,
|
2010-02-01 00:26:13 +01:00
|
|
|
SquashFromMemStall,
|
2009-02-11 00:49:29 +01:00
|
|
|
UpdatePCs,
|
|
|
|
NumCPUEvents
|
|
|
|
};
|
|
|
|
|
2009-05-12 21:01:16 +02:00
|
|
|
static std::string eventNames[NumCPUEvents];
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
/** Define CPU Event */
class CPUEvent : public Event
{
  protected:
    /** Pointer to the CPU this event operates on. */
    InOrderCPU *cpu;

  public:
    /** Kind of CPU-level action performed when the event fires. */
    CPUEventType cpuEventType;

    /** Thread the event applies to. */
    ThreadID tid;

    /** Instruction associated with this event. */
    DynInstPtr inst;

    /** Fault that triggered this event, if any. */
    Fault fault;

    /** Virtual processing element id; always reset to 0 by setEvent(). */
    unsigned vpe;

  public:
    /** Constructs a CPU event. */
    CPUEvent(InOrderCPU *_cpu, CPUEventType e_type, Fault fault,
             ThreadID _tid, DynInstPtr inst, unsigned event_pri_offset);

    /** Set Type of Event To Be Scheduled */
    void setEvent(CPUEventType e_type, Fault _fault, ThreadID _tid,
                  DynInstPtr _inst)
    {
        fault = _fault;
        cpuEventType = e_type;
        tid = _tid;
        inst = _inst;
        vpe = 0;
    }

    /** Processes a CPU event. */
    void process();

    /** Returns the description of the CPU event. */
    const char *description();

    /** Schedule Event */
    void scheduleEvent(int delay);

    /** Unschedule This Event */
    void unscheduleEvent();
};
|
|
|
|
|
|
|
|
/** Schedule a CPU Event */
|
2009-05-26 18:23:13 +02:00
|
|
|
void scheduleCpuEvent(CPUEventType cpu_event, Fault fault, ThreadID tid,
|
2010-02-01 00:26:26 +01:00
|
|
|
DynInstPtr inst, unsigned delay = 0,
|
|
|
|
unsigned event_pri_offset = 0);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
public:
|
|
|
|
/** Interface between the CPU and CPU resources. */
|
|
|
|
ResourcePool *resPool;
|
|
|
|
|
2010-01-31 23:18:15 +01:00
|
|
|
/** Instruction used to signify that there is no *real* instruction in
|
|
|
|
buffer slot */
|
2010-02-01 00:29:59 +01:00
|
|
|
DynInstPtr dummyInst[ThePipeline::MaxThreads];
|
2010-02-01 00:30:48 +01:00
|
|
|
DynInstPtr dummyBufferInst;
|
|
|
|
DynInstPtr dummyReqInst;
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Used by resources to signify a denied access to a resource. */
|
2010-02-01 00:29:59 +01:00
|
|
|
ResourceRequest *dummyReq[ThePipeline::MaxThreads];
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Identifies the resource id that identifies a fetch
|
|
|
|
* access unit.
|
|
|
|
*/
|
|
|
|
unsigned fetchPortIdx;
|
|
|
|
|
2009-05-12 21:01:13 +02:00
|
|
|
/** Identifies the resource id that identifies a ITB */
|
|
|
|
unsigned itbIdx;
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
/** Identifies the resource id that identifies a data
|
|
|
|
* access unit.
|
|
|
|
*/
|
|
|
|
unsigned dataPortIdx;
|
|
|
|
|
2009-05-12 21:01:13 +02:00
|
|
|
/** Identifies the resource id that identifies a DTB */
|
|
|
|
unsigned dtbIdx;
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
/** The Pipeline Stages for the CPU */
|
|
|
|
PipelineStage *pipelineStage[ThePipeline::NumStages];
|
|
|
|
|
2011-02-04 06:08:18 +01:00
|
|
|
/** Width (processing bandwidth) of each stage */
|
|
|
|
int stageWidth;
|
|
|
|
|
2009-05-12 21:01:14 +02:00
|
|
|
/** Program Counters */
|
ISA,CPU,etc: Create an ISA defined PC type that abstracts out ISA behaviors.
This change is a low level and pervasive reorganization of how PCs are managed
in M5. Back when Alpha was the only ISA, there were only 2 PCs to worry about,
the PC and the NPC, and the lsb of the PC signaled whether or not you were in
PAL mode. As other ISAs were added, we had to add an NNPC, micro PC and next
micropc, x86 and ARM introduced variable length instruction sets, and ARM
started to keep track of mode bits in the PC. Each CPU model handled PCs in
its own custom way that needed to be updated individually to handle the new
dimensions of variability, or, in the case of ARMs mode-bit-in-the-pc hack,
the complexity could be hidden in the ISA at the ISA implementation's expense.
Areas like the branch predictor hadn't been updated to handle branch delay
slots or micropcs, and it turns out that had introduced a significant (10s of
percent) performance bug in SPARC and to a lesser extend MIPS. Rather than
perpetuate the problem by reworking O3 again to handle the PC features needed
by x86, this change was introduced to rework PC handling in a more modular,
transparent, and hopefully efficient way.
PC type:
Rather than having the superset of all possible elements of PC state declared
in each of the CPU models, each ISA defines its own PCState type which has
exactly the elements it needs. A cross product of canned PCState classes are
defined in the new "generic" ISA directory for ISAs with/without delay slots
and microcode. These are either typedef-ed or subclassed by each ISA. To read
or write this structure through a *Context, you use the new pcState() accessor
which reads or writes depending on whether it has an argument. If you just
want the address of the current or next instruction or the current micro PC,
you can get those through read-only accessors on either the PCState type or
the *Contexts. These are instAddr(), nextInstAddr(), and microPC(). Note the
move away from readPC. That name is ambiguous since it's not clear whether or
not it should be the actual address to fetch from, or if it should have extra
bits in it like the PAL mode bit. Each class is free to define its own
functions to get at whatever values it needs however it needs to to be used in
ISA specific code. Eventually Alpha's PAL mode bit could be moved out of the
PC and into a separate field like ARM.
These types can be reset to a particular pc (where npc = pc +
sizeof(MachInst), nnpc = npc + sizeof(MachInst), upc = 0, nupc = 1 as
appropriate), printed, serialized, and compared. There is a branching()
function which encapsulates code in the CPU models that checked if an
instruction branched or not. Exactly what that means in the context of branch
delay slots which can skip an instruction when not taken is ambiguous, and
ideally this function and its uses can be eliminated. PCStates also generally
know how to advance themselves in various ways depending on if they point at
an instruction, a microop, or the last microop of a macroop. More on that
later.
Ideally, accessing all the PCs at once when setting them will improve
performance of M5 even though more data needs to be moved around. This is
because often all the PCs need to be manipulated together, and by getting them
all at once you avoid multiple function calls. Also, the PCs of a particular
thread will have spatial locality in the cache. Previously they were grouped
by element in arrays which spread out accesses.
Advancing the PC:
The PCs were previously managed entirely by the CPU which had to know about PC
semantics, try to figure out which dimension to increment the PC in, what to
set NPC/NNPC, etc. These decisions are best left to the ISA in conjunction
with the PC type itself. Because most of the information about how to
increment the PC (mainly what type of instruction it refers to) is contained
in the instruction object, a new advancePC virtual function was added to the
StaticInst class. Subclasses provide an implementation that moves around the
right element of the PC with a minimal amount of decision making. In ISAs like
Alpha, the instructions always simply assign NPC to PC without having to worry
about micropcs, nnpcs, etc. The added cost of a virtual function call should
be outweighed by not having to figure out as much about what to do with the
PCs and mucking around with the extra elements.
One drawback of making the StaticInsts advance the PC is that you have to
actually have one to advance the PC. This would, superficially, seem to
require decoding an instruction before fetch could advance. This is, as far as
I can tell, realistic. fetch would advance through memory addresses, not PCs,
perhaps predicting new memory addresses using existing ones. More
sophisticated decisions about control flow would be made later on, after the
instruction was decoded, and handed back to fetch. If branching needs to
happen, some amount of decoding needs to happen to see that it's a branch,
what the target is, etc. This could get a little more complicated if that gets
done by the predecoder, but I'm choosing to ignore that for now.
Variable length instructions:
To handle variable length instructions in x86 and ARM, the predecoder now
takes in the current PC by reference to the getExtMachInst function. It can
modify the PC however it needs to (by setting NPC to be the PC + instruction
length, for instance). This could be improved since the CPU doesn't know if
the PC was modified and always has to write it back.
ISA parser:
To support the new API, all PC related operand types were removed from the
parser and replaced with a PCState type. There are two warts on this
implementation. First, as with all the other operand types, the PCState still
has to have a valid operand type even though it doesn't use it. Second, using
syntax like PCS.npc(target) doesn't work for two reasons, this looks like the
syntax for operand type overriding, and the parser can't figure out if you're
reading or writing. Instructions that use the PCS operand (which I've
consistently called it) need to first read it into a local variable,
manipulate it, and then write it back out.
Return address stack:
The return address stack needed a little extra help because, in the presence
of branch delay slots, it has to merge together elements of the return PC and
the call PC. To handle that, a buildRetPC utility function was added. There
are basically only two versions in all the ISAs, but it didn't seem short
enough to put into the generic ISA directory. Also, the branch predictor code
in O3 and InOrder were adjusted so that they always store the PC of the actual
call instruction in the RAS, not the next PC. If the call instruction is a
microop, the next PC refers to the next microop in the same macroop which is
probably not desirable. The buildRetPC function advances the PC intelligently
to the next macroop (in an ISA specific way) so that that case works.
Change in stats:
There were no change in stats except in MIPS and SPARC in the O3 model. MIPS
runs in about 9% fewer ticks. SPARC runs with 30%-50% fewer ticks, which could
likely be improved further by setting call/return instruction flags and taking
advantage of the RAS.
TODO:
Add != operators to the PCState classes, defined trivially to be !(a==b).
Smooth out places where PCs are split apart, passed around, and put back
together later. I think this might happen in SPARC's fault code. Add ISA
specific constructors that allow setting PC elements without calling a bunch
of accessors. Try to eliminate the need for the branching() function. Factor
out Alpha's PAL mode pc bit into a separate flag field, and eliminate places
where it's blindly masked out or tested in the PC.
2010-10-31 08:07:20 +01:00
|
|
|
TheISA::PCState pc[ThePipeline::MaxThreads];
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** The Register File for the CPU */
|
2009-07-09 08:02:20 +02:00
|
|
|
union {
|
|
|
|
FloatReg f[ThePipeline::MaxThreads][TheISA::NumFloatRegs];
|
|
|
|
FloatRegBits i[ThePipeline::MaxThreads][TheISA::NumFloatRegs];
|
|
|
|
} floatRegs;
|
2009-07-09 08:02:20 +02:00
|
|
|
TheISA::IntReg intRegs[ThePipeline::MaxThreads][TheISA::NumIntRegs];
|
2009-07-09 08:02:20 +02:00
|
|
|
|
|
|
|
/** ISA state */
|
|
|
|
TheISA::ISA isa[ThePipeline::MaxThreads];
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Dependency Tracker for Integer & Floating Point Regs */
|
|
|
|
RegDepMap archRegDepMap[ThePipeline::MaxThreads];
|
|
|
|
|
|
|
|
/** Global communication structure */
|
|
|
|
TimeBuffer<TimeStruct> timeBuffer;
|
|
|
|
|
|
|
|
/** Communication structure that sits in between pipeline stages */
|
|
|
|
StageQueue *stageQueue[ThePipeline::NumStages-1];
|
|
|
|
|
2009-05-12 21:01:14 +02:00
|
|
|
TheISA::TLB *getITBPtr();
|
|
|
|
TheISA::TLB *getDTBPtr();
|
2009-05-12 21:01:13 +02:00
|
|
|
|
2011-02-12 16:14:36 +01:00
|
|
|
/** Accessor Type for the SkedCache */
|
|
|
|
typedef uint32_t SkedID;
|
|
|
|
|
|
|
|
/** Cache of Instruction Schedule using the instruction's name as a key */
|
|
|
|
static std::map<SkedID, ThePipeline::RSkedPtr> skedCache;
|
|
|
|
|
|
|
|
typedef std::map<SkedID, ThePipeline::RSkedPtr>::iterator SkedCacheIt;
|
|
|
|
|
|
|
|
/** Initialized to last iterator in map, signifying a invalid entry
|
|
|
|
on map searches
|
|
|
|
*/
|
|
|
|
SkedCacheIt endOfSkedIt;
|
|
|
|
|
2011-02-12 16:14:40 +01:00
|
|
|
ThePipeline::RSkedPtr frontEndSked;
|
|
|
|
|
2011-02-12 16:14:36 +01:00
|
|
|
/** Add a new instruction schedule to the schedule cache */
|
|
|
|
void addToSkedCache(DynInstPtr inst, ThePipeline::RSkedPtr inst_sked)
|
|
|
|
{
|
|
|
|
SkedID sked_id = genSkedID(inst);
|
2011-02-18 20:29:26 +01:00
|
|
|
assert(skedCache.find(sked_id) == skedCache.end());
|
2011-02-12 16:14:36 +01:00
|
|
|
skedCache[sked_id] = inst_sked;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/** Find a instruction schedule */
|
|
|
|
ThePipeline::RSkedPtr lookupSked(DynInstPtr inst)
|
|
|
|
{
|
|
|
|
SkedID sked_id = genSkedID(inst);
|
|
|
|
SkedCacheIt lookup_it = skedCache.find(sked_id);
|
|
|
|
|
|
|
|
if (lookup_it != endOfSkedIt) {
|
|
|
|
return (*lookup_it).second;
|
|
|
|
} else {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Bit positions used to pack an instruction's salient properties into
// a single 32-bit schedule-cache key (SkedID).
static const uint8_t INST_OPCLASS = 26;
static const uint8_t INST_LOAD = 25;
static const uint8_t INST_STORE = 24;
static const uint8_t INST_CONTROL = 23;
static const uint8_t INST_NONSPEC = 22;
static const uint8_t INST_DEST_REGS = 18;
static const uint8_t INST_SRC_REGS = 14;

/** Generate the schedule-cache key for an instruction by OR-ing its
 *  schedule-relevant attributes into one SkedID. Instructions with
 *  identical keys share a cached resource schedule.
 *  NOTE(review): dest/src register counts land in 4-bit fields
 *  (bits 18-21 and 14-17); counts above 15 would bleed into the
 *  neighboring field -- assumed not to occur for this ISA, confirm.
 */
inline SkedID genSkedID(DynInstPtr inst)
{
    SkedID id = 0;
    id = (inst->opClass() << INST_OPCLASS) |
        (inst->isLoad() << INST_LOAD) |
        (inst->isStore() << INST_STORE) |
        (inst->isControl() << INST_CONTROL) |
        (inst->isNonSpeculative() << INST_NONSPEC) |
        (inst->numDestRegs() << INST_DEST_REGS) |
        (inst->numSrcRegs() << INST_SRC_REGS);
    return id;
}
|
|
|
|
|
2011-02-12 16:14:40 +01:00
|
|
|
ThePipeline::RSkedPtr createFrontEndSked();
|
|
|
|
ThePipeline::RSkedPtr createBackEndSked(DynInstPtr inst);
|
|
|
|
|
|
|
|
/** Helper that builds the resource schedule (RSked) for one pipeline
 *  stage, appending ScheduleEntry tasks in the order needs() is called.
 */
class StageScheduler {
  private:
    /** Schedule being filled in. */
    ThePipeline::RSkedPtr rsked;

    /** Pipeline stage the appended entries execute in. */
    int stageNum;

    /** Priority assigned to the next entry; incremented per needs()
     *  call so tasks execute in insertion order. */
    int nextTaskPriority;

  public:
    StageScheduler(ThePipeline::RSkedPtr _rsked, int stage_num)
        : rsked(_rsked), stageNum(stage_num),
          nextTaskPriority(0)
    { }

    /** Append a request to resource 'unit' for this stage. */
    void needs(int unit, int request) {
        rsked->push(new ScheduleEntry(
                        stageNum, nextTaskPriority++, unit, request
                    ));
    }

    /** Append a request to resource 'unit' carrying an extra
     *  request-specific parameter. */
    void needs(int unit, int request, int param) {
        rsked->push(new ScheduleEntry(
                        stageNum, nextTaskPriority++, unit, request, param
                    ));
    }
};
|
2011-02-12 16:14:36 +01:00
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
public:
|
|
|
|
|
|
|
|
/** Registers statistics. */
|
|
|
|
void regStats();
|
|
|
|
|
|
|
|
/** Ticks CPU, calling tick() on each stage, and checking the overall
|
|
|
|
* activity to see if the CPU should deschedule itself.
|
|
|
|
*/
|
|
|
|
void tick();
|
|
|
|
|
|
|
|
/** Initialize the CPU */
|
|
|
|
void init();
|
|
|
|
|
|
|
|
/** Reset State in the CPU */
|
|
|
|
void reset();
|
|
|
|
|
|
|
|
/** Get a Memory Port */
|
|
|
|
Port* getPort(const std::string &if_name, int idx = 0);
|
|
|
|
|
2009-09-15 07:44:48 +02:00
|
|
|
#if FULL_SYSTEM
|
|
|
|
/** HW return from error interrupt. */
|
|
|
|
Fault hwrei(ThreadID tid);
|
|
|
|
|
|
|
|
bool simPalCheck(int palFunc, ThreadID tid);
|
|
|
|
|
|
|
|
/** Returns the Fault for any valid interrupt. */
|
|
|
|
Fault getInterrupts();
|
|
|
|
|
|
|
|
/** Processes any an interrupt fault. */
|
|
|
|
void processInterrupts(Fault interrupt);
|
|
|
|
|
|
|
|
/** Halts the CPU. */
|
|
|
|
void halt() { panic("Halt not implemented!\n"); }
|
|
|
|
|
|
|
|
/** Update the Virt and Phys ports of all ThreadContexts to
|
|
|
|
* reflect change in memory connections. */
|
|
|
|
void updateMemPorts();
|
|
|
|
|
|
|
|
/** Check if this address is a valid instruction address. */
|
|
|
|
bool validInstAddr(Addr addr) { return true; }
|
|
|
|
|
|
|
|
/** Check if this address is a valid data address. */
|
|
|
|
bool validDataAddr(Addr addr) { return true; }
|
|
|
|
#endif
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
/** trap() - sets up a trap event on the cpuTraps to handle given fault.
|
|
|
|
* trapCPU() - Traps to handle given fault
|
|
|
|
*/
|
2010-09-14 04:26:03 +02:00
|
|
|
void trap(Fault fault, ThreadID tid, DynInstPtr inst, int delay = 0);
|
|
|
|
void trapCPU(Fault fault, ThreadID tid, DynInstPtr inst);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Add Thread to Active Threads List. */
|
2009-05-26 18:23:13 +02:00
|
|
|
void activateContext(ThreadID tid, int delay = 0);
|
|
|
|
void activateThread(ThreadID tid);
|
2010-02-01 00:27:38 +01:00
|
|
|
void activateThreadInPipeline(ThreadID tid);
|
|
|
|
|
2010-02-01 00:26:32 +01:00
|
|
|
/** Add Thread to Active Threads List. */
|
|
|
|
void activateNextReadyContext(int delay = 0);
|
|
|
|
void activateNextReadyThread();
|
|
|
|
|
2010-02-01 00:26:40 +01:00
|
|
|
/** Remove from Active Thread List */
|
|
|
|
void deactivateContext(ThreadID tid, int delay = 0);
|
|
|
|
void deactivateThread(ThreadID tid);
|
|
|
|
|
|
|
|
/** Suspend Thread, Remove from Active Threads List, Add to Suspend List */
|
2009-05-26 18:23:13 +02:00
|
|
|
void suspendContext(ThreadID tid, int delay = 0);
|
|
|
|
void suspendThread(ThreadID tid);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
2010-02-01 00:28:05 +01:00
|
|
|
/** Halt Thread, Remove from Active Thread List, Place Thread on Halted
|
|
|
|
* Threads List
|
|
|
|
*/
|
|
|
|
void haltContext(ThreadID tid, int delay = 0);
|
|
|
|
void haltThread(ThreadID tid);
|
2010-02-01 00:26:40 +01:00
|
|
|
|
|
|
|
/** squashFromMemStall() - sets up a squash event
|
|
|
|
* squashDueToMemStall() - squashes pipeline
|
2010-02-01 00:28:05 +01:00
|
|
|
* @note: maybe squashContext/squashThread would be better?
|
2010-02-01 00:26:40 +01:00
|
|
|
*/
|
|
|
|
void squashFromMemStall(DynInstPtr inst, ThreadID tid, int delay = 0);
|
|
|
|
void squashDueToMemStall(int stage_num, InstSeqNum seq_num, ThreadID tid);
|
|
|
|
|
|
|
|
void removePipelineStalls(ThreadID tid);
|
|
|
|
void squashThreadInPipeline(ThreadID tid);
|
|
|
|
void squashBehindMemStall(int stage_num, InstSeqNum seq_num, ThreadID tid);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
2009-05-12 21:01:13 +02:00
|
|
|
PipelineStage* getPipeStage(int stage_num);
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
/** Returns this CPU's context id.
 *  Currently a placeholder: always returns 0 and emits a one-time
 *  "hack" warning -- see hack_once() below.
 */
int
contextId()
{
    hack_once("return a bogus context id");
    return 0;
}
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
/** Update The Order In Which We Process Threads. */
|
|
|
|
void updateThreadPriority();
|
|
|
|
|
|
|
|
/** Switches a Pipeline Stage to Active. (Unused currently)
 *  Body is intentionally a no-op; the stage call is commented out. */
void switchToActive(int stage_idx)
{ /*pipelineStage[stage_idx]->switchToActive();*/ }
|
|
|
|
|
|
|
|
/** Get the current instruction sequence number, and increment it. */
InstSeqNum getAndIncrementInstSeq(ThreadID tid)
{ return globalSeqNum[tid]++; }

/** Peek at the next instruction sequence number.
 *  (Does NOT increment -- use getAndIncrementInstSeq() for that.) */
InstSeqNum nextInstSeqNum(ThreadID tid)
{ return globalSeqNum[tid]; }

/** Increment Instruction Sequence Number */
void incrInstSeqNum(ThreadID tid)
{ globalSeqNum[tid]++; }

/** Set Instruction Sequence Number */
void setInstSeqNum(ThreadID tid, InstSeqNum seq_num)
{
    globalSeqNum[tid] = seq_num;
}
|
|
|
|
|
2009-03-04 19:17:08 +01:00
|
|
|
/** Get & Update Next Event Number */
InstSeqNum getNextEventNum()
{
#ifdef DEBUG
    // Event numbers are only tracked in DEBUG builds.
    return cpuEventNum++;
#else
    return 0;
#endif
}
|
|
|
|
|
|
|
|
/** Register file accessors */
|
2009-05-26 18:23:13 +02:00
|
|
|
uint64_t readIntReg(int reg_idx, ThreadID tid);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
2009-07-09 08:02:20 +02:00
|
|
|
FloatReg readFloatReg(int reg_idx, ThreadID tid);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
2009-07-09 08:02:20 +02:00
|
|
|
FloatRegBits readFloatRegBits(int reg_idx, ThreadID tid);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
2009-05-26 18:23:13 +02:00
|
|
|
void setIntReg(int reg_idx, uint64_t val, ThreadID tid);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
2009-07-09 08:02:20 +02:00
|
|
|
void setFloatReg(int reg_idx, FloatReg val, ThreadID tid);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
2009-07-09 08:02:20 +02:00
|
|
|
void setFloatRegBits(int reg_idx, FloatRegBits val, ThreadID tid);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Reads a miscellaneous register. */
|
2009-05-26 18:23:13 +02:00
|
|
|
MiscReg readMiscRegNoEffect(int misc_reg, ThreadID tid = 0);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Reads a misc. register, including any side effects the read
|
|
|
|
* might have as defined by the architecture.
|
|
|
|
*/
|
2009-05-26 18:23:13 +02:00
|
|
|
MiscReg readMiscReg(int misc_reg, ThreadID tid = 0);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Sets a miscellaneous register. */
|
2009-05-26 18:23:13 +02:00
|
|
|
void setMiscRegNoEffect(int misc_reg, const MiscReg &val,
|
|
|
|
ThreadID tid = 0);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Sets a misc. register, including any side effects the write
|
|
|
|
* might have as defined by the architecture.
|
|
|
|
*/
|
2009-05-26 18:23:13 +02:00
|
|
|
void setMiscReg(int misc_reg, const MiscReg &val, ThreadID tid = 0);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Reads a int/fp/misc reg. from another thread depending on ISA-defined
|
|
|
|
* target thread
|
|
|
|
*/
|
2009-05-26 18:23:13 +02:00
|
|
|
uint64_t readRegOtherThread(unsigned misc_reg,
|
|
|
|
ThreadID tid = InvalidThreadID);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Sets a int/fp/misc reg. from another thread depending on an ISA-defined
|
|
|
|
* target thread
|
|
|
|
*/
|
2009-05-26 18:23:13 +02:00
|
|
|
void setRegOtherThread(unsigned misc_reg, const MiscReg &val,
|
|
|
|
ThreadID tid);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Reads the commit PC of a specific thread. */
|
ISA,CPU,etc: Create an ISA defined PC type that abstracts out ISA behaviors.
This change is a low level and pervasive reorganization of how PCs are managed
in M5. Back when Alpha was the only ISA, there were only 2 PCs to worry about,
the PC and the NPC, and the lsb of the PC signaled whether or not you were in
PAL mode. As other ISAs were added, we had to add an NNPC, micro PC and next
micropc, x86 and ARM introduced variable length instruction sets, and ARM
started to keep track of mode bits in the PC. Each CPU model handled PCs in
its own custom way that needed to be updated individually to handle the new
dimensions of variability, or, in the case of ARMs mode-bit-in-the-pc hack,
the complexity could be hidden in the ISA at the ISA implementation's expense.
Areas like the branch predictor hadn't been updated to handle branch delay
slots or micropcs, and it turns out that had introduced a significant (10s of
percent) performance bug in SPARC and to a lesser extend MIPS. Rather than
perpetuate the problem by reworking O3 again to handle the PC features needed
by x86, this change was introduced to rework PC handling in a more modular,
transparent, and hopefully efficient way.
PC type:
Rather than having the superset of all possible elements of PC state declared
in each of the CPU models, each ISA defines its own PCState type which has
exactly the elements it needs. A cross product of canned PCState classes are
defined in the new "generic" ISA directory for ISAs with/without delay slots
and microcode. These are either typedef-ed or subclassed by each ISA. To read
or write this structure through a *Context, you use the new pcState() accessor
which reads or writes depending on whether it has an argument. If you just
want the address of the current or next instruction or the current micro PC,
you can get those through read-only accessors on either the PCState type or
the *Contexts. These are instAddr(), nextInstAddr(), and microPC(). Note the
move away from readPC. That name is ambiguous since it's not clear whether or
not it should be the actual address to fetch from, or if it should have extra
bits in it like the PAL mode bit. Each class is free to define its own
functions to get at whatever values it needs however it needs to to be used in
ISA specific code. Eventually Alpha's PAL mode bit could be moved out of the
PC and into a separate field like ARM.
These types can be reset to a particular pc (where npc = pc +
sizeof(MachInst), nnpc = npc + sizeof(MachInst), upc = 0, nupc = 1 as
appropriate), printed, serialized, and compared. There is a branching()
function which encapsulates code in the CPU models that checked if an
instruction branched or not. Exactly what that means in the context of branch
delay slots which can skip an instruction when not taken is ambiguous, and
ideally this function and its uses can be eliminated. PCStates also generally
know how to advance themselves in various ways depending on if they point at
an instruction, a microop, or the last microop of a macroop. More on that
later.
Ideally, accessing all the PCs at once when setting them will improve
performance of M5 even though more data needs to be moved around. This is
because often all the PCs need to be manipulated together, and by getting them
all at once you avoid multiple function calls. Also, the PCs of a particular
thread will have spatial locality in the cache. Previously they were grouped
by element in arrays which spread out accesses.
Advancing the PC:
The PCs were previously managed entirely by the CPU which had to know about PC
semantics, try to figure out which dimension to increment the PC in, what to
set NPC/NNPC, etc. These decisions are best left to the ISA in conjunction
with the PC type itself. Because most of the information about how to
increment the PC (mainly what type of instruction it refers to) is contained
in the instruction object, a new advancePC virtual function was added to the
StaticInst class. Subclasses provide an implementation that moves around the
right element of the PC with a minimal amount of decision making. In ISAs like
Alpha, the instructions always simply assign NPC to PC without having to worry
about micropcs, nnpcs, etc. The added cost of a virtual function call should
be outweighed by not having to figure out as much about what to do with the
PCs and mucking around with the extra elements.
One drawback of making the StaticInsts advance the PC is that you have to
actually have one to advance the PC. This would, superficially, seem to
require decoding an instruction before fetch could advance. This is, as far as
I can tell, realistic. fetch would advance through memory addresses, not PCs,
perhaps predicting new memory addresses using existing ones. More
sophisticated decisions about control flow would be made later on, after the
instruction was decoded, and handed back to fetch. If branching needs to
happen, some amount of decoding needs to happen to see that it's a branch,
what the target is, etc. This could get a little more complicated if that gets
done by the predecoder, but I'm choosing to ignore that for now.
Variable length instructions:
To handle variable length instructions in x86 and ARM, the predecoder now
takes in the current PC by reference to the getExtMachInst function. It can
modify the PC however it needs to (by setting NPC to be the PC + instruction
length, for instance). This could be improved since the CPU doesn't know if
the PC was modified and always has to write it back.
ISA parser:
To support the new API, all PC related operand types were removed from the
parser and replaced with a PCState type. There are two warts on this
implementation. First, as with all the other operand types, the PCState still
has to have a valid operand type even though it doesn't use it. Second, using
syntax like PCS.npc(target) doesn't work for two reasons, this looks like the
syntax for operand type overriding, and the parser can't figure out if you're
reading or writing. Instructions that use the PCS operand (which I've
consistently called it) need to first read it into a local variable,
manipulate it, and then write it back out.
Return address stack:
The return address stack needed a little extra help because, in the presence
of branch delay slots, it has to merge together elements of the return PC and
the call PC. To handle that, a buildRetPC utility function was added. There
are basically only two versions in all the ISAs, but it didn't seem short
enough to put into the generic ISA directory. Also, the branch predictor code
in O3 and InOrder were adjusted so that they always store the PC of the actual
call instruction in the RAS, not the next PC. If the call instruction is a
microop, the next PC refers to the next microop in the same macroop which is
probably not desirable. The buildRetPC function advances the PC intelligently
to the next macroop (in an ISA specific way) so that that case works.
Change in stats:
There were no changes in stats except in MIPS and SPARC in the O3 model. MIPS
runs in about 9% fewer ticks. SPARC runs with 30%-50% fewer ticks, which could
likely be improved further by setting call/return instruction flags and taking
advantage of the RAS.
TODO:
Add != operators to the PCState classes, defined trivially to be !(a==b).
Smooth out places where PCs are split apart, passed around, and put back
together later. I think this might happen in SPARC's fault code. Add ISA
specific constructors that allow setting PC elements without calling a bunch
of accessors. Try to eliminate the need for the branching() function. Factor
out Alpha's PAL mode pc bit into a separate flag field, and eliminate places
where it's blindly masked out or tested in the PC.
2010-10-31 08:07:20 +01:00
|
|
|
TheISA::PCState
|
|
|
|
pcState(ThreadID tid)
|
|
|
|
{
|
|
|
|
return pc[tid];
|
|
|
|
}
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Sets the commit PC of a specific thread. */
|
ISA,CPU,etc: Create an ISA defined PC type that abstracts out ISA behaviors.
This change is a low level and pervasive reorganization of how PCs are managed
in M5. Back when Alpha was the only ISA, there were only 2 PCs to worry about,
the PC and the NPC, and the lsb of the PC signaled whether or not you were in
PAL mode. As other ISAs were added, we had to add an NNPC, micro PC and next
micropc, x86 and ARM introduced variable length instruction sets, and ARM
started to keep track of mode bits in the PC. Each CPU model handled PCs in
its own custom way that needed to be updated individually to handle the new
dimensions of variability, or, in the case of ARMs mode-bit-in-the-pc hack,
the complexity could be hidden in the ISA at the ISA implementation's expense.
Areas like the branch predictor hadn't been updated to handle branch delay
slots or micropcs, and it turns out that had introduced a significant (10s of
percent) performance bug in SPARC and to a lesser extend MIPS. Rather than
perpetuate the problem by reworking O3 again to handle the PC features needed
by x86, this change was introduced to rework PC handling in a more modular,
transparent, and hopefully efficient way.
PC type:
Rather than having the superset of all possible elements of PC state declared
in each of the CPU models, each ISA defines its own PCState type which has
exactly the elements it needs. A cross product of canned PCState classes are
defined in the new "generic" ISA directory for ISAs with/without delay slots
and microcode. These are either typedef-ed or subclassed by each ISA. To read
or write this structure through a *Context, you use the new pcState() accessor
which reads or writes depending on whether it has an argument. If you just
want the address of the current or next instruction or the current micro PC,
you can get those through read-only accessors on either the PCState type or
the *Contexts. These are instAddr(), nextInstAddr(), and microPC(). Note the
move away from readPC. That name is ambiguous since it's not clear whether or
not it should be the actual address to fetch from, or if it should have extra
bits in it like the PAL mode bit. Each class is free to define its own
functions to get at whatever values it needs however it needs to to be used in
ISA specific code. Eventually Alpha's PAL mode bit could be moved out of the
PC and into a separate field like ARM.
These types can be reset to a particular pc (where npc = pc +
sizeof(MachInst), nnpc = npc + sizeof(MachInst), upc = 0, nupc = 1 as
appropriate), printed, serialized, and compared. There is a branching()
function which encapsulates code in the CPU models that checked if an
instruction branched or not. Exactly what that means in the context of branch
delay slots which can skip an instruction when not taken is ambiguous, and
ideally this function and its uses can be eliminated. PCStates also generally
know how to advance themselves in various ways depending on if they point at
an instruction, a microop, or the last microop of a macroop. More on that
later.
Ideally, accessing all the PCs at once when setting them will improve
performance of M5 even though more data needs to be moved around. This is
because often all the PCs need to be manipulated together, and by getting them
all at once you avoid multiple function calls. Also, the PCs of a particular
thread will have spatial locality in the cache. Previously they were grouped
by element in arrays which spread out accesses.
Advancing the PC:
The PCs were previously managed entirely by the CPU which had to know about PC
semantics, try to figure out which dimension to increment the PC in, what to
set NPC/NNPC, etc. These decisions are best left to the ISA in conjunction
with the PC type itself. Because most of the information about how to
increment the PC (mainly what type of instruction it refers to) is contained
in the instruction object, a new advancePC virtual function was added to the
StaticInst class. Subclasses provide an implementation that moves around the
right element of the PC with a minimal amount of decision making. In ISAs like
Alpha, the instructions always simply assign NPC to PC without having to worry
about micropcs, nnpcs, etc. The added cost of a virtual function call should
be outweighed by not having to figure out as much about what to do with the
PCs and mucking around with the extra elements.
One drawback of making the StaticInsts advance the PC is that you have to
actually have one to advance the PC. This would, superficially, seem to
require decoding an instruction before fetch could advance. This is, as far as
I can tell, realistic. fetch would advance through memory addresses, not PCs,
perhaps predicting new memory addresses using existing ones. More
sophisticated decisions about control flow would be made later on, after the
instruction was decoded, and handed back to fetch. If branching needs to
happen, some amount of decoding needs to happen to see that it's a branch,
what the target is, etc. This could get a little more complicated if that gets
done by the predecoder, but I'm choosing to ignore that for now.
Variable length instructions:
To handle variable length instructions in x86 and ARM, the predecoder now
takes in the current PC by reference to the getExtMachInst function. It can
modify the PC however it needs to (by setting NPC to be the PC + instruction
length, for instance). This could be improved since the CPU doesn't know if
the PC was modified and always has to write it back.
ISA parser:
To support the new API, all PC related operand types were removed from the
parser and replaced with a PCState type. There are two warts on this
implementation. First, as with all the other operand types, the PCState still
has to have a valid operand type even though it doesn't use it. Second, using
syntax like PCS.npc(target) doesn't work for two reasons, this looks like the
syntax for operand type overriding, and the parser can't figure out if you're
reading or writing. Instructions that use the PCS operand (which I've
consistently called it) need to first read it into a local variable,
manipulate it, and then write it back out.
Return address stack:
The return address stack needed a little extra help because, in the presence
of branch delay slots, it has to merge together elements of the return PC and
the call PC. To handle that, a buildRetPC utility function was added. There
are basically only two versions in all the ISAs, but it didn't seem short
enough to put into the generic ISA directory. Also, the branch predictor code
in O3 and InOrder were adjusted so that they always store the PC of the actual
call instruction in the RAS, not the next PC. If the call instruction is a
microop, the next PC refers to the next microop in the same macroop which is
probably not desirable. The buildRetPC function advances the PC intelligently
to the next macroop (in an ISA specific way) so that that case works.
Change in stats:
There were no change in stats except in MIPS and SPARC in the O3 model. MIPS
runs in about 9% fewer ticks. SPARC runs with 30%-50% fewer ticks, which could
likely be improved further by setting call/return instruction flags and taking
advantage of the RAS.
TODO:
Add != operators to the PCState classes, defined trivially to be !(a==b).
Smooth out places where PCs are split apart, passed around, and put back
together later. I think this might happen in SPARC's fault code. Add ISA
specific constructors that allow setting PC elements without calling a bunch
of accessors. Try to eliminate the need for the branching() function. Factor
out Alpha's PAL mode pc bit into a separate flag field, and eliminate places
where it's blindly masked out or tested in the PC.
2010-10-31 08:07:20 +01:00
|
|
|
void
|
|
|
|
pcState(const TheISA::PCState &newPC, ThreadID tid)
|
|
|
|
{
|
|
|
|
pc[tid] = newPC;
|
|
|
|
}
|
2009-02-11 00:49:29 +01:00
|
|
|
|
ISA,CPU,etc: Create an ISA defined PC type that abstracts out ISA behaviors.
This change is a low level and pervasive reorganization of how PCs are managed
in M5. Back when Alpha was the only ISA, there were only 2 PCs to worry about,
the PC and the NPC, and the lsb of the PC signaled whether or not you were in
PAL mode. As other ISAs were added, we had to add an NNPC, micro PC and next
micropc, x86 and ARM introduced variable length instruction sets, and ARM
started to keep track of mode bits in the PC. Each CPU model handled PCs in
its own custom way that needed to be updated individually to handle the new
dimensions of variability, or, in the case of ARMs mode-bit-in-the-pc hack,
the complexity could be hidden in the ISA at the ISA implementation's expense.
Areas like the branch predictor hadn't been updated to handle branch delay
slots or micropcs, and it turns out that had introduced a significant (10s of
percent) performance bug in SPARC and to a lesser extend MIPS. Rather than
perpetuate the problem by reworking O3 again to handle the PC features needed
by x86, this change was introduced to rework PC handling in a more modular,
transparent, and hopefully efficient way.
PC type:
Rather than having the superset of all possible elements of PC state declared
in each of the CPU models, each ISA defines its own PCState type which has
exactly the elements it needs. A cross product of canned PCState classes are
defined in the new "generic" ISA directory for ISAs with/without delay slots
and microcode. These are either typedef-ed or subclassed by each ISA. To read
or write this structure through a *Context, you use the new pcState() accessor
which reads or writes depending on whether it has an argument. If you just
want the address of the current or next instruction or the current micro PC,
you can get those through read-only accessors on either the PCState type or
the *Contexts. These are instAddr(), nextInstAddr(), and microPC(). Note the
move away from readPC. That name is ambiguous since it's not clear whether or
not it should be the actual address to fetch from, or if it should have extra
bits in it like the PAL mode bit. Each class is free to define its own
functions to get at whatever values it needs however it needs to to be used in
ISA specific code. Eventually Alpha's PAL mode bit could be moved out of the
PC and into a separate field like ARM.
These types can be reset to a particular pc (where npc = pc +
sizeof(MachInst), nnpc = npc + sizeof(MachInst), upc = 0, nupc = 1 as
appropriate), printed, serialized, and compared. There is a branching()
function which encapsulates code in the CPU models that checked if an
instruction branched or not. Exactly what that means in the context of branch
delay slots which can skip an instruction when not taken is ambiguous, and
ideally this function and its uses can be eliminated. PCStates also generally
know how to advance themselves in various ways depending on if they point at
an instruction, a microop, or the last microop of a macroop. More on that
later.
Ideally, accessing all the PCs at once when setting them will improve
performance of M5 even though more data needs to be moved around. This is
because often all the PCs need to be manipulated together, and by getting them
all at once you avoid multiple function calls. Also, the PCs of a particular
thread will have spatial locality in the cache. Previously they were grouped
by element in arrays which spread out accesses.
Advancing the PC:
The PCs were previously managed entirely by the CPU which had to know about PC
semantics, try to figure out which dimension to increment the PC in, what to
set NPC/NNPC, etc. These decisions are best left to the ISA in conjunction
with the PC type itself. Because most of the information about how to
increment the PC (mainly what type of instruction it refers to) is contained
in the instruction object, a new advancePC virtual function was added to the
StaticInst class. Subclasses provide an implementation that moves around the
right element of the PC with a minimal amount of decision making. In ISAs like
Alpha, the instructions always simply assign NPC to PC without having to worry
about micropcs, nnpcs, etc. The added cost of a virtual function call should
be outweighed by not having to figure out as much about what to do with the
PCs and mucking around with the extra elements.
One drawback of making the StaticInsts advance the PC is that you have to
actually have one to advance the PC. This would, superficially, seem to
require decoding an instruction before fetch could advance. This is, as far as
I can tell, realistic. fetch would advance through memory addresses, not PCs,
perhaps predicting new memory addresses using existing ones. More
sophisticated decisions about control flow would be made later on, after the
instruction was decoded, and handed back to fetch. If branching needs to
happen, some amount of decoding needs to happen to see that it's a branch,
what the target is, etc. This could get a little more complicated if that gets
done by the predecoder, but I'm choosing to ignore that for now.
Variable length instructions:
To handle variable length instructions in x86 and ARM, the predecoder now
takes in the current PC by reference to the getExtMachInst function. It can
modify the PC however it needs to (by setting NPC to be the PC + instruction
length, for instance). This could be improved since the CPU doesn't know if
the PC was modified and always has to write it back.
ISA parser:
To support the new API, all PC related operand types were removed from the
parser and replaced with a PCState type. There are two warts on this
implementation. First, as with all the other operand types, the PCState still
has to have a valid operand type even though it doesn't use it. Second, using
syntax like PCS.npc(target) doesn't work for two reasons, this looks like the
syntax for operand type overriding, and the parser can't figure out if you're
reading or writing. Instructions that use the PCS operand (which I've
consistently called it) need to first read it into a local variable,
manipulate it, and then write it back out.
Return address stack:
The return address stack needed a little extra help because, in the presence
of branch delay slots, it has to merge together elements of the return PC and
the call PC. To handle that, a buildRetPC utility function was added. There
are basically only two versions in all the ISAs, but it didn't seem short
enough to put into the generic ISA directory. Also, the branch predictor code
in O3 and InOrder were adjusted so that they always store the PC of the actual
call instruction in the RAS, not the next PC. If the call instruction is a
microop, the next PC refers to the next microop in the same macroop which is
probably not desirable. The buildRetPC function advances the PC intelligently
to the next macroop (in an ISA specific way) so that that case works.
Change in stats:
There were no change in stats except in MIPS and SPARC in the O3 model. MIPS
runs in about 9% fewer ticks. SPARC runs with 30%-50% fewer ticks, which could
likely be improved further by setting call/return instruction flags and taking
advantage of the RAS.
TODO:
Add != operators to the PCState classes, defined trivially to be !(a==b).
Smooth out places where PCs are split apart, passed around, and put back
together later. I think this might happen in SPARC's fault code. Add ISA
specific constructors that allow setting PC elements without calling a bunch
of accessors. Try to eliminate the need for the branching() function. Factor
out Alpha's PAL mode pc bit into a separate flag field, and eliminate places
where it's blindly masked out or tested in the PC.
2010-10-31 08:07:20 +01:00
|
|
|
Addr instAddr(ThreadID tid) { return pc[tid].instAddr(); }
|
|
|
|
Addr nextInstAddr(ThreadID tid) { return pc[tid].nextInstAddr(); }
|
|
|
|
MicroPC microPC(ThreadID tid) { return pc[tid].microPC(); }
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Function to add instruction onto the head of the list of the
|
|
|
|
* instructions. Used when new instructions are fetched.
|
|
|
|
*/
|
|
|
|
ListIt addInst(DynInstPtr &inst);
|
|
|
|
|
|
|
|
/** Function to tell the CPU that an instruction has completed. */
|
2009-05-26 18:23:13 +02:00
|
|
|
void instDone(DynInstPtr inst, ThreadID tid);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Add Instructions to the CPU Remove List*/
|
|
|
|
void addToRemoveList(DynInstPtr &inst);
|
|
|
|
|
|
|
|
/** Remove an instruction from CPU */
|
|
|
|
void removeInst(DynInstPtr &inst);
|
|
|
|
|
|
|
|
/** Remove all instructions younger than the given sequence number. */
|
2009-05-26 18:23:13 +02:00
|
|
|
void removeInstsUntil(const InstSeqNum &seq_num,ThreadID tid);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Removes the instruction pointed to by the iterator. */
|
2009-05-26 18:23:13 +02:00
|
|
|
inline void squashInstIt(const ListIt &instIt, ThreadID tid);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Cleans up all instructions on the instruction remove list. */
|
|
|
|
void cleanUpRemovedInsts();
|
|
|
|
|
2011-02-18 20:28:10 +01:00
|
|
|
/** Cleans up all events on the CPU event remove list. */
|
2009-02-11 00:49:29 +01:00
|
|
|
void cleanUpRemovedEvents();
|
|
|
|
|
|
|
|
/** Debug function to print all instructions on the list. */
|
|
|
|
void dumpInsts();
|
|
|
|
|
|
|
|
/** Forwards an instruction read to the appropriate data
|
|
|
|
* resource (indexes into Resource Pool thru "dataPortIdx")
|
|
|
|
*/
|
2010-08-13 15:16:02 +02:00
|
|
|
Fault read(DynInstPtr inst, Addr addr,
|
|
|
|
uint8_t *data, unsigned size, unsigned flags);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Forwards an instruction write. to the appropriate data
|
|
|
|
* resource (indexes into Resource Pool thru "dataPortIdx")
|
|
|
|
*/
|
2010-08-13 15:16:02 +02:00
|
|
|
Fault write(DynInstPtr inst, uint8_t *data, unsigned size,
|
|
|
|
Addr addr, unsigned flags, uint64_t *write_res = NULL);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Executes a syscall.*/
|
2009-05-26 18:23:13 +02:00
|
|
|
void syscall(int64_t callnum, ThreadID tid);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
public:
|
|
|
|
/** Per-Thread List of all the instructions in flight. */
|
|
|
|
std::list<DynInstPtr> instList[ThePipeline::MaxThreads];
|
|
|
|
|
|
|
|
/** List of all the instructions that will be removed at the end of this
|
|
|
|
* cycle.
|
|
|
|
*/
|
|
|
|
std::queue<ListIt> removeList;
|
|
|
|
|
|
|
|
/** List of all the cpu event requests that will be removed at the end of
|
|
|
|
* the current cycle.
|
|
|
|
*/
|
|
|
|
std::queue<Event*> cpuEventRemoveList;
|
|
|
|
|
|
|
|
/** Records if instructions need to be removed this cycle due to
|
|
|
|
* being retired or squashed.
|
|
|
|
*/
|
|
|
|
bool removeInstsThisCycle;
|
|
|
|
|
|
|
|
/** True if there is non-speculative Inst Active In Pipeline. Lets any
|
|
|
|
* execution unit know, NOT to execute while the instruction is active.
|
|
|
|
*/
|
|
|
|
bool nonSpecInstActive[ThePipeline::MaxThreads];
|
|
|
|
|
|
|
|
/** Instruction Seq. Num of current non-speculative instruction. */
|
|
|
|
InstSeqNum nonSpecSeqNum[ThePipeline::MaxThreads];
|
|
|
|
|
|
|
|
/** Instruction Seq. Num of last instruction squashed in pipeline */
|
|
|
|
InstSeqNum squashSeqNum[ThePipeline::MaxThreads];
|
|
|
|
|
|
|
|
/** Last Cycle that the CPU squashed instruction end. */
|
|
|
|
Tick lastSquashCycle[ThePipeline::MaxThreads];
|
|
|
|
|
2009-05-26 18:23:13 +02:00
|
|
|
std::list<ThreadID> fetchPriorityList;
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
protected:
|
|
|
|
/** Active Threads List */
|
2009-05-26 18:23:13 +02:00
|
|
|
std::list<ThreadID> activeThreads;
|
2009-02-11 00:49:29 +01:00
|
|
|
|
2010-02-01 00:26:32 +01:00
|
|
|
/** Ready Threads List */
|
|
|
|
std::list<ThreadID> readyThreads;
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
/** Suspended Threads List */
|
2009-05-26 18:23:13 +02:00
|
|
|
std::list<ThreadID> suspendedThreads;
|
2009-02-11 00:49:29 +01:00
|
|
|
|
2010-02-01 00:28:05 +01:00
|
|
|
/** Halted Threads List */
|
|
|
|
std::list<ThreadID> haltedThreads;
|
|
|
|
|
2010-02-01 00:26:40 +01:00
|
|
|
/** Thread Status Functions */
|
2009-05-26 18:23:13 +02:00
|
|
|
bool isThreadActive(ThreadID tid);
|
2010-02-01 00:26:47 +01:00
|
|
|
bool isThreadReady(ThreadID tid);
|
2009-05-26 18:23:13 +02:00
|
|
|
bool isThreadSuspended(ThreadID tid);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
private:
|
|
|
|
/** The activity recorder; used to tell if the CPU has any
|
|
|
|
* activity remaining or if it can go to idle and deschedule
|
|
|
|
* itself.
|
|
|
|
*/
|
|
|
|
ActivityRecorder activityRec;
|
|
|
|
|
|
|
|
public:
|
|
|
|
/** Number of Active Threads in the CPU */
|
2009-05-26 18:23:13 +02:00
|
|
|
ThreadID numActiveThreads() { return activeThreads.size(); }
|
2009-02-11 00:49:29 +01:00
|
|
|
|
2010-02-01 00:26:32 +01:00
|
|
|
/** Thread id of active thread
|
2010-02-01 00:26:40 +01:00
|
|
|
* Only used for SwitchOnCacheMiss model.
|
|
|
|
* Assumes only 1 thread active
|
2010-02-01 00:26:32 +01:00
|
|
|
*/
|
|
|
|
ThreadID activeThreadId()
|
|
|
|
{
|
|
|
|
if (numActiveThreads() > 0)
|
|
|
|
return activeThreads.front();
|
|
|
|
else
|
2010-02-01 00:26:54 +01:00
|
|
|
return InvalidThreadID;
|
2010-02-01 00:26:32 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
/** Records that there was time buffer activity this cycle. */
|
|
|
|
void activityThisCycle() { activityRec.activity(); }
|
|
|
|
|
|
|
|
/** Changes a stage's status to active within the activity recorder. */
|
|
|
|
void activateStage(const int idx)
|
|
|
|
{ activityRec.activateStage(idx); }
|
|
|
|
|
|
|
|
/** Changes a stage's status to inactive within the activity recorder. */
|
|
|
|
void deactivateStage(const int idx)
|
|
|
|
{ activityRec.deactivateStage(idx); }
|
|
|
|
|
|
|
|
/** Wakes the CPU, rescheduling the CPU if it's not already active. */
|
|
|
|
void wakeCPU();
|
|
|
|
|
2009-09-15 07:44:48 +02:00
|
|
|
#if FULL_SYSTEM
|
|
|
|
virtual void wakeup();
|
|
|
|
#endif
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
// LL/SC debug functionality
|
|
|
|
unsigned stCondFails;
|
2010-01-31 23:18:15 +01:00
|
|
|
|
|
|
|
unsigned readStCondFailures()
|
|
|
|
{ return stCondFails; }
|
|
|
|
|
|
|
|
unsigned setStCondFailures(unsigned st_fails)
|
|
|
|
{ return stCondFails = st_fails; }
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Returns a pointer to a thread context. */
|
2009-05-26 18:23:13 +02:00
|
|
|
ThreadContext *tcBase(ThreadID tid = 0)
|
2009-02-11 00:49:29 +01:00
|
|
|
{
|
|
|
|
return thread[tid]->getTC();
|
|
|
|
}
|
|
|
|
|
2009-05-05 08:39:05 +02:00
|
|
|
/** Count the Total Instructions Committed in the CPU. */
|
|
|
|
virtual Counter totalInstructions() const
|
|
|
|
{
|
|
|
|
Counter total(0);
|
|
|
|
|
2009-06-05 08:21:12 +02:00
|
|
|
for (ThreadID tid = 0; tid < (ThreadID)thread.size(); tid++)
|
2009-05-26 18:23:13 +02:00
|
|
|
total += thread[tid]->numInst;
|
2009-05-05 08:39:05 +02:00
|
|
|
|
|
|
|
return total;
|
|
|
|
}
|
|
|
|
|
2009-09-15 07:44:48 +02:00
|
|
|
#if FULL_SYSTEM
|
|
|
|
/** Pointer to the system. */
|
|
|
|
System *system;
|
|
|
|
|
|
|
|
/** Pointer to physical memory. */
|
|
|
|
PhysicalMemory *physmem;
|
|
|
|
#endif
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
/** The global sequence number counter. */
|
|
|
|
InstSeqNum globalSeqNum[ThePipeline::MaxThreads];
|
|
|
|
|
2010-01-31 23:18:15 +01:00
|
|
|
#ifdef DEBUG
|
2009-02-11 00:49:29 +01:00
|
|
|
/** The global event number counter. */
|
|
|
|
InstSeqNum cpuEventNum;
|
|
|
|
|
2010-01-31 23:18:15 +01:00
|
|
|
/** Number of resource requests active in CPU **/
|
|
|
|
unsigned resReqCount;
|
|
|
|
#endif
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
/** Counter of how many stages have completed switching out. */
|
|
|
|
int switchCount;
|
|
|
|
|
|
|
|
/** Pointers to all of the threads in the CPU. */
|
|
|
|
std::vector<Thread *> thread;
|
|
|
|
|
|
|
|
/** Pointer to the icache interface. */
|
|
|
|
MemInterface *icacheInterface;
|
2009-03-04 19:17:08 +01:00
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
/** Pointer to the dcache interface. */
|
|
|
|
MemInterface *dcacheInterface;
|
|
|
|
|
|
|
|
/** Whether or not the CPU should defer its registration. */
|
|
|
|
bool deferRegistration;
|
|
|
|
|
|
|
|
/** Per-Stage Instruction Tracing */
|
|
|
|
bool stageTracing;
|
|
|
|
|
|
|
|
/** The cycle that the CPU was last running, used for statistics. */
|
|
|
|
Tick lastRunningCycle;
|
|
|
|
|
2010-02-01 00:28:59 +01:00
|
|
|
void updateContextSwitchStats();
|
|
|
|
unsigned instsPerSwitch;
|
|
|
|
Stats::Average instsPerCtxtSwitch;
|
|
|
|
Stats::Scalar numCtxtSwitches;
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
/** Update Thread , used for statistic purposes*/
|
|
|
|
inline void tickThreadStats();
|
|
|
|
|
|
|
|
/** Per-Thread Tick */
|
2009-03-06 04:09:53 +01:00
|
|
|
Stats::Vector threadCycles;
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Tick for SMT */
|
2009-03-06 04:09:53 +01:00
|
|
|
Stats::Scalar smtCycles;
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Stat for total number of times the CPU is descheduled. */
|
2009-03-06 04:09:53 +01:00
|
|
|
Stats::Scalar timesIdled;
|
2009-02-11 00:49:29 +01:00
|
|
|
|
2010-06-24 21:34:12 +02:00
|
|
|
/** Stat for total number of cycles the CPU spends descheduled or no
|
|
|
|
* stages active.
|
|
|
|
*/
|
2009-03-06 04:09:53 +01:00
|
|
|
Stats::Scalar idleCycles;
|
2009-02-11 00:49:29 +01:00
|
|
|
|
2010-02-01 00:30:24 +01:00
|
|
|
/** Stat for total number of cycles the CPU is active. */
|
|
|
|
Stats::Scalar runCycles;
|
|
|
|
|
|
|
|
/** Percentage of cycles a stage was active */
|
|
|
|
Stats::Formula activity;
|
|
|
|
|
2010-06-24 00:18:20 +02:00
|
|
|
/** Instruction Mix Stats */
|
|
|
|
Stats::Scalar comLoads;
|
|
|
|
Stats::Scalar comStores;
|
|
|
|
Stats::Scalar comBranches;
|
|
|
|
Stats::Scalar comNops;
|
|
|
|
Stats::Scalar comNonSpec;
|
|
|
|
Stats::Scalar comInts;
|
|
|
|
Stats::Scalar comFloats;
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
/** Stat for the number of committed instructions per thread. */
|
2009-03-06 04:09:53 +01:00
|
|
|
Stats::Vector committedInsts;
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Stat for the number of committed instructions per thread. */
|
2009-03-06 04:09:53 +01:00
|
|
|
Stats::Vector smtCommittedInsts;
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Stat for the total number of committed instructions. */
|
2009-03-06 04:09:53 +01:00
|
|
|
Stats::Scalar totalCommittedInsts;
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
/** Stat for the CPI per thread. */
|
|
|
|
Stats::Formula cpi;
|
|
|
|
|
|
|
|
/** Stat for the SMT-CPI per thread. */
|
|
|
|
Stats::Formula smtCpi;
|
|
|
|
|
|
|
|
/** Stat for the total CPI. */
|
|
|
|
Stats::Formula totalCpi;
|
|
|
|
|
|
|
|
/** Stat for the IPC per thread. */
|
|
|
|
Stats::Formula ipc;
|
|
|
|
|
|
|
|
/** Stat for the total IPC. */
|
|
|
|
Stats::Formula smtIpc;
|
|
|
|
|
|
|
|
/** Stat for the total IPC. */
|
|
|
|
Stats::Formula totalIpc;
|
|
|
|
};
|
|
|
|
|
|
|
|
#endif // __CPU_INORDER_CPU_HH__
|