inorder: clean up the old way of inst. scheduling

remove remnants of the old way of instruction scheduling, which dynamically
allocated a new resource schedule for every instruction
Korey Sewell 2011-02-12 10:14:48 -05:00
parent e26aee514d
commit 470aa289da
8 changed files with 35 additions and 266 deletions
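
The change moves resources off the per-instruction resSched: call sites now append entries to the instruction's frontSked or backSked, chosen by stage number. A minimal sketch of that idiom, assembled from the CacheUnit and InstBuffer hunks below (it shows only what those hunks already do; stage_pri, isplit_cmd and the DCache index are taken from them):

    // Pick the schedule that owns this stage, then append an entry to it.
    RSkedPtr inst_sked = (stage_num >= ThePipeline::BackEndStartStage) ?
        inst->backSked : inst->frontSked;

    // Priorities are now fixed at the call site (e.g. 20, to push the entry
    // to the back of the list for its stage) instead of being computed per
    // instruction by ThePipeline::getNextPriority().
    inst_sked->push(new ScheduleEntry(stage_num,
                                      stage_pri,
                                      cpu->resPool->getResIdx(DCache),
                                      isplit_cmd,
                                      1));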

View file

@ -63,7 +63,6 @@ if 'InOrderCPU' in env['CPU_MODELS']:
'InOrderGraduation', 'InOrderCachePort', 'RegDepMap', 'Resource',
'ThreadModel', 'AddrDep'])
Source('pipeline_traits.cc')
Source('inorder_dyn_inst.cc')
Source('inorder_cpu_builder.cc')
Source('inorder_trace.cc')

View file

@ -1415,14 +1415,6 @@ InOrderCPU::cleanUpRemovedInsts()
DynInstPtr inst = *removeList.front();
ThreadID tid = inst->threadNumber;

// Make Sure Resource Schedule Is Emptied Out
ThePipeline::ResSchedule *inst_sched = &inst->resSched;
while (!inst_sched->empty()) {
    ScheduleEntry* sch_entry = inst_sched->top();
    inst_sched->pop();
    delete sch_entry;
}

// Remove From Register Dependency Map, If Necessary
archRegDepMap[(*removeList.front())->threadNumber].
    remove((*removeList.front()));
@ -1430,8 +1422,8 @@ InOrderCPU::cleanUpRemovedInsts()
// Clear if Non-Speculative
if (inst->staticInst &&
    inst->seqNum == nonSpecSeqNum[tid] &&
    nonSpecInstActive[tid] == true) {
    nonSpecInstActive[tid] = false;
}
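
The loop removed at the top of this hunk was the tear-down half of the old scheme: each instruction's resSched was a priority queue of ScheduleEntry pointers allocated one by one in InstStage::needs() and freed by hand when the instruction was cleaned up. A self-contained sketch of that ownership pattern, using stand-in types rather than the real gem5 classes (the ordering rule in entryCompare below is an assumption for illustration):

    #include <queue>
    #include <vector>

    struct ScheduleEntry {
        int stageNum;
        int priority;
        int resNum;
        int cmd;
        ScheduleEntry(int stage, int pri, int res, int command)
            : stageNum(stage), priority(pri), resNum(res), cmd(command) { }
    };

    // Assumed ordering: earlier stages first, then lower priority values first.
    struct entryCompare {
        bool operator()(const ScheduleEntry *lhs, const ScheduleEntry *rhs) const
        {
            if (lhs->stageNum != rhs->stageNum)
                return lhs->stageNum > rhs->stageNum;
            return lhs->priority > rhs->priority;
        }
    };

    typedef std::priority_queue<ScheduleEntry*, std::vector<ScheduleEntry*>,
                                entryCompare> ResSchedule;

    // Mirrors the deleted loop: every entry was new'd when the schedule was
    // built and had to be popped and deleted before the instruction could die.
    void
    drainSchedule(ResSchedule &sched)
    {
        while (!sched.empty()) {
            ScheduleEntry *entry = sched.top();
            sched.pop();
            delete entry;
        }
    }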

View file

@ -125,7 +125,6 @@ InOrderDynInst::initVars()
readyRegs = 0;
nextStage = 0;
nextInstStageNum = 0;
for(int i = 0; i < MaxInstDestRegs; i++)
    instResult[i].val.integer = 0;
@ -208,8 +207,6 @@ InOrderDynInst::~InOrderDynInst()
--instcount;
deleteStages();
DPRINTF(InOrderDynInst, "DynInst: [tid:%i] [sn:%lli] Instruction destroyed"
" (active insts: %i)\n", threadNumber, seqNum, instcount);
}
@ -284,29 +281,6 @@ InOrderDynInst::completeAcc(Packet *pkt)
return this->fault;
}
InstStage *InOrderDynInst::addStage()
{
    this->currentInstStage = new InstStage(this, nextInstStageNum++);
    instStageList.push_back( this->currentInstStage );
    return this->currentInstStage;
}

InstStage *InOrderDynInst::addStage(int stage_num)
{
    nextInstStageNum = stage_num;
    return InOrderDynInst::addStage();
}

void InOrderDynInst::deleteStages() {
    std::list<InstStage*>::iterator list_it = instStageList.begin();
    std::list<InstStage*>::iterator list_end = instStageList.end();

    while(list_it != list_end) {
        delete *list_it;
        list_it++;
    }
}
Fault
InOrderDynInst::memAccess()
{

View file

@ -210,9 +210,6 @@ class InOrderDynInst : public FastAlloc, public RefCounted
/** Data used for a store for operation. */
uint64_t storeData;
/** The resource schedule for this inst */
ThePipeline::ResSchedule resSched;
/** List of active resource requests for this instruction */
std::list<ResourceRequest*> reqList;
@ -304,11 +301,6 @@ class InOrderDynInst : public FastAlloc, public RefCounted
int nextStage;
/* vars to keep track of InstStage's - used for resource sched defn */
int nextInstStageNum;
ThePipeline::InstStage *currentInstStage;
std::list<ThePipeline::InstStage*> instStageList;
private:
/** Function to initialize variables in the constructors. */
void initVars();
@ -445,20 +437,9 @@ class InOrderDynInst : public FastAlloc, public RefCounted
backSked_end = backSked->end();
}
void setNextStage(int stage_num) { nextStage = stage_num; }
int getNextStage() { return nextStage; }
ThePipeline::InstStage *addStage();
ThePipeline::InstStage *addStage(int stage);
ThePipeline::InstStage *currentStage() { return currentInstStage; }
void deleteStages();
/** Add An Entry To Resource Schedule */
void addToSched(ScheduleEntry* sched_entry)
{ resSched.push(sched_entry); }
/** Print Resource Schedule */
void printSked()
{

View file

@ -1,171 +0,0 @@
/*
* Copyright (c) 2007 MIPS Technologies, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Korey Sewell
*
*/
#include "cpu/inorder/pipeline_traits.hh"
#include "cpu/inorder/inorder_dyn_inst.hh"
#include "cpu/inorder/resources/resource_list.hh"
using namespace std;
namespace ThePipeline {
//@TODO: create my own Instruction Schedule Class
//that operates as a Priority QUEUE
int getNextPriority(DynInstPtr &inst, int stage_num)
{
    int cur_pri = 20;

    /*
    std::priority_queue<ScheduleEntry*, std::vector<ScheduleEntry*>,
        entryCompare>::iterator sked_it = inst->resSched.begin();
    std::priority_queue<ScheduleEntry*, std::vector<ScheduleEntry*>,
        entryCompare>::iterator sked_end = inst->resSched.end();

    while (sked_it != sked_end) {
        if (sked_it.top()->stageNum == stage_num) {
            cur_pri = sked_it.top()->priority;
        }
        sked_it++;
    }
    */

    return cur_pri;
}

void createFrontEndSchedule(DynInstPtr &inst)
{
    InstStage *F = inst->addStage();
    InstStage *D = inst->addStage();

    // FETCH
    F->needs(FetchSeq, FetchSeqUnit::AssignNextPC);
    F->needs(ICache, FetchUnit::InitiateFetch);

    // DECODE
    D->needs(ICache, FetchUnit::CompleteFetch);
    D->needs(Decode, DecodeUnit::DecodeInst);
    D->needs(BPred, BranchPredictor::PredictBranch);
    D->needs(FetchSeq, FetchSeqUnit::UpdateTargetPC);

    inst->resSched.init();
}

bool createBackEndSchedule(DynInstPtr &inst)
{
    if (!inst->staticInst) {
        return false;
    }

    InstStage *X = inst->addStage();
    InstStage *M = inst->addStage();
    InstStage *W = inst->addStage();

    // EXECUTE
    for (int idx=0; idx < inst->numSrcRegs(); idx++) {
        if (!idx || !inst->isStore()) {
            X->needs(RegManager, UseDefUnit::ReadSrcReg, idx);
        }
    }

    if ( inst->isNonSpeculative() ) {
        // skip execution of non speculative insts until later
    } else if ( inst->isMemRef() ) {
        if ( inst->isLoad() ) {
            X->needs(AGEN, AGENUnit::GenerateAddr);
        }
    } else if (inst->opClass() == IntMultOp || inst->opClass() == IntDivOp) {
        X->needs(MDU, MultDivUnit::StartMultDiv);
    } else {
        X->needs(ExecUnit, ExecutionUnit::ExecuteInst);
    }

    if (inst->opClass() == IntMultOp || inst->opClass() == IntDivOp) {
        X->needs(MDU, MultDivUnit::EndMultDiv);
    }

    // MEMORY
    if ( inst->isLoad() ) {
        M->needs(DCache, CacheUnit::InitiateReadData);
    } else if ( inst->isStore() ) {
        if ( inst->numSrcRegs() >= 2 ) {
            M->needs(RegManager, UseDefUnit::ReadSrcReg, 1);
        }
        M->needs(AGEN, AGENUnit::GenerateAddr);
        M->needs(DCache, CacheUnit::InitiateWriteData);
    }

    // WRITEBACK
    if ( inst->isLoad() ) {
        W->needs(DCache, CacheUnit::CompleteReadData);
    } else if ( inst->isStore() ) {
        W->needs(DCache, CacheUnit::CompleteWriteData);
    }

    if ( inst->isNonSpeculative() ) {
        if ( inst->isMemRef() ) fatal("Non-Speculative Memory Instruction");
        W->needs(ExecUnit, ExecutionUnit::ExecuteInst);
    }

    for (int idx=0; idx < inst->numDestRegs(); idx++) {
        W->needs(RegManager, UseDefUnit::WriteDestReg, idx);
    }

    W->needs(Grad, GraduationUnit::GraduateInst);

    return true;
}

InstStage::InstStage(DynInstPtr inst, int stage_num)
{
    stageNum = stage_num;
    nextTaskPriority = 0;
    instSched = &inst->resSched;
}

void
InstStage::needs(int unit, int request) {
    instSched->push( new ScheduleEntry(
        stageNum, nextTaskPriority++, unit, request
    ));
}

void
InstStage::needs(int unit, int request, int param) {
    instSched->push( new ScheduleEntry(
        stageNum, nextTaskPriority++, unit, request, param
    ));
}
};

View file

@ -78,23 +78,6 @@ namespace ThePipeline {
//////////////////////////
typedef ResourceSked ResSchedule;
typedef ResourceSked* RSkedPtr;
void createFrontEndSchedule(DynInstPtr &inst);
bool createBackEndSchedule(DynInstPtr &inst);
int getNextPriority(DynInstPtr &inst, int stage_num);
class InstStage {
  private:
    int nextTaskPriority;
    int stageNum;
    ResSchedule *instSched;

  public:
    InstStage(DynInstPtr inst, int stage_num);

    void needs(int unit, int request);
    void needs(int unit, int request, int param);
};
};

View file

@ -492,11 +492,15 @@ CacheUnit::read(DynInstPtr inst, Addr addr,
// Schedule Split Read/Complete for Instruction
// ==============================
int stage_num = cache_req->getStageNum();
int stage_pri = ThePipeline::getNextPriority(inst, stage_num);
RSkedPtr inst_sked = (stage_num >= ThePipeline::BackEndStartStage) ?
inst->backSked : inst->frontSked;
// this is just an arbitrarily high priority to ensure that this
// gets pushed to the back of the list
int stage_pri = 20;
int isplit_cmd = CacheUnit::InitSecondSplitRead;
inst->resSched.push(new
inst_sked->push(new
ScheduleEntry(stage_num,
stage_pri,
cpu->resPool->getResIdx(DCache),
@ -504,7 +508,7 @@ CacheUnit::read(DynInstPtr inst, Addr addr,
1));
int csplit_cmd = CacheUnit::CompleteSecondSplitRead;
inst->resSched.push(new
inst_sked->push(new
ScheduleEntry(stage_num + 1,
1/*stage_pri*/,
cpu->resPool->getResIdx(DCache),
@ -597,24 +601,28 @@ CacheUnit::write(DynInstPtr inst, uint8_t *data, unsigned size,
// Schedule Split Read/Complete for Instruction
// ==============================
int stage_num = cache_req->getStageNum();
RSkedPtr inst_sked = (stage_num >= ThePipeline::BackEndStartStage) ?
inst->backSked : inst->frontSked;
int stage_pri = ThePipeline::getNextPriority(inst, stage_num);
// this is just an arbitrarily high priority to ensure that this
// gets pushed to the back of the list
int stage_pri = 20;
int isplit_cmd = CacheUnit::InitSecondSplitWrite;
inst->resSched.push(new
ScheduleEntry(stage_num,
stage_pri,
cpu->resPool->getResIdx(DCache),
isplit_cmd,
1));
inst_sked->push(new
ScheduleEntry(stage_num,
stage_pri,
cpu->resPool->getResIdx(DCache),
isplit_cmd,
1));
int csplit_cmd = CacheUnit::CompleteSecondSplitWrite;
inst->resSched.push(new
ScheduleEntry(stage_num + 1,
1/*stage_pri*/,
cpu->resPool->getResIdx(DCache),
csplit_cmd,
1));
inst_sked->push(new
ScheduleEntry(stage_num + 1,
1/*stage_pri*/,
cpu->resPool->getResIdx(DCache),
csplit_cmd,
1));
inst->splitInstSked = true;
} else {
DPRINTF(InOrderCachePort, "[tid:%i] sn:%i] Retrying Split Read "
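
Both CacheUnit::read() and CacheUnit::write() now repeat the same select-then-push sequence for their split-access entries. A hypothetical helper that folds the duplication together might look like the sketch below; the name scheduleSplitAccess and its placement are invented here, not part of this commit:

    // Hypothetical helper (not in this commit): schedule both halves of a
    // split access on whichever sked owns the initiating stage.
    void
    CacheUnit::scheduleSplitAccess(DynInstPtr inst, int stage_num,
                                   int init_cmd, int complete_cmd)
    {
        // As in read()/write(): pick the sked once, based on the stage that
        // initiates the access.
        RSkedPtr inst_sked = (stage_num >= ThePipeline::BackEndStartStage) ?
            inst->backSked : inst->frontSked;

        // Arbitrarily high priority (20) so the init entry lands at the back
        // of the list for its stage, exactly as the inline code does.
        inst_sked->push(new ScheduleEntry(stage_num, 20,
                                          cpu->resPool->getResIdx(DCache),
                                          init_cmd, 1));
        inst_sked->push(new ScheduleEntry(stage_num + 1, 1,
                                          cpu->resPool->getResIdx(DCache),
                                          complete_cmd, 1));

        inst->splitInstSked = true;
    }

With that in place, read() would call scheduleSplitAccess(inst, stage_num, CacheUnit::InitSecondSplitRead, CacheUnit::CompleteSecondSplitRead) and write() the analogous write commands; the inline versions shown in the hunks above are what the commit actually contains.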

View file

@ -99,19 +99,22 @@ InstBuffer::execute(int slot_idx)
inst->seqNum, next_stage);
// Add to schedule: Insert into buffer in next stage
int stage_pri = ThePipeline::getNextPriority(inst,
next_stage);
int stage_pri = 20;
RSkedPtr insert_sked = (stage_num >= ThePipeline::BackEndStartStage) ?
inst->backSked : inst->frontSked;
inst->resSched.push(new ScheduleEntry(next_stage,
insert_sked->push(new ScheduleEntry(next_stage,
stage_pri,
id,
InstBuffer::InsertInst));
// Add to schedule: Remove from buffer in next next (bypass)
// stage
stage_pri = ThePipeline::getNextPriority(inst, bypass_stage);
stage_pri = 20;
RSkedPtr bypass_sked = (stage_num >= ThePipeline::BackEndStartStage) ?
inst->backSked : inst->frontSked;
inst->resSched.push(new ScheduleEntry(bypass_stage,
bypass_sked->push(new ScheduleEntry(bypass_stage,
stage_pri,
id,
InstBuffer::RemoveInst));