diff --git a/src/cpu/inorder/cpu.cc b/src/cpu/inorder/cpu.cc
index 39357cd30..0591b0510 100644
--- a/src/cpu/inorder/cpu.cc
+++ b/src/cpu/inorder/cpu.cc
@@ -341,6 +341,7 @@ InOrderCPU::InOrderCPU(Params *params)
     dummyBufferInst->resetInstCount();
 
     endOfSkedIt = skedCache.end();
+    frontEndSked = createFrontEndSked();
 
     lastRunningCycle = curTick();
@@ -364,6 +365,102 @@ InOrderCPU::~InOrderCPU()
 
 std::map<InOrderCPU::SkedID, ThePipeline::RSkedPtr> InOrderCPU::skedCache;
 
+RSkedPtr
+InOrderCPU::createFrontEndSked()
+{
+    // The front-end schedule (fetch/decode) is identical for every
+    // instruction, so it is built once and shared via frontEndSked.
+    RSkedPtr res_sked = new ResourceSked();
+    int stage_num = 0;
+    StageScheduler F(res_sked, stage_num++);
+    StageScheduler D(res_sked, stage_num++);
+
+    // FETCH
+    F.needs(FetchSeq, FetchSeqUnit::AssignNextPC);
+    F.needs(ICache, FetchUnit::InitiateFetch);
+
+    // DECODE
+    D.needs(ICache, FetchUnit::CompleteFetch);
+    D.needs(Decode, DecodeUnit::DecodeInst);
+    D.needs(BPred, BranchPredictor::PredictBranch);
+    D.needs(FetchSeq, FetchSeqUnit::UpdateTargetPC);
+
+    return res_sked;
+}
+
+RSkedPtr
+InOrderCPU::createBackEndSked(DynInstPtr inst)
+{
+    // Reuse a cached schedule if an identical instruction has
+    // already built one; otherwise start a fresh schedule.
+    RSkedPtr res_sked = lookupSked(inst);
+    if (res_sked != NULL) {
+        return res_sked;
+    } else {
+        res_sked = new ResourceSked();
+    }
+
+    int stage_num = ThePipeline::BackEndStartStage;
+    StageScheduler X(res_sked, stage_num++);
+    StageScheduler M(res_sked, stage_num++);
+    StageScheduler W(res_sked, stage_num++);
+
+    if (!inst->staticInst) {
+        warn_once("Static Instruction Object Not Set. Can't Create"
+                  " Back End Schedule");
+        return NULL;
+    }
+
+    // EXECUTE
+    for (int idx = 0; idx < inst->numSrcRegs(); idx++) {
+        // Stores read only src reg 0 here; the store data
+        // (src reg 1) is read in the memory stage below.
+        if (!idx || !inst->isStore()) {
+            X.needs(RegManager, UseDefUnit::ReadSrcReg, idx);
+        }
+    }
+
+    if ( inst->isNonSpeculative() ) {
+        // skip execution of non-speculative insts until the
+        // writeback stage (see below)
+    } else if ( inst->isMemRef() ) {
+        if ( inst->isLoad() ) {
+            X.needs(AGEN, AGENUnit::GenerateAddr);
+        }
+    } else if (inst->opClass() == IntMultOp || inst->opClass() == IntDivOp) {
+        X.needs(MDU, MultDivUnit::StartMultDiv);
+    } else {
+        X.needs(ExecUnit, ExecutionUnit::ExecuteInst);
+    }
+
+    // Multiplies/divides also complete in the execute stage.
+    if (inst->opClass() == IntMultOp || inst->opClass() == IntDivOp) {
+        X.needs(MDU, MultDivUnit::EndMultDiv);
+    }
+
+    // MEMORY
+    if ( inst->isLoad() ) {
+        M.needs(DCache, CacheUnit::InitiateReadData);
+    } else if ( inst->isStore() ) {
+        if ( inst->numSrcRegs() >= 2 ) {
+            M.needs(RegManager, UseDefUnit::ReadSrcReg, 1);
+        }
+        M.needs(AGEN, AGENUnit::GenerateAddr);
+        M.needs(DCache, CacheUnit::InitiateWriteData);
+    }
+
+    // WRITEBACK
+    if ( inst->isLoad() ) {
+        W.needs(DCache, CacheUnit::CompleteReadData);
+    } else if ( inst->isStore() ) {
+        W.needs(DCache, CacheUnit::CompleteWriteData);
+    }
+
+    if ( inst->isNonSpeculative() ) {
+        if ( inst->isMemRef() ) fatal("Non-Speculative Memory Instruction");
+        W.needs(ExecUnit, ExecutionUnit::ExecuteInst);
+    }
+
+    for (int idx = 0; idx < inst->numDestRegs(); idx++) {
+        W.needs(RegManager, UseDefUnit::WriteDestReg, idx);
+    }
+
+    W.needs(Grad, GraduationUnit::GraduateInst);
+
+    return res_sked;
+}
+
 void
 InOrderCPU::regStats()
 {
diff --git a/src/cpu/inorder/cpu.hh b/src/cpu/inorder/cpu.hh
index 154ab690c..2a5c815e1 100644
--- a/src/cpu/inorder/cpu.hh
+++ b/src/cpu/inorder/cpu.hh
@@ -309,6 +309,8 @@ class InOrderCPU : public BaseCPU
      */
     SkedCacheIt endOfSkedIt;
 
+    ThePipeline::RSkedPtr frontEndSked;
+
     /** Add a new instruction schedule to the schedule cache */
     void addToSkedCache(DynInstPtr inst, ThePipeline::RSkedPtr inst_sked)
     {
@@ -351,6 +353,33 @@ class InOrderCPU : public BaseCPU
         return id;
     }
 
+    ThePipeline::RSkedPtr createFrontEndSked();
+    ThePipeline::RSkedPtr createBackEndSked(DynInstPtr inst);
+
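+    /** Helper for building the schedules above: each needs() call
+     *  appends a ScheduleEntry for this scheduler's stage at the
+     *  next (monotonically increasing) task priority, so resources
+     *  are requested in the order the calls are written. */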
+    class StageScheduler {
+      private:
+        ThePipeline::RSkedPtr rsked;
+        int stageNum;
+        int nextTaskPriority;
+
+      public:
+        StageScheduler(ThePipeline::RSkedPtr _rsked, int stage_num)
+            : rsked(_rsked), stageNum(stage_num),
+              nextTaskPriority(0)
+        { }
+
+        void needs(int unit, int request) {
+            rsked->push(new ScheduleEntry(
+                stageNum, nextTaskPriority++, unit, request
+            ));
+        }
+
+        void needs(int unit, int request, int param) {
+            rsked->push(new ScheduleEntry(
+                stageNum, nextTaskPriority++, unit, request, param
+            ));
+        }
+    };
 
   public:
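
Note: ResourceSked and ScheduleEntry are not part of this diff (they live
in src/cpu/inorder/resource_sked.hh). As a rough, hypothetical sketch of
the contract StageScheduler::needs() relies on -- the field names, the
constructor defaults, and the ordered-insert behavior below are
assumptions for illustration, not the gem5 definitions:

    #include <list>

    struct ScheduleEntry {
        // Stage to run in, priority within that stage, target resource,
        // resource command, and an optional operand index (assumed layout).
        int stageNum, priority, resNum, cmd, idx;
        ScheduleEntry(int stage_num, int _priority, int res_num,
                      int _cmd = 0, int _idx = 0)
            : stageNum(stage_num), priority(_priority), resNum(res_num),
              cmd(_cmd), idx(_idx)
        { }
    };

    class ResourceSked {
      public:
        // Assumed behavior: keep entries ordered by (stageNum, priority)
        // so the pipeline can walk the schedule front to back as the
        // instruction advances through its stages.
        void push(ScheduleEntry *entry)
        {
            std::list<ScheduleEntry*>::iterator it = sked.begin();
            while (it != sked.end() &&
                   ((*it)->stageNum < entry->stageNum ||
                    ((*it)->stageNum == entry->stageNum &&
                     (*it)->priority <= entry->priority))) {
                ++it;
            }
            sked.insert(it, entry);
        }

      private:
        std::list<ScheduleEntry*> sked;
    };

    typedef ResourceSked *RSkedPtr;

Under these assumptions, both needs() overloads map onto the single
ScheduleEntry constructor: the two-argument form leaves idx at its
default, and the three-argument form passes the operand index (e.g. the
src/dest register number used by UseDefUnit requests).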