inorder: enforce 78-character rule

Korey Sewell 2010-06-24 15:34:12 -04:00
parent ecba3074c2
commit f95430d97e
30 changed files with 435 additions and 294 deletions
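The change is purely mechanical: every touched line is re-wrapped so that no source line exceeds 78 characters. As a rough illustration of what such a rule checks, here is a minimal, hypothetical line-length scanner in C++; it is not gem5's actual style checker, and only the 78-column limit is taken from the commit itself.

#include <fstream>
#include <iostream>
#include <string>

// Hypothetical checker: print every line longer than 78 characters.
int main(int argc, char **argv)
{
    const std::string::size_type limit = 78;   // the limit this commit enforces
    int violations = 0;

    for (int i = 1; i < argc; ++i) {
        std::ifstream in(argv[i]);
        std::string line;
        for (unsigned num = 1; std::getline(in, line); ++num) {
            if (line.size() > limit) {
                std::cout << argv[i] << ":" << num << ": "
                          << line.size() << " chars\n";
                ++violations;
            }
        }
    }
    return violations ? 1 : 0;   // non-zero exit when any line is too long
}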


@ -158,9 +158,11 @@ void
InOrderCPU::CPUEvent::scheduleEvent(int delay)
{
if (squashed())
mainEventQueue.reschedule(this, cpu->nextCycle(curTick + cpu->ticks(delay)));
mainEventQueue.reschedule(this, cpu->nextCycle(curTick +
cpu->ticks(delay)));
else if (!scheduled())
mainEventQueue.schedule(this, cpu->nextCycle(curTick + cpu->ticks(delay)));
mainEventQueue.schedule(this, cpu->nextCycle(curTick +
cpu->ticks(delay)));
}
void
@ -674,7 +676,8 @@ InOrderCPU::squashFromMemStall(DynInstPtr inst, ThreadID tid, int delay)
void
InOrderCPU::squashDueToMemStall(int stage_num, InstSeqNum seq_num, ThreadID tid)
InOrderCPU::squashDueToMemStall(int stage_num, InstSeqNum seq_num,
ThreadID tid)
{
DPRINTF(InOrderCPU, "Squashing Pipeline Stages Due to Memory Stall...\n");
@ -965,7 +968,8 @@ InOrderCPU::suspendContext(ThreadID tid, int delay)
void
InOrderCPU::suspendThread(ThreadID tid)
{
DPRINTF(InOrderCPU, "[tid:%i]: Placing on Suspended Threads List...\n", tid);
DPRINTF(InOrderCPU, "[tid:%i]: Placing on Suspended Threads List...\n",
tid);
deactivateThread(tid);
suspendedThreads.push_back(tid);
thread[tid]->lastSuspend = curTick;
@ -1323,8 +1327,8 @@ InOrderCPU::squashInstIt(const ListIt &instIt, ThreadID tid)
(*instIt)->setRemoveList();
removeList.push(instIt);
} else {
DPRINTF(InOrderCPU, "Ignoring instruction removal for [tid:%i] PC %#x "
"[sn:%lli], already on remove list\n",
DPRINTF(InOrderCPU, "Ignoring instruction removal for [tid:%i]"
" PC %#x [sn:%lli], already on remove list\n",
(*instIt)->threadNumber, (*instIt)->readPC(),
(*instIt)->seqNum);
}
@ -1387,7 +1391,8 @@ InOrderCPU::cleanUpRemovedReqs()
res_req->inst->seqNum,
res_req->getStageNum(),
res_req->res->name(),
(res_req->isCompleted()) ? res_req->getComplSlot() : res_req->getSlot(),
(res_req->isCompleted()) ?
res_req->getComplSlot() : res_req->getSlot(),
res_req->isCompleted());
reqRemoveList.pop();

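The hunks above, like most of the rest of the commit, shorten long DPRINTF calls by splitting the format string into adjacent string literals, which the compiler concatenates back into a single string, so the printed message is unchanged. A self-contained sketch of that rule, using printf in place of the gem5-specific DPRINTF:

#include <cstdio>

int main()
{
    // Adjacent string literals are concatenated at compile time, so a
    // format string wrapped across lines produces exactly the same output
    // as the original single-line version.
    std::printf("[tid:%i]: Placing on Suspended Threads List... "
                "(illustrative value: %i)\n", 0, 42);
    return 0;
}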

@ -731,7 +731,9 @@ class InOrderCPU : public BaseCPU
/** Stat for total number of times the CPU is descheduled. */
Stats::Scalar timesIdled;
/** Stat for total number of cycles the CPU spends descheduled or no stages active. */
/** Stat for total number of cycles the CPU spends descheduled or no
* stages active.
*/
Stats::Scalar idleCycles;
/** Stat for total number of cycles the CPU is active. */


@ -172,8 +172,8 @@ InOrderDynInst::initVars()
DPRINTF(InOrderDynInst, "DynInst: [tid:%i] [sn:%lli] Instruction created. (active insts: %i)\n",
threadNumber, seqNum, instcount);
DPRINTF(InOrderDynInst, "DynInst: [tid:%i] [sn:%lli] Instruction created."
" (active insts: %i)\n", threadNumber, seqNum, instcount);
}
void
@ -209,8 +209,8 @@ InOrderDynInst::~InOrderDynInst()
deleteStages();
DPRINTF(InOrderDynInst, "DynInst: [tid:%i] [sn:%lli] Instruction destroyed. (active insts: %i)\n",
threadNumber, seqNum, instcount);
DPRINTF(InOrderDynInst, "DynInst: [tid:%i] [sn:%lli] Instruction destroyed"
" (active insts: %i)\n", threadNumber, seqNum, instcount);
}
void
@ -387,8 +387,8 @@ InOrderDynInst::releaseReq(ResourceRequest* req)
while(list_it != list_end) {
if((*list_it)->getResIdx() == req->getResIdx() &&
(*list_it)->getSlot() == req->getSlot()) {
DPRINTF(InOrderDynInst, "[tid:%u]: [sn:%i] Done with request to %s.\n",
threadNumber, seqNum, req->res->name());
DPRINTF(InOrderDynInst, "[tid:%u]: [sn:%i] Done with request "
"to %s.\n", threadNumber, seqNum, req->res->name());
reqList.erase(list_it);
return;
}
@ -402,8 +402,8 @@ InOrderDynInst::releaseReq(ResourceRequest* req)
void
InOrderDynInst::setIntSrc(int idx, uint64_t val)
{
DPRINTF(InOrderDynInst, "[tid:%i]: [sn:%i] Source Value %i being set to %#x.\n",
threadNumber, seqNum, idx, val);
DPRINTF(InOrderDynInst, "[tid:%i]: [sn:%i] Source Value %i being set "
"to %#x.\n", threadNumber, seqNum, idx, val);
instSrc[idx].integer = val;
}
@ -757,7 +757,8 @@ unsigned int MyHashFunc(const InOrderDynInst *addr)
return hash;
}
typedef m5::hash_map<const InOrderDynInst *, const InOrderDynInst *, MyHashFunc>
typedef m5::hash_map<const InOrderDynInst *, const InOrderDynInst *,
MyHashFunc>
my_hash_t;
my_hash_t thishash;


@ -141,7 +141,7 @@ class InOrderDynInst : public FastAlloc, public RefCounted
InstSeqNum bdelaySeqNum;
enum Status {
RegDepMapEntry, /// Instruction has been entered onto the RegDepMap
RegDepMapEntry, /// Instruction is entered onto the RegDepMap
IqEntry, /// Instruction is in the IQ
RobEntry, /// Instruction is in the ROB
LsqEntry, /// Instruction is in the LSQ
@ -648,8 +648,8 @@ class InOrderDynInst : public FastAlloc, public RefCounted
Fault write(T data, Addr addr, unsigned flags,
uint64_t *res);
/** Initiates a memory access - Calculate Eff. Addr & Initiate Memory Access
* Only valid for memory operations.
/** Initiates a memory access - Calculate Eff. Addr & Initiate Memory
* Access Only valid for memory operations.
*/
Fault initiateAcc();
@ -685,7 +685,7 @@ class InOrderDynInst : public FastAlloc, public RefCounted
/** Returns the effective address. */
const Addr &getEA() const { return instEffAddr; }
/** Returns whether or not the eff. addr. calculation has been completed. */
/** Returns whether or not the eff. addr. calculation has been completed.*/
bool doneEACalc() { return eaCalcDone; }
/** Returns whether or not the eff. addr. source registers are ready.
@ -895,7 +895,8 @@ class InOrderDynInst : public FastAlloc, public RefCounted
void setMiscReg(int misc_reg, const MiscReg &val);
void setMiscRegNoEffect(int misc_reg, const MiscReg &val);
void setMiscRegOperand(const StaticInst *si, int idx, const MiscReg &val);
void setMiscRegOperandNoEffect(const StaticInst *si, int idx, const MiscReg &val);
void setMiscRegOperandNoEffect(const StaticInst *si, int idx,
const MiscReg &val);
virtual uint64_t readRegOtherThread(unsigned idx,
ThreadID tid = InvalidThreadID);


@ -235,17 +235,20 @@ PipelineStage::removeStalls(ThreadID tid)
{
for (int st_num = 0; st_num < NumStages; st_num++) {
if (stalls[tid].stage[st_num] == true) {
DPRINTF(InOrderStage, "Removing stall from stage %i.\n", st_num);
DPRINTF(InOrderStage, "Removing stall from stage %i.\n",
st_num);
stalls[tid].stage[st_num] = false;
}
if (toPrevStages->stageBlock[st_num][tid] == true) {
DPRINTF(InOrderStage, "Removing pending block from stage %i.\n", st_num);
DPRINTF(InOrderStage, "Removing pending block from stage %i.\n",
st_num);
toPrevStages->stageBlock[st_num][tid] = false;
}
if (fromNextStages->stageBlock[st_num][tid] == true) {
DPRINTF(InOrderStage, "Removing pending block from stage %i.\n", st_num);
DPRINTF(InOrderStage, "Removing pending block from stage %i.\n",
st_num);
fromNextStages->stageBlock[st_num][tid] = false;
}
}
@ -568,15 +571,15 @@ PipelineStage::activateThread(ThreadID tid)
} else {
DynInstPtr inst = switchedOutBuffer[tid];
DPRINTF(InOrderStage,"[tid:%i]: Re-Inserting [sn:%lli] PC:%#x into "
"stage skidBuffer %i\n", tid, inst->seqNum,
DPRINTF(InOrderStage,"[tid:%i]: Re-Inserting [sn:%lli] PC:%#x into"
" stage skidBuffer %i\n", tid, inst->seqNum,
inst->readPC(), inst->threadNumber);
// Make instruction available for pipeline processing
skidBuffer[tid].push(inst);
// Update PC so that we start fetching after this instruction to prevent
// "double"-execution of instructions
// Update PC so that we start fetching after this instruction to
// prevent "double"-execution of instructions
cpu->resPool->scheduleEvent((InOrderCPU::CPUEventType)
ResourcePool::UpdateAfterContextSwitch,
inst, 0, 0, tid);
@ -988,10 +991,11 @@ PipelineStage::processInstSchedule(DynInstPtr inst,int &reqs_processed)
if (req->isMemStall() &&
cpu->threadModel == InOrderCPU::SwitchOnCacheMiss) {
// Save Stalling Instruction
DPRINTF(ThreadModel, "[tid:%i] [sn:%i] Detected cache miss.\n", tid, inst->seqNum);
DPRINTF(ThreadModel, "[tid:%i] [sn:%i] Detected cache "
"miss.\n", tid, inst->seqNum);
DPRINTF(InOrderStage, "Inserting [tid:%i][sn:%i] into switch out buffer.\n",
tid, inst->seqNum);
DPRINTF(InOrderStage, "Inserting [tid:%i][sn:%i] into "
"switch out buffer.\n", tid, inst->seqNum);
switchedOutBuffer[tid] = inst;
switchedOutValid[tid] = true;
@ -1004,26 +1008,27 @@ PipelineStage::processInstSchedule(DynInstPtr inst,int &reqs_processed)
// Switch On Cache Miss
//=====================
// Suspend Thread at end of cycle
DPRINTF(ThreadModel, "Suspending [tid:%i] due to cache miss.\n", tid);
DPRINTF(ThreadModel, "Suspending [tid:%i] due to cache "
"miss.\n", tid);
cpu->suspendContext(tid);
// Activate Next Ready Thread at end of cycle
DPRINTF(ThreadModel, "Attempting to activate next ready thread due to"
" cache miss.\n");
DPRINTF(ThreadModel, "Attempting to activate next ready "
"thread due to cache miss.\n");
cpu->activateNextReadyContext();
}
// Mark request for deletion
// if it isnt currently being used by a resource
if (!req->hasSlot()) {
DPRINTF(InOrderStage, "[sn:%i] Deleting Request, has no slot in resource.\n",
inst->seqNum);
DPRINTF(InOrderStage, "[sn:%i] Deleting Request, has no "
"slot in resource.\n", inst->seqNum);
cpu->reqRemoveList.push(req);
} else {
DPRINTF(InOrderStage, "[sn:%i] Ignoring Request Deletion, in resource [slot:%i].\n",
inst->seqNum, req->getSlot());
//req = cpu->dummyReq[tid];
DPRINTF(InOrderStage, "[sn:%i] Ignoring Request Deletion, "
"in resource [slot:%i].\n", inst->seqNum,
req->getSlot());
}


@ -154,8 +154,9 @@ RegDepMap::canRead(unsigned idx, DynInstPtr inst)
if (inst->seqNum <= (*list_it)->seqNum) {
return true;
} else {
DPRINTF(RegDepMap, "[sn:%i] Can't read from RegFile, [sn:%i] has not written"
" it's value back yet.\n", inst->seqNum, (*list_it)->seqNum);
DPRINTF(RegDepMap, "[sn:%i] Can't read from RegFile, [sn:%i] has "
"not written it's value back yet.\n",
inst->seqNum, (*list_it)->seqNum);
return false;
}
}
@ -184,13 +185,14 @@ RegDepMap::canForward(unsigned reg_idx, DynInstPtr inst)
return forward_inst;
} else {
if (!forward_inst->isExecuted()) {
DPRINTF(RegDepMap, "[sn:%i] Can't get value through forwarding, "
" [sn:%i] has not been executed yet.\n",
DPRINTF(RegDepMap, "[sn:%i] Can't get value through "
"forwarding, [sn:%i] has not been executed yet.\n",
inst->seqNum, forward_inst->seqNum);
} else if (forward_inst->readResultTime(dest_reg_idx) >= curTick) {
DPRINTF(RegDepMap, "[sn:%i] Can't get value through forwarding, "
" [sn:%i] executed on tick:%i.\n",
inst->seqNum, forward_inst->seqNum, forward_inst->readResultTime(dest_reg_idx));
DPRINTF(RegDepMap, "[sn:%i] Can't get value through "
"forwarding, [sn:%i] executed on tick:%i.\n",
inst->seqNum, forward_inst->seqNum,
forward_inst->readResultTime(dest_reg_idx));
}
return NULL;
@ -213,8 +215,9 @@ RegDepMap::canWrite(unsigned idx, DynInstPtr inst)
if (inst->seqNum <= (*list_it)->seqNum) {
return true;
} else {
DPRINTF(RegDepMap, "[sn:%i] Can't write from RegFile: [sn:%i] has not written"
" it's value back yet.\n", inst->seqNum, (*list_it)->seqNum);
DPRINTF(RegDepMap, "[sn:%i] Can't write from RegFile: [sn:%i] "
"has not written it's value back yet.\n", inst->seqNum,
(*list_it)->seqNum);
}
return false;


@ -60,29 +60,36 @@ class RegDepMap
/** Insert all of a instruction's destination registers into map*/
void insert(DynInstPtr inst);
/** Insert an instruction into a specific destination register index onto map */
/** Insert an instruction into a specific destination register index
* onto map
*/
void insert(unsigned idx, DynInstPtr inst);
/** Remove all of a instruction's destination registers into map*/
void remove(DynInstPtr inst);
/** Remove a specific instruction and destination register index from map */
/** Remove a specific instruction and dest. register index from map*/
void remove(unsigned idx, DynInstPtr inst);
/** Remove Front instruction from a destination register */
void removeFront(unsigned idx, DynInstPtr inst);
/** Is the current instruction able to read from this destination register? */
/** Is the current instruction able to read from this
* destination register?
*/
bool canRead(unsigned idx, DynInstPtr inst);
/** Is the current instruction able to get a forwarded value from another instruction
* for this destination register? */
/** Is the current instruction able to get a forwarded value from
* another instruction for this destination register?
*/
DynInstPtr canForward(unsigned reg_idx, DynInstPtr inst);
/** find an instruction to forward/bypass a value from */
DynInstPtr findBypassInst(unsigned idx);
/** Is the current instruction able to write to this destination register? */
/** Is the current instruction able to write to this
* destination register?
*/
bool canWrite(unsigned idx, DynInstPtr inst);
/** Size of Dependency of Map */


@ -346,7 +346,8 @@ Resource::squash(DynInstPtr inst, int stage_num, InstSeqNum squash_seq_num,
}
void
Resource::squashDueToMemStall(DynInstPtr inst, int stage_num, InstSeqNum squash_seq_num,
Resource::squashDueToMemStall(DynInstPtr inst, int stage_num,
InstSeqNum squash_seq_num,
ThreadID tid)
{
squash(inst, stage_num, squash_seq_num, tid);
@ -454,8 +455,9 @@ ResourceRequest::~ResourceRequest()
void
ResourceRequest::done(bool completed)
{
DPRINTF(Resource, "%s [slot:%i] done with request from [sn:%i] [tid:%i].\n",
res->name(), slotNum, inst->seqNum, inst->readTid());
DPRINTF(Resource, "%s [slot:%i] done with request from "
"[sn:%i] [tid:%i].\n", res->name(), slotNum,
inst->seqNum, inst->readTid());
setCompleted(completed);
@ -463,7 +465,8 @@ ResourceRequest::done(bool completed)
if (completed) {
complSlotNum = slotNum;
// Would like to start a convention such as all requests deleted in resources/pipeline
// Would like to start a convention such as all requests deleted in
// resources/pipeline
// but a little more complex then it seems...
// For now, all COMPLETED requests deleted in resource..
// all FAILED requests deleted in pipeline stage
@ -474,7 +477,8 @@ ResourceRequest::done(bool completed)
// Free Slot So Another Instruction Can Use This Resource
res->freeSlot(slotNum);
// change slot # to -1, since we check slotNum to see if request is still valid
// change slot # to -1, since we check slotNum to see if request is
// still valid
slotNum = -1;
#ifdef DEBUG


@ -259,7 +259,8 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
inst->bdelaySeqNum,
inst->readTid());
mainEventQueue.schedule(res_pool_event,
cpu->nextCycle(curTick + cpu->ticks(delay)));
cpu->nextCycle(curTick +
cpu->ticks(delay)));
}
break;
@ -278,7 +279,8 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
tid);
mainEventQueue.schedule(res_pool_event,
cpu->nextCycle(curTick + cpu->ticks(delay)));
cpu->nextCycle(curTick +
cpu->ticks(delay)));
}
break;
@ -286,8 +288,10 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
case InOrderCPU::SuspendThread:
{
DPRINTF(Resource, "Scheduling Suspend Thread Resource Pool Event for tick %i.\n",
DPRINTF(Resource, "Scheduling Suspend Thread Resource Pool "
"Event for tick %i.\n",
cpu->nextCycle(cpu->nextCycle(curTick + cpu->ticks(delay))));
ResPoolEvent *res_pool_event = new ResPoolEvent(this,
e_type,
inst,
@ -295,7 +299,9 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
inst->bdelaySeqNum,
tid);
mainEventQueue.schedule(res_pool_event, cpu->nextCycle(cpu->nextCycle(curTick + cpu->ticks(delay))));
Tick sked_tick = curTick + cpu->ticks(delay);
mainEventQueue.schedule(res_pool_event,
cpu->nextCycle(cpu->nextCycle(sked_tick)));
}
break;
@ -311,7 +317,8 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
inst->seqNum,
inst->readTid());
mainEventQueue.schedule(res_pool_event,
cpu->nextCycle(curTick + cpu->ticks(delay)));
cpu->nextCycle(curTick +
cpu->ticks(delay)));
}
break;
@ -327,7 +334,8 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
inst->bdelaySeqNum,
inst->readTid());
mainEventQueue.schedule(res_pool_event,
cpu->nextCycle(curTick + cpu->ticks(delay)));
cpu->nextCycle(curTick +
cpu->ticks(delay)));
}
break;
@ -356,7 +364,8 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
inst->squashingStage,
inst->seqNum,
inst->readTid());
mainEventQueue.schedule(res_pool_event, cpu->nextCycle(curTick + cpu->ticks(delay)));
mainEventQueue.schedule(res_pool_event,
cpu->nextCycle(curTick + cpu->ticks(delay)));
}
break;
@ -443,8 +452,8 @@ ResourcePool::deactivateAll(ThreadID tid)
void
ResourcePool::suspendAll(ThreadID tid)
{
DPRINTF(Resource, "[tid:%i] Broadcasting Thread Suspension to all resources.\n",
tid);
DPRINTF(Resource, "[tid:%i] Broadcasting Thread Suspension to all "
"resources.\n", tid);
int num_resources = resources.size();
@ -543,10 +552,17 @@ ResourcePool::ResPoolEvent::description()
void
ResourcePool::ResPoolEvent::scheduleEvent(int delay)
{
if (squashed())
mainEventQueue.reschedule(this,resPool->cpu->nextCycle(curTick + resPool->cpu->ticks(delay)));
else if (!scheduled())
mainEventQueue.schedule(this, resPool->cpu->nextCycle(curTick + resPool->cpu->ticks(delay)));
if (squashed()) {
mainEventQueue.reschedule(this,
resPool->cpu->nextCycle(curTick +
resPool->
cpu->ticks(delay)));
} else if (!scheduled()) {
mainEventQueue.schedule(this,
resPool->cpu->nextCycle(curTick +
resPool->
cpu->ticks(delay)));
}
}
/** Unschedule resource event, regardless of its current state. */

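Another wrapping tactic visible above, for example the Tick sked_tick temporary in the SuspendThread case, is to hoist a nested subexpression into a named local so the remaining call fits within 78 columns. A generic, self-contained sketch; the Tick alias and the two helper functions below are placeholders, not gem5's implementations:

#include <cstdint>
#include <iostream>

using Tick = std::uint64_t;                  // stand-in for gem5's Tick type

static Tick ticks(int cycles) { return cycles * 500ULL; }          // fake clock period
static Tick nextCycle(Tick t) { return ((t + 499) / 500) * 500; }  // round up to a cycle

int main()
{
    Tick curTick = 12345;
    int delay = 3;

    // Naming the intermediate value keeps the subsequent call short
    // instead of nesting the whole expression on one over-length line.
    Tick sked_tick = curTick + ticks(delay);
    std::cout << "schedule at tick " << nextCycle(sked_tick) << "\n";
    return 0;
}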

@ -32,7 +32,8 @@
#include "cpu/inorder/resources/agen_unit.hh"
AGENUnit::AGENUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
int res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu)
{ }


@ -48,7 +48,7 @@ class AGENUnit : public Resource {
public:
AGENUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
virtual ~AGENUnit() {}
enum Command {


@ -161,21 +161,22 @@ BPredUnit::predict(DynInstPtr &inst, Addr &PC, ThreadID tid)
Addr target;
++lookups;
DPRINTF(InOrderBPred, "[tid:%i] [sn:%i] %s ... PC%#x doing branch prediction\n",
tid, inst->seqNum, inst->staticInst->disassemble(inst->PC),
inst->readPC());
DPRINTF(InOrderBPred, "[tid:%i] [sn:%i] %s ... PC%#x doing branch "
"prediction\n", tid, inst->seqNum,
inst->staticInst->disassemble(inst->PC), inst->readPC());
void *bp_history = NULL;
if (inst->isUncondCtrl()) {
DPRINTF(InOrderBPred, "BranchPred: [tid:%i] Unconditional control.\n", tid);
DPRINTF(InOrderBPred, "[tid:%i] Unconditional control.\n",
tid);
pred_taken = true;
// Tell the BP there was an unconditional branch.
BPUncond(bp_history);
if (inst->isReturn() && RAS[tid].empty()) {
DPRINTF(InOrderBPred, "BranchPred: [tid:%i] RAS is empty, predicting "
DPRINTF(InOrderBPred, "[tid:%i] RAS is empty, predicting "
"false.\n", tid);
pred_taken = false;
}
@ -184,7 +185,7 @@ BPredUnit::predict(DynInstPtr &inst, Addr &PC, ThreadID tid)
pred_taken = BPLookup(PC, bp_history);
DPRINTF(InOrderBPred, "BranchPred: [tid:%i]: Branch predictor predicted %i "
DPRINTF(InOrderBPred, "[tid:%i]: Branch predictor predicted %i "
"for PC %#x\n",
tid, pred_taken, inst->readPC());
}
@ -210,7 +211,7 @@ BPredUnit::predict(DynInstPtr &inst, Addr &PC, ThreadID tid)
RAS[tid].pop();
DPRINTF(InOrderBPred, "BranchPred: [tid:%i]: Instruction %#x is a return, "
DPRINTF(InOrderBPred, "[tid:%i]: Instruction %#x is a return, "
"RAS predicted target: %#x, RAS index: %i.\n",
tid, inst->readPC(), target, predict_record.RASIndex);
} else {
@ -229,7 +230,7 @@ BPredUnit::predict(DynInstPtr &inst, Addr &PC, ThreadID tid)
// be popped off if the speculation is incorrect.
predict_record.wasCall = true;
DPRINTF(InOrderBPred, "BranchPred: [tid:%i]: Instruction %#x was a call"
DPRINTF(InOrderBPred, "[tid:%i]: Instruction %#x was a call"
", adding %#x to the RAS index: %i.\n",
tid, inst->readPC(), ras_pc, RAS[tid].topIdx());
}
@ -239,7 +240,7 @@ BPredUnit::predict(DynInstPtr &inst, Addr &PC, ThreadID tid)
inst->isDirectCtrl()) {
target = inst->branchTarget();
DPRINTF(InOrderBPred, "BranchPred: [tid:%i]: Setting %#x predicted"
DPRINTF(InOrderBPred, "[tid:%i]: Setting %#x predicted"
" target to %#x.\n",
tid, inst->readPC(), target);
} else if (BTB.valid(PC, asid)) {
@ -248,11 +249,11 @@ BPredUnit::predict(DynInstPtr &inst, Addr &PC, ThreadID tid)
// If it's not a return, use the BTB to get the target addr.
target = BTB.lookup(PC, asid);
DPRINTF(InOrderBPred, "BranchPred: [tid:%i]: [asid:%i] Instruction %#x predicted"
" target is %#x.\n",
DPRINTF(InOrderBPred, "[tid:%i]: [asid:%i] Instruction %#x "
"predicted target is %#x.\n",
tid, asid, inst->readPC(), target);
} else {
DPRINTF(InOrderBPred, "BranchPred: [tid:%i]: BTB doesn't have a "
DPRINTF(InOrderBPred, "[tid:%i]: BTB doesn't have a "
"valid entry.\n",tid);
pred_taken = false;
}
@ -275,7 +276,8 @@ BPredUnit::predict(DynInstPtr &inst, Addr &PC, ThreadID tid)
predHist[tid].push_front(predict_record);
DPRINTF(InOrderBPred, "[tid:%i] [sn:%i] pushed onto front of predHist ...predHist.size(): %i\n",
DPRINTF(InOrderBPred, "[tid:%i] [sn:%i] pushed onto front of predHist "
"...predHist.size(): %i\n",
tid, inst->seqNum, predHist[tid].size());
inst->setBranchPred(pred_taken);
@ -303,15 +305,15 @@ BPredUnit::update(const InstSeqNum &done_sn, ThreadID tid)
void
BPredUnit::squash(const InstSeqNum &squashed_sn, ThreadID tid)
BPredUnit::squash(const InstSeqNum &squashed_sn, ThreadID tid, ThreadID asid)
{
History &pred_hist = predHist[tid];
while (!pred_hist.empty() &&
pred_hist.front().seqNum > squashed_sn) {
if (pred_hist.front().usedRAS) {
DPRINTF(InOrderBPred, "BranchPred: [tid:%i]: Restoring top of RAS to: %i,"
" target: %#x.\n",
DPRINTF(InOrderBPred, "BranchPred: [tid:%i]: Restoring top of RAS "
"to: %i, target: %#x.\n",
tid,
pred_hist.front().RASIndex,
pred_hist.front().RASTarget);
@ -320,8 +322,8 @@ BPredUnit::squash(const InstSeqNum &squashed_sn, ThreadID tid)
pred_hist.front().RASTarget);
} else if (pred_hist.front().wasCall) {
DPRINTF(InOrderBPred, "BranchPred: [tid:%i]: Removing speculative entry "
"added to the RAS.\n",tid);
DPRINTF(InOrderBPred, "BranchPred: [tid:%i]: Removing speculative "
"entry added to the RAS.\n",tid);
RAS[tid].pop();
}
@ -337,9 +339,10 @@ BPredUnit::squash(const InstSeqNum &squashed_sn, ThreadID tid)
void
BPredUnit::squash(const InstSeqNum &squashed_sn,
const Addr &corr_target,
bool actually_taken,
ThreadID tid)
const Addr &corr_target,
bool actually_taken,
ThreadID tid,
ThreadID asid)
{
// Now that we know that a branch was mispredicted, we need to undo
// all the branches that have been seen up until this branch and
@ -349,7 +352,7 @@ BPredUnit::squash(const InstSeqNum &squashed_sn,
++condIncorrect;
DPRINTF(InOrderBPred, "BranchPred: [tid:%i]: Squashing from sequence number %i, "
DPRINTF(InOrderBPred, "[tid:%i]: Squashing from sequence number %i, "
"setting target to %#x.\n",
tid, squashed_sn, corr_target);
@ -379,18 +382,19 @@ BPredUnit::squash(const InstSeqNum &squashed_sn,
BPUpdate((*hist_it).PC, actually_taken,
pred_hist.front().bpHistory);
BTB.update((*hist_it).PC, corr_target, tid);
BTB.update((*hist_it).PC, corr_target, asid);
DPRINTF(InOrderBPred, "BranchPred: [tid:%i]: Removing history for [sn:%i] "
DPRINTF(InOrderBPred, "[tid:%i]: Removing history for [sn:%i] "
"PC %#x.\n", tid, (*hist_it).seqNum, (*hist_it).PC);
pred_hist.erase(hist_it);
DPRINTF(InOrderBPred, "[tid:%i]: predHist.size(): %i\n", tid, predHist[tid].size());
DPRINTF(InOrderBPred, "[tid:%i]: predHist.size(): %i\n", tid,
predHist[tid].size());
} else {
DPRINTF(InOrderBPred, "BranchPred: [tid:%i]: [sn:%i] pred_hist empty, can't update.\n",
tid, squashed_sn);
DPRINTF(InOrderBPred, "[tid:%i]: [sn:%i] pred_hist empty, can't "
"update.\n", tid, squashed_sn);
}
}


@ -106,7 +106,8 @@ class BPredUnit
* until.
* @param tid The thread id.
*/
void squash(const InstSeqNum &squashed_sn, ThreadID tid);
void squash(const InstSeqNum &squashed_sn, ThreadID tid,
ThreadID asid = 0);
/**
* Squashes all outstanding updates until a given sequence number, and
@ -118,7 +119,7 @@ class BPredUnit
* @param tid The thread id.
*/
void squash(const InstSeqNum &squashed_sn, const Addr &corr_target,
bool actually_taken, ThreadID tid);
bool actually_taken, ThreadID tid, ThreadID asid = 0);
/**
* @param bp_history Pointer to the history object. The predictor

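The squash() changes above go beyond re-wrapping: both overloads gain a trailing ThreadID asid argument defaulting to 0, so callers that still pass only the old arguments keep compiling while the BTB update can use the asid instead of the tid. A self-contained sketch of that pattern, with illustrative names rather than the real gem5 signatures:

#include <iostream>

// A trailing parameter with a default value keeps existing call sites
// source-compatible while allowing new callers to supply extra detail.
static void squash(unsigned squashed_sn, int tid, int asid = 0)
{
    std::cout << "squash sn=" << squashed_sn
              << " tid=" << tid << " asid=" << asid << "\n";
}

int main()
{
    squash(100, 1);       // old-style call: asid silently defaults to 0
    squash(200, 1, 2);    // new-style call: explicit address-space id
    return 0;
}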

@ -37,7 +37,8 @@ using namespace TheISA;
using namespace ThePipeline;
BranchPredictor::BranchPredictor(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
int res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu),
branchPred(this, params)
{
@ -80,26 +81,27 @@ BranchPredictor::execute(int slot_num)
{
if (inst->seqNum > cpu->squashSeqNum[tid] &&
curTick == cpu->lastSquashCycle[tid]) {
DPRINTF(InOrderStage, "[tid:%u]: [sn:%i]: squashed, skipping prediction \n",
tid, inst->seqNum);
DPRINTF(InOrderStage, "[tid:%u]: [sn:%i]: squashed, "
"skipping prediction \n", tid, inst->seqNum);
} else {
Addr pred_PC = inst->readNextPC();
if (inst->isControl()) {
// If not, the pred_PC be updated to pc+8
// If predicted, the pred_PC will be updated to new target value
// If predicted, the pred_PC will be updated to new target
// value
bool predict_taken = branchPred.predict(inst, pred_PC, tid);
if (predict_taken) {
DPRINTF(InOrderBPred, "[tid:%i]: [sn:%i]: Branch predicted true.\n",
tid, seq_num);
DPRINTF(InOrderBPred, "[tid:%i]: [sn:%i]: Branch "
"predicted true.\n", tid, seq_num);
inst->setPredTarg(pred_PC);
predictedTaken++;
} else {
DPRINTF(InOrderBPred, "[tid:%i]: [sn:%i]: Branch predicted false.\n",
tid, seq_num);
DPRINTF(InOrderBPred, "[tid:%i]: [sn:%i]: Branch "
"predicted false.\n", tid, seq_num);
if (inst->isCondDelaySlot())
{
@ -113,11 +115,12 @@ BranchPredictor::execute(int slot_num)
inst->setBranchPred(predict_taken);
DPRINTF(InOrderBPred, "[tid:%i]: [sn:%i]: Predicted PC is %08p.\n",
tid, seq_num, pred_PC);
DPRINTF(InOrderBPred, "[tid:%i]: [sn:%i]: Predicted PC is "
"%08p.\n", tid, seq_num, pred_PC);
} else {
//DPRINTF(InOrderBPred, "[tid:%i]: Ignoring [sn:%i] because this isn't "
//DPRINTF(InOrderBPred, "[tid:%i]: Ignoring [sn:%i] "
// "because this isn't "
// "a control instruction.\n", tid, seq_num);
}
}
@ -130,10 +133,12 @@ BranchPredictor::execute(int slot_num)
{
if (inst->seqNum > cpu->squashSeqNum[tid] &&
curTick == cpu->lastSquashCycle[tid]) {
DPRINTF(InOrderStage, "[tid:%u]: [sn:%i]: squashed, skipping branch predictor update \n",
DPRINTF(InOrderStage, "[tid:%u]: [sn:%i]: squashed, "
"skipping branch predictor update \n",
tid, inst->seqNum);
} else {
DPRINTF(InOrderBPred, "[tid:%i]: [sn:%i]: Updating Branch Predictor.\n",
DPRINTF(InOrderBPred, "[tid:%i]: [sn:%i]: Updating "
"Branch Predictor.\n",
tid, seq_num);


@ -140,8 +140,8 @@ CacheUnit::getSlot(DynInstPtr inst)
// For a Split-Load, the instruction would have processed once already
// causing the address to be unset.
if (!inst->validMemAddr() && !inst->splitInst) {
panic("[tid:%i][sn:%i] Mem. Addr. must be set before requesting cache access\n",
inst->readTid(), inst->seqNum);
panic("[tid:%i][sn:%i] Mem. Addr. must be set before requesting "
"cache access\n", inst->readTid(), inst->seqNum);
}
Addr req_addr = inst->getMemAddr();
@ -212,14 +212,15 @@ CacheUnit::removeAddrDependency(DynInstPtr inst)
inst->unsetMemAddr();
// Erase from Address List
vector<Addr>::iterator vect_it = find(addrList[tid].begin(), addrList[tid].end(),
vector<Addr>::iterator vect_it = find(addrList[tid].begin(),
addrList[tid].end(),
mem_addr);
assert(vect_it != addrList[tid].end() || inst->splitInst);
if (vect_it != addrList[tid].end()) {
DPRINTF(AddrDep,
"[tid:%i]: [sn:%i] Address %08p removed from dependency list\n",
inst->readTid(), inst->seqNum, (*vect_it));
"[tid:%i]: [sn:%i] Address %08p removed from dependency "
"list\n", inst->readTid(), inst->seqNum, (*vect_it));
addrList[tid].erase(vect_it);
@ -237,7 +238,8 @@ CacheUnit::findRequest(DynInstPtr inst)
map<int, ResReqPtr>::iterator map_end = reqMap.end();
while (map_it != map_end) {
CacheRequest* cache_req = dynamic_cast<CacheRequest*>((*map_it).second);
CacheRequest* cache_req =
dynamic_cast<CacheRequest*>((*map_it).second);
assert(cache_req);
if (cache_req &&
@ -258,7 +260,8 @@ CacheUnit::findSplitRequest(DynInstPtr inst, int idx)
map<int, ResReqPtr>::iterator map_end = reqMap.end();
while (map_it != map_end) {
CacheRequest* cache_req = dynamic_cast<CacheRequest*>((*map_it).second);
CacheRequest* cache_req =
dynamic_cast<CacheRequest*>((*map_it).second);
assert(cache_req);
if (cache_req &&
@ -452,8 +455,9 @@ CacheUnit::read(DynInstPtr inst, Addr addr, T &data, unsigned flags)
cache_req->splitAccess = true;
cache_req->split2ndAccess = true;
DPRINTF(InOrderCachePort, "[sn:%i] Split Read Access (2 of 2) for (%#x, %#x).\n", inst->seqNum,
inst->getMemAddr(), inst->split2ndAddr);
DPRINTF(InOrderCachePort, "[sn:%i] Split Read Access (2 of 2) for "
"(%#x, %#x).\n", inst->seqNum, inst->getMemAddr(),
inst->split2ndAddr);
}
@ -463,8 +467,8 @@ CacheUnit::read(DynInstPtr inst, Addr addr, T &data, unsigned flags)
if (secondAddr > addr && !inst->split2ndAccess) {
DPRINTF(InOrderCachePort, "%i: sn[%i] Split Read Access (1 of 2) for (%#x, %#x).\n", curTick, inst->seqNum,
addr, secondAddr);
DPRINTF(InOrderCachePort, "%i: sn[%i] Split Read Access (1 of 2) for "
"(%#x, %#x).\n", curTick, inst->seqNum, addr, secondAddr);
// Save All "Total" Split Information
// ==============================
@ -479,23 +483,26 @@ CacheUnit::read(DynInstPtr inst, Addr addr, T &data, unsigned flags)
int stage_pri = ThePipeline::getNextPriority(inst, stage_num);
inst->resSched.push(new ScheduleEntry(stage_num,
stage_pri,
cpu->resPool->getResIdx(DCache),
CacheUnit::InitSecondSplitRead,
1)
);
int isplit_cmd = CacheUnit::InitSecondSplitRead;
inst->resSched.push(new
ScheduleEntry(stage_num,
stage_pri,
cpu->resPool->getResIdx(DCache),
isplit_cmd,
1));
inst->resSched.push(new ScheduleEntry(stage_num + 1,
1/*stage_pri*/,
cpu->resPool->getResIdx(DCache),
CacheUnit::CompleteSecondSplitRead,
1)
);
int csplit_cmd = CacheUnit::CompleteSecondSplitRead;
inst->resSched.push(new
ScheduleEntry(stage_num + 1,
1/*stage_pri*/,
cpu->resPool->getResIdx(DCache),
csplit_cmd,
1));
inst->splitInstSked = true;
} else {
DPRINTF(InOrderCachePort, "[tid:%i] [sn:%i] Retrying Split Read Access (1 of 2) for (%#x, %#x).\n",
inst->readTid(), inst->seqNum, addr, secondAddr);
DPRINTF(InOrderCachePort, "[tid:%i] [sn:%i] Retrying Split Read "
"Access (1 of 2) for (%#x, %#x).\n", inst->readTid(),
inst->seqNum, addr, secondAddr);
}
// Split Information for First Access
@ -555,8 +562,9 @@ CacheUnit::write(DynInstPtr inst, T data, Addr addr, unsigned flags,
cache_req->splitAccess = true;
cache_req->split2ndAccess = true;
DPRINTF(InOrderCachePort, "[sn:%i] Split Write Access (2 of 2) for (%#x, %#x).\n", inst->seqNum,
inst->getMemAddr(), inst->split2ndAddr);
DPRINTF(InOrderCachePort, "[sn:%i] Split Write Access (2 of 2) for "
"(%#x, %#x).\n", inst->seqNum, inst->getMemAddr(),
inst->split2ndAddr);
}
//The address of the second part of this access if it needs to be split
@ -565,8 +573,8 @@ CacheUnit::write(DynInstPtr inst, T data, Addr addr, unsigned flags,
if (secondAddr > addr && !inst->split2ndAccess) {
DPRINTF(InOrderCachePort, "[sn:%i] Split Write Access (1 of 2) for (%#x, %#x).\n", inst->seqNum,
addr, secondAddr);
DPRINTF(InOrderCachePort, "[sn:%i] Split Write Access (1 of 2) for "
"(%#x, %#x).\n", inst->seqNum, addr, secondAddr);
// Save All "Total" Split Information
// ==============================
@ -580,22 +588,25 @@ CacheUnit::write(DynInstPtr inst, T data, Addr addr, unsigned flags,
int stage_pri = ThePipeline::getNextPriority(inst, stage_num);
inst->resSched.push(new ScheduleEntry(stage_num,
stage_pri,
cpu->resPool->getResIdx(DCache),
CacheUnit::InitSecondSplitWrite,
1)
);
int isplit_cmd = CacheUnit::InitSecondSplitWrite;
inst->resSched.push(new
ScheduleEntry(stage_num,
stage_pri,
cpu->resPool->getResIdx(DCache),
isplit_cmd,
1));
inst->resSched.push(new ScheduleEntry(stage_num + 1,
1/*stage_pri*/,
cpu->resPool->getResIdx(DCache),
CacheUnit::CompleteSecondSplitWrite,
1)
);
int csplit_cmd = CacheUnit::CompleteSecondSplitWrite;
inst->resSched.push(new
ScheduleEntry(stage_num + 1,
1/*stage_pri*/,
cpu->resPool->getResIdx(DCache),
csplit_cmd,
1));
inst->splitInstSked = true;
} else {
DPRINTF(InOrderCachePort, "[tid:%i] sn:%i] Retrying Split Read Access (1 of 2) for (%#x, %#x).\n",
DPRINTF(InOrderCachePort, "[tid:%i] sn:%i] Retrying Split Read "
"Access (1 of 2) for (%#x, %#x).\n",
inst->readTid(), inst->seqNum, addr, secondAddr);
}
@ -687,8 +698,9 @@ CacheUnit::execute(int slot_num)
case InitiateWriteData:
DPRINTF(InOrderCachePort,
"[tid:%u]: [sn:%i] Initiating data %s access to %s for addr. %08p\n",
tid, inst->seqNum, acc_type, name(), cache_req->inst->getMemAddr());
"[tid:%u]: [sn:%i] Initiating data %s access to %s for "
"addr. %08p\n", tid, inst->seqNum, acc_type, name(),
cache_req->inst->getMemAddr());
inst->setCurResSlot(slot_num);
@ -702,21 +714,25 @@ CacheUnit::execute(int slot_num)
case InitSecondSplitRead:
DPRINTF(InOrderCachePort,
"[tid:%u]: [sn:%i] Initiating split data read access to %s for addr. %08p\n",
tid, inst->seqNum, name(), cache_req->inst->split2ndAddr);
"[tid:%u]: [sn:%i] Initiating split data read access to %s "
"for addr. %08p\n", tid, inst->seqNum, name(),
cache_req->inst->split2ndAddr);
inst->split2ndAccess = true;
assert(inst->split2ndAddr != 0);
read(inst, inst->split2ndAddr, inst->split2ndData, inst->split2ndFlags);
read(inst, inst->split2ndAddr, inst->split2ndData,
inst->split2ndFlags);
break;
case InitSecondSplitWrite:
DPRINTF(InOrderCachePort,
"[tid:%u]: [sn:%i] Initiating split data write access to %s for addr. %08p\n",
tid, inst->seqNum, name(), cache_req->inst->getMemAddr());
"[tid:%u]: [sn:%i] Initiating split data write access to %s "
"for addr. %08p\n", tid, inst->seqNum, name(),
cache_req->inst->getMemAddr());
inst->split2ndAccess = true;
assert(inst->split2ndAddr != 0);
write(inst, inst->split2ndAddr, inst->split2ndData, inst->split2ndFlags, NULL);
write(inst, inst->split2ndAddr, inst->split2ndData,
inst->split2ndFlags, NULL);
break;
@ -773,8 +789,8 @@ CacheUnit::execute(int slot_num)
case CompleteSecondSplitRead:
DPRINTF(InOrderCachePort,
"[tid:%i]: [sn:%i]: Trying to Complete Split Data Read Access\n",
tid, inst->seqNum);
"[tid:%i]: [sn:%i]: Trying to Complete Split Data Read "
"Access\n", tid, inst->seqNum);
if (cache_req->isMemAccComplete() ||
inst->isDataPrefetch() ||
@ -792,8 +808,8 @@ CacheUnit::execute(int slot_num)
case CompleteSecondSplitWrite:
DPRINTF(InOrderCachePort,
"[tid:%i]: [sn:%i]: Trying to Complete Split Data Write Access\n",
tid, inst->seqNum);
"[tid:%i]: [sn:%i]: Trying to Complete Split Data Write "
"Access\n", tid, inst->seqNum);
if (cache_req->isMemAccComplete() ||
inst->isDataPrefetch() ||
@ -853,7 +869,8 @@ CacheUnit::writeHint(DynInstPtr inst)
// @TODO: Split into doCacheRead() and doCacheWrite()
Fault
CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res, CacheReqPtr split_req)
CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res,
CacheReqPtr split_req)
{
Fault fault = NoFault;
#if TRACING_ON
@ -882,8 +899,10 @@ CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res, CacheReqPtr split
: MemCmd::WriteReq);
}
cache_req->dataPkt = new CacheReqPacket(cache_req, cache_req->pktCmd,
Packet::Broadcast, cache_req->instIdx);
cache_req->dataPkt = new CacheReqPacket(cache_req,
cache_req->pktCmd,
Packet::Broadcast,
cache_req->instIdx);
if (cache_req->dataPkt->isRead()) {
cache_req->dataPkt->dataStatic(cache_req->reqData);
@ -987,11 +1006,10 @@ CacheUnit::processCacheCompletion(PacketPtr pkt)
findSplitRequest(cache_pkt->cacheReq->getInst(), cache_pkt->instIdx));
if (!cache_req) {
warn(
"[tid:%u]: [sn:%i]: Can't find slot for cache access to addr. %08p\n",
cache_pkt->cacheReq->getInst()->readTid(),
cache_pkt->cacheReq->getInst()->seqNum,
cache_pkt->cacheReq->getInst()->getMemAddr());
panic("[tid:%u]: [sn:%i]: Can't find slot for cache access to "
"addr. %08p\n", cache_pkt->cacheReq->getInst()->readTid(),
cache_pkt->cacheReq->getInst()->seqNum,
cache_pkt->cacheReq->getInst()->getMemAddr());
}
assert(cache_req);
@ -1101,7 +1119,8 @@ CacheUnit::processCacheCompletion(PacketPtr pkt)
if (cache_req->isMemStall() &&
cpu->threadModel == InOrderCPU::SwitchOnCacheMiss) {
DPRINTF(InOrderCachePort, "[tid:%u] Waking up from Cache Miss.\n", tid);
DPRINTF(InOrderCachePort, "[tid:%u] Waking up from Cache Miss.\n",
tid);
cpu->activateContext(tid);
@ -1209,7 +1228,8 @@ CacheUnit::squash(DynInstPtr inst, int stage_num,
req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum);
if (req_ptr->isSquashed()) {
DPRINTF(AddrDep, "Request for [tid:%i] [sn:%i] already squashed, ignoring squash process.\n",
DPRINTF(AddrDep, "Request for [tid:%i] [sn:%i] already "
"squashed, ignoring squash process.\n",
req_ptr->getInst()->readTid(),
req_ptr->getInst()->seqNum);
map_it++;
@ -1242,16 +1262,19 @@ CacheUnit::squash(DynInstPtr inst, int stage_num,
slot_remove_list.push_back(req_ptr->getSlot());
} else {
DPRINTF(InOrderCachePort,
"[tid:%i] Request from [sn:%i] squashed, but still pending completion.\n",
"[tid:%i] Request from [sn:%i] squashed, but still "
"pending completion.\n",
req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum);
DPRINTF(RefCount,
"[tid:%i] Request from [sn:%i] squashed (split:%i), but still pending completion.\n",
"[tid:%i] Request from [sn:%i] squashed (split:%i), but "
"still pending completion.\n",
req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum,
req_ptr->getInst()->splitInst);
}
if (req_ptr->getInst()->validMemAddr()) {
DPRINTF(AddrDep, "Squash of [tid:%i] [sn:%i], attempting to remove addr. %08p dependencies.\n",
DPRINTF(AddrDep, "Squash of [tid:%i] [sn:%i], attempting to "
"remove addr. %08p dependencies.\n",
req_ptr->getInst()->readTid(),
req_ptr->getInst()->seqNum,
req_ptr->getInst()->getMemAddr());


@ -174,7 +174,8 @@ class CacheUnit : public Resource
/** Read/Write on behalf of an instruction.
* curResSlot needs to be a valid value in instruction.
*/
Fault doCacheAccess(DynInstPtr inst, uint64_t *write_result=NULL, CacheReqPtr split_req=NULL);
Fault doCacheAccess(DynInstPtr inst, uint64_t *write_result=NULL,
CacheReqPtr split_req=NULL);
void prefetch(DynInstPtr inst);


@ -37,7 +37,8 @@ using namespace ThePipeline;
using namespace std;
DecodeUnit::DecodeUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
int res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu)
{
for (ThreadID tid = 0; tid < MaxThreads; tid++) {


@ -39,7 +39,8 @@ using namespace std;
using namespace ThePipeline;
ExecutionUnit::ExecutionUnit(string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
int res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu)
{ }
@ -127,16 +128,19 @@ ExecutionUnit::execute(int slot_num)
inst->bdelaySeqNum = seq_num;
inst->setPredTarg(inst->nextPC);
DPRINTF(InOrderExecute, "[tid:%i]: Conditional branch inst"
"[sn:%i] PC %#x mispredicted as taken.\n", tid,
DPRINTF(InOrderExecute, "[tid:%i]: Conditional"
" branch inst [sn:%i] PC %#x mis"
"predicted as taken.\n", tid,
seq_num, inst->PC);
} else if (!inst->predTaken() && inst->isCondDelaySlot()) {
} else if (!inst->predTaken() &&
inst->isCondDelaySlot()) {
inst->bdelaySeqNum = seq_num;
inst->setPredTarg(inst->nextPC);
inst->procDelaySlotOnMispred = true;
DPRINTF(InOrderExecute, "[tid:%i]: Conditional branch inst."
"[sn:%i] PC %#x mispredicted as not taken.\n", tid,
DPRINTF(InOrderExecute, "[tid:%i]: Conditional"
" branch inst [sn:%i] PC %#x mis"
"predicted as not taken.\n", tid,
seq_num, inst->PC);
} else {
#if ISA_HAS_DELAY_SLOT
@ -146,15 +150,19 @@ ExecutionUnit::execute(int slot_num)
inst->bdelaySeqNum = seq_num;
inst->setPredTarg(inst->nextPC);
#endif
DPRINTF(InOrderExecute, "[tid:%i]: Misprediction detected at "
"[sn:%i] PC %#x,\n\t squashing after delay slot "
"instruction [sn:%i].\n",
tid, seq_num, inst->PC, inst->bdelaySeqNum);
DPRINTF(InOrderStall, "STALL: [tid:%i]: Branch "
"misprediction at %#x\n", tid, inst->PC);
DPRINTF(InOrderExecute, "[tid:%i]: "
"Misprediction detected at "
"[sn:%i] PC %#x,\n\t squashing after "
"delay slot instruction [sn:%i].\n",
tid, seq_num, inst->PC,
inst->bdelaySeqNum);
DPRINTF(InOrderStall, "STALL: [tid:%i]: Branch"
" misprediction at %#x\n",
tid, inst->PC);
}
DPRINTF(InOrderExecute, "[tid:%i] Redirecting fetch to %#x.\n", tid,
DPRINTF(InOrderExecute, "[tid:%i] Redirecting "
"fetch to %#x.\n", tid,
inst->readPredTarg());
} else if(inst->isIndirectCtrl()){
@ -166,22 +174,25 @@ ExecutionUnit::execute(int slot_num)
inst->bdelaySeqNum = seq_num;
#endif
DPRINTF(InOrderExecute, "[tid:%i] Redirecting fetch to %#x.\n", tid,
DPRINTF(InOrderExecute, "[tid:%i] Redirecting"
" fetch to %#x.\n", tid,
inst->readPredTarg());
} else {
panic("Non-control instruction (%s) mispredicting?!!",
inst->staticInst->getName());
panic("Non-control instruction (%s) mispredict"
"ing?!!", inst->staticInst->getName());
}
DPRINTF(InOrderExecute, "[tid:%i] Squashing will start from stage %i.\n",
tid, stage_num);
DPRINTF(InOrderExecute, "[tid:%i] Squashing will "
"start from stage %i.\n", tid, stage_num);
cpu->pipelineStage[stage_num]->squashDueToBranch(inst, tid);
cpu->pipelineStage[stage_num]->squashDueToBranch(inst,
tid);
inst->squashingStage = stage_num;
// Squash throughout other resources
cpu->resPool->scheduleEvent((InOrderCPU::CPUEventType)ResourcePool::SquashAll,
cpu->resPool->scheduleEvent((InOrderCPU::CPUEventType)
ResourcePool::SquashAll,
inst, 0, 0, tid);
if (inst->predTaken()) {
@ -195,14 +206,17 @@ ExecutionUnit::execute(int slot_num)
tid, inst->seqNum, inst->staticInst->disassemble(inst->PC),
inst->readPC());
}
predictedIncorrect++;
} else {
DPRINTF(InOrderExecute, "[tid:%i]: [sn:%i]: Prediction Correct.\n",
inst->readTid(), seq_num);
DPRINTF(InOrderExecute, "[tid:%i]: [sn:%i]: Prediction"
"Correct.\n", inst->readTid(), seq_num);
predictedCorrect++;
}
exec_req->done();
} else {
warn("inst [sn:%i] had a %s fault", seq_num, fault->name());
warn("inst [sn:%i] had a %s fault",
seq_num, fault->name());
}
} else {
// Regular ALU instruction
@ -212,13 +226,16 @@ ExecutionUnit::execute(int slot_num)
if (fault == NoFault) {
inst->setExecuted();
DPRINTF(InOrderExecute, "[tid:%i]: [sn:%i]: The result of execution is 0x%x.\n",
inst->readTid(), seq_num, (inst->resultType(0) == InOrderDynInst::Float) ?
DPRINTF(InOrderExecute, "[tid:%i]: [sn:%i]: The result "
"of execution is 0x%x.\n", inst->readTid(),
seq_num,
(inst->resultType(0) == InOrderDynInst::Float) ?
inst->readFloatResult(0) : inst->readIntResult(0));
exec_req->done();
} else {
warn("inst [sn:%i] had a %s fault", seq_num, fault->name());
warn("inst [sn:%i] had a %s fault",
seq_num, fault->name());
cpu->trap(fault, tid);
}
}


@ -38,7 +38,8 @@ using namespace TheISA;
using namespace ThePipeline;
FetchSeqUnit::FetchSeqUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
int res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu),
instSize(sizeof(MachInst))
{
@ -95,7 +96,8 @@ FetchSeqUnit::execute(int slot_num)
delaySlotInfo[tid].targetReady = false;
DPRINTF(InOrderFetchSeq, "[tid:%i]: Setting PC to delay slot target\n",tid);
DPRINTF(InOrderFetchSeq, "[tid:%i]: Setting PC to delay "
"slot target\n",tid);
}
inst->setPC(PC[tid]);
@ -110,8 +112,10 @@ FetchSeqUnit::execute(int slot_num)
inst->setMemAddr(PC[tid]);
inst->setSeqNum(cpu->getAndIncrementInstSeq(tid));
DPRINTF(InOrderFetchSeq, "[tid:%i]: Assigning [sn:%i] to PC %08p, NPC %08p, NNPC %08p\n", tid,
inst->seqNum, inst->readPC(), inst->readNextPC(), inst->readNextNPC());
DPRINTF(InOrderFetchSeq, "[tid:%i]: Assigning [sn:%i] to "
"PC %08p, NPC %08p, NNPC %08p\n", tid,
inst->seqNum, inst->readPC(), inst->readNextPC(),
inst->readNextNPC());
if (delaySlotInfo[tid].numInsts > 0) {
--delaySlotInfo[tid].numInsts;
@ -121,8 +125,9 @@ FetchSeqUnit::execute(int slot_num)
delaySlotInfo[tid].targetReady = true;
}
DPRINTF(InOrderFetchSeq, "[tid:%i]: %i delay slot inst(s) left to"
" process.\n", tid, delaySlotInfo[tid].numInsts);
DPRINTF(InOrderFetchSeq, "[tid:%i]: %i delay slot inst(s) "
"left to process.\n", tid,
delaySlotInfo[tid].numInsts);
}
PC[tid] = nextPC[tid];
@ -147,7 +152,8 @@ FetchSeqUnit::execute(int slot_num)
pcBlockStage[tid] = stage_num;
} else if (inst->isCondDelaySlot() && !inst->predTaken()) {
// Not-Taken AND Conditional Control
DPRINTF(InOrderFetchSeq, "[tid:%i]: [sn:%i]: [PC:%08p] Predicted Not-Taken Cond. "
DPRINTF(InOrderFetchSeq, "[tid:%i]: [sn:%i]: [PC:%08p] "
"Predicted Not-Taken Cond. "
"Delay inst. Skipping delay slot and Updating PC to %08p\n",
tid, inst->seqNum, inst->readPC(), inst->readPredTarg());
@ -160,7 +166,8 @@ FetchSeqUnit::execute(int slot_num)
squashAfterInst(inst, stage_num, tid);
} else if (!inst->isCondDelaySlot() && !inst->predTaken()) {
// Not-Taken Control
DPRINTF(InOrderFetchSeq, "[tid:%i]: [sn:%i]: Predicted Not-Taken Control "
DPRINTF(InOrderFetchSeq, "[tid:%i]: [sn:%i]: Predicted "
"Not-Taken Control "
"inst. updating PC to %08p\n", tid, inst->seqNum,
inst->readNextPC());
#if ISA_HAS_DELAY_SLOT
@ -177,8 +184,9 @@ FetchSeqUnit::execute(int slot_num)
delaySlotInfo[tid].targetReady = false;
delaySlotInfo[tid].targetAddr = inst->readPredTarg();
DPRINTF(InOrderFetchSeq, "[tid:%i]: [sn:%i] Updating delay slot target "
"to PC %08p\n", tid, inst->seqNum, inst->readPredTarg());
DPRINTF(InOrderFetchSeq, "[tid:%i]: [sn:%i] Updating delay"
" slot target to PC %08p\n", tid, inst->seqNum,
inst->readPredTarg());
inst->bdelaySeqNum = seq_num + 1;
#else
inst->bdelaySeqNum = seq_num;
@ -187,15 +195,17 @@ FetchSeqUnit::execute(int slot_num)
inst->squashingStage = stage_num;
DPRINTF(InOrderFetchSeq, "[tid:%i] Setting up squash to start from stage %i, after [sn:%i].\n",
DPRINTF(InOrderFetchSeq, "[tid:%i] Setting up squash to "
"start from stage %i, after [sn:%i].\n",
tid, stage_num, inst->bdelaySeqNum);
// Do Squashing
squashAfterInst(inst, stage_num, tid);
}
} else {
DPRINTF(InOrderFetchSeq, "[tid:%i]: [sn:%i]: Ignoring branch target update "
"since then is not a control instruction.\n", tid, inst->seqNum);
DPRINTF(InOrderFetchSeq, "[tid:%i]: [sn:%i]: Ignoring branch "
"target update since then is not a control "
"instruction.\n", tid, inst->seqNum);
}
fs_req->done();
@ -213,8 +223,8 @@ FetchSeqUnit::squashAfterInst(DynInstPtr inst, int stage_num, ThreadID tid)
// Squash In Pipeline Stage
cpu->pipelineStage[stage_num]->squashDueToBranch(inst, tid);
// Squash inside current resource, so if there needs to be fetching on same cycle
// the fetch information will be correct.
// Squash inside current resource, so if there needs to be fetching on
// same cycle the fetch information will be correct.
// squash(inst, stage_num, inst->bdelaySeqNum, tid);
// Schedule Squash Through-out Resource Pool
@ -224,8 +234,8 @@ void
FetchSeqUnit::squash(DynInstPtr inst, int squash_stage,
InstSeqNum squash_seq_num, ThreadID tid)
{
DPRINTF(InOrderFetchSeq, "[tid:%i]: Updating due to squash from stage %i.\n",
tid, squash_stage);
DPRINTF(InOrderFetchSeq, "[tid:%i]: Updating due to squash from stage %i."
"\n", tid, squash_stage);
InstSeqNum done_seq_num = inst->bdelaySeqNum;
@ -236,8 +246,8 @@ FetchSeqUnit::squash(DynInstPtr inst, int squash_stage,
if (squashSeqNum[tid] <= done_seq_num &&
lastSquashCycle[tid] == curTick) {
DPRINTF(InOrderFetchSeq, "[tid:%i]: Ignoring squash from stage %i, since"
"there is an outstanding squash that is older.\n",
DPRINTF(InOrderFetchSeq, "[tid:%i]: Ignoring squash from stage %i, "
"since there is an outstanding squash that is older.\n",
tid, squash_stage);
} else {
squashSeqNum[tid] = done_seq_num;
@ -265,7 +275,8 @@ FetchSeqUnit::squash(DynInstPtr inst, int squash_stage,
delaySlotInfo[tid].numInsts = 1;
delaySlotInfo[tid].targetReady = false;
delaySlotInfo[tid].targetAddr = (inst->procDelaySlotOnMispred) ? inst->branchTarget() : new_PC;
delaySlotInfo[tid].targetAddr = (inst->procDelaySlotOnMispred) ?
inst->branchTarget() : new_PC;
// Reset PC to Delay Slot Instruction
if (inst->procDelaySlotOnMispred) {
@ -278,7 +289,8 @@ FetchSeqUnit::squash(DynInstPtr inst, int squash_stage,
// Unblock Any Stages Waiting for this information to be updated ...
if (!pcValid[tid]) {
cpu->pipelineStage[pcBlockStage[tid]]->toPrevStages->stageUnblock[pcBlockStage[tid]][tid] = true;
cpu->pipelineStage[pcBlockStage[tid]]->
toPrevStages->stageUnblock[pcBlockStage[tid]][tid] = true;
}
pcValid[tid] = true;
@ -301,8 +313,9 @@ FetchSeqUnit::FetchSeqEvent::process()
fs_res->PC[i] = fs_res->cpu->readPC(i);
fs_res->nextPC[i] = fs_res->cpu->readNextPC(i);
fs_res->nextNPC[i] = fs_res->cpu->readNextNPC(i);
DPRINTF(InOrderFetchSeq, "[tid:%i]: Setting PC:%08p NPC:%08p NNPC:%08p.\n",
fs_res->PC[i], fs_res->nextPC[i], fs_res->nextNPC[i]);
DPRINTF(InOrderFetchSeq, "[tid:%i]: Setting PC:%08p NPC:%08p "
"NNPC:%08p.\n", fs_res->PC[i], fs_res->nextPC[i],
fs_res->nextNPC[i]);
fs_res->pcValid[i] = true;
}
@ -322,8 +335,8 @@ FetchSeqUnit::activateThread(ThreadID tid)
cpu->fetchPriorityList.push_back(tid);
DPRINTF(InOrderFetchSeq, "[tid:%i]: Reading PC:%08p NPC:%08p NNPC:%08p.\n",
tid, PC[tid], nextPC[tid], nextNPC[tid]);
DPRINTF(InOrderFetchSeq, "[tid:%i]: Reading PC:%08p NPC:%08p "
"NNPC:%08p.\n", tid, PC[tid], nextPC[tid], nextNPC[tid]);
}
void


@ -34,7 +34,8 @@
using namespace ThePipeline;
GraduationUnit::GraduationUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
int res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu),
lastCycleGrad(0), numCycleGrad(0)
@ -60,9 +61,9 @@ GraduationUnit::execute(int slot_num)
{
case GraduateInst:
{
// @TODO: Instructions should never really get to this point since this should be handled
// through the request interface. Check to make sure this happens and delete this
// code.
// @TODO: Instructions should never really get to this point since
// this should be handled through the request interface. Check to
// make sure this happens and delete this code.
if (lastCycleGrad != curTick) {
lastCycleGrad = curTick;
numCycleGrad = 0;
@ -79,8 +80,8 @@ GraduationUnit::execute(int slot_num)
"[tid:%i] Graduating instruction [sn:%i].\n",
tid, inst->seqNum);
// Release Non-Speculative "Block" on instructions that could not execute
// because there was a non-speculative inst. active.
// Release Non-Speculative "Block" on instructions that could not
// execute because there was a non-speculative inst. active.
// @TODO: Fix this functionality. Probably too conservative.
if (inst->isNonSpeculative()) {
*nonSpecInstActive[tid] = false;


@ -51,7 +51,8 @@ class GraduationUnit : public Resource {
public:
GraduationUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
int res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params);
virtual ~GraduationUnit() {}
virtual void execute(int slot_num);


@ -43,7 +43,8 @@ using namespace TheISA;
using namespace ThePipeline;
InstBuffer::InstBuffer(string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
int res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu)
{ }
@ -77,37 +78,49 @@ InstBuffer::execute(int slot_idx)
bool do_bypass = true;
if (!instList.empty()) {
DPRINTF(InOrderInstBuffer, "[sn:%i] cannot bypass stage %i because buffer isn't empty.\n",
DPRINTF(InOrderInstBuffer, "[sn:%i] cannot bypass stage %i "
"because buffer isn't empty.\n",
inst->seqNum, next_stage);
do_bypass = false;
} else if(cpu->pipelineStage[bypass_stage]->isBlocked(tid)) {
DPRINTF(InOrderInstBuffer, "[sn:%i] cannot bypass stage %i because stage %i is blocking.\n",
DPRINTF(InOrderInstBuffer, "[sn:%i] cannot bypass stage %i "
"because stage %i is blocking.\n",
inst->seqNum, next_stage);
do_bypass = false;
} else if(cpu->pipelineStage[bypass_stage]->stageBufferAvail() <= 0) {
DPRINTF(InOrderInstBuffer, "[sn:%i] cannot bypass stage %i because there is no room in "
"stage %i incoming stage buffer.\n", inst->seqNum, next_stage);
} else if(cpu->pipelineStage[bypass_stage]->
stageBufferAvail() <= 0) {
DPRINTF(InOrderInstBuffer, "[sn:%i] cannot bypass stage %i "
"because there is no room in stage %i incoming stage "
"buffer.\n", inst->seqNum, next_stage);
do_bypass = false;
}
if (!do_bypass) { // SCHEDULE USAGE OF BUFFER
DPRINTF(InOrderInstBuffer, "Scheduling [sn:%i] for buffer insertion in stage %i\n",
DPRINTF(InOrderInstBuffer, "Scheduling [sn:%i] for buffer "
"insertion in stage %i\n",
inst->seqNum, next_stage);
// Add to schedule: Insert into buffer in next stage
int stage_pri = ThePipeline::getNextPriority(inst, next_stage);
int stage_pri = ThePipeline::getNextPriority(inst,
next_stage);
inst->resSched.push(new ScheduleEntry(next_stage, stage_pri, id,
InstBuffer::InsertInst));
inst->resSched.push(new ScheduleEntry(next_stage,
stage_pri,
id,
InstBuffer::InsertInst));
// Add to schedule: Remove from buffer in next next (bypass) stage
// Add to schedule: Remove from buffer in next next (bypass)
// stage
stage_pri = ThePipeline::getNextPriority(inst, bypass_stage);
inst->resSched.push(new ScheduleEntry(bypass_stage, stage_pri, id,
InstBuffer::RemoveInst));
inst->resSched.push(new ScheduleEntry(bypass_stage,
stage_pri,
id,
InstBuffer::RemoveInst));
} else { // BYPASS BUFFER & NEXT STAGE
DPRINTF(InOrderInstBuffer, "Setting [sn:%i] to bypass stage %i and enter stage %i.\n",
inst->seqNum, next_stage, bypass_stage);
DPRINTF(InOrderInstBuffer, "Setting [sn:%i] to bypass stage "
"%i and enter stage %i.\n", inst->seqNum, next_stage,
bypass_stage);
inst->setNextStage(bypass_stage);
instsBypassed++;
}
@ -121,20 +134,21 @@ InstBuffer::execute(int slot_idx)
bool inserted = false;
if (instList.size() < width) {
DPRINTF(InOrderInstBuffer, "[tid:%i]: Inserting [sn:%i] into buffer.\n",
tid, inst->seqNum);
DPRINTF(InOrderInstBuffer, "[tid:%i]: Inserting [sn:%i] into "
"buffer.\n", tid, inst->seqNum);
insert(inst);
inserted = true;
} else {
DPRINTF(InOrderInstBuffer, "[tid:%i]: Denying [sn:%i] request because "
"buffer is full.\n", tid, inst->seqNum);
DPRINTF(InOrderInstBuffer, "[tid:%i]: Denying [sn:%i] request "
"because buffer is full.\n", tid, inst->seqNum);
std::list<DynInstPtr>::iterator list_it = instList.begin();
std::list<DynInstPtr>::iterator list_end = instList.end();
while (list_it != list_end) {
DPRINTF(Resource,"Serving [tid:%i] [sn:%i].\n", (*list_it)->readTid(), (*list_it)->seqNum);
DPRINTF(Resource,"Serving [tid:%i] [sn:%i].\n",
(*list_it)->readTid(), (*list_it)->seqNum);
list_it++;
}
}
@ -145,8 +159,8 @@ InstBuffer::execute(int slot_idx)
case RemoveInst:
{
DPRINTF(InOrderInstBuffer, "[tid:%i]: Removing [sn:%i] from buffer.\n",
tid, inst->seqNum);
DPRINTF(InOrderInstBuffer, "[tid:%i]: Removing [sn:%i] from "
"buffer.\n", tid, inst->seqNum);
remove(inst);
ib_req->done();
}
@ -156,7 +170,8 @@ InstBuffer::execute(int slot_idx)
fatal("Unrecognized command to %s", resName);
}
DPRINTF(InOrderInstBuffer, "Buffer now contains %i insts.\n", instList.size());
DPRINTF(InOrderInstBuffer, "Buffer now contains %i insts.\n",
instList.size());
}
void
@ -213,8 +228,8 @@ InstBuffer::squash(DynInstPtr inst, int stage_num,
// Removed Instructions from InstList & Clear Remove List
while (!remove_list.empty()) {
DPRINTF(InOrderInstBuffer, "[tid:%i]: Removing squashed [sn:%i] from buffer.\n",
tid, (*remove_list.front())->seqNum);
DPRINTF(InOrderInstBuffer, "[tid:%i]: Removing squashed [sn:%i] from "
"buffer.\n", tid, (*remove_list.front())->seqNum);
instList.erase(remove_list.front());
remove_list.pop();
}


@ -56,7 +56,7 @@ class InstBuffer : public Resource {
public:
InstBuffer(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
virtual ~InstBuffer() {}
virtual void regStats();


@ -29,8 +29,8 @@
*
*/
#ifndef __CPU_INORDER_GRAD_UNIT_HH__
#define __CPU_INORDER_GRAD_UNIT_HH__
#ifndef __CPU_INORDER_MEM_DEP_UNIT_HH__
#define __CPU_INORDER_MEM_DEP_UNIT_HH__
#include <vector>
#include <list>


@ -40,13 +40,19 @@ using namespace std;
using namespace ThePipeline;
MultDivUnit::MultDivUnit(string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
int res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu),
multRepeatRate(params->multRepeatRate), multLatency(params->multLatency),
div8RepeatRate(params->div8RepeatRate), div8Latency(params->div8Latency),
div16RepeatRate(params->div16RepeatRate), div16Latency(params->div16Latency),
div24RepeatRate(params->div24RepeatRate), div24Latency(params->div24Latency),
div32RepeatRate(params->div32RepeatRate), div32Latency(params->div32Latency),
multRepeatRate(params->multRepeatRate),
multLatency(params->multLatency),
div8RepeatRate(params->div8RepeatRate),
div8Latency(params->div8Latency),
div16RepeatRate(params->div16RepeatRate),
div16Latency(params->div16Latency),
div24RepeatRate(params->div24RepeatRate),
div24Latency(params->div24Latency),
div32RepeatRate(params->div32RepeatRate),
div32Latency(params->div32Latency),
lastMDUCycle(0), lastOpType(No_OpClass)
{ }
@ -76,8 +82,8 @@ MultDivUnit::init()
int
MultDivUnit::findSlot(DynInstPtr inst)
{
DPRINTF(InOrderMDU, "Finding slot for inst:%i\n | slots-free:%i | slots-used:%i\n",
inst->seqNum, slotsAvail(), slotsInUse());
DPRINTF(InOrderMDU, "Finding slot for inst:%i\n | slots-free:%i | "
"slots-used:%i\n", inst->seqNum, slotsAvail(), slotsInUse());
return Resource::findSlot(inst);
}
@ -85,8 +91,9 @@ MultDivUnit::findSlot(DynInstPtr inst)
void
MultDivUnit::freeSlot(int slot_idx)
{
DPRINTF(InOrderMDU, "Freeing slot for inst:%i\n | slots-free:%i | slots-used:%i\n",
reqMap[slot_idx]->getInst()->seqNum, slotsAvail(), slotsInUse());
DPRINTF(InOrderMDU, "Freeing slot for inst:%i\n | slots-free:%i | "
"slots-used:%i\n", reqMap[slot_idx]->getInst()->seqNum,
slotsAvail(), slotsInUse());
Resource::freeSlot(slot_idx);
}
@ -107,8 +114,8 @@ MultDivUnit::requestAgain(DynInstPtr inst, bool &service_request)
// If different, then update command in the request
mult_div_req->cmd = inst->resSched.top()->cmd;
DPRINTF(InOrderMDU,
"[tid:%i]: [sn:%i]: Updating the command for this instruction\n",
inst->readTid(), inst->seqNum);
"[tid:%i]: [sn:%i]: Updating the command for this "
"instruction\n", inst->readTid(), inst->seqNum);
} else {
// If same command, just check to see if memory access was completed
// but dont try to re-execute
@ -157,8 +164,8 @@ MultDivUnit::getSlot(DynInstPtr inst)
}
if (lastMDUCycle + repeat_rate > curTick) {
DPRINTF(InOrderMDU, "MDU not ready to process another inst. until %i, denying request.\n",
lastMDUCycle + repeat_rate);
DPRINTF(InOrderMDU, "MDU not ready to process another inst. until %i, "
"denying request.\n", lastMDUCycle + repeat_rate);
return -1;
} else {
int rval = Resource::getSlot(inst);


@ -56,7 +56,8 @@ class MultDivUnit : public Resource {
public:
MultDivUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
int res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params);
public:
/** Override default Resource getSlot(). Will only getSlot if


@ -43,7 +43,8 @@ using namespace TheISA;
using namespace ThePipeline;
UseDefUnit::UseDefUnit(string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
int res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu),
maxSeqNum((InstSeqNum)-1)
{


@ -54,7 +54,7 @@ class UseDefUnit : public Resource {
public:
UseDefUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
virtual ~UseDefUnit() {}
virtual ResourceRequest* getRequest(DynInstPtr _inst, int stage_num,


@ -242,21 +242,24 @@ InOrderThreadContext::setRegOtherThread(int misc_reg, const MiscReg &val,
void
InOrderThreadContext::setPC(uint64_t val)
{
DPRINTF(InOrderCPU, "[tid:%i] Setting PC to %08p\n", thread->readTid(), val);
DPRINTF(InOrderCPU, "[tid:%i] Setting PC to %08p\n",
thread->readTid(), val);
cpu->setPC(val, thread->readTid());
}
void
InOrderThreadContext::setNextPC(uint64_t val)
{
DPRINTF(InOrderCPU, "[tid:%i] Setting NPC to %08p\n", thread->readTid(), val);
DPRINTF(InOrderCPU, "[tid:%i] Setting NPC to %08p\n",
thread->readTid(), val);
cpu->setNextPC(val, thread->readTid());
}
void
InOrderThreadContext::setNextNPC(uint64_t val)
{
DPRINTF(InOrderCPU, "[tid:%i] Setting NNPC to %08p\n", thread->readTid(), val);
DPRINTF(InOrderCPU, "[tid:%i] Setting NNPC to %08p\n",
thread->readTid(), val);
cpu->setNextNPC(val, thread->readTid());
}


@ -114,7 +114,8 @@ class InOrderThreadContext : public ThreadContext
virtual VirtualPort *getVirtPort();
virtual void connectMemPorts(ThreadContext *tc) { thread->connectMemPorts(tc); }
virtual void connectMemPorts(ThreadContext *tc)
{ thread->connectMemPorts(tc); }
/** Dumps the function profiling information.
* @todo: Implement.
@ -203,7 +204,8 @@ class InOrderThreadContext : public ThreadContext
virtual void setFloatRegBits(int reg_idx, FloatRegBits val);
virtual void setRegOtherThread(int misc_reg, const MiscReg &val,
virtual void setRegOtherThread(int misc_reg,
const MiscReg &val,
ThreadID tid);
/** Reads this thread's PC. */