Merge ktlim@zamp:./local/clean/tmp/test-regress

into  zamp.eecs.umich.edu:/z/ktlim2/clean/newmem-busfix

--HG--
extra : convert_revision : b98236507bb8996ce605b48b5a5a6a7aac297dc5
commit 3052632b68
Kevin Lim, 2006-11-12 21:57:58 -05:00

15 changed files with 108 additions and 102 deletions

@@ -85,10 +85,6 @@ def run(options, root, testsys, cpu_class):
             if not m5.build_env['FULL_SYSTEM']:
                 switch_cpus[i].workload = testsys.cpu[i].workload
             switch_cpus[i].clock = testsys.cpu[0].clock
-            if options.caches:
-                switch_cpus[i].addPrivateSplitL1Caches(L1Cache(size = '32kB'),
-                                                       L1Cache(size = '64kB'))
-                switch_cpus[i].connectMemPorts(testsys.membus)

         root.switch_cpus = switch_cpus
         switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]
@@ -108,19 +104,15 @@ def run(options, root, testsys, cpu_class):
             switch_cpus[i].clock = testsys.cpu[0].clock
             switch_cpus_1[i].clock = testsys.cpu[0].clock
-            if options.caches:
-                switch_cpus[i].addPrivateSplitL1Caches(L1Cache(size = '32kB'),
-                                                       L1Cache(size = '64kB'))
-                switch_cpus[i].connectMemPorts(testsys.membus)
-            else:
+            if not options.caches:
                 # O3 CPU must have a cache to work.
                 switch_cpus_1[i].addPrivateSplitL1Caches(L1Cache(size = '32kB'),
                                                          L1Cache(size = '64kB'))
                 switch_cpus_1[i].connectMemPorts(testsys.membus)

-        root.switch_cpus = switch_cpus
-        root.switch_cpus_1 = switch_cpus_1
+        testsys.switch_cpus = switch_cpus
+        testsys.switch_cpus_1 = switch_cpus_1
         switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]
         switch_cpu_list1 = [(switch_cpus[i], switch_cpus_1[i]) for i in xrange(np)]
@@ -219,5 +211,5 @@ def run(options, root, testsys, cpu_class):
     if exit_cause == '':
         exit_cause = exit_event.getCause()
-    print 'Exiting @ cycle', m5.curTick(), 'because ', exit_cause
+    print 'Exiting @ cycle %i because %s' % (m5.curTick(), exit_cause)

@@ -95,7 +95,7 @@ test_sys = makeLinuxAlphaSystem(test_mem_mode, bm[0])
 np = options.num_cpus
 test_sys.cpu = [TestCPUClass(cpu_id=i) for i in xrange(np)]
 for i in xrange(np):
-    if options.caches and not options.standard_switch and not FutureClass:
+    if options.caches:
         test_sys.cpu[i].addPrivateSplitL1Caches(L1Cache(size = '32kB'),
                                                 L1Cache(size = '64kB'))
         test_sys.cpu[i].connectMemPorts(test_sys.membus)

@@ -101,7 +101,7 @@ system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
 system.physmem.port = system.membus.port
 for i in xrange(np):
-    if options.caches and not options.standard_switch and not FutureClass:
+    if options.caches:
         system.cpu[i].addPrivateSplitL1Caches(L1Cache(size = '32kB'),
                                               L1Cache(size = '64kB'))
         system.cpu[i].connectMemPorts(system.membus)

@@ -49,6 +49,7 @@ namespace AlphaISA
     {
         memset(interrupts, 0, sizeof(interrupts));
         intstatus = 0;
+        newInfoSet = false;
     }

     void post(int int_num, int index)
@@ -137,18 +138,10 @@ namespace AlphaISA
         }

         if (ipl && ipl > tc->readMiscReg(IPR_IPLR)) {
-            tc->setMiscReg(IPR_ISR, summary);
-            tc->setMiscReg(IPR_INTID, ipl);
-
-            /* The following needs to be added back in somehow */
-            // Checker needs to know these two registers were updated.
-            /*#if USE_CHECKER
-            if (this->checker) {
-                this->checker->threadBase()->setMiscReg(IPR_ISR, summary);
-                this->checker->threadBase()->setMiscReg(IPR_INTID, ipl);
-            }
-            #endif*/
+            // assert(!newInfoSet);
+            newIpl = ipl;
+            newSummary = newSummary;
+            newInfoSet = true;

             DPRINTF(Flow, "Interrupt! IPLR=%d ipl=%d summary=%x\n",
                     tc->readMiscReg(IPR_IPLR), ipl, summary);
@@ -158,7 +151,18 @@ namespace AlphaISA
             }
         }

+    void updateIntrInfo(ThreadContext *tc)
+    {
+        assert(newInfoSet);
+        tc->setMiscReg(IPR_ISR, newSummary);
+        tc->setMiscReg(IPR_INTID, newIpl);
+        newInfoSet = false;
+    }
+
   private:
+    bool newInfoSet;
+    int newIpl;
+    int newSummary;
 };
 }
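
Note: the interrupts.hh hunks above split interrupt delivery in two steps. Probing for an interrupt now only records the pending IPL and summary (newIpl, newSummary, newInfoSet), and the actual IPR_ISR/IPR_INTID writes move into the new updateIntrInfo(), called once the CPU has decided to take the interrupt. Below is a minimal stand-alone sketch of that record-then-commit pattern; ThreadContext, setISR and setINTID here are simplified stand-ins for illustration, not the simulator's interfaces.

#include <cassert>

// Stand-in for the simulator's thread context; only the two register
// writes performed by updateIntrInfo() are modeled.
struct ThreadContext {
    int isr = 0;
    int intid = 0;
    void setISR(int v)   { isr = v; }
    void setINTID(int v) { intid = v; }
};

// Deferred-update pattern from the diff: checking for an interrupt only
// records what would change; the registers are written later, when the
// CPU actually commits to taking the interrupt.
class PendingInterrupt {
  public:
    // Probe time: remember the ipl/summary pair, touch nothing else.
    void record(int ipl, int summary) {
        newIpl = ipl;
        newSummary = summary;
        newInfoSet = true;
    }

    // Take time (the diff's updateIntrInfo()): push the saved values
    // into the registers exactly once.
    void updateIntrInfo(ThreadContext *tc) {
        assert(newInfoSet);
        tc->setISR(newSummary);
        tc->setINTID(newIpl);
        newInfoSet = false;
    }

  private:
    bool newInfoSet = false;
    int newIpl = 0;
    int newSummary = 0;
};

int main() {
    ThreadContext tc;
    PendingInterrupt pi;
    pi.record(/* ipl = */ 3, /* summary = */ 0x8);  // probe: remember only
    pi.updateIntrInfo(&tc);                         // take: write ISR/INTID
    return 0;
}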

@@ -292,7 +292,7 @@ namespace AlphaISA
 Fault
 ITB::translate(RequestPtr &req, ThreadContext *tc) const
 {
-    if (PcPAL(req->getVaddr())) {
+    if (PcPAL(req->getPC())) {
         // strip off PAL PC marker (lsb is 1)
         req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
         hits++;

@@ -156,8 +156,11 @@ class AlphaO3CPU : public FullO3CPU<Impl>
     bool simPalCheck(int palFunc, unsigned tid);

-    /** Processes any interrupts. */
-    void processInterrupts();
+    /** Returns the Fault for any valid interrupt. */
+    Fault getInterrupts();
+
+    /** Processes any an interrupt fault. */
+    void processInterrupts(Fault interrupt);

     /** Halts the CPU. */
     void halt() { panic("Halt not implemented!\n"); }

@@ -266,9 +266,17 @@ AlphaO3CPU<Impl>::simPalCheck(int palFunc, unsigned tid)
     return true;
 }

+template <class Impl>
+Fault
+AlphaO3CPU<Impl>::getInterrupts()
+{
+    // Check if there are any outstanding interrupts
+    return this->interrupts.getInterrupt(this->threadContexts[0]);
+}
+
 template <class Impl>
 void
-AlphaO3CPU<Impl>::processInterrupts()
+AlphaO3CPU<Impl>::processInterrupts(Fault interrupt)
 {
     // Check for interrupts here. For now can copy the code that
     // exists within isa_fullsys_traits.hh. Also assume that thread 0
@@ -276,14 +284,12 @@ AlphaO3CPU<Impl>::processInterrupts()
     // @todo: Possibly consolidate the interrupt checking code.
     // @todo: Allow other threads to handle interrupts.

-    // Check if there are any outstanding interrupts
-    //Handle the interrupts
-    Fault interrupt = this->interrupts.getInterrupt(this->tcBase(0));
-
-    if (interrupt != NoFault) {
-        this->checkInterrupts = false;
-        this->trap(interrupt, 0);
-    }
+    assert(interrupt != NoFault);
+
+    this->interrupts.updateIntrInfo(this->threadContexts[0]);
+
+    DPRINTF(O3CPU, "Interrupt %s being handled\n", interrupt->name());
+    this->checkInterrupts = false;
+    this->trap(interrupt, 0);
 }

 #endif // FULL_SYSTEM

@@ -640,8 +640,18 @@ DefaultCommit<Impl>::commit()
     // @todo: Allow other threads to handle interrupts.
     if (cpu->checkInterrupts &&
         cpu->check_interrupts(cpu->tcBase(0)) &&
+        commitStatus[0] != TrapPending &&
         !trapSquash[0] &&
         !tcSquash[0]) {
+        // Get any interrupt that happened
+        Fault intr = cpu->getInterrupts();
+
+        // Exit this if block if there's no fault.
+        if (intr == NoFault) {
+            goto commit_insts;
+        }
+
         // Tell fetch that there is an interrupt pending. This will
         // make fetch wait until it sees a non PAL-mode PC, at which
         // point it stops fetching instructions.
@@ -650,36 +660,37 @@ DefaultCommit<Impl>::commit()
         // Wait until the ROB is empty and all stores have drained in
         // order to enter the interrupt.
         if (rob->isEmpty() && !iewStage->hasStoresToWB()) {
-            // Not sure which thread should be the one to interrupt. For now
-            // always do thread 0.
-            DPRINTF(Commit, "Interrupt detected.\n");
+            // Squash or record that I need to squash this cycle if
+            // an interrupt needed to be handled.
             assert(!thread[0]->inSyscall);
             thread[0]->inSyscall = true;

-            // CPU will handle implementation of the interrupt.
-            cpu->processInterrupts();
-
-            // Now squash or record that I need to squash this cycle.
-            commitStatus[0] = TrapPending;
+            // CPU will handle interrupt.
+            cpu->processInterrupts(intr);

-            // Exit state update mode to avoid accidental updating.
             thread[0]->inSyscall = false;

+            commitStatus[0] = TrapPending;
+
             // Generate trap squash event.
             generateTrapEvent(0);

             toIEW->commitInfo[0].clearInterrupt = true;
+
+            DPRINTF(Commit, "Interrupt detected.\n");
         } else {
             DPRINTF(Commit, "Interrupt pending, waiting for ROB to empty.\n");
         }
     }
+
+    // Label for goto. Not pretty but more readable than really big
+    // if statement above.
+  commit_insts:
 #endif // FULL_SYSTEM

     ////////////////////////////////////
     // Check for any possible squashes, handle them first
     ////////////////////////////////////

     std::list<unsigned>::iterator threads = (*activeThreads).begin();

     while (threads != (*activeThreads).end()) {
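
Note: the commit() hunks above are the consumer of the split interface added in the AlphaO3CPU headers: commit first asks getInterrupts() for a pending fault, skips the whole block when it gets NoFault, and only calls processInterrupts() once the ROB is empty and stores have written back. Below is a condensed sketch of that ordering; CommitSketch and its members are illustrative stand-ins, not the stage's real fields or methods.

#include <cstdio>

// Illustrative stand-ins: only the ordering of the checks is modeled.
using Fault = const char *;
constexpr Fault NoFault = nullptr;

struct CommitSketch {
    bool checkInterrupts = true;   // "an interrupt may be pending"
    bool trapPending = false;
    bool robEmpty = true;
    bool storesToWB = false;

    Fault getInterrupts() { return "timer"; }   // pretend something is posted

    void handleInterrupt(Fault intr) {
        // Corresponds to processInterrupts(intr), setting TrapPending and
        // generating the trap squash event in the diff.
        std::printf("taking interrupt %s\n", intr);
        trapPending = true;
    }

    void maybeHandleInterrupt() {
        if (!checkInterrupts || trapPending)
            return;
        Fault intr = getInterrupts();
        if (intr == NoFault)
            return;                         // plays the role of goto commit_insts
        if (robEmpty && !storesToWB)
            handleInterrupt(intr);          // safe point reached: squash and trap
        else
            std::printf("interrupt pending, waiting for ROB to empty\n");
    }

    void commitStep() {
        maybeHandleInterrupt();
        // ... then check squashes and commit instructions, as in the diff ...
        std::printf("committing instructions\n");
    }
};

int main() {
    CommitSketch commit;
    commit.commitStep();
    return 0;
}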

@@ -819,6 +819,12 @@ unsigned int
 FullO3CPU<Impl>::drain(Event *drain_event)
 {
     DPRINTF(O3CPU, "Switching out\n");
+
+    // If the CPU isn't doing anything, then return immediately.
+    if (_status == Idle || _status == SwitchedOut) {
+        return 0;
+    }
+
     drainCount = 0;
     fetch.drain();
     decode.drain();
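
Note: the drain() hunk above lets an Idle or SwitchedOut CPU answer a drain request immediately. A hedged sketch of the pattern it relies on follows, under the assumption that drain() reports how many components still have in-flight work and that returning 0 tells the caller it does not need to wait; Status, StageSketch and CpuSketch are stand-ins, not the simulator's classes.

#include <cstdio>

enum class Status { Running, Idle, SwitchedOut };

struct StageSketch {
    bool busy = false;
    // Returns 1 if this stage still has work to finish, 0 otherwise.
    unsigned drain() { return busy ? 1u : 0u; }
};

struct CpuSketch {
    Status _status = Status::Idle;
    StageSketch stages[5];   // fetch/decode/rename/iew/commit stand-ins

    // Mirrors the shape of the hunk: bail out with 0 when there is nothing
    // to drain, otherwise ask every stage and report the outstanding count.
    unsigned drain() {
        if (_status == Status::Idle || _status == Status::SwitchedOut)
            return 0;   // nothing in flight: caller need not wait at all
        unsigned outstanding = 0;
        for (StageSketch &s : stages)
            outstanding += s.drain();
        return outstanding;
    }
};

int main() {
    CpuSketch cpu;
    cpu._status = Status::SwitchedOut;
    std::printf("outstanding drains: %u\n", cpu.drain());  // prints 0
    return 0;
}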

@@ -561,27 +561,36 @@ DefaultFetch<Impl>::fetchCacheLine(Addr fetch_PC, Fault &ret_fault, unsigned tid
     Fault fault = NoFault;

     //AlphaDep
-    if (cacheBlocked || isSwitchedOut() ||
-        (interruptPending && (fetch_PC & 0x3))) {
+    if (cacheBlocked) {
+        DPRINTF(Fetch, "[tid:%i] Can't fetch cache line, cache blocked\n",
+                tid);
+        return false;
+    } else if (isSwitchedOut()) {
+        DPRINTF(Fetch, "[tid:%i] Can't fetch cache line, switched out\n",
+                tid);
+        return false;
+    } else if (interruptPending && !(fetch_PC & 0x3)) {
         // Hold off fetch from getting new instructions when:
         // Cache is blocked, or
         // while an interrupt is pending and we're not in PAL mode, or
         // fetch is switched out.
+        DPRINTF(Fetch, "[tid:%i] Can't fetch cache line, interrupt pending\n",
+                tid);
         return false;
     }

     // Align the fetch PC so it's at the start of a cache block.
-    fetch_PC = icacheBlockAlignPC(fetch_PC);
+    Addr block_PC = icacheBlockAlignPC(fetch_PC);

     // If we've already got the block, no need to try to fetch it again.
-    if (cacheDataValid[tid] && fetch_PC == cacheDataPC[tid]) {
+    if (cacheDataValid[tid] && block_PC == cacheDataPC[tid]) {
         return true;
     }

     // Setup the memReq to do a read of the first instruction's address.
     // Set the appropriate read size and flags as well.
     // Build request here.
-    RequestPtr mem_req = new Request(tid, fetch_PC, cacheBlkSize, 0,
+    RequestPtr mem_req = new Request(tid, block_PC, cacheBlkSize, 0,
                                      fetch_PC, cpu->readCpuId(), tid);

     memReq[tid] = mem_req;
@@ -611,7 +620,7 @@ DefaultFetch<Impl>::fetchCacheLine(Addr fetch_PC, Fault &ret_fault, unsigned tid
                                        Packet::ReadReq, Packet::Broadcast);
         data_pkt->dataDynamicArray(new uint8_t[cacheBlkSize]);

-        cacheDataPC[tid] = fetch_PC;
+        cacheDataPC[tid] = block_PC;
         cacheDataValid[tid] = false;

         DPRINTF(Fetch, "Fetch: Doing instruction read.\n");
@@ -1052,12 +1061,16 @@ DefaultFetch<Impl>::fetch(bool &status_change)
     } else {
         if (fetchStatus[tid] == Idle) {
             ++fetchIdleCycles;
+            DPRINTF(Fetch, "[tid:%i]: Fetch is idle!\n", tid);
         } else if (fetchStatus[tid] == Blocked) {
             ++fetchBlockedCycles;
+            DPRINTF(Fetch, "[tid:%i]: Fetch is blocked!\n", tid);
         } else if (fetchStatus[tid] == Squashing) {
             ++fetchSquashCycles;
+            DPRINTF(Fetch, "[tid:%i]: Fetch is squashing!\n", tid);
         } else if (fetchStatus[tid] == IcacheWaitResponse) {
             ++icacheStallCycles;
+            DPRINTF(Fetch, "[tid:%i]: Fetch is waiting cache response!\n", tid);
         }

         // Status is Idle, Squashing, Blocked, or IcacheWaitResponse, so
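
Note: the fetchCacheLine() hunks above stop clobbering fetch_PC with its block-aligned value. The new block_PC is used for the cache request and for cacheDataPC, while fetch_PC still names the instruction being fetched and is still passed as the request's PC. Below is a small sketch of that alignment, assuming the usual power-of-two masking form; the real icacheBlockAlignPC() implementation is not shown in the diff, so the helper here is an assumption.

#include <cassert>
#include <cstdint>
#include <cstdio>

using Addr = std::uint64_t;

// Assumed helper: align an address down to the start of its cache block.
// Standard power-of-two masking form, used here only for illustration.
static Addr icacheBlockAlignPC(Addr addr, Addr blkSize)
{
    assert((blkSize & (blkSize - 1)) == 0 && "block size must be a power of two");
    return addr & ~(blkSize - 1);
}

int main()
{
    const Addr cacheBlkSize = 64;
    Addr fetch_PC = 0x1234;                      // instruction being fetched
    Addr block_PC = icacheBlockAlignPC(fetch_PC, cacheBlkSize);  // 0x1200

    // As in the hunk: the request and cacheDataPC use the aligned block
    // address, while fetch_PC itself is left untouched and recorded as the
    // PC of the request.
    std::printf("fetch_PC=0x%llx block_PC=0x%llx\n",
                (unsigned long long)fetch_PC, (unsigned long long)block_PC);
    return 0;
}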

@@ -700,52 +700,12 @@ OzoneCPU<Impl>::processInterrupts()
     // Check if there are any outstanding interrupts
     //Handle the interrupts

-    int ipl = 0;
-    int summary = 0;
-
-    checkInterrupts = false;
-
-    if (thread.readMiscReg(IPR_ASTRR))
-        panic("asynchronous traps not implemented\n");
-
-    if (thread.readMiscReg(IPR_SIRR)) {
-        for (int i = INTLEVEL_SOFTWARE_MIN;
-             i < INTLEVEL_SOFTWARE_MAX; i++) {
-            if (thread.readMiscReg(IPR_SIRR) & (ULL(1) << i)) {
-                // See table 4-19 of the 21164 hardware reference
-                ipl = (i - INTLEVEL_SOFTWARE_MIN) + 1;
-                summary |= (ULL(1) << i);
-            }
-        }
-    }
-
-    uint64_t interrupts = intr_status();
-
-    if (interrupts) {
-        for (int i = INTLEVEL_EXTERNAL_MIN;
-             i < INTLEVEL_EXTERNAL_MAX; i++) {
-            if (interrupts & (ULL(1) << i)) {
-                // See table 4-19 of the 21164 hardware reference
-                ipl = i;
-                summary |= (ULL(1) << i);
-            }
-        }
-    }
-
-    if (ipl && ipl > thread.readMiscReg(IPR_IPLR)) {
-        thread.setMiscReg(IPR_ISR, summary);
-        thread.setMiscReg(IPR_INTID, ipl);
-#if USE_CHECKER
-        // @todo: Make this more transparent
-        if (checker) {
-            checker->threadBase()->setMiscReg(IPR_ISR, summary);
-            checker->threadBase()->setMiscReg(IPR_INTID, ipl);
-        }
-#endif
-        Fault fault = new InterruptFault;
-        fault->invoke(thread.getTC());
-
-        DPRINTF(Flow, "Interrupt! IPLR=%d ipl=%d summary=%x\n",
-                thread.readMiscReg(IPR_IPLR), ipl, summary);
+    Fault interrupt = this->interrupts.getInterrupt(thread.getTC());
+
+    if (interrupt != NoFault) {
+        this->interrupts.updateIntrInfo(thread.getTC());
+        this->checkInterrupts = false;
+        interrupt->invoke(thread.getTC());
     }
 }

@@ -476,8 +476,8 @@ FrontEnd<Impl>::fetchCacheLine()
     // Setup the memReq to do a read of the first isntruction's address.
     // Set the appropriate read size and flags as well.
-    memReq = new Request(0, fetch_PC, cacheBlkSize, flags,
-                         fetch_PC, cpu->readCpuId(), 0);
+    memReq = new Request(0, fetch_PC, cacheBlkSize, 0,
+                         PC, cpu->readCpuId(), 0);

     // Translate the instruction request.
     fault = cpu->translateInstReq(memReq, thread);

@@ -213,6 +213,9 @@ AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
             break;
         }
     }
+
+    if (_status != Running) {
+        _status = Idle;
+    }
 }

@@ -315,6 +315,7 @@ BaseSimpleCPU::checkForInterrupts()
         Fault interrupt = interrupts.getInterrupt(tc);

         if (interrupt != NoFault) {
+            interrupts.updateIntrInfo(tc);
             checkInterrupts = false;
             interrupt->invoke(tc);
         }

@@ -242,8 +242,11 @@ Bus::recvRetry(int id)
         }
     }
     //If we weren't able to drain before, we might be able to now.
-    if (drainEvent && retryList.size() == 0 && curTick >= tickNextIdle)
+    if (drainEvent && retryList.size() == 0 && curTick >= tickNextIdle) {
         drainEvent->process();
+        // Clear the drain event once we're done with it.
+        drainEvent = NULL;
+    }
 }

 Port *
@@ -367,6 +370,10 @@ Bus::recvAtomic(PacketPtr pkt)
     DPRINTF(Bus, "recvAtomic: packet src %d dest %d addr 0x%x cmd %s\n",
             pkt->getSrc(), pkt->getDest(), pkt->getAddr(), pkt->cmdString());
     assert(pkt->getDest() == Packet::Broadcast);
+
+    // Assume one bus cycle in order to get through. This may have
+    // some clock skew issues yet again...
+    pkt->finishTime = curTick + clock;
     Tick snoopTime = atomicSnoop(pkt);
     if (snoopTime)
         return snoopTime; //Snoop satisfies it
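
Note: the bus hunks above make two small fixes: recvAtomic() now charges one bus cycle by setting pkt->finishTime, and recvRetry() clears drainEvent after processing it so the same drain cannot be signalled twice. Below is a tiny sketch of that one-shot drain-event pattern; Event and BusSketch are stand-ins for the simulator's classes, used only to show the shape of the fix.

#include <cstdio>

// Stand-in for the simulator's event object; only process() matters here.
struct Event {
    void process() { std::printf("drain complete\n"); }
};

struct BusSketch {
    Event *drainEvent = nullptr;
    int retryListSize = 0;

    // Mirrors the shape of the hunk: signal the drain only when the bus is
    // really idle, and null the pointer so the event cannot fire twice.
    void maybeSignalDrain(bool busIdle) {
        if (drainEvent && retryListSize == 0 && busIdle) {
            drainEvent->process();
            drainEvent = nullptr;   // clear the drain event once it's been used
        }
    }
};

int main() {
    Event done;
    BusSketch bus;
    bus.drainEvent = &done;
    bus.maybeSignalDrain(true);   // fires once and clears the pointer
    bus.maybeSignalDrain(true);   // second call safely does nothing
    return 0;
}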