gem5/src/arch/arm/table_walker.cc


/*
* Copyright (c) 2010 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Ali Saidi
*/
#include "arch/arm/faults.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/tlb.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "dev/io_device.hh"
#include "sim/system.hh"
using namespace ArmISA;
TableWalker::TableWalker(const Params *p)
: MemObject(p), port(NULL), tlb(NULL), currState(NULL), pending(false),
doL1DescEvent(this), doL2DescEvent(this), doProcessEvent(this)
{
sctlr = 0;
}
TableWalker::~TableWalker()
{
;
}
unsigned int
TableWalker::drain(Event *de)
{
if (stateQueueL1.size() || stateQueueL2.size() || pendingQueue.size())
{
changeState(Draining);
DPRINTF(Checkpoint, "TableWalker busy, wait to drain\n");
return 1;
}
else
{
changeState(Drained);
DPRINTF(Checkpoint, "TableWalker free, no need to drain\n");
return 0;
}
}
void
TableWalker::resume()
{
MemObject::resume();
if ((params()->sys->getMemoryMode() == Enums::timing) && currState) {
delete currState;
currState = NULL;
}
}
Port*
TableWalker::getPort(const std::string &if_name, int idx)
{
if (if_name == "port") {
if (port != NULL)
return port;
System *sys = params()->sys;
Tick minb = params()->min_backoff;
Tick maxb = params()->max_backoff;
port = new DmaPort(this, sys, minb, maxb);
return port;
}
return NULL;
}
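/*
 * Entry point for a translation table walk. In atomic mode the walk
 * completes (and returns any fault) before this call returns; in timing
 * mode the walk is queued if another one is already pending and finishes
 * later via the doL1DescEvent/doL2DescEvent callbacks below. This note
 * just summarizes the flow implemented in this file
 * (walk -> processWalk -> doL1Descriptor -> doL2Descriptor).
 */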
Fault
TableWalker::walk(RequestPtr _req, ThreadContext *_tc, uint8_t _cid, TLB::Mode _mode,
TLB::Translation *_trans, bool _timing)
{
if (!currState) {
// For atomic mode, a new WalkerState instance should only be created
// once per TLB. For timing mode, a new instance is generated for every
// TLB miss.
DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");
currState = new WalkerState();
currState->tableWalker = this;
} else if (_timing) {
// This is a translation that was completed and then faulted again
// because some underlying parameters that affect the translation
// changed out from under us (e.g. asid). It will either be a
// misprediction, in which case nothing will happen, or we'll use
// this fault to re-execute the faulting instruction, which should
// clean everything up.
if (currState->vaddr == _req->getVaddr()) {
return new ReExec;
}
panic("currState should always be empty in timing mode!\n");
}
currState->tc = _tc;
currState->transState = _trans;
currState->req = _req;
currState->fault = NoFault;
currState->contextId = _cid;
currState->timing = _timing;
currState->mode = _mode;
/** @todo These should be cached or grabbed from cached copies in
the TLB; all these miscreg reads are expensive */
currState->vaddr = currState->req->getVaddr();
currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR);
sctlr = currState->sctlr;
currState->N = currState->tc->readMiscReg(MISCREG_TTBCR);
currState->isFetch = (currState->mode == TLB::Execute);
currState->isWrite = (currState->mode == TLB::Write);
if (!currState->timing)
return processWalk();
if (pending || pendingQueue.size()) {
pendingQueue.push_back(currState);
currState = NULL;
} else {
pending = true;
return processWalk();
}
return NoFault;
}
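/*
 * Handler for doProcessEvent (scheduled by nextWalk): pops the next
 * queued walk off pendingQueue and starts it. This path is only used in
 * timing mode, where walks arriving while another is in flight are
 * queued by walk() above.
 */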
void
TableWalker::processWalkWrapper()
{
assert(!currState);
assert(pendingQueue.size());
currState = pendingQueue.front();
pendingQueue.pop_front();
pending = true;
processWalk();
}
Fault
TableWalker::processWalk()
{
Addr ttbr = 0;
// If translation isn't enabled, we shouldn't be here
assert(currState->sctlr.m);
DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
currState->vaddr, currState->N, mbits(currState->vaddr, 31,
32-currState->N));
if (currState->N == 0 || !mbits(currState->vaddr, 31, 32-currState->N)) {
DPRINTF(TLB, " - Selecting TTBR0\n");
ttbr = currState->tc->readMiscReg(MISCREG_TTBR0);
} else {
DPRINTF(TLB, " - Selecting TTBR1\n");
ttbr = currState->tc->readMiscReg(MISCREG_TTBR1);
currState->N = 0;
}
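// Compute the L1 descriptor address per the ARMv7 short-descriptor
// format: the table base comes from TTBR[31:14-N] and the index is
// VA[31-N:20], scaled by 4 bytes per descriptor.
// Illustrative example (assumed values, N == 0): vaddr 0x00302000 with
// TTBR0 = 0x80000000 gives 0x80000000 | (0x003 << 2) = 0x8000000c.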
Addr l1desc_addr = mbits(ttbr, 31, 14-currState->N) |
(bits(currState->vaddr,31-currState->N,20) << 2);
DPRINTF(TLB, " - Descriptor at address %#x\n", l1desc_addr);
// Trickbox address check
Fault f;
f = tlb->walkTrickBoxCheck(l1desc_addr, currState->vaddr, sizeof(uint32_t),
currState->isFetch, currState->isWrite, 0, true);
if (f) {
DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr);
if (currState->timing) {
pending = false;
nextWalk(currState->tc);
currState = NULL;
} else {
currState->tc = NULL;
currState->req = NULL;
}
return f;
}
Request::Flags flag = 0;
if (currState->sctlr.c == 0) {
flag = Request::UNCACHEABLE;
}
if (currState->timing) {
port->dmaAction(MemCmd::ReadReq, l1desc_addr, sizeof(uint32_t),
&doL1DescEvent, (uint8_t*)&currState->l1Desc.data,
currState->tc->getCpuPtr()->ticks(1), flag);
DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
stateQueueL1.size());
stateQueueL1.push_back(currState);
currState = NULL;
} else {
port->dmaAction(MemCmd::ReadReq, l1desc_addr, sizeof(uint32_t),
NULL, (uint8_t*)&currState->l1Desc.data,
currState->tc->getCpuPtr()->ticks(1), flag);
doL1Descriptor();
f = currState->fault;
}
return f;
}
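/*
 * Derive the memory attributes of a TLB entry from the descriptor's
 * TEX/C/B bits ("texcb") and S bit. Two cases, selected by SCTLR.TRE:
 * with TEX remap disabled the 5-bit texcb value is decoded directly
 * (first switch below); with TEX remap enabled, texcb[2:0] indexes the
 * PRRR/NMRR remap registers instead (second half).
 */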
void
TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
uint8_t texcb, bool s)
{
// Note: the tc and sctlr local variables are hiding the tc and sctlr
// class variables
DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
te.shareable = false; // default value
te.nonCacheable = false;
bool outer_shareable = false;
if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
switch(texcb) {
case 0: // Strongly-ordered
te.nonCacheable = true;
te.mtype = TlbEntry::StronglyOrdered;
te.shareable = true;
te.innerAttrs = 1;
te.outerAttrs = 0;
break;
case 1: // Shareable Device
te.nonCacheable = true;
te.mtype = TlbEntry::Device;
te.shareable = true;
te.innerAttrs = 3;
te.outerAttrs = 0;
break;
case 2: // Outer and Inner Write-Through, no Write-Allocate
te.mtype = TlbEntry::Normal;
te.shareable = s;
te.innerAttrs = 6;
te.outerAttrs = bits(texcb, 1, 0);
break;
case 3: // Outer and Inner Write-Back, no Write-Allocate
te.mtype = TlbEntry::Normal;
te.shareable = s;
te.innerAttrs = 7;
te.outerAttrs = bits(texcb, 1, 0);
break;
case 4: // Outer and Inner Non-cacheable
te.nonCacheable = true;
te.mtype = TlbEntry::Normal;
te.shareable = s;
te.innerAttrs = 0;
te.outerAttrs = bits(texcb, 1, 0);
break;
case 5: // Reserved
panic("Reserved texcb value!\n");
break;
case 6: // Implementation Defined
panic("Implementation-defined texcb value!\n");
break;
case 7: // Outer and Inner Write-Back, Write-Allocate
te.mtype = TlbEntry::Normal;
te.shareable = s;
te.innerAttrs = 5;
te.outerAttrs = 1;
break;
case 8: // Non-shareable Device
te.nonCacheable = true;
te.mtype = TlbEntry::Device;
te.shareable = false;
te.innerAttrs = 3;
te.outerAttrs = 0;
break;
case 9 ... 15: // Reserved
panic("Reserved texcb value!\n");
break;
case 16 ... 31: // Cacheable Memory
te.mtype = TlbEntry::Normal;
te.shareable = s;
if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
te.nonCacheable = true;
te.innerAttrs = bits(texcb, 1, 0);
te.outerAttrs = bits(texcb, 3, 2);
break;
default:
panic("More than 32 states for 5 bits?\n");
}
} else {
assert(tc);
PRRR prrr = tc->readMiscReg(MISCREG_PRRR);
NMRR nmrr = tc->readMiscReg(MISCREG_NMRR);
DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
switch(bits(texcb, 2,0)) {
case 0:
curr_tr = prrr.tr0;
curr_ir = nmrr.ir0;
curr_or = nmrr.or0;
outer_shareable = (prrr.nos0 == 0);
break;
case 1:
curr_tr = prrr.tr1;
curr_ir = nmrr.ir1;
curr_or = nmrr.or1;
outer_shareable = (prrr.nos1 == 0);
break;
case 2:
curr_tr = prrr.tr2;
curr_ir = nmrr.ir2;
curr_or = nmrr.or2;
outer_shareable = (prrr.nos2 == 0);
break;
case 3:
curr_tr = prrr.tr3;
curr_ir = nmrr.ir3;
curr_or = nmrr.or3;
outer_shareable = (prrr.nos3 == 0);
break;
case 4:
curr_tr = prrr.tr4;
curr_ir = nmrr.ir4;
curr_or = nmrr.or4;
outer_shareable = (prrr.nos4 == 0);
break;
case 5:
curr_tr = prrr.tr5;
curr_ir = nmrr.ir5;
curr_or = nmrr.or5;
outer_shareable = (prrr.nos5 == 0);
break;
case 6:
panic("Imp defined type\n");
case 7:
curr_tr = prrr.tr7;
curr_ir = nmrr.ir7;
curr_or = nmrr.or7;
outer_shareable = (prrr.nos7 == 0);
break;
}
switch(curr_tr) {
case 0:
DPRINTF(TLBVerbose, "StronglyOrdered\n");
te.mtype = TlbEntry::StronglyOrdered;
te.nonCacheable = true;
te.innerAttrs = 1;
te.outerAttrs = 0;
te.shareable = true;
break;
case 1:
DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
prrr.ds1, prrr.ds0, s);
te.mtype = TlbEntry::Device;
te.nonCacheable = true;
te.innerAttrs = 3;
te.outerAttrs = 0;
if (prrr.ds1 && s)
te.shareable = true;
if (prrr.ds0 && !s)
te.shareable = true;
break;
case 2:
DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
prrr.ns1, prrr.ns0, s);
te.mtype = TlbEntry::Normal;
if (prrr.ns1 && s)
te.shareable = true;
if (prrr.ns0 && !s)
te.shareable = true;
break;
case 3:
panic("Reserved type");
}
if (te.mtype == TlbEntry::Normal){
switch(curr_ir) {
case 0:
te.nonCacheable = true;
te.innerAttrs = 0;
break;
case 1:
te.innerAttrs = 5;
break;
case 2:
te.innerAttrs = 6;
break;
case 3:
te.innerAttrs = 7;
break;
}
switch(curr_or) {
case 0:
te.nonCacheable = true;
te.outerAttrs = 0;
break;
case 1:
te.outerAttrs = 1;
break;
case 2:
te.outerAttrs = 2;
break;
case 3:
te.outerAttrs = 3;
break;
}
}
}
DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
"outerAttrs: %d\n",
te.shareable, te.innerAttrs, te.outerAttrs);
/** Formatting for Physical Address Register (PAR)
* Only including lower bits (TLB info here)
* PAR:
* PA [31:12]
* Reserved [11]
* TLB info [10:1]
* NOS [10] (Not Outer Shareable)
* NS [9] (Non-Secure)
* -- [8] (Implementation Defined)
* SH [7] (Shareable)
* Inner [6:4] (Inner memory attributes)
* Outer [3:2] (Outer memory attributes)
* SS [1] (SuperSection)
* F [0] (Fault, Fault Status in [6:1] if faulted)
*/
te.attributes = (
((outer_shareable ? 0:1) << 10) |
// TODO: NS Bit
((te.shareable ? 1:0) << 7) |
(te.innerAttrs << 4) |
(te.outerAttrs << 2)
// TODO: Supersection bit
// TODO: Fault bit
);
}
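/*
 * Decode the first-level descriptor returned by the DMA read. In the
 * short-descriptor format bits [1:0] select the type: fault/reserved,
 * page table pointer, or a (super)section; sections map 1MB and
 * supersections 16MB (the latter are unimplemented here and panic).
 */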
void
TableWalker::doL1Descriptor()
{
DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
currState->vaddr, currState->l1Desc.data);
TlbEntry te;
switch (currState->l1Desc.type()) {
case L1Descriptor::Ignore:
case L1Descriptor::Reserved:
if (!currState->timing) {
currState->tc = NULL;
currState->req = NULL;
}
DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
if (currState->isFetch)
currState->fault =
new PrefetchAbort(currState->vaddr, ArmFault::Translation0);
else
currState->fault =
new DataAbort(currState->vaddr, 0, currState->isWrite,
ArmFault::Translation0);
return;
case L1Descriptor::Section:
if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
/** @todo: check sctlr.ha (bit[17]); if the Hardware Access Flag is
* enabled, set AP[0] via l1Desc instead of generating an
* AccessFlag0 fault.
*/
currState->fault = new DataAbort(currState->vaddr,
currState->l1Desc.domain(), currState->isWrite,
ArmFault::AccessFlag0);
}
if (currState->l1Desc.supersection()) {
panic("Haven't implemented supersections\n");
}
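// Section mapping: 1MB, so the page shift is 20 bits.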
te.N = 20;
te.pfn = currState->l1Desc.pfn();
te.size = (1<<te.N) - 1;
te.global = !currState->l1Desc.global();
te.valid = true;
te.vpn = currState->vaddr >> te.N;
te.sNp = true;
te.xn = currState->l1Desc.xn();
te.ap = currState->l1Desc.ap();
te.domain = currState->l1Desc.domain();
te.asid = currState->contextId;
memAttrs(currState->tc, te, currState->sctlr,
currState->l1Desc.texcb(), currState->l1Desc.shareable());
DPRINTF(TLB, "Inserting Section Descriptor into TLB\n");
DPRINTF(TLB, " - N:%d pfn:%#x size: %#x global:%d valid: %d\n",
te.N, te.pfn, te.size, te.global, te.valid);
DPRINTF(TLB, " - vpn:%#x sNp: %d xn:%d ap:%d domain: %d asid:%d nc:%d\n",
te.vpn, te.sNp, te.xn, te.ap, te.domain, te.asid,
te.nonCacheable);
DPRINTF(TLB, " - domain from l1 desc: %d data: %#x bits:%d\n",
currState->l1Desc.domain(), currState->l1Desc.data,
(currState->l1Desc.data >> 5) & 0xF );
if (!currState->timing) {
currState->tc = NULL;
currState->req = NULL;
}
tlb->insert(currState->vaddr, te);
return;
case L1Descriptor::PageTable:
Addr l2desc_addr;
l2desc_addr = currState->l1Desc.l2Addr() |
(bits(currState->vaddr, 19,12) << 2);
DPRINTF(TLB, "L1 descriptor points to page table at: %#x\n",
l2desc_addr);
// Trickbox address check
currState->fault = tlb->walkTrickBoxCheck(l2desc_addr, currState->vaddr,
sizeof(uint32_t), currState->isFetch, currState->isWrite,
currState->l1Desc.domain(), false);
if (currState->fault) {
if (!currState->timing) {
currState->tc = NULL;
currState->req = NULL;
}
return;
}
if (currState->timing) {
currState->delayed = true;
port->dmaAction(MemCmd::ReadReq, l2desc_addr, sizeof(uint32_t),
&doL2DescEvent, (uint8_t*)&currState->l2Desc.data,
currState->tc->getCpuPtr()->ticks(1));
} else {
port->dmaAction(MemCmd::ReadReq, l2desc_addr, sizeof(uint32_t),
NULL, (uint8_t*)&currState->l2Desc.data,
currState->tc->getCpuPtr()->ticks(1));
doL2Descriptor();
}
return;
default:
panic("A new type in a 2 bit field?\n");
}
}
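/*
 * Decode the second-level descriptor: a large page maps 64KB (N = 16)
 * and a small page 4KB (N = 12); the domain still comes from the parent
 * L1 descriptor.
 */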
void
TableWalker::doL2Descriptor()
{
DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
currState->vaddr, currState->l2Desc.data);
TlbEntry te;
if (currState->l2Desc.invalid()) {
DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
if (!currState->timing) {
currState->tc = NULL;
currState->req = NULL;
}
if (currState->isFetch)
currState->fault =
new PrefetchAbort(currState->vaddr, ArmFault::Translation1);
else
currState->fault =
new DataAbort(currState->vaddr, currState->l1Desc.domain(),
currState->isWrite, ArmFault::Translation1);
return;
}
if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
/** @todo: check sctlr.ha (bit[17]); if the Hardware Access Flag is
* enabled, set AP[0] via l2Desc instead of generating an
* AccessFlag1 fault.
*/
currState->fault =
new DataAbort(currState->vaddr, 0, currState->isWrite,
ArmFault::AccessFlag1);
}
if (currState->l2Desc.large()) {
te.N = 16;
te.pfn = currState->l2Desc.pfn();
} else {
te.N = 12;
te.pfn = currState->l2Desc.pfn();
}
te.valid = true;
te.size = (1 << te.N) - 1;
te.asid = currState->contextId;
te.sNp = false;
te.vpn = currState->vaddr >> te.N;
te.global = currState->l2Desc.global();
te.xn = currState->l2Desc.xn();
te.ap = currState->l2Desc.ap();
te.domain = currState->l1Desc.domain();
memAttrs(currState->tc, te, currState->sctlr, currState->l2Desc.texcb(),
currState->l2Desc.shareable());
if (!currState->timing) {
currState->tc = NULL;
currState->req = NULL;
}
tlb->insert(currState->vaddr, te);
}
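/*
 * Completion callback for the timing-mode L1 descriptor read
 * (doL1DescEvent). Decodes the descriptor and then either reports a
 * fault, re-runs the translation through the TLB (which should now
 * hit), or, if an L2 fetch was started, parks the state on
 * stateQueueL2 until doL2DescriptorWrapper fires.
 */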
void
TableWalker::doL1DescriptorWrapper()
{
currState = stateQueueL1.front();
currState->delayed = false;
DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",&currState->l1Desc.data);
DPRINTF(TLBVerbose, "L1 Desc object data: %08x\n",currState->l1Desc.data);
DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr);
doL1Descriptor();
stateQueueL1.pop_front();
// Check if fault was generated
if (currState->fault != NoFault) {
currState->transState->finish(currState->fault, currState->req,
currState->tc, currState->mode);
pending = false;
nextWalk(currState->tc);
currState->req = NULL;
currState->tc = NULL;
currState->delayed = false;
}
else if (!currState->delayed) {
// delay is not set so there is no L2 to do
DPRINTF(TLBVerbose, "calling translateTiming again\n");
currState->fault = tlb->translateTiming(currState->req, currState->tc,
currState->transState, currState->mode);
pending = false;
nextWalk(currState->tc);
currState->req = NULL;
currState->tc = NULL;
currState->delayed = false;
delete currState;
} else {
// need to do L2 descriptor
stateQueueL2.push_back(currState);
}
currState = NULL;
}
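/*
 * Completion callback for the timing-mode L2 descriptor read
 * (doL2DescEvent): decodes the descriptor, finishes or retries the
 * translation, and kicks off the next queued walk.
 */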
void
TableWalker::doL2DescriptorWrapper()
{
currState = stateQueueL2.front();
assert(currState->delayed);
DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
currState->vaddr);
doL2Descriptor();
// Check if fault was generated
if (currState->fault != NoFault) {
currState->transState->finish(currState->fault, currState->req,
currState->tc, currState->mode);
}
else {
DPRINTF(TLBVerbose, "calling translateTiming again\n");
currState->fault = tlb->translateTiming(currState->req, currState->tc,
currState->transState, currState->mode);
}
stateQueueL2.pop_front();
pending = false;
nextWalk(currState->tc);
currState->req = NULL;
currState->tc = NULL;
currState->delayed = false;
delete currState;
currState = NULL;
}
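// If more walks are queued, schedule doProcessEvent for the next CPU
// cycle so processWalkWrapper can start the next one.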
void
TableWalker::nextWalk(ThreadContext *tc)
{
if (pendingQueue.size())
schedule(doProcessEvent, tc->getCpuPtr()->nextCycle(curTick()+1));
}
ArmISA::TableWalker *
ArmTableWalkerParams::create()
{
return new ArmISA::TableWalker(this);
}