Address translation: Make the page table more flexible.

The page table now stores actual page table entries (TheISA::TlbEntry objects) rather than
bare physical addresses. It is still a templated class here, but this will be corrected in
the near future.

--HG--
extra : convert_revision : 804dcc6320414c2b3ab76a74a15295bd24e1d13d
Gabe Black 2007-08-26 20:33:57 -07:00
parent 80d51650c8
commit 9b49a78cfd
22 changed files with 298 additions and 310 deletions
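
In rough outline, the software page table now maps each virtual page to an ISA-defined
TlbEntry object instead of to a bare physical address. A minimal sketch of the before/after
shape of the structure (simplified from the mem/page_table.hh hunks below; the real class
also keeps a three-entry lookup cache and serialization support):

    // Before: virtual page number -> physical page address
    m5::hash_map<Addr, Addr> pTable;

    // After: virtual page number -> ISA-specific TLB entry
    typedef m5::hash_map<Addr, TheISA::TlbEntry> PTable;
    typedef PTable::iterator PTableItr;
    PTable pTable;

    // Allocation builds the entry from the newly assigned physical page
    // (see PageTable::allocate() in the mem/page_table.cc hunks below).
    pTable[vaddr] = TheISA::TlbEntry(system->new_page());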

View file

@ -245,15 +245,16 @@ AlphaISA::MiscRegFile::readIpr(int idx, ThreadContext *tc)
case AlphaISA::IPR_DTB_PTE:
{
AlphaISA::PTE &pte = tc->getDTBPtr()->index(!tc->misspeculating());
AlphaISA::TlbEntry &entry
= tc->getDTBPtr()->index(!tc->misspeculating());
retval |= ((uint64_t)pte.ppn & ULL(0x7ffffff)) << 32;
retval |= ((uint64_t)pte.xre & ULL(0xf)) << 8;
retval |= ((uint64_t)pte.xwe & ULL(0xf)) << 12;
retval |= ((uint64_t)pte.fonr & ULL(0x1)) << 1;
retval |= ((uint64_t)pte.fonw & ULL(0x1))<< 2;
retval |= ((uint64_t)pte.asma & ULL(0x1)) << 4;
retval |= ((uint64_t)pte.asn & ULL(0x7f)) << 57;
retval |= ((uint64_t)entry.ppn & ULL(0x7ffffff)) << 32;
retval |= ((uint64_t)entry.xre & ULL(0xf)) << 8;
retval |= ((uint64_t)entry.xwe & ULL(0xf)) << 12;
retval |= ((uint64_t)entry.fonr & ULL(0x1)) << 1;
retval |= ((uint64_t)entry.fonw & ULL(0x1))<< 2;
retval |= ((uint64_t)entry.asma & ULL(0x1)) << 4;
retval |= ((uint64_t)entry.asn & ULL(0x7f)) << 57;
}
break;
@ -480,7 +481,7 @@ AlphaISA::MiscRegFile::setIpr(int idx, uint64_t val, ThreadContext *tc)
break;
case AlphaISA::IPR_DTB_TAG: {
struct AlphaISA::PTE pte;
struct AlphaISA::TlbEntry entry;
// FIXME: granularity hints NYI...
if (EV5::DTB_PTE_GH(ipr[AlphaISA::IPR_DTB_PTE]) != 0)
@ -490,21 +491,21 @@ AlphaISA::MiscRegFile::setIpr(int idx, uint64_t val, ThreadContext *tc)
ipr[idx] = val;
// construct PTE for new entry
pte.ppn = EV5::DTB_PTE_PPN(ipr[AlphaISA::IPR_DTB_PTE]);
pte.xre = EV5::DTB_PTE_XRE(ipr[AlphaISA::IPR_DTB_PTE]);
pte.xwe = EV5::DTB_PTE_XWE(ipr[AlphaISA::IPR_DTB_PTE]);
pte.fonr = EV5::DTB_PTE_FONR(ipr[AlphaISA::IPR_DTB_PTE]);
pte.fonw = EV5::DTB_PTE_FONW(ipr[AlphaISA::IPR_DTB_PTE]);
pte.asma = EV5::DTB_PTE_ASMA(ipr[AlphaISA::IPR_DTB_PTE]);
pte.asn = EV5::DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]);
entry.ppn = EV5::DTB_PTE_PPN(ipr[AlphaISA::IPR_DTB_PTE]);
entry.xre = EV5::DTB_PTE_XRE(ipr[AlphaISA::IPR_DTB_PTE]);
entry.xwe = EV5::DTB_PTE_XWE(ipr[AlphaISA::IPR_DTB_PTE]);
entry.fonr = EV5::DTB_PTE_FONR(ipr[AlphaISA::IPR_DTB_PTE]);
entry.fonw = EV5::DTB_PTE_FONW(ipr[AlphaISA::IPR_DTB_PTE]);
entry.asma = EV5::DTB_PTE_ASMA(ipr[AlphaISA::IPR_DTB_PTE]);
entry.asn = EV5::DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]);
// insert new TAG/PTE value into data TLB
tc->getDTBPtr()->insert(val, pte);
tc->getDTBPtr()->insert(val, entry);
}
break;
case AlphaISA::IPR_ITB_PTE: {
struct AlphaISA::PTE pte;
struct AlphaISA::TlbEntry entry;
// FIXME: granularity hints NYI...
if (EV5::ITB_PTE_GH(val) != 0)
@ -514,16 +515,16 @@ AlphaISA::MiscRegFile::setIpr(int idx, uint64_t val, ThreadContext *tc)
ipr[idx] = val;
// construct PTE for new entry
pte.ppn = EV5::ITB_PTE_PPN(val);
pte.xre = EV5::ITB_PTE_XRE(val);
pte.xwe = 0;
pte.fonr = EV5::ITB_PTE_FONR(val);
pte.fonw = EV5::ITB_PTE_FONW(val);
pte.asma = EV5::ITB_PTE_ASMA(val);
pte.asn = EV5::ITB_ASN_ASN(ipr[AlphaISA::IPR_ITB_ASN]);
entry.ppn = EV5::ITB_PTE_PPN(val);
entry.xre = EV5::ITB_PTE_XRE(val);
entry.xwe = 0;
entry.fonr = EV5::ITB_PTE_FONR(val);
entry.fonw = EV5::ITB_PTE_FONW(val);
entry.asma = EV5::ITB_PTE_ASMA(val);
entry.asn = EV5::ITB_ASN_ASN(ipr[AlphaISA::IPR_ITB_ASN]);
// insert new TAG/PTE value into data TLB
tc->getITBPtr()->insert(ipr[AlphaISA::IPR_ITB_TAG], pte);
tc->getITBPtr()->insert(ipr[AlphaISA::IPR_ITB_TAG], entry);
}
break;

View file

@ -185,18 +185,18 @@ void ItbPageFault::invoke(ThreadContext * tc)
VAddr vaddr(pc);
VAddr paddr(physaddr);
PTE pte;
pte.tag = vaddr.vpn();
pte.ppn = paddr.vpn();
pte.xre = 15; //This can be read in all modes.
pte.xwe = 1; //This can be written only in kernel mode.
pte.asn = p->M5_pid; //Address space number.
pte.asma = false; //Only match on this ASN.
pte.fonr = false; //Don't fault on read.
pte.fonw = false; //Don't fault on write.
pte.valid = true; //This entry is valid.
TlbEntry entry;
entry.tag = vaddr.vpn();
entry.ppn = paddr.vpn();
entry.xre = 15; //This can be read in all modes.
entry.xwe = 1; //This can be written only in kernel mode.
entry.asn = p->M5_pid; //Address space number.
entry.asma = false; //Only match on this ASN.
entry.fonr = false; //Don't fault on read.
entry.fonw = false; //Don't fault on write.
entry.valid = true; //This entry is valid.
tc->getITBPtr()->insert(vaddr.page(), pte);
tc->getITBPtr()->insert(vaddr.page(), entry);
}
}
@ -214,18 +214,18 @@ void NDtbMissFault::invoke(ThreadContext * tc)
} else {
VAddr paddr(physaddr);
PTE pte;
pte.tag = vaddr.vpn();
pte.ppn = paddr.vpn();
pte.xre = 15; //This can be read in all modes.
pte.xwe = 15; //This can be written in all modes.
pte.asn = p->M5_pid; //Address space number.
pte.asma = false; //Only match on this ASN.
pte.fonr = false; //Don't fault on read.
pte.fonw = false; //Don't fault on write.
pte.valid = true; //This entry is valid.
TlbEntry entry;
entry.tag = vaddr.vpn();
entry.ppn = paddr.vpn();
entry.xre = 15; //This can be read in all modes.
entry.xwe = 15; //This can be written in all modes.
entry.asn = p->M5_pid; //Address space number.
entry.asma = false; //Only match on this ASN.
entry.fonr = false; //Don't fault on read.
entry.fonw = false; //Don't fault on write.
entry.valid = true; //This entry is valid.
tc->getDTBPtr()->insert(vaddr.page(), pte);
tc->getDTBPtr()->insert(vaddr.page(), entry);
}
}

View file

@ -88,11 +88,6 @@ static inline Fault genMachineCheckFault()
return new MachineCheckFault;
}
static inline Fault genAlignmentFault()
{
return new AlignmentFault;
}
class ResetFault : public AlphaFault
{
private:

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006 The Regents of The University of Michigan
* Copyright (c) 2006-2007 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -34,7 +34,7 @@
namespace AlphaISA
{
void
PTE::serialize(std::ostream &os)
TlbEntry::serialize(std::ostream &os)
{
SERIALIZE_SCALAR(tag);
SERIALIZE_SCALAR(ppn);
@ -48,7 +48,7 @@ namespace AlphaISA
}
void
PTE::unserialize(Checkpoint *cp, const std::string &section)
TlbEntry::unserialize(Checkpoint *cp, const std::string &section)
{
UNSERIALIZE_SCALAR(tag);
UNSERIALIZE_SCALAR(ppn);

View file

@ -89,9 +89,14 @@ namespace AlphaISA {
Addr paddr() const { return _pfn() << PageShift; }
};
// ITB/DTB page table entry
struct PTE
// ITB/DTB table entry
struct TlbEntry
{
//Construct an entry that maps to physical address addr.
TlbEntry(Addr addr)
{
}
Addr tag; // virtual page number tag
Addr ppn; // physical page number
uint8_t xre; // read permissions - VMEM_PERM_* mask
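
The TlbEntry(Addr addr) constructor added above is what lets the ISA-agnostic PageTable
build an Alpha entry from nothing but a physical page address (pTable[vaddr] =
TheISA::TlbEntry(system->new_page()) in the mem/page_table.cc hunks below). The hunk shows
the constructor body empty, so the following is only a guess at what it would plausibly
fill in; every value here is an assumption for illustration, not part of the patch:

    // Hypothetical sketch: a permissive, syscall-emulation style mapping to
    // physical page addr (compare the values the Alpha fault handlers above
    // assign when they build entries by hand).
    TlbEntry(Addr addr)
    {
        ppn = addr >> PageShift;  // physical page number
        xre = 15;                 // readable in all modes (assumed)
        xwe = 15;                 // writable in all modes (assumed)
        asn = 0;                  // address space number (assumed)
        asma = true;              // match any ASN (assumed)
        fonr = false;             // don't fault on read
        fonw = false;             // don't fault on write
        valid = true;             // entry is valid
    }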

View file

@ -62,8 +62,8 @@ bool uncacheBit40 = false;
TLB::TLB(const string &name, int s)
: SimObject(name), size(s), nlu(0)
{
table = new PTE[size];
memset(table, 0, sizeof(PTE[size]));
table = new TlbEntry[size];
memset(table, 0, sizeof(TlbEntry[size]));
flushCache();
}
@ -74,23 +74,23 @@ TLB::~TLB()
}
// look up an entry in the TLB
PTE *
TlbEntry *
TLB::lookup(Addr vpn, uint8_t asn)
{
// assume not found...
PTE *retval = NULL;
TlbEntry *retval = NULL;
if (PTECache[0]) {
if (vpn == PTECache[0]->tag &&
(PTECache[0]->asma || PTECache[0]->asn == asn))
retval = PTECache[0];
else if (PTECache[1]) {
if (vpn == PTECache[1]->tag &&
(PTECache[1]->asma || PTECache[1]->asn == asn))
retval = PTECache[1];
else if (PTECache[2] && vpn == PTECache[2]->tag &&
(PTECache[2]->asma || PTECache[2]->asn == asn))
retval = PTECache[2];
if (EntryCache[0]) {
if (vpn == EntryCache[0]->tag &&
(EntryCache[0]->asma || EntryCache[0]->asn == asn))
retval = EntryCache[0];
else if (EntryCache[1]) {
if (vpn == EntryCache[1]->tag &&
(EntryCache[1]->asma || EntryCache[1]->asn == asn))
retval = EntryCache[1];
else if (EntryCache[2] && vpn == EntryCache[2]->tag &&
(EntryCache[2]->asma || EntryCache[2]->asn == asn))
retval = EntryCache[2];
}
}
@ -99,10 +99,10 @@ TLB::lookup(Addr vpn, uint8_t asn)
if (i != lookupTable.end()) {
while (i->first == vpn) {
int index = i->second;
PTE *pte = &table[index];
assert(pte->valid);
if (vpn == pte->tag && (pte->asma || pte->asn == asn)) {
retval = updateCache(pte);
TlbEntry *entry = &table[index];
assert(entry->valid);
if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
retval = updateCache(entry);
break;
}
@ -157,7 +157,7 @@ TLB::checkCacheability(RequestPtr &req)
// insert a new TLB entry
void
TLB::insert(Addr addr, PTE &pte)
TLB::insert(Addr addr, TlbEntry &entry)
{
flushCache();
VAddr vaddr = addr;
@ -181,9 +181,9 @@ TLB::insert(Addr addr, PTE &pte)
lookupTable.erase(i);
}
DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), pte.ppn);
DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn);
table[nlu] = pte;
table[nlu] = entry;
table[nlu].tag = vaddr.vpn();
table[nlu].valid = true;
@ -195,7 +195,7 @@ void
TLB::flushAll()
{
DPRINTF(TLB, "flushAll\n");
memset(table, 0, sizeof(PTE[size]));
memset(table, 0, sizeof(TlbEntry[size]));
flushCache();
lookupTable.clear();
nlu = 0;
@ -209,17 +209,17 @@ TLB::flushProcesses()
PageTable::iterator end = lookupTable.end();
while (i != end) {
int index = i->second;
PTE *pte = &table[index];
assert(pte->valid);
TlbEntry *entry = &table[index];
assert(entry->valid);
// we can't increment i after we erase it, so save a copy and
// increment it to get the next entry now
PageTable::iterator cur = i;
++i;
if (!pte->asma) {
DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index, pte->tag, pte->ppn);
pte->valid = false;
if (!entry->asma) {
DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index, entry->tag, entry->ppn);
entry->valid = false;
lookupTable.erase(cur);
}
}
@ -237,15 +237,15 @@ TLB::flushAddr(Addr addr, uint8_t asn)
while (i != lookupTable.end() && i->first == vaddr.vpn()) {
int index = i->second;
PTE *pte = &table[index];
assert(pte->valid);
TlbEntry *entry = &table[index];
assert(entry->valid);
if (vaddr.vpn() == pte->tag && (pte->asma || pte->asn == asn)) {
if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) {
DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
pte->ppn);
entry->ppn);
// invalidate this entry
pte->valid = false;
entry->valid = false;
lookupTable.erase(i++);
} else {
@ -262,7 +262,7 @@ TLB::serialize(ostream &os)
SERIALIZE_SCALAR(nlu);
for (int i = 0; i < size; i++) {
nameOut(os, csprintf("%s.PTE%d", name(), i));
nameOut(os, csprintf("%s.Entry%d", name(), i));
table[i].serialize(os);
}
}
@ -274,7 +274,7 @@ TLB::unserialize(Checkpoint *cp, const string &section)
UNSERIALIZE_SCALAR(nlu);
for (int i = 0; i < size; i++) {
table[i].unserialize(cp, csprintf("%s.PTE%d", section, i));
table[i].unserialize(cp, csprintf("%s.Entry%d", section, i));
if (table[i].valid) {
lookupTable.insert(make_pair(table[i].tag, i));
}
@ -364,20 +364,20 @@ ITB::translate(RequestPtr &req, ThreadContext *tc)
} else {
// not a physical address: need to look up pte
int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
PTE *pte = lookup(VAddr(req->getVaddr()).vpn(),
TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(),
asn);
if (!pte) {
if (!entry) {
misses++;
return new ItbPageFault(req->getVaddr());
}
req->setPaddr((pte->ppn << PageShift) +
req->setPaddr((entry->ppn << PageShift) +
(VAddr(req->getVaddr()).offset()
& ~3));
// check permissions for this access
if (!(pte->xre &
if (!(entry->xre &
(1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
// instruction access fault
acv++;
@ -548,10 +548,9 @@ DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
// not a physical address: need to look up pte
PTE *pte = lookup(VAddr(req->getVaddr()).vpn(),
asn);
TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);
if (!pte) {
if (!entry) {
// page fault
if (write) { write_misses++; } else { read_misses++; }
uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
@ -563,32 +562,32 @@ DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
flags));
}
req->setPaddr((pte->ppn << PageShift) +
req->setPaddr((entry->ppn << PageShift) +
VAddr(req->getVaddr()).offset());
if (write) {
if (!(pte->xwe & MODE2MASK(mode))) {
if (!(entry->xwe & MODE2MASK(mode))) {
// declare the instruction access fault
write_acv++;
uint64_t flags = MM_STAT_WR_MASK |
MM_STAT_ACV_MASK |
(pte->fonw ? MM_STAT_FONW_MASK : 0);
(entry->fonw ? MM_STAT_FONW_MASK : 0);
return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
}
if (pte->fonw) {
if (entry->fonw) {
write_acv++;
uint64_t flags = MM_STAT_WR_MASK |
MM_STAT_FONW_MASK;
return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
}
} else {
if (!(pte->xre & MODE2MASK(mode))) {
if (!(entry->xre & MODE2MASK(mode))) {
read_acv++;
uint64_t flags = MM_STAT_ACV_MASK |
(pte->fonr ? MM_STAT_FONR_MASK : 0);
(entry->fonr ? MM_STAT_FONR_MASK : 0);
return new DtbAcvFault(req->getVaddr(), req->getFlags(), flags);
}
if (pte->fonr) {
if (entry->fonr) {
read_acv++;
uint64_t flags = MM_STAT_FONR_MASK;
return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
@ -609,15 +608,15 @@ DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
return checkCacheability(req);
}
PTE &
TlbEntry &
TLB::index(bool advance)
{
PTE *pte = &table[nlu];
TlbEntry *entry = &table[nlu];
if (advance)
nextnlu();
return *pte;
return *entry;
}
/* end namespace AlphaISA */ }

View file

@ -48,20 +48,20 @@ class ThreadContext;
namespace AlphaISA
{
class PTE;
class TlbEntry;
class TLB : public SimObject
{
protected:
typedef std::multimap<Addr, int> PageTable;
PageTable lookupTable; // Quick lookup into page table
PageTable lookupTable; // Quick lookup into page table
PTE *table; // the Page Table
int size; // TLB Size
int nlu; // not last used entry (for replacement)
TlbEntry *table; // the Page Table
int size; // TLB Size
int nlu; // not last used entry (for replacement)
void nextnlu() { if (++nlu >= size) nlu = 0; }
PTE *lookup(Addr vpn, uint8_t asn);
TlbEntry *lookup(Addr vpn, uint8_t asn);
public:
TLB(const std::string &name, int size);
@ -69,8 +69,8 @@ namespace AlphaISA
int getsize() const { return size; }
PTE &index(bool advance = true);
void insert(Addr vaddr, PTE &pte);
TlbEntry &index(bool advance = true);
void insert(Addr vaddr, TlbEntry &entry);
void flushAll();
void flushProcesses();
@ -90,13 +90,17 @@ namespace AlphaISA
virtual void unserialize(Checkpoint *cp, const std::string &section);
// Most recently used page table entries
PTE *PTECache[3];
inline void flushCache() { memset(PTECache, 0, 3 * sizeof(PTE*)); }
inline PTE* updateCache(PTE *pte) {
PTECache[2] = PTECache[1];
PTECache[1] = PTECache[0];
PTECache[0] = pte;
return pte;
TlbEntry *EntryCache[3];
inline void flushCache()
{
memset(EntryCache, 0, 3 * sizeof(TlbEntry*));
}
inline TlbEntry* updateCache(TlbEntry *entry) {
EntryCache[2] = EntryCache[1];
EntryCache[1] = EntryCache[0];
EntryCache[0] = entry;
return entry;
}
};

View file

@ -75,12 +75,6 @@ FaultName UnimplementedOpcodeFault::_name = "opdec";
FaultVect UnimplementedOpcodeFault::_vect = 0x0481;
FaultStat UnimplementedOpcodeFault::_count;
#if !FULL_SYSTEM
//FaultName PageTableFault::_name = "page_table_fault";
//FaultVect PageTableFault::_vect = 0x0000;
//FaultStat PageTableFault::_count;
#endif
FaultName InterruptFault::_name = "interrupt";
FaultVect InterruptFault::_vect = 0x0101;
FaultStat InterruptFault::_count;
@ -125,40 +119,6 @@ FaultName DspStateDisabledFault::_name = "intover";
FaultVect DspStateDisabledFault::_vect = 0x001a;
FaultStat DspStateDisabledFault::_count;
/*void PageTableFault::invoke(ThreadContext *tc)
{
Process *p = tc->getProcessPtr();
Addr page_addr = p->pTable->pageAlign(vaddr);
warn("%i: [tid:%i]: %s encountered @ addr %x. Allocating new page for address range %x - %x.\n",
curTick, tc->getThreadNum(), name(), vaddr, page_addr, page_addr+VMPageSize);
p->pTable->allocate(page_addr, VMPageSize);
return;
}
*/
/* address is higher than the stack region or in the current stack region
if (vaddr > p->stack_base || vaddr > p->stack_min)
FaultBase::invoke(tc);
// We've accessed the next page
if (vaddr > p->stack_min - PageBytes) {
p->stack_min -= PageBytes;
if (p->stack_base - p->stack_min > 8*1024*1024) {
warn("Already allocated Over max stack size for one thread\n");
}
warn("%i: Allocating page for range %x - %x",
curTick, p->stack_min, p->stack_min-PageBytes);
p->pTable->allocate(p->stack_min, PageBytes);
warn("Increasing stack size by one page.");
} else {
FaultBase::invoke(tc);
}*/
void ResetFault::invoke(ThreadContext *tc)
{
warn("[tid:%i]: %s encountered.\n", tc->getThreadNum(), name());

View file

@ -92,40 +92,11 @@ class UnimplementedOpcodeFault : public MipsFault
FaultStat & countStat() {return _count;}
};
#if !FULL_SYSTEM
//class PageTableFault : public MipsFault
//{
//private:
// Addr vaddr;
// static FaultName _name;
// static FaultVect _vect;
// static FaultStat _count;
//public:
// PageTableFault(Addr va)
// : vaddr(va) {}
// FaultName name() {return _name;}
// FaultVect vect() {return _vect;}
// FaultStat & countStat() {return _count;}
// void invoke(ThreadContext * tc);
//};
static inline Fault genPageTableFault(Addr va)
{
return new PageTableFault(va);
}
#endif
static inline Fault genMachineCheckFault()
{
return new MachineCheckFault;
}
static inline Fault genAlignmentFault()
{
return new AlignmentFault;
}
class ResetFault : public MipsFault
{
private:

View file

@ -31,6 +31,7 @@
#ifndef __ARCH_MIPS_TYPES_HH__
#define __ARCH_MIPS_TYPES_HH__
#include "mem/types.hh"
#include "sim/host.hh"
namespace MipsISA
@ -93,7 +94,9 @@ namespace MipsISA
RND_DOWN,
RND_UP,
RND_NEAREST
};
};
typedef ::PageTable<> PageTable;
} // namespace MipsISA

View file

@ -283,11 +283,6 @@ static inline Fault genMachineCheckFault()
return new InternalProcessorError;
}
static inline Fault genAlignmentFault()
{
return new MemAddressNotAligned;
}
} // SparcISA namespace

View file

@ -33,6 +33,7 @@
#include <inttypes.h>
#include "base/bigint.hh"
#include "mem/page_table.hh"
namespace SparcISA
{
@ -60,6 +61,8 @@ namespace SparcISA
typedef int RegContextVal;
typedef uint16_t RegIndex;
typedef ::PageTable<> PageTable;
}
#endif

View file

@ -91,20 +91,10 @@ namespace X86ISA
}
};
static inline Fault genPageTableFault(Addr va)
{
panic("Page table fault not implemented in x86!\n");
}
static inline Fault genMachineCheckFault()
{
panic("Machine check fault not implemented in x86!\n");
}
static inline Fault genAlignmentFault()
{
panic("Alignment fault not implemented (or for the most part existant) in x86!\n");
}
};
#endif // __ARCH_X86_FAULTS_HH__

View file

@ -93,6 +93,7 @@
#include "base/loader/object_file.hh"
#include "base/loader/elf_object.hh"
#include "base/misc.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "mem/page_table.hh"
#include "mem/translating_port.hh"

View file

@ -60,8 +60,20 @@
#include "arch/x86/tlb.hh"
#include "params/X86DTB.hh"
#include "params/X86ITB.hh"
#include "sim/serialize.hh"
namespace X86ISA {
void
TlbEntry::serialize(std::ostream &os)
{
SERIALIZE_SCALAR(pageStart);
}
void
TlbEntry::unserialize(Checkpoint *cp, const std::string &section)
{
UNSERIALIZE_SCALAR(pageStart);
}
};
X86ISA::ITB *

View file

@ -58,21 +58,37 @@
#ifndef __ARCH_X86_TLB_HH__
#define __ARCH_X86_TLB_HH__
#include <iostream>
#include <string>
#include "sim/host.hh"
#include "sim/tlb.hh"
class Checkpoint;
namespace X86ISA
{
class ITB : public GenericITB
struct TlbEntry
{
Addr pageStart;
TlbEntry() {}
TlbEntry(Addr paddr) : pageStart(paddr) {}
void serialize(std::ostream &os);
void unserialize(Checkpoint *cp, const std::string &section);
};
class ITB : public GenericITB<false, false>
{
public:
ITB(const std::string &name) : GenericITB(name)
ITB(const std::string &name) : GenericITB<false, false>(name)
{}
};
class DTB : public GenericDTB
class DTB : public GenericDTB<false, false>
{
public:
DTB(const std::string &name) : GenericDTB(name)
DTB(const std::string &name) : GenericDTB<false, false>(name)
{}
};
};

View file

@ -63,32 +63,6 @@ PageTable::~PageTable()
{
}
Fault
PageTable::page_check(Addr addr, int64_t size) const
{
if (size < sizeof(uint64_t)) {
if (!isPowerOf2(size)) {
panic("Invalid request size!\n");
return genMachineCheckFault();
}
if ((size - 1) & addr)
return genAlignmentFault();
}
else {
if ((addr & (VMPageSize - 1)) + size > VMPageSize) {
panic("Invalid request size!\n");
return genMachineCheckFault();
}
if ((sizeof(uint64_t) - 1) & addr)
return genAlignmentFault();
}
return NoFault;
}
void
PageTable::allocate(Addr vaddr, int64_t size)
{
@ -98,62 +72,73 @@ PageTable::allocate(Addr vaddr, int64_t size)
DPRINTF(MMU, "Allocating Page: %#x-%#x\n", vaddr, vaddr+ size);
for (; size > 0; size -= pageSize, vaddr += pageSize) {
m5::hash_map<Addr,Addr>::iterator iter = pTable.find(vaddr);
PTableItr iter = pTable.find(vaddr);
if (iter != pTable.end()) {
// already mapped
fatal("PageTable::allocate: address 0x%x already mapped", vaddr);
fatal("PageTable::allocate: address 0x%x already mapped",
vaddr);
}
pTable[vaddr] = system->new_page();
pTable[vaddr] = TheISA::TlbEntry(system->new_page());
updateCache(vaddr, pTable[vaddr]);
}
}
bool
PageTable::translate(Addr vaddr, Addr &paddr)
PageTable::lookup(Addr vaddr, TheISA::TlbEntry &entry)
{
Addr page_addr = pageAlign(vaddr);
paddr = 0;
if (pTableCache[0].vaddr == page_addr) {
paddr = pTableCache[0].paddr + pageOffset(vaddr);
entry = pTableCache[0].entry;
return true;
}
if (pTableCache[1].vaddr == page_addr) {
paddr = pTableCache[1].paddr + pageOffset(vaddr);
entry = pTableCache[1].entry;
return true;
}
if (pTableCache[2].vaddr == page_addr) {
paddr = pTableCache[2].paddr + pageOffset(vaddr);
entry = pTableCache[2].entry;
return true;
}
m5::hash_map<Addr,Addr>::iterator iter = pTable.find(page_addr);
PTableItr iter = pTable.find(page_addr);
if (iter == pTable.end()) {
return false;
}
updateCache(page_addr, iter->second);
paddr = iter->second + pageOffset(vaddr);
entry = iter->second;
return true;
}
bool
PageTable::translate(Addr vaddr, Addr &paddr)
{
TheISA::TlbEntry entry;
if (!lookup(vaddr, entry))
return false;
paddr = pageOffset(vaddr) + entry.pageStart;
return true;
}
Fault
PageTable::translate(RequestPtr &req)
PageTable::translate(RequestPtr req)
{
Addr paddr;
assert(pageAlign(req->getVaddr() + req->getSize() - 1)
== pageAlign(req->getVaddr()));
if (!translate(req->getVaddr(), paddr)) {
return Fault(new PageTableFault(req->getVaddr()));
return Fault(new GenericPageTableFault(req->getVaddr()));
}
req->setPaddr(paddr);
return page_check(req->getPaddr(), req->getSize());
if ((paddr & (pageSize - 1)) + req->getSize() > pageSize) {
panic("Request spans page boundaries!\n");
return NoFault;
}
return NoFault;
}
void
@ -163,11 +148,11 @@ PageTable::serialize(std::ostream &os)
int count = 0;
m5::hash_map<Addr,Addr>::iterator iter = pTable.begin();
m5::hash_map<Addr,Addr>::iterator end = pTable.end();
PTableItr iter = pTable.begin();
PTableItr end = pTable.end();
while (iter != end) {
paramOut(os, csprintf("ptable.entry%dvaddr", count), iter->first);
paramOut(os, csprintf("ptable.entry%dpaddr", count), iter->second);
iter->second.serialize(os);
++iter;
++count;
@ -180,16 +165,16 @@ PageTable::unserialize(Checkpoint *cp, const std::string &section)
{
int i = 0, count;
paramIn(cp, section, "ptable.size", count);
Addr vaddr, paddr;
Addr vaddr;
TheISA::TlbEntry entry;
pTable.clear();
while(i < count) {
paramIn(cp, section, csprintf("ptable.entry%dvaddr", i), vaddr);
paramIn(cp, section, csprintf("ptable.entry%dpaddr", i), paddr);
pTable[vaddr] = paddr;
entry.unserialize(cp, section);
pTable[vaddr] = entry;
++i;
}
}
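
Callers in syscall-emulation mode keep using the same translate() entry points; the entry
type is only visible to code that asks for it through the new lookup(). A short usage
sketch (process, vaddr and req are hypothetical names, not from the patch):

    // Address-only form: fills in paddr, returns false on a miss.
    Addr paddr;
    if (!process->pTable->translate(vaddr, paddr))
        return Fault(new GenericPageTableFault(vaddr));

    // Request form: sets the request's physical address and faults only if
    // the access cannot be satisfied; the old per-access size/alignment
    // checking now lives in the generic TLB (see sim/tlb.hh below).
    Fault fault = process->pTable->translate(req);
    if (fault != NoFault)
        return fault;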

View file

@ -40,11 +40,11 @@
#include "sim/faults.hh"
#include "arch/isa_traits.hh"
#include "arch/tlb.hh"
#include "base/hashmap.hh"
#include "base/trace.hh"
#include "mem/request.hh"
#include "mem/packet.hh"
#include "sim/sim_object.hh"
#include "sim/host.hh"
#include "sim/serialize.hh"
class System;
@ -54,12 +54,14 @@ class System;
class PageTable
{
protected:
m5::hash_map<Addr,Addr> pTable;
typedef m5::hash_map<Addr, TheISA::TlbEntry> PTable;
typedef PTable::iterator PTableItr;
PTable pTable;
struct cacheElement {
Addr paddr;
Addr vaddr;
} ;
TheISA::TlbEntry entry;
};
struct cacheElement pTableCache[3];
@ -77,10 +79,15 @@ class PageTable
Addr pageAlign(Addr a) { return (a & ~offsetMask); }
Addr pageOffset(Addr a) { return (a & offsetMask); }
Fault page_check(Addr addr, int64_t size) const;
void allocate(Addr vaddr, int64_t size);
/**
* Lookup function
* @param vaddr The virtual address.
* @return entry The page table entry corresponding to vaddr.
*/
bool lookup(Addr vaddr, TheISA::TlbEntry &entry);
/**
* Translate function
* @param vaddr The virtual address.
@ -90,28 +97,29 @@ class PageTable
/**
* Perform a translation on the memory request, fills in paddr
* field of mem_req.
* field of req.
* @param req The memory request.
*/
Fault translate(RequestPtr &req);
Fault translate(RequestPtr req);
/**
* Update the page table cache.
* @param vaddr virtual address (page aligned) to check
* @param paddr physical address (page aligned) to return
* @param pte page table entry to return
*/
inline void updateCache(Addr vaddr, Addr paddr)
inline void updateCache(Addr vaddr, TheISA::TlbEntry entry)
{
pTableCache[2].paddr = pTableCache[1].paddr;
pTableCache[2].entry = pTableCache[1].entry;
pTableCache[2].vaddr = pTableCache[1].vaddr;
pTableCache[1].paddr = pTableCache[0].paddr;
pTableCache[1].entry = pTableCache[0].entry;
pTableCache[1].vaddr = pTableCache[0].vaddr;
pTableCache[0].paddr = paddr;
pTableCache[0].entry = entry;
pTableCache[0].vaddr = vaddr;
}
void serialize(std::ostream &os);
void unserialize(Checkpoint *cp, const std::string &section);
};

View file

@ -56,8 +56,9 @@ void UnimpFault::invoke(ThreadContext * tc)
{
panic("Unimpfault: %s\n", panicStr.c_str());
}
#if !FULL_SYSTEM
void PageTableFault::invoke(ThreadContext *tc)
void GenericPageTableFault::invoke(ThreadContext *tc)
{
Process *p = tc->getProcessPtr();
@ -65,4 +66,9 @@ void PageTableFault::invoke(ThreadContext *tc)
panic("Page table fault when accessing virtual address %#x\n", vaddr);
}
void GenericAlignmentFault::invoke(ThreadContext *tc)
{
panic("Alignment fault when accessing virtual address %#x\n", vaddr);
}
#endif

View file

@ -77,13 +77,23 @@ class UnimpFault : public FaultBase
};
#if !FULL_SYSTEM
class PageTableFault : public FaultBase
class GenericPageTableFault : public FaultBase
{
private:
Addr vaddr;
public:
FaultName name() const {return "M5 page table fault";}
PageTableFault(Addr va) : vaddr(va) {}
FaultName name() const {return "Generic page table fault";}
GenericPageTableFault(Addr va) : vaddr(va) {}
void invoke(ThreadContext * tc);
};
class GenericAlignmentFault : public FaultBase
{
private:
Addr vaddr;
public:
FaultName name() const {return "Generic alignment fault";}
GenericAlignmentFault(Addr va) : vaddr(va) {}
void invoke(ThreadContext * tc);
};
#endif

View file

@ -34,21 +34,17 @@
#include "sim/tlb.hh"
Fault
GenericITB::translate(RequestPtr &req, ThreadContext *tc)
GenericTLBBase::translate(RequestPtr req, ThreadContext * tc)
{
#if FULL_SYSTEM
panic("Generic ITB translation shouldn't be used in full system mode.\n");
panic("Generic translation shouldn't be used in full system mode.\n");
#else
return tc->getProcessPtr()->pTable->translate(req);
Process * p = tc->getProcessPtr();
Fault fault = p->pTable->translate(req);
if(fault != NoFault)
return fault;
return NoFault;
#endif
}
Fault
GenericDTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
{
#if FULL_SYSTEM
panic("Generic DTB translation shouldn't be used in full system mode.\n");
#else
return tc->getProcessPtr()->pTable->translate(req);
#endif
};

View file

@ -31,36 +31,64 @@
#ifndef __SIM_TLB_HH__
#define __SIM_TLB_HH__
#include "base/misc.hh"
#include "mem/request.hh"
#include "sim/sim_object.hh"
#include "sim/faults.hh"
#include "sim/sim_object.hh"
class ThreadContext;
class Packet;
class GenericTLB : public SimObject
class GenericTLBBase : public SimObject
{
protected:
GenericTLBBase(const std::string &name) : SimObject(name)
{}
Fault translate(RequestPtr req, ThreadContext *tc);
};
template <bool doSizeCheck=true, bool doAlignmentCheck=true>
class GenericTLB : public GenericTLBBase
{
public:
GenericTLB(const std::string &name) : SimObject(name)
GenericTLB(const std::string &name) : GenericTLBBase(name)
{}
Fault translate(RequestPtr req, ThreadContext *tc, bool=false)
{
Fault fault = GenericTLBBase::translate(req, tc);
if (fault != NoFault)
return fault;
typeof(req->getSize()) size = req->getSize();
Addr paddr = req->getPaddr();
if(doSizeCheck && !isPowerOf2(size))
panic("Invalid request size!\n");
if (doAlignmentCheck && ((size - 1) & paddr))
return Fault(new GenericAlignmentFault(paddr));
return NoFault;
}
};
template <bool doSizeCheck=true, bool doAlignmentCheck=true>
class GenericITB : public GenericTLB<doSizeCheck, doAlignmentCheck>
{
public:
GenericITB(const std::string &name) :
GenericTLB<doSizeCheck, doAlignmentCheck>(name)
{}
};
class GenericITB : public GenericTLB
template <bool doSizeCheck=true, bool doAlignmentCheck=true>
class GenericDTB : public GenericTLB<doSizeCheck, doAlignmentCheck>
{
public:
GenericITB(const std::string &name) : GenericTLB(name)
GenericDTB(const std::string &name) :
GenericTLB<doSizeCheck, doAlignmentCheck>(name)
{}
Fault translate(RequestPtr &req, ThreadContext *tc);
};
class GenericDTB : public GenericTLB
{
public:
GenericDTB(const std::string &name) : GenericTLB(name)
{}
Fault translate(RequestPtr &req, ThreadContext *tc, bool write);
};
#endif // __ARCH_SPARC_TLB_HH__
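
The two bool template parameters added here replace the page_check() logic removed from
mem/page_table.cc: the power-of-two size check and the alignment check can now be switched
off per ISA. The x86 hunks above instantiate GenericITB<false, false> and
GenericDTB<false, false> to skip both. A hedged sketch of how a hypothetical ISA header
might use the knobs (the namespace and class names are made up for illustration):

    #include <string>
    #include "sim/tlb.hh"

    namespace HypotheticalISA
    {
        // Keep the size check, drop only the alignment check.
        class ITB : public GenericITB<true, false>
        {
          public:
            ITB(const std::string &name)
                : GenericITB<true, false>(name)
            {}
        };

        class DTB : public GenericDTB<true, false>
        {
          public:
            DTB(const std::string &name)
                : GenericDTB<true, false>(name)
            {}
        };
    }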