sparc: Fix style, create a helper function for translation.

The translate function simplifies code and removes some compiler
warnings in gcc 3.4
This commit is contained in:
Nathan Binkert 2008-09-23 20:38:02 -07:00
parent 38dd9687ce
commit 70ec46de17
3 changed files with 121 additions and 100 deletions

View file

@ -38,8 +38,8 @@
class Checkpoint;
namespace SparcISA
{
namespace SparcISA {
struct VAddr
{
VAddr(Addr a) { panic("not implemented yet."); }
@ -54,8 +54,15 @@ class TteTag
public:
TteTag() : entry(0), populated(false) {}
TteTag(uint64_t e) : entry(e), populated(true) {}
const TteTag &operator=(uint64_t e) { populated = true;
entry = e; return *this; }
const TteTag &
operator=(uint64_t e)
{
populated = true;
entry = e;
return *this;
}
bool valid() const {assert(populated); return !bits(entry,62,62); }
Addr va() const {assert(populated); return bits(entry,41,0); }
};
@ -76,13 +83,13 @@ class PageTableEntry
uint64_t entry4u;
bool populated;
public:
PageTableEntry() : entry(0), type(invalid), populated(false) {}
PageTableEntry()
: entry(0), type(invalid), populated(false)
{}
PageTableEntry(uint64_t e, EntryType t = sun4u)
: entry(e), type(t), populated(true)
{
populate(entry, type);
}
@ -113,49 +120,74 @@ class PageTableEntry
}
}
void clear()
void
clear()
{
populated = false;
}
static int pageSizes[6];
uint64_t operator()() const { assert(populated); return entry4u; }
const PageTableEntry &operator=(uint64_t e) { populated = true;
entry4u = e; return *this; }
const PageTableEntry &
operator=(uint64_t e)
{
populated = true;
entry4u = e;
return *this;
}
const PageTableEntry &operator=(const PageTableEntry &e)
{ populated = true; entry4u = e.entry4u; type = e.type; return *this; }
const PageTableEntry &
operator=(const PageTableEntry &e)
{
populated = true;
entry4u = e.entry4u;
type = e.type;
return *this;
}
bool valid() const { return bits(entry4u,63,63) && populated; }
uint8_t _size() const { assert(populated);
return bits(entry4u, 62,61) |
bits(entry4u, 48,48) << 2; }
Addr size() const { assert(_size() < 6); return pageSizes[_size()]; }
Addr sizeMask() const { assert(_size() < 6); return pageSizes[_size()]-1;}
bool ie() const { return bits(entry4u, 59,59); }
Addr pfn() const { assert(populated); return bits(entry4u,39,13); }
Addr paddr() const { assert(populated); return mbits(entry4u, 39,13);}
bool locked() const { assert(populated); return bits(entry4u,6,6); }
bool cv() const { assert(populated); return bits(entry4u,4,4); }
bool cp() const { assert(populated); return bits(entry4u,5,5); }
bool priv() const { assert(populated); return bits(entry4u,2,2); }
bool writable() const { assert(populated); return bits(entry4u,1,1); }
bool nofault() const { assert(populated); return bits(entry4u,60,60); }
bool sideffect() const { assert(populated); return bits(entry4u,3,3); }
Addr paddrMask() const { assert(populated);
return mbits(entry4u, 39,13) & ~sizeMask(); }
bool valid() const { return bits(entry4u,63,63) && populated; }
uint8_t
_size() const
{
assert(populated);
return bits(entry4u, 62,61) | bits(entry4u, 48,48) << 2;
}
Addr size() const { assert(_size() < 6); return pageSizes[_size()]; }
Addr sizeMask() const { return size() - 1; }
bool ie() const { return bits(entry4u, 59,59); }
Addr pfn() const { assert(populated); return bits(entry4u,39,13); }
Addr paddr() const { assert(populated); return mbits(entry4u, 39,13);}
bool locked() const { assert(populated); return bits(entry4u,6,6); }
bool cv() const { assert(populated); return bits(entry4u,4,4); }
bool cp() const { assert(populated); return bits(entry4u,5,5); }
bool priv() const { assert(populated); return bits(entry4u,2,2); }
bool writable() const { assert(populated); return bits(entry4u,1,1); }
bool nofault() const { assert(populated); return bits(entry4u,60,60); }
bool sideffect() const { assert(populated); return bits(entry4u,3,3); }
Addr paddrMask() const { assert(populated); return paddr() & ~sizeMask(); }
Addr
translate(Addr vaddr) const
{
assert(populated);
Addr mask = sizeMask();
return (paddr() & ~mask) | (vaddr & mask);
}
};
struct TlbRange {
struct TlbRange
{
Addr va;
Addr size;
int contextId;
int partitionId;
bool real;
inline bool operator<(const TlbRange &r2) const
inline bool
operator<(const TlbRange &r2) const
{
if (real && !r2.real)
return true;
@ -178,7 +210,9 @@ struct TlbRange {
return true;
return false;
}
inline bool operator==(const TlbRange &r2) const
inline bool
operator==(const TlbRange &r2) const
{
return va == r2.va &&
size == r2.size &&
@ -189,7 +223,11 @@ struct TlbRange {
};
struct TlbEntry {
struct TlbEntry
{
TlbEntry()
{}
TlbEntry(Addr asn, Addr vaddr, Addr paddr)
{
uint64_t entry = 0;
@ -215,8 +253,7 @@ struct TlbEntry {
valid = true;
}
TlbEntry()
{}
TlbRange range;
PageTableEntry pte;
bool used;
@ -229,11 +266,9 @@ struct TlbEntry {
void serialize(std::ostream &os);
void unserialize(Checkpoint *cp, const std::string &section);
};
}; // namespace SparcISA
} // namespace SparcISA
#endif // __ARCH_SPARC_PAGE_TABLE_HH__

View file

@ -51,7 +51,7 @@ TLB::TLB(const Params *p)
{
// To make this work you'll have to change the hypervisor and OS
if (size > 64)
fatal("SPARC T1 TLB registers don't support more than 64 TLB entries.");
fatal("SPARC T1 TLB registers don't support more than 64 TLB entries");
tlb = new TlbEntry[size];
std::memset(tlb, 0, sizeof(TlbEntry) * size);
@ -87,8 +87,6 @@ void
TLB::insert(Addr va, int partition_id, int context_id, bool real,
const PageTableEntry& PTE, int entry)
{
MapIter i;
TlbEntry *new_entry = NULL;
// TlbRange tr;
@ -103,8 +101,9 @@ TLB::insert(Addr va, int partition_id, int context_id, bool real,
tr.real = real;
*/
DPRINTF(TLB, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
va, PTE.paddr(), partition_id, context_id, (int)real, entry);
DPRINTF(TLB,
"TLB: Inserting Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
va, PTE.paddr(), partition_id, context_id, (int)real, entry);
// Demap any entry that conflicts
for (x = 0; x < size; x++) {
@ -128,7 +127,6 @@ TLB::insert(Addr va, int partition_id, int context_id, bool real,
}
}
/*
i = lookupTable.find(tr);
if (i != lookupTable.end()) {
@ -195,25 +193,22 @@ insertAllLocked:
new_entry->valid = true;
usedEntries++;
i = lookupTable.insert(new_entry->range, new_entry);
assert(i != lookupTable.end());
// If all entries have there used bit set, clear it on them all, but the
// one we just inserted
// If all entries have their used bit set, clear it on them all,
// but the one we just inserted
if (usedEntries == size) {
clearUsedBits();
new_entry->used = true;
usedEntries++;
}
}
TlbEntry*
TLB::lookup(Addr va, int partition_id, bool real, int context_id, bool
update_used)
TLB::lookup(Addr va, int partition_id, bool real, int context_id,
bool update_used)
{
MapIter i;
TlbRange tr;
@ -240,8 +235,8 @@ TLB::lookup(Addr va, int partition_id, bool real, int context_id, bool
DPRINTF(TLB, "TLB: Valid entry found pa: %#x size: %#x\n", t->pte.paddr(),
t->pte.size());
// Update the used bits only if this is a real access (not a fake one from
// virttophys()
// Update the used bits only if this is a real access (not a fake
// one from virttophys())
if (!t->used && update_used) {
t->used = true;
usedEntries++;
@ -304,11 +299,10 @@ TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
void
TLB::demapContext(int partition_id, int context_id)
{
int x;
DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n",
partition_id, context_id);
cacheValid = false;
for (x = 0; x < size; x++) {
for (int x = 0; x < size; x++) {
if (tlb[x].range.contextId == context_id &&
tlb[x].range.partitionId == partition_id) {
if (tlb[x].valid == true) {
@ -327,10 +321,9 @@ TLB::demapContext(int partition_id, int context_id)
void
TLB::demapAll(int partition_id)
{
int x;
DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id);
cacheValid = false;
for (x = 0; x < size; x++) {
for (int x = 0; x < size; x++) {
if (tlb[x].valid && !tlb[x].pte.locked() &&
tlb[x].range.partitionId == partition_id) {
freeList.push_front(&tlb[x]);
@ -347,11 +340,10 @@ TLB::demapAll(int partition_id)
void
TLB::invalidateAll()
{
int x;
cacheValid = false;
lookupTable.clear();
for (x = 0; x < size; x++) {
for (int x = 0; x < size; x++) {
if (tlb[x].valid == true)
freeList.push_back(&tlb[x]);
tlb[x].valid = false;
@ -361,7 +353,8 @@ TLB::invalidateAll()
}
uint64_t
TLB::TteRead(int entry) {
TLB::TteRead(int entry)
{
if (entry >= size)
panic("entry: %d\n", entry);
@ -373,7 +366,8 @@ TLB::TteRead(int entry) {
}
uint64_t
TLB::TagRead(int entry) {
TLB::TagRead(int entry)
{
assert(entry < size);
uint64_t tag;
if (!tlb[entry].valid)
@ -459,9 +453,8 @@ ITB::translate(RequestPtr &req, ThreadContext *tc)
if (cacheEntry) {
if (cacheEntry->range.va < vaddr + sizeof(MachInst) &&
cacheEntry->range.va + cacheEntry->range.size >= vaddr) {
req->setPaddr(cacheEntry->pte.paddr() & ~(cacheEntry->pte.size()-1) |
vaddr & cacheEntry->pte.size()-1 );
return NoFault;
req->setPaddr(cacheEntry->pte.translate(vaddr));
return NoFault;
}
} else {
req->setPaddr(vaddr & PAddrImplMask);
@ -550,18 +543,18 @@ ITB::translate(RequestPtr &req, ThreadContext *tc)
cacheState = tlbdata;
cacheEntry = e;
req->setPaddr(e->pte.paddr() & ~(e->pte.size()-1) |
vaddr & e->pte.size()-1 );
req->setPaddr(e->pte.translate(vaddr));
DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
return NoFault;
}
Fault
DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
{
/* @todo this could really use some profiling and fixing to make it faster! */
/*
* @todo this could really use some profiling and fixing to make
* it faster!
*/
uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
Addr vaddr = req->getVaddr();
Addr size = req->getSize();
@ -599,11 +592,11 @@ DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
if (cacheAsi[0] == asi &&
ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
(!write || ce->pte.writable())) {
req->setPaddr(ce->pte.paddrMask() | vaddr & ce->pte.sizeMask());
if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
req->setFlags(req->getFlags() | UNCACHEABLE);
DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
return NoFault;
req->setPaddr(ce->pte.translate(vaddr));
if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
req->setFlags(req->getFlags() | UNCACHEABLE);
DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
return NoFault;
} // if matched
} // if cache entry valid
if (cacheEntry[1]) {
@ -612,11 +605,11 @@ DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
if (cacheAsi[1] == asi &&
ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
(!write || ce->pte.writable())) {
req->setPaddr(ce->pte.paddrMask() | vaddr & ce->pte.sizeMask());
if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
req->setFlags(req->getFlags() | UNCACHEABLE);
DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
return NoFault;
req->setPaddr(ce->pte.translate(vaddr));
if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
req->setFlags(req->getFlags() | UNCACHEABLE);
DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
return NoFault;
} // if matched
} // if cache entry valid
}
@ -639,7 +632,7 @@ DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
TlbEntry *e;
DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
priv, hpriv, red, lsu_dm, part_id);
priv, hpriv, red, lsu_dm, part_id);
if (implicit) {
if (tl > 0) {
@ -725,11 +718,10 @@ DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
return new DataAccessException;
}
if ((!lsu_dm && !hpriv && !red) || AsiIsReal(asi)) {
real = true;
context = 0;
};
}
if (hpriv && (implicit || (!AsiIsAsIfUser(asi) && !AsiIsReal(asi)))) {
req->setPaddr(vaddr & PAddrImplMask);
@ -776,7 +768,6 @@ DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
return new DataAccessException;
}
if (e->pte.sideffect() || (e->pte.paddr() >> 39) & 1)
req->setFlags(req->getFlags() | UNCACHEABLE);
@ -796,8 +787,7 @@ DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
cacheAsi[0] = (ASI)0;
}
cacheValid = true;
req->setPaddr(e->pte.paddr() & ~(e->pte.size()-1) |
vaddr & e->pte.size()-1);
req->setPaddr(e->pte.translate(vaddr));
DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
return NoFault;
@ -869,7 +859,7 @@ DTB::doMmuRegRead(ThreadContext *tc, Packet *pkt)
DPRINTF(IPR, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
(uint32_t)pkt->req->getAsi(), pkt->getAddr());
ITB * itb = tc->getITBPtr();
ITB *itb = tc->getITBPtr();
switch (asi) {
case ASI_LSU_CONTROL_REG:
@ -1055,7 +1045,7 @@ DTB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
(uint32_t)asi, va, data);
ITB * itb = tc->getITBPtr();
ITB *itb = tc->getITBPtr();
switch (asi) {
case ASI_LSU_CONTROL_REG:
@ -1173,7 +1163,8 @@ DTB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
real_insert = bits(va, 9,9);
pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
PageTableEntry::sun4u);
insert(va_insert, part_insert, ct_insert, real_insert, pte, entry_insert);
insert(va_insert, part_insert, ct_insert, real_insert, pte,
entry_insert);
break;
case ASI_IMMU_DEMAP:
ignore = false;
@ -1272,7 +1263,7 @@ DTB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
tc->getSystemPtr()->threadContexts[bits(data,12,8)]->getCpuPtr()->
post_interrupt(bits(data,5,0),0);
break;
default:
default:
doMmuWriteError:
panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
(uint32_t)pkt->req->getAsi(), pkt->getAddr(), data);
@ -1310,10 +1301,6 @@ DTB::GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs)
itb->cx_config);
}
uint64_t
DTB::MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb,
uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
@ -1341,7 +1328,6 @@ DTB::MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb,
return ptr;
}
void
TLB::serialize(std::ostream &os)
{

View file

@ -109,9 +109,9 @@ class TLB : public BaseTLB
* @param partition_id partition this entry is for
* @param real is this a real->phys or virt->phys translation
* @param context_id if this is virt->phys what context
* @param update_used should ew update the used bits in the entries on not
* useful if we are trying to do a va->pa without mucking with any state for
* a debug read for example.
* @param update_used should we update the used bits in the
* entries or not; useful if we are trying to do a va->pa without
* mucking with any state for a debug read, for example.
* @return A pointer to a tlb entry
*/
TlbEntry *lookup(Addr va, int partition_id, bool real, int context_id = 0,