Bug fixes in the TLB
Make our replacement algorithm the same as Legion's (although not the same as
the spec), and make the ITB 64 entries instead of 48.

src/arch/sparc/tlb.cc:
    Bug fixes in the TLB. Make our replacement algorithm the same as Legion's
    (although not the same as the spec).
src/arch/sparc/tlb.hh:
    Make our replacement algorithm the same as Legion's (although not the same
    as the spec).
src/python/m5/objects/SparcTLB.py:
    The ITB should be 64 entries too.

--HG--
extra : convert_revision : 1b5cb3597091e3cfe293e94f6f2219b1e621c35f
parent ff88f3b13a
commit ba14d6d0e1

3 changed files with 97 additions and 30 deletions
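The replacement policy this commit adopts (matching Legion rather than the spec) is a free-list backed, lock-aware round-robin: take a never-used entry if one exists, otherwise scan forward from the entry after the last victim, skipping locked TTEs, and fall back to the last slot if everything is locked. The sketch below is a minimal, self-contained illustration of that selection logic only; the names (pickVictim, Entry, kSize) are hypothetical and it omits the explicit-entry and conflict-demap paths handled by the real TLB::insert() in the diff.

#include <cassert>
#include <cstdio>
#include <list>

// Minimal stand-ins for the real gem5 structures (hypothetical names).
struct Entry {
    bool locked = false;   // locked TTEs are never chosen as victims
    bool valid  = false;
};

static const int kSize = 64;          // the commit sizes both ITB and DTB at 64
static Entry tlb[kSize];
static std::list<Entry*> freeList;    // filled with every entry at construction
static int lastReplaced = 0;

// Pick the entry a new translation will overwrite, in the same spirit as the
// diff: prefer a free entry, otherwise round-robin from the entry after the
// last victim, skipping locked entries; if all are locked, use the last slot.
Entry *pickVictim()
{
    if (!freeList.empty()) {
        Entry *e = freeList.front();
        freeList.pop_front();
        return e;
    }

    int x = lastReplaced;
    do {
        ++x;
        if (x == kSize)
            x = 0;                    // wrap around the TLB
        if (x == lastReplaced)        // walked the whole TLB: all locked
            return &tlb[kSize - 1];
    } while (tlb[x].locked);

    lastReplaced = x;
    return &tlb[x];
}

int main()
{
    for (int x = 0; x < kSize; x++)
        freeList.push_back(&tlb[x]);

    tlb[1].locked = true;             // a locked entry must never be evicted
    for (int i = 0; i < kSize + 3; i++)
        pickVictim()->valid = true;   // drain the free list, then rotate

    assert(lastReplaced != 1);
    std::printf("lastReplaced = %d\n", lastReplaced);
    return 0;
}

Note the fallback path mirrors the diff's insertAllLocked label: it does not advance lastReplaced, so a fully locked TLB keeps overwriting the final entry.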
src/arch/sparc/tlb.cc

@@ -45,7 +45,8 @@ namespace SparcISA
 {
 
 TLB::TLB(const std::string &name, int s)
-    : SimObject(name), size(s), usedEntries(0), cacheValid(false)
+    : SimObject(name), size(s), usedEntries(0), lastReplaced(0),
+      cacheValid(false)
 {
     // To make this work you'll have to change the hypervisor and OS
     if (size > 64)
@@ -53,13 +54,16 @@ TLB::TLB(const std::string &name, int s)
     tlb = new TlbEntry[size];
     memset(tlb, 0, sizeof(TlbEntry) * size);
 
+    for (int x = 0; x < size; x++)
+        freeList.push_back(&tlb[x]);
 }
 
 void
 TLB::clearUsedBits()
 {
     MapIter i;
-    for (i = lookupTable.begin(); i != lookupTable.end();) {
+    for (i = lookupTable.begin(); i != lookupTable.end(); i++) {
         TlbEntry *t = i->second;
         if (!t->pte.locked()) {
             t->used = false;
@@ -77,32 +81,76 @@ TLB::insert(Addr va, int partition_id, int context_id, bool real,
     MapIter i;
     TlbEntry *new_entry = NULL;
     TlbRange tr;
     int x;
 
     cacheValid = false;
     tr.va = va;
     tr.size = PTE.size() - 1;
     tr.contextId = context_id;
     tr.partitionId = partition_id;
     tr.real = real;
 
-    DPRINTF(TLB, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d\n",
-            va, PTE.paddr(), partition_id, context_id, (int)real);
+    DPRINTF(TLB, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
+            va, PTE.paddr(), partition_id, context_id, (int)real, entry);
+
+    // Demap any entry that conflicts
+    i = lookupTable.find(tr);
+    if (i != lookupTable.end()) {
+        i->second->valid = false;
+        if (i->second->used) {
+            i->second->used = false;
+            usedEntries--;
+        }
+        freeList.push_front(i->second);
+        DPRINTF(TLB, "TLB: Found conflicting entry %#X , deleting it\n",
+                i->second);
+        lookupTable.erase(i);
+    }
 
     if (entry != -1) {
         assert(entry < size && entry >= 0);
         new_entry = &tlb[entry];
     } else {
+        if (!freeList.empty()) {
+            new_entry = freeList.front();
+        } else {
+            x = lastReplaced;
+            do {
+                ++x;
+                if (x == size)
+                    x = 0;
+                if (x == lastReplaced)
+                    goto insertAllLocked;
+            } while (tlb[x].pte.locked());
+            lastReplaced = x;
+            new_entry = &tlb[x];
+            lookupTable.erase(new_entry->range);
+        }
+        /*
         for (x = 0; x < size; x++) {
             if (!tlb[x].valid || !tlb[x].used) {
                 new_entry = &tlb[x];
                 break;
             }
-        }
+        }*/
     }
 
+insertAllLocked:
     // Update the last ently if their all locked
-    if (!new_entry)
+    if (!new_entry) {
         new_entry = &tlb[size-1];
+        lookupTable.erase(new_entry->range);
+    }
 
+    freeList.remove(new_entry);
+    DPRINTF(TLB, "Using entry: %#X\n", new_entry);
 
     assert(PTE.valid());
     new_entry->range.va = va;
-    new_entry->range.size = PTE.size();
+    new_entry->range.size = PTE.size() - 1;
     new_entry->range.partitionId = partition_id;
     new_entry->range.contextId = context_id;
     new_entry->range.real = real;
@@ -112,17 +160,6 @@ TLB::insert(Addr va, int partition_id, int context_id, bool real,
     usedEntries++;
 
 
-    // Demap any entry that conflicts
-    i = lookupTable.find(new_entry->range);
-    if (i != lookupTable.end()) {
-        i->second->valid = false;
-        if (i->second->used) {
-            i->second->used = false;
-            usedEntries--;
-        }
-        DPRINTF(TLB, "TLB: Found conflicting entry, deleting it\n");
-        lookupTable.erase(i);
-    }
-
     i = lookupTable.insert(new_entry->range, new_entry);
     assert(i != lookupTable.end());
@@ -219,6 +256,8 @@ TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
             i->second->used = false;
             usedEntries--;
         }
+        freeList.push_front(i->second);
+        DPRINTF(TLB, "Freeing TLB entry : %#X\n", i->second);
         lookupTable.erase(i);
     }
 }
@@ -233,6 +272,10 @@ TLB::demapContext(int partition_id, int context_id)
     for (x = 0; x < size; x++) {
         if (tlb[x].range.contextId == context_id &&
             tlb[x].range.partitionId == partition_id) {
+            if (tlb[x].valid == true) {
+                freeList.push_front(&tlb[x]);
+                DPRINTF(TLB, "Freeing TLB entry : %#X\n", &tlb[x]);
+            }
             tlb[x].valid = false;
             if (tlb[x].used) {
                 tlb[x].used = false;
@@ -251,6 +294,10 @@ TLB::demapAll(int partition_id)
     cacheValid = false;
     for (x = 0; x < size; x++) {
         if (!tlb[x].pte.locked() && tlb[x].range.partitionId == partition_id) {
+            if (tlb[x].valid == true){
+                freeList.push_front(&tlb[x]);
+                DPRINTF(TLB, "Freeing TLB entry : %#X\n", &tlb[x]);
+            }
             tlb[x].valid = false;
             if (tlb[x].used) {
                 tlb[x].used = false;
@@ -267,7 +314,10 @@ TLB::invalidateAll()
     int x;
     cacheValid = false;
 
+    freeList.clear();
     for (x = 0; x < size; x++) {
+        if (tlb[x].valid == true)
+            freeList.push_back(&tlb[x]);
         tlb[x].valid = false;
     }
     usedEntries = 0;
@@ -275,17 +325,26 @@ TLB::invalidateAll()
 
 uint64_t
 TLB::TteRead(int entry) {
+    if (entry >= size)
+        panic("entry: %d\n", entry);
+
     assert(entry < size);
     if (tlb[entry].valid)
         return tlb[entry].pte();
     else
         return (uint64_t)-1ll;
 }
 
 uint64_t
 TLB::TagRead(int entry) {
     assert(entry < size);
     uint64_t tag;
     if (!tlb[entry].valid)
         return (uint64_t)-1ll;
 
-    tag = tlb[entry].range.contextId | tlb[entry].range.va |
-          (uint64_t)tlb[entry].range.partitionId << 61;
+    tag = tlb[entry].range.contextId;
+    tag |= tlb[entry].range.va;
+    tag |= (uint64_t)tlb[entry].range.partitionId << 61;
+    tag |= tlb[entry].range.real ? ULL(1) << 60 : 0;
+    tag |= (uint64_t)~tlb[entry].pte._size() << 56;
     return tag;
@@ -501,13 +560,13 @@ DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
     // Be fast if we can!
     if (cacheValid && cacheState == tlbdata) {
         if (cacheEntry[0] && cacheAsi[0] == asi && cacheEntry[0]->range.va < vaddr + size &&
-            cacheEntry[0]->range.va + cacheEntry[0]->range.size > vaddr) {
+            cacheEntry[0]->range.va + cacheEntry[0]->range.size >= vaddr) {
            req->setPaddr(cacheEntry[0]->pte.paddr() & ~(cacheEntry[0]->pte.size()-1) |
                    vaddr & cacheEntry[0]->pte.size()-1 );
            return NoFault;
        }
        if (cacheEntry[1] && cacheAsi[1] == asi && cacheEntry[1]->range.va < vaddr + size &&
-            cacheEntry[1]->range.va + cacheEntry[1]->range.size > vaddr) {
+            cacheEntry[1]->range.va + cacheEntry[1]->range.size >= vaddr) {
            req->setPaddr(cacheEntry[1]->pte.paddr() & ~(cacheEntry[1]->pte.size()-1) |
                    vaddr & cacheEntry[1]->pte.size()-1 );
            return NoFault;
@@ -667,8 +726,12 @@ continueDtbFlow:
     }
 
     // cache translation date for next translation
-    cacheValid = true;
     cacheState = tlbdata;
+    if (!cacheValid) {
+        cacheEntry[1] = NULL;
+        cacheEntry[0] = NULL;
+    }
 
     if (cacheEntry[0] != e && cacheEntry[1] != e) {
         cacheEntry[1] = cacheEntry[0];
         cacheEntry[0] = e;
@@ -677,7 +740,7 @@ continueDtbFlow:
         if (implicit)
             cacheAsi[0] = (ASI)0;
     }
 
+    cacheValid = true;
     req->setPaddr(e->pte.paddr() & ~(e->pte.size()-1) |
            vaddr & e->pte.size()-1);
     DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
@@ -696,7 +759,7 @@ handleQueueRegAccess:
             writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
             return new PrivilegedAction;
         }
-        if (priv && vaddr & 0xF || vaddr > 0x3f8 || vaddr < 0x3c0) {
+        if (!hpriv && vaddr & 0xF || vaddr > 0x3f8 || vaddr < 0x3c0) {
             writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
             return new DataAccessException;
         }
src/arch/sparc/tlb.hh

@@ -54,10 +54,13 @@ class TLB : public SimObject
 
     int size;
     int usedEntries;
+    int lastReplaced;
 
     uint64_t cacheState;
     bool cacheValid;
 
+    std::list<TlbEntry*> freeList;
+
     enum FaultTypes {
         OtherFault = 0,
         PrivViolation = 0x1,
@@ -93,9 +96,6 @@ class TLB : public SimObject
     /** Given an entry id, read that tlb entries' tag. */
     uint64_t TagRead(int entry);
 
-    /** Give an entry id, read that tlb entries' tte */
-    uint64_t TteRead(int entry);
-
     /** Remove all entries from the TLB */
     void invalidateAll();
 
@@ -128,6 +128,10 @@ class TLB : public SimObject
     // Checkpointing
     virtual void serialize(std::ostream &os);
     virtual void unserialize(Checkpoint *cp, const std::string &section);
 
+    /** Give an entry id, read that tlb entries' tte */
+    uint64_t TteRead(int entry);
+
 };
 
 class ITB : public TLB
src/python/m5/objects/SparcTLB.py

@@ -11,4 +11,4 @@ class SparcDTB(SparcTLB):
 
 class SparcITB(SparcTLB):
     type = 'SparcITB'
-    size = 48
+    size = 64