Merge zizzer:/bk/newmem

into  zower.eecs.umich.edu:/eecshome/m5/newmem

--HG--
extra : convert_revision : f4a05accb8fa24d425dd818b1b7f268378180e99
Gabe Black 2007-01-03 00:52:30 -05:00
commit 8840ebcb00
65 changed files with 2278 additions and 927 deletions

View file

@@ -1,4 +1,4 @@
-# Copyright (c) 2006 The Regents of The University of Michigan
+# Copyright (c) 2006-2007 The Regents of The University of Michigan
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -46,6 +46,10 @@ config_root = os.path.dirname(config_path)
 parser = optparse.OptionParser()
 
+# System options
+parser.add_option("--kernel", action="store", type="string")
+parser.add_option("--script", action="store", type="string")
+
 # Benchmark options
 parser.add_option("--l2cache", action="store_true")
 parser.add_option("--dual", action="store_true",
@@ -99,6 +103,12 @@ elif m5.build_env['TARGET_ISA'] == "sparc":
 else:
     m5.panic("incapable of building non-alpha or non-sparc full system!")
 
+if options.kernel is not None:
+    test_sys.kernel = binary(options.kernel)
+
+if options.script is not None:
+    test_sys.readfile = options.script
+
 np = options.num_cpus
 
 if options.l2cache:

View file

@@ -115,11 +115,9 @@ base_sources = Split('''
     mem/cache/miss/mshr_queue.cc
     mem/cache/prefetch/base_prefetcher.cc
     mem/cache/prefetch/ghb_prefetcher.cc
-    mem/cache/prefetch/prefetcher.cc
     mem/cache/prefetch/stride_prefetcher.cc
     mem/cache/prefetch/tagged_prefetcher.cc
     mem/cache/tags/base_tags.cc
-    mem/cache/tags/cache_tags.cc
     mem/cache/tags/fa_lru.cc
     mem/cache/tags/iic.cc
     mem/cache/tags/lru.cc
@@ -131,8 +129,11 @@ base_sources = Split('''
     mem/cache/cache_builder.cc
 
+    python/swig/init.cc
     python/swig/debug_wrap.cc
     python/swig/main_wrap.cc
+    python/swig/event_wrap.cc
+    python/swig/pyevent.cc
 
     sim/builder.cc
     sim/debug.cc

View file

@@ -1,4 +1,4 @@
-// Copyright (c) 2006 The Regents of The University of Michigan
+// Copyright (c) 2006-2007 The Regents of The University of Michigan
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
@@ -1039,11 +1039,31 @@ decode OP default Unknown::unknown()
             0x10: lduwa({{Rd = Mem.uw;}}, {{EXT_ASI}});
             0x11: lduba({{Rd = Mem.ub;}}, {{EXT_ASI}});
             0x12: lduha({{Rd = Mem.uhw;}}, {{EXT_ASI}});
-            0x13: ldtwa({{
-                uint64_t val = Mem.udw;
-                RdLow = val<31:0>;
-                RdHigh = val<63:32>;
-            }}, {{EXT_ASI}});
+            0x13: decode EXT_ASI {
+                //ASI_QUAD_LDD
+                0x24: TwinLoad::ldtx_quad_ldd(
+                    {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}});
+                //ASI_LDTX_REAL
+                0x26: TwinLoad::ldtx_real(
+                    {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}});
+                //ASI_LDTX_N
+                0x27: TwinLoad::ldtx_n(
+                    {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}});
+                //ASI_LDTX_L
+                0x2C: TwinLoad::ldtx_l(
+                    {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}});
+                //ASI_LDTX_REAL_L
+                0x2E: TwinLoad::ldtx_real_l(
+                    {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}});
+                //ASI_LDTX_N_L
+                0x2F: TwinLoad::ldtx_n_l(
+                    {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}});
+                default: ldtwa({{
+                    uint64_t val = Mem.udw;
+                    RdLow = val<31:0>;
+                    RdHigh = val<63:32>;
+                }}, {{EXT_ASI}});
+            }
         }
         format StoreAlt {
             0x14: stwa({{Mem.uw = Rd;}}, {{EXT_ASI}});
@@ -1105,18 +1125,6 @@ decode OP default Unknown::unknown()
                 0x15: FailUnimpl::lddfa_real_io();
                 //ASI_REAL_IO_LITTLE
                 0x1D: FailUnimpl::lddfa_real_io_l();
-                //ASI_LDTX_REAL
-                0x26: TwinLoad::ldtx_real(
-                    {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}});
-                //ASI_LDTX_N
-                0x27: TwinLoad::ldtx_n(
-                    {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}});
-                //ASI_LDTX_REAL_L
-                0x2E: TwinLoad::ldtx_real_l(
-                    {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}});
-                //ASI_LDTX_N_L
-                0x2F: TwinLoad::ldtx_n_l(
-                    {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}});
                 //ASI_PRIMARY
                 0x80: FailUnimpl::lddfa_p();
                 //ASI_PRIMARY_LITTLE

View file

@@ -1,4 +1,4 @@
-// Copyright (c) 2006 The Regents of The University of Michigan
+// Copyright (c) 2006-2007 The Regents of The University of Michigan
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
@@ -101,7 +101,7 @@ output header {{
         // We make the assumption that all block memory operations
         // Will take 8 instructions to execute
         TwinMem(const char *mnem, ExtMachInst _machInst) :
-            SparcMacroInst(mnem, _machInst, No_OpClass, 8)
+            SparcMacroInst(mnem, _machInst, No_OpClass, 2)
         {}
     };

View file

@@ -1,4 +1,4 @@
-// Copyright (c) 2006 The Regents of The University of Michigan
+// Copyright (c) 2006-2007 The Regents of The University of Michigan
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
@@ -295,9 +295,9 @@ let {{
             fault = new MemAddressNotAligned;
     '''
     TwinAlignmentFaultCheck = '''
-        if(RD & 0xe)
+        if(RD & 0x1)
             fault = new IllegalInstruction;
-        else if(EA & 0x1f)
+        else if(EA & 0xf)
             fault = new MemAddressNotAligned;
     '''
     # XXX Need to take care of pstate.hpriv as well. The lower ASIs

View file

@@ -45,7 +45,8 @@ namespace SparcISA
 {
 
 TLB::TLB(const std::string &name, int s)
-    : SimObject(name), size(s), usedEntries(0), cacheValid(false)
+    : SimObject(name), size(s), usedEntries(0), lastReplaced(0),
+      cacheValid(false)
 {
     // To make this work you'll have to change the hypervisor and OS
     if (size > 64)
@@ -53,13 +54,16 @@ TLB::TLB(const std::string &name, int s)
     tlb = new TlbEntry[size];
     memset(tlb, 0, sizeof(TlbEntry) * size);
+
+    for (int x = 0; x < size; x++)
+        freeList.push_back(&tlb[x]);
 }
 
 void
 TLB::clearUsedBits()
 {
     MapIter i;
-    for (i = lookupTable.begin(); i != lookupTable.end();) {
+    for (i = lookupTable.begin(); i != lookupTable.end(); i++) {
         TlbEntry *t = i->second;
         if (!t->pte.locked()) {
             t->used = false;
@@ -77,32 +81,76 @@ TLB::insert(Addr va, int partition_id, int context_id, bool real,
     MapIter i;
     TlbEntry *new_entry = NULL;
+    TlbRange tr;
     int x;
 
     cacheValid = false;
+    tr.va = va;
+    tr.size = PTE.size() - 1;
+    tr.contextId = context_id;
+    tr.partitionId = partition_id;
+    tr.real = real;
 
-    DPRINTF(TLB, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
-            va, PTE.paddr(), partition_id, context_id, (int)real, entry);
+    // Demap any entry that conflicts
+    i = lookupTable.find(tr);
+    if (i != lookupTable.end()) {
+        i->second->valid = false;
+        if (i->second->used) {
+            i->second->used = false;
+            usedEntries--;
+        }
+        freeList.push_front(i->second);
+        DPRINTF(TLB, "TLB: Found conflicting entry %#X , deleting it\n",
+                i->second);
+        lookupTable.erase(i);
+    }
+
+    DPRINTF(TLB, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d\n",
+            va, PTE.paddr(), partition_id, context_id, (int)real);
 
     if (entry != -1) {
         assert(entry < size && entry >= 0);
         new_entry = &tlb[entry];
     } else {
+        if (!freeList.empty()) {
+            new_entry = freeList.front();
+        } else {
+            x = lastReplaced;
+            do {
+                ++x;
+                if (x == size)
+                    x = 0;
+                if (x == lastReplaced)
+                    goto insertAllLocked;
+            } while (tlb[x].pte.locked());
+            lastReplaced = x;
+            new_entry = &tlb[x];
+            lookupTable.erase(new_entry->range);
+        }
+        /*
         for (x = 0; x < size; x++) {
             if (!tlb[x].valid || !tlb[x].used) {
                 new_entry = &tlb[x];
                 break;
            }
-        }
+        }*/
     }
 
+insertAllLocked:
     // Update the last ently if their all locked
-    if (!new_entry)
+    if (!new_entry) {
         new_entry = &tlb[size-1];
+        lookupTable.erase(new_entry->range);
+    }
+
+    freeList.remove(new_entry);
+    DPRINTF(TLB, "Using entry: %#X\n", new_entry);
+
     assert(PTE.valid());
     new_entry->range.va = va;
-    new_entry->range.size = PTE.size();
+    new_entry->range.size = PTE.size() - 1;
     new_entry->range.partitionId = partition_id;
     new_entry->range.contextId = context_id;
     new_entry->range.real = real;
@@ -112,19 +160,9 @@ TLB::insert(Addr va, int partition_id, int context_id, bool real,
     usedEntries++;
 
-    // Demap any entry that conflicts
-    i = lookupTable.find(new_entry->range);
-    if (i != lookupTable.end()) {
-        i->second->valid = false;
-        if (i->second->used) {
-            i->second->used = false;
-            usedEntries--;
-        }
-        DPRINTF(TLB, "TLB: Found conflicting entry, deleting it\n");
-        lookupTable.erase(i);
-    }
-
-    lookupTable.insert(new_entry->range, new_entry);;
+    i = lookupTable.insert(new_entry->range, new_entry);
+    assert(i != lookupTable.end());
 
     // If all entries have there used bit set, clear it on them all, but the
     // one we just inserted
@@ -148,7 +186,7 @@ TLB::lookup(Addr va, int partition_id, bool real, int context_id)
             va, partition_id, context_id, real);
 
     // Assemble full address structure
     tr.va = va;
-    tr.size = va + MachineBytes;
+    tr.size = MachineBytes;
     tr.contextId = context_id;
     tr.partitionId = partition_id;
     tr.real = real;
@@ -180,6 +218,7 @@ TLB::lookup(Addr va, int partition_id, bool real, int context_id)
 void
 TLB::dumpAll()
 {
+    MapIter i;
     for (int x = 0; x < size; x++) {
         if (tlb[x].valid) {
             DPRINTFN("%4d: %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
@@ -196,11 +235,14 @@ TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
     TlbRange tr;
     MapIter i;
 
+    DPRINTF(IPR, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
+            va, partition_id, context_id, real);
+
     cacheValid = false;
 
     // Assemble full address structure
     tr.va = va;
-    tr.size = va + MachineBytes;
+    tr.size = MachineBytes;
     tr.contextId = context_id;
     tr.partitionId = partition_id;
     tr.real = real;
@@ -208,11 +250,14 @@ TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
     // Demap any entry that conflicts
     i = lookupTable.find(tr);
     if (i != lookupTable.end()) {
+        DPRINTF(IPR, "TLB: Demapped page\n");
         i->second->valid = false;
         if (i->second->used) {
             i->second->used = false;
             usedEntries--;
         }
+        freeList.push_front(i->second);
+        DPRINTF(TLB, "Freeing TLB entry : %#X\n", i->second);
         lookupTable.erase(i);
     }
 }
@@ -221,10 +266,16 @@ void
 TLB::demapContext(int partition_id, int context_id)
 {
     int x;
+    DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n",
+            partition_id, context_id);
     cacheValid = false;
     for (x = 0; x < size; x++) {
         if (tlb[x].range.contextId == context_id &&
             tlb[x].range.partitionId == partition_id) {
+            if (tlb[x].valid == true) {
+                freeList.push_front(&tlb[x]);
+                DPRINTF(TLB, "Freeing TLB entry : %#X\n", &tlb[x]);
+            }
             tlb[x].valid = false;
             if (tlb[x].used) {
                 tlb[x].used = false;
@@ -239,9 +290,14 @@ void
 TLB::demapAll(int partition_id)
 {
     int x;
+    DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id);
     cacheValid = false;
     for (x = 0; x < size; x++) {
         if (!tlb[x].pte.locked() && tlb[x].range.partitionId == partition_id) {
+            if (tlb[x].valid == true){
+                freeList.push_front(&tlb[x]);
+                DPRINTF(TLB, "Freeing TLB entry : %#X\n", &tlb[x]);
+            }
             tlb[x].valid = false;
             if (tlb[x].used) {
                 tlb[x].used = false;
@@ -258,7 +314,10 @@ TLB::invalidateAll()
     int x;
 
     cacheValid = false;
+    freeList.clear();
     for (x = 0; x < size; x++) {
+        if (tlb[x].valid == true)
+            freeList.push_back(&tlb[x]);
         tlb[x].valid = false;
     }
     usedEntries = 0;
@@ -266,17 +325,26 @@ TLB::invalidateAll()
 uint64_t
 TLB::TteRead(int entry) {
+    if (entry >= size)
+        panic("entry: %d\n", entry);
+
     assert(entry < size);
-    return tlb[entry].pte();
+    if (tlb[entry].valid)
+        return tlb[entry].pte();
+    else
+        return (uint64_t)-1ll;
 }
 
 uint64_t
 TLB::TagRead(int entry) {
     assert(entry < size);
     uint64_t tag;
+    if (!tlb[entry].valid)
+        return (uint64_t)-1ll;
 
-    tag = tlb[entry].range.contextId | tlb[entry].range.va |
-          (uint64_t)tlb[entry].range.partitionId << 61;
+    tag = tlb[entry].range.contextId;
+    tag |= tlb[entry].range.va;
+    tag |= (uint64_t)tlb[entry].range.partitionId << 61;
     tag |= tlb[entry].range.real ? ULL(1) << 60 : 0;
     tag |= (uint64_t)~tlb[entry].pte._size() << 56;
     return tag;
@@ -492,13 +560,13 @@ DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
     // Be fast if we can!
     if (cacheValid && cacheState == tlbdata) {
         if (cacheEntry[0] && cacheAsi[0] == asi && cacheEntry[0]->range.va < vaddr + size &&
-            cacheEntry[0]->range.va + cacheEntry[0]->range.size >= vaddr) {
+            cacheEntry[0]->range.va + cacheEntry[0]->range.size > vaddr) {
             req->setPaddr(cacheEntry[0]->pte.paddr() & ~(cacheEntry[0]->pte.size()-1) |
                           vaddr & cacheEntry[0]->pte.size()-1 );
             return NoFault;
         }
         if (cacheEntry[1] && cacheAsi[1] == asi && cacheEntry[1]->range.va < vaddr + size &&
-            cacheEntry[1]->range.va + cacheEntry[1]->range.size >= vaddr) {
+            cacheEntry[1]->range.va + cacheEntry[1]->range.size > vaddr) {
             req->setPaddr(cacheEntry[1]->pte.paddr() & ~(cacheEntry[1]->pte.size()-1) |
                           vaddr & cacheEntry[1]->pte.size()-1 );
             return NoFault;
@@ -575,6 +643,9 @@ DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
         if (write && asi == ASI_LDTX_P)
             // block init store (like write hint64)
             goto continueDtbFlow;
+        if (!write && asi == ASI_QUAD_LDD)
+            goto continueDtbFlow;
+
         if (AsiIsTwin(asi))
             panic("Twin ASIs not supported\n");
         if (AsiIsPartialStore(asi))
@@ -655,8 +726,12 @@ continueDtbFlow:
     }
 
     // cache translation date for next translation
-    cacheValid = true;
     cacheState = tlbdata;
+    if (!cacheValid) {
+        cacheEntry[1] = NULL;
+        cacheEntry[0] = NULL;
+    }
+
     if (cacheEntry[0] != e && cacheEntry[1] != e) {
         cacheEntry[1] = cacheEntry[0];
         cacheEntry[0] = e;
@@ -665,7 +740,7 @@ continueDtbFlow:
         if (implicit)
             cacheAsi[0] = (ASI)0;
     }
+    cacheValid = true;
     req->setPaddr(e->pte.paddr() & ~(e->pte.size()-1) |
                   vaddr & e->pte.size()-1);
     DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
@@ -684,7 +759,7 @@ handleQueueRegAccess:
         writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
         return new PrivilegedAction;
     }
-    if (priv && vaddr & 0xF || vaddr > 0x3f8 || vaddr < 0x3c0) {
+    if (!hpriv && vaddr & 0xF || vaddr > 0x3f8 || vaddr < 0x3c0) {
         writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
         return new DataAccessException;
     }
@@ -881,6 +956,9 @@ DTB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
     int part_insert;
     int entry_insert = -1;
     bool real_insert;
+    bool ignore;
+    int part_id;
+    int ctx_id;
     PageTableEntry pte;
 
     DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
@@ -1000,6 +1078,41 @@ DTB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
                     PageTableEntry::sun4u);
             insert(va_insert, part_insert, ct_insert, real_insert, pte, entry_insert);
             break;
+        case ASI_IMMU_DEMAP:
+            ignore = false;
+            ctx_id = -1;
+            part_id = tc->readMiscRegWithEffect(MISCREG_MMU_PART_ID);
+            switch (bits(va,5,4)) {
+              case 0:
+                ctx_id = tc->readMiscRegWithEffect(MISCREG_MMU_P_CONTEXT);
+                break;
+              case 1:
+                ignore = true;
+                break;
+              case 3:
+                ctx_id = 0;
+                break;
+              default:
+                ignore = true;
+            }
+
+            switch(bits(va,7,6)) {
+              case 0: // demap page
+                if (!ignore)
+                    tc->getITBPtr()->demapPage(mbits(va,63,13), part_id,
+                            bits(va,9,9), ctx_id);
+                break;
+              case 1: //demap context
+                if (!ignore)
+                    tc->getITBPtr()->demapContext(part_id, ctx_id);
+                break;
+              case 2:
+                tc->getITBPtr()->demapAll(part_id);
+                break;
+              default:
+                panic("Invalid type for IMMU demap\n");
+            }
+            break;
+
         case ASI_DMMU:
             switch (va) {
               case 0x30:
@@ -1012,6 +1125,40 @@ DTB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
                 goto doMmuWriteError;
             }
             break;
+        case ASI_DMMU_DEMAP:
+            ignore = false;
+            ctx_id = -1;
+            part_id = tc->readMiscRegWithEffect(MISCREG_MMU_PART_ID);
+            switch (bits(va,5,4)) {
+              case 0:
+                ctx_id = tc->readMiscRegWithEffect(MISCREG_MMU_P_CONTEXT);
+                break;
+              case 1:
+                ctx_id = tc->readMiscRegWithEffect(MISCREG_MMU_S_CONTEXT);
+                break;
+              case 3:
+                ctx_id = 0;
+                break;
+              default:
+                ignore = true;
+            }
+
+            switch(bits(va,7,6)) {
+              case 0: // demap page
+                if (!ignore)
+                    demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id);
+                break;
+              case 1: //demap context
+                if (!ignore)
+                    demapContext(part_id, ctx_id);
+                break;
+              case 2:
+                demapAll(part_id);
+                break;
+              default:
+                panic("Invalid type for IMMU demap\n");
+            }
+            break;
+
         default:
 doMmuWriteError:
             panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",

View file

@@ -54,10 +54,13 @@ class TLB : public SimObject
     int size;
     int usedEntries;
+    int lastReplaced;
 
     uint64_t cacheState;
     bool cacheValid;
 
+    std::list<TlbEntry*> freeList;
+
     enum FaultTypes {
         OtherFault = 0,
         PrivViolation = 0x1,
@@ -93,9 +96,6 @@ class TLB : public SimObject
     /** Given an entry id, read that tlb entries' tag. */
     uint64_t TagRead(int entry);
 
-    /** Give an entry id, read that tlb entries' tte */
-    uint64_t TteRead(int entry);
-
     /** Remove all entries from the TLB */
     void invalidateAll();
@@ -128,6 +128,10 @@ class TLB : public SimObject
     // Checkpointing
     virtual void serialize(std::ostream &os);
     virtual void unserialize(Checkpoint *cp, const std::string &section);
+
+    /** Give an entry id, read that tlb entries' tte */
+    uint64_t TteRead(int entry);
+
 };
 
 class ITB : public TLB

View file

@@ -165,36 +165,6 @@ __csprintf(const std::string &format, ArgList &args)
 #define csprintf(args...) \
         __csprintf__(args, cp::ArgListNull())
 
-template<class T>
-inline ArgList &
-operator<<(ArgList &list, const T &data)
-{
-    list.append(data);
-    return list;
-}
-
-inline ArgList &
-operator<<(std::ostream &str, ArgList &list)
-{
-    list.stream = &str;
-    return list;
-}
-
-class ArgListTemp
-{
-  private:
-    std::string format;
-    ArgList *args;
-
-  public:
-    ArgListTemp(const std::string &f) : format(f) { args = new ArgList; }
-    ~ArgListTemp() { args->dump(format); delete args; }
-
-    operator ArgList *() { return args; }
-};
-
-#define cformat(format) \
-    (*((cp::ArgList *)cp::ArgListTemp(format)))
 }
 
 #endif // __CPRINTF_HH__

View file

@@ -288,13 +288,13 @@ format_integer(std::ostream &out, const T &data, Format &fmt)
 { _format_integer(out, data, fmt); }
 inline void
 format_integer(std::ostream &out, char data, Format &fmt)
-{ _format_integer(out, data, fmt); }
+{ _format_integer(out, (int)data, fmt); }
 inline void
 format_integer(std::ostream &out, unsigned char data, Format &fmt)
-{ _format_integer(out, data, fmt); }
+{ _format_integer(out, (int)data, fmt); }
 inline void
 format_integer(std::ostream &out, signed char data, Format &fmt)
-{ _format_integer(out, data, fmt); }
+{ _format_integer(out, (int)data, fmt); }
 
 #if 0
 inline void
 format_integer(std::ostream &out, short data, Format &fmt)

View file

@@ -28,10 +28,8 @@
  * Authors: Nathan Binkert
  */
 
-#ifndef __REFCNT_HH__
-#define __REFCNT_HH__
-
-#include <stddef.h> //For the NULL macro definition
+#ifndef __BASE_REFCNT_HH__
+#define __BASE_REFCNT_HH__
 
 class RefCounted
 {
@@ -77,7 +75,7 @@ class RefCountingPtr
   public:
-    RefCountingPtr() : data(NULL) {}
+    RefCountingPtr() : data(0) {}
     RefCountingPtr(T *data) { copy(data); }
     RefCountingPtr(const RefCountingPtr &r) { copy(r.data); }
     ~RefCountingPtr() { del(); }
@@ -90,8 +88,8 @@ class RefCountingPtr
     const T &operator*() const { return *data; }
     const T *get() const { return data; }
 
-    RefCountingPtr &operator=(T *p) { set(p); return *this; }
-    RefCountingPtr &operator=(const RefCountingPtr &r)
+    const RefCountingPtr &operator=(T *p) { set(p); return *this; }
+    const RefCountingPtr &operator=(const RefCountingPtr &r)
     { return operator=(r.data); }
 
     bool operator!() const { return data == 0; }
@@ -122,4 +120,4 @@ template<class T>
 bool operator!=(const T &l, const RefCountingPtr<T> &r)
 { return l != r.get(); }
 
-#endif // __REFCNT_HH__
+#endif // __BASE_REFCNT_HH__

View file

@@ -314,6 +314,7 @@ Trace::InstRecord::dump(ostream &outs)
         bool diffCanrestore = false;
         bool diffOtherwin = false;
         bool diffCleanwin = false;
+        bool diffTlb = false;
         Addr m5Pc, lgnPc;
@@ -409,13 +410,24 @@ Trace::InstRecord::dump(ostream &outs)
                     thread->readMiscReg(NumIntArchRegs + 6))
                 diffCleanwin = true;
 
+            for (int i = 0; i < 64; i++) {
+                if (shared_data->itb[i] != thread->getITBPtr()->TteRead(i))
+                    diffTlb = true;
+                if (shared_data->dtb[i] != thread->getDTBPtr()->TteRead(i))
+                    diffTlb = true;
+            }
+
             if ((diffPC || diffCC || diffInst || diffRegs || diffTpc ||
                  diffTnpc || diffTstate || diffTt || diffHpstate ||
                  diffHtstate || diffHtba || diffPstate || diffY ||
                  diffCcr || diffTl || diffGl || diffAsi || diffPil ||
                  diffCwp || diffCansave || diffCanrestore ||
-                 diffOtherwin || diffCleanwin)
-                && !((staticInst->machInst & 0xC1F80000) == 0x81D00000)) {
+                 diffOtherwin || diffCleanwin || diffTlb)
+                && !((staticInst->machInst & 0xC1F80000) == 0x81D00000)
+                && !(((staticInst->machInst & 0xC0000000) == 0xC0000000)
+                    && shared_data->tl == thread->readMiscReg(MISCREG_TL) + 1)
+               ) {
                 outs << "Differences found between M5 and Legion:";
                 if (diffPC)
                     outs << " [PC]";
@@ -463,6 +475,8 @@ Trace::InstRecord::dump(ostream &outs)
                     outs << " [Otherwin]";
                 if (diffCleanwin)
                     outs << " [Cleanwin]";
+                if (diffTlb)
+                    outs << " [Tlb]";
                 outs << endl << endl;
 
                 outs << right << setfill(' ') << setw(15)
@@ -593,6 +607,22 @@ Trace::InstRecord::dump(ostream &outs)
                            << endl;*/
                 }
             }
+            printColumnLabels(outs);
+            char label[8];
+            for (int x = 0; x < 64; x++) {
+                if (shared_data->itb[x] != ULL(0xFFFFFFFFFFFFFFFF) ||
+                    thread->getITBPtr()->TteRead(x) != ULL(0xFFFFFFFFFFFFFFFF)) {
+                    sprintf(label, "I-TLB:%02d", x);
+                    printRegPair(outs, label, thread->getITBPtr()->TteRead(x), shared_data->itb[x]);
+                }
+            }
+            for (int x = 0; x < 64; x++) {
+                if (shared_data->dtb[x] != ULL(0xFFFFFFFFFFFFFFFF) ||
+                    thread->getDTBPtr()->TteRead(x) != ULL(0xFFFFFFFFFFFFFFFF)) {
+                    sprintf(label, "D-TLB:%02d", x);
+                    printRegPair(outs, label, thread->getDTBPtr()->TteRead(x), shared_data->dtb[x]);
+                }
+            }
             thread->getITBPtr()->dumpAll();
             thread->getDTBPtr()->dumpAll();

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 The Regents of The University of Michigan
+ * Copyright (c) 2006-2007 The Regents of The University of Michigan
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -30,7 +30,7 @@
 #include <unistd.h>
 
-#define VERSION 0xA1000006
+#define VERSION 0xA1000007
 #define OWN_M5 0x000000AA
 #define OWN_LEGION 0x00000055
@@ -72,6 +72,9 @@ typedef struct {
     uint8_t otherwin;
     uint8_t cleanwin;
 
+    uint64_t itb[64];
+    uint64_t dtb[64];
+
 } SharedData;
 
 /** !!! ^^^ Increment VERSION on change ^^^ !!! **/

View file

@@ -387,9 +387,12 @@ void
 DefaultCommit<Impl>::updateStatus()
 {
     // reset ROB changed variable
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
-    while (threads != (*activeThreads).end()) {
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
+
+    while (threads != end) {
         unsigned tid = *threads++;
+
         changedROBNumEntries[tid] = false;
 
         // Also check if any of the threads has a trap pending
@@ -416,9 +419,10 @@ DefaultCommit<Impl>::setNextStatus()
 {
     int squashes = 0;
 
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (threads != (*activeThreads).end()) {
+    while (threads != end) {
         unsigned tid = *threads++;
 
         if (commitStatus[tid] == ROBSquashing) {
@@ -439,9 +443,10 @@ template <class Impl>
 bool
 DefaultCommit<Impl>::changedROBEntries()
 {
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (threads != (*activeThreads).end()) {
+    while (threads != end) {
         unsigned tid = *threads++;
 
         if (changedROBNumEntries[tid]) {
@@ -564,14 +569,15 @@ DefaultCommit<Impl>::tick()
         return;
     }
 
-    if ((*activeThreads).size() <= 0)
+    if (activeThreads->empty())
         return;
 
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
     // Check if any of the threads are done squashing. Change the
     // status if they are done.
-    while (threads != (*activeThreads).end()) {
+    while (threads != end) {
         unsigned tid = *threads++;
 
         if (commitStatus[tid] == ROBSquashing) {
@@ -592,9 +598,9 @@ DefaultCommit<Impl>::tick()
     markCompletedInsts();
 
-    threads = (*activeThreads).begin();
+    threads = activeThreads->begin();
 
-    while (threads != (*activeThreads).end()) {
+    while (threads != end) {
         unsigned tid = *threads++;
 
         if (!rob->isEmpty(tid) && rob->readHeadInst(tid)->readyToCommit()) {
@@ -692,9 +698,10 @@ DefaultCommit<Impl>::commit()
     ////////////////////////////////////
     // Check for any possible squashes, handle them first
     ////////////////////////////////////
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (threads != (*activeThreads).end()) {
+    while (threads != end) {
         unsigned tid = *threads++;
 
         // Not sure which one takes priority. I think if we have
@@ -802,9 +809,9 @@ DefaultCommit<Impl>::commit()
     }
 
     //Check for any activity
-    threads = (*activeThreads).begin();
+    threads = activeThreads->begin();
 
-    while (threads != (*activeThreads).end()) {
+    while (threads != end) {
         unsigned tid = *threads++;
 
         if (changedROBNumEntries[tid]) {
@@ -971,20 +978,19 @@ DefaultCommit<Impl>::commitHead(DynInstPtr &head_inst, unsigned inst_num)
                 "instruction [sn:%lli] at the head of the ROB, PC %#x.\n",
                 head_inst->seqNum, head_inst->readPC());
 
-#if !FULL_SYSTEM
         // Hack to make sure syscalls/memory barriers/quiesces
         // aren't executed until all stores write back their data.
         // This direct communication shouldn't be used for
        // anything other than this.
-        if (inst_num > 0 || iewStage->hasStoresToWB())
-#else
         if ((head_inst->isMemBarrier() || head_inst->isWriteBarrier() ||
                 head_inst->isQuiesce()) &&
             iewStage->hasStoresToWB())
-#endif
         {
             DPRINTF(Commit, "Waiting for all stores to writeback.\n");
             return false;
+        } else if (inst_num > 0 || iewStage->hasStoresToWB()) {
+            DPRINTF(Commit, "Waiting to become head of commit.\n");
+            return false;
         }
 
         toIEW->commitInfo[tid].nonSpecSeqNum = head_inst->seqNum;
@@ -1254,9 +1260,10 @@ template <class Impl>
 bool
 DefaultCommit<Impl>::robDoneSquashing()
 {
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (threads != (*activeThreads).end()) {
+    while (threads != end) {
         unsigned tid = *threads++;
 
         if (!rob->isDoneSquashing(tid))
@@ -1335,7 +1342,8 @@ DefaultCommit<Impl>::getCommittingThread()
             return -1;
         }
     } else {
-        int tid = (*activeThreads).front();
+        assert(!activeThreads->empty());
+        int tid = activeThreads->front();
 
         if (commitStatus[tid] == Running ||
             commitStatus[tid] == Idle ||
@@ -1382,9 +1390,10 @@ DefaultCommit<Impl>::oldestReady()
     unsigned oldest = 0;
     bool first = true;
 
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (threads != (*activeThreads).end()) {
+    while (threads != end) {
         unsigned tid = *threads++;
 
         if (!rob->isEmpty(tid) &&

View file

@@ -117,17 +117,18 @@ FullO3CPU<Impl>::ActivateThreadEvent::description()
 
 template <class Impl>
 FullO3CPU<Impl>::DeallocateContextEvent::DeallocateContextEvent()
-    : Event(&mainEventQueue, CPU_Tick_Pri)
+    : Event(&mainEventQueue, CPU_Tick_Pri), tid(0), remove(false), cpu(NULL)
 {
 }
 
 template <class Impl>
 void
 FullO3CPU<Impl>::DeallocateContextEvent::init(int thread_num,
                                               FullO3CPU<Impl> *thread_cpu)
 {
     tid = thread_num;
     cpu = thread_cpu;
+    remove = false;
 }
 
 template <class Impl>
@@ -610,7 +611,8 @@ FullO3CPU<Impl>::suspendContext(int tid)
     DPRINTF(O3CPU,"[tid: %i]: Suspending Thread Context.\n", tid);
     bool deallocated = deallocateContext(tid, false, 1);
     // If this was the last thread then unschedule the tick event.
-    if ((activeThreads.size() == 1 && !deallocated) || activeThreads.size() == 0)
+    if (activeThreads.size() == 1 && !deallocated ||
+        activeThreads.size() == 0)
         unscheduleTickEvent();
     _status = Idle;
 }

View file

@@ -428,10 +428,12 @@ template<class Impl>
 bool
 DefaultDecode<Impl>::skidsEmpty()
 {
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (threads != (*activeThreads).end()) {
-        if (!skidBuffer[*threads++].empty())
+    while (threads != end) {
+        unsigned tid = *threads++;
+        if (!skidBuffer[tid].empty())
             return false;
     }
@@ -444,11 +446,10 @@ DefaultDecode<Impl>::updateStatus()
 {
     bool any_unblocking = false;
 
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
-
-    threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (threads != (*activeThreads).end()) {
+    while (threads != end) {
         unsigned tid = *threads++;
 
         if (decodeStatus[tid] == Unblocking) {
@@ -601,13 +602,14 @@ DefaultDecode<Impl>::tick()
     toRenameIndex = 0;
 
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
     sortInsts();
 
     //Check stall and squash signals.
-    while (threads != (*activeThreads).end()) {
+    while (threads != end) {
         unsigned tid = *threads++;
 
         DPRINTF(Decode,"Processing [tid:%i]\n",tid);
         status_change = checkSignalsAndUpdate(tid) || status_change;

View file

@@ -727,10 +727,10 @@ typename DefaultFetch<Impl>::FetchStatus
 DefaultFetch<Impl>::updateFetchStatus()
 {
     //Check Running
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
-
-    while (threads != (*activeThreads).end()) {
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
+    while (threads != end) {
         unsigned tid = *threads++;
 
         if (fetchStatus[tid] == Running ||
@@ -785,12 +785,13 @@ template <class Impl>
 void
 DefaultFetch<Impl>::tick()
 {
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
     bool status_change = false;
 
     wroteToTimeBuffer = false;
 
-    while (threads != (*activeThreads).end()) {
+    while (threads != end) {
         unsigned tid = *threads++;
 
         // Check the signals for each thread to determine the proper status
@@ -1313,7 +1314,9 @@ DefaultFetch<Impl>::getFetchingThread(FetchPriority &fetch_priority)
             return -1;
         }
     } else {
-        int tid = *((*activeThreads).begin());
+        std::list<unsigned>::iterator thread = activeThreads->begin();
+        assert(thread != activeThreads->end());
+        int tid = *thread;
 
         if (fetchStatus[tid] == Running ||
             fetchStatus[tid] == IcacheAccessComplete ||
@@ -1363,9 +1366,10 @@ DefaultFetch<Impl>::iqCount()
 {
     std::priority_queue<unsigned> PQ;
 
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (threads != (*activeThreads).end()) {
+    while (threads != end) {
         unsigned tid = *threads++;
 
         PQ.push(fromIEW->iewInfo[tid].iqCount);
@@ -1393,10 +1397,10 @@ DefaultFetch<Impl>::lsqCount()
 {
     std::priority_queue<unsigned> PQ;
 
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
-
-    while (threads != (*activeThreads).end()) {
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
+    while (threads != end) {
         unsigned tid = *threads++;
 
         PQ.push(fromIEW->iewInfo[tid].ldstqCount);
@@ -1422,7 +1426,10 @@ template<class Impl>
 int
 DefaultFetch<Impl>::branchCount()
 {
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator thread = activeThreads->begin();
+    assert(thread != activeThreads->end());
+    unsigned tid = *thread;
+
     panic("Branch Count Fetch policy unimplemented\n");
-    return *threads;
+    return 0 * tid;
 }

View file

@@ -695,10 +695,12 @@ DefaultIEW<Impl>::skidCount()
 {
     int max=0;
 
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (threads != (*activeThreads).end()) {
-        unsigned thread_count = skidBuffer[*threads++].size();
+    while (threads != end) {
+        unsigned tid = *threads++;
+
+        unsigned thread_count = skidBuffer[tid].size();
         if (max < thread_count)
             max = thread_count;
     }
@@ -710,10 +712,13 @@ template<class Impl>
 bool
 DefaultIEW<Impl>::skidsEmpty()
 {
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (threads != (*activeThreads).end()) {
-        if (!skidBuffer[*threads++].empty())
+    while (threads != end) {
+        unsigned tid = *threads++;
+
+        if (!skidBuffer[tid].empty())
             return false;
     }
@@ -726,11 +731,10 @@ DefaultIEW<Impl>::updateStatus()
 {
     bool any_unblocking = false;
 
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
-
-    threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (threads != (*activeThreads).end()) {
+    while (threads != end) {
         unsigned tid = *threads++;
 
         if (dispatchStatus[tid] == Unblocking) {
@@ -1144,13 +1148,11 @@ DefaultIEW<Impl>::dispatchInsts(unsigned tid)
             }
 
             toRename->iewInfo[tid].dispatchedToLSQ++;
-#if FULL_SYSTEM
         } else if (inst->isMemBarrier() || inst->isWriteBarrier()) {
             // Same as non-speculative stores.
             inst->setCanCommit();
             instQueue.insertBarrier(inst);
             add_to_iq = false;
-#endif
         } else if (inst->isNonSpeculative()) {
             DPRINTF(IEW, "[tid:%i]: Issue: Nonspeculative instruction "
                     "encountered, skipping.\n", tid);
@@ -1250,9 +1252,10 @@ DefaultIEW<Impl>::executeInsts()
     wbNumInst = 0;
     wbCycle = 0;
 
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (threads != (*activeThreads).end()) {
+    while (threads != end) {
         unsigned tid = *threads++;
         fetchRedirect[tid] = false;
     }
@@ -1491,11 +1494,12 @@ DefaultIEW<Impl>::tick()
     // Free function units marked as being freed this cycle.
     fuPool->processFreeUnits();
 
-    std::list<unsigned>::iterator threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
     // Check stall and squash signals, dispatch any instructions.
-    while (threads != (*activeThreads).end()) {
+    while (threads != end) {
         unsigned tid = *threads++;
 
         DPRINTF(IEW,"Issue: Processing [tid:%i]\n",tid);
@@ -1535,8 +1539,8 @@ DefaultIEW<Impl>::tick()
     // nonspeculative instruction.
     // This is pretty inefficient...
 
-    threads = (*activeThreads).begin();
-    while (threads != (*activeThreads).end()) {
+    threads = activeThreads->begin();
+    while (threads != end) {
         unsigned tid = (*threads++);
 
         DPRINTF(IEW,"Processing [tid:%i]\n",tid);

View file

@@ -426,16 +426,18 @@ void
 InstructionQueue<Impl>::resetEntries()
 {
     if (iqPolicy != Dynamic || numThreads > 1) {
-        int active_threads = (*activeThreads).size();
+        int active_threads = activeThreads->size();
 
-        std::list<unsigned>::iterator threads = (*activeThreads).begin();
-        std::list<unsigned>::iterator list_end = (*activeThreads).end();
+        std::list<unsigned>::iterator threads = activeThreads->begin();
+        std::list<unsigned>::iterator end = activeThreads->end();
+
+        while (threads != end) {
+            unsigned tid = *threads++;
 
-        while (threads != list_end) {
             if (iqPolicy == Partitioned) {
-                maxEntries[*threads++] = numEntries / active_threads;
+                maxEntries[tid] = numEntries / active_threads;
             } else if(iqPolicy == Threshold && active_threads == 1) {
-                maxEntries[*threads++] = numEntries;
+                maxEntries[tid] = numEntries;
             }
         }
     }

View file

@@ -244,10 +244,7 @@ void
 LSQ<Impl>::resetEntries()
 {
     if (lsqPolicy != Dynamic || numThreads > 1) {
-        int active_threads = (*activeThreads).size();
-
-        std::list<unsigned>::iterator threads = (*activeThreads).begin();
-        std::list<unsigned>::iterator list_end = (*activeThreads).end();
+        int active_threads = activeThreads->size();
 
         int maxEntries;
@@ -259,8 +256,13 @@ LSQ<Impl>::resetEntries()
             maxEntries = LQEntries;
         }
 
-        while (threads != list_end) {
-            resizeEntries(maxEntries,*threads++);
+        std::list<unsigned>::iterator threads = activeThreads->begin();
+        std::list<unsigned>::iterator end = activeThreads->end();
+
+        while (threads != end) {
+            unsigned tid = *threads++;
+
+            resizeEntries(maxEntries, tid);
         }
     }
 }
@@ -285,10 +287,11 @@ template<class Impl>
 void
 LSQ<Impl>::tick()
 {
-    std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (active_threads != (*activeThreads).end()) {
-        unsigned tid = *active_threads++;
+    while (threads != end) {
+        unsigned tid = *threads++;
 
         thread[tid].tick();
     }
@@ -334,10 +337,11 @@ template<class Impl>
 void
 LSQ<Impl>::writebackStores()
 {
-    std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (active_threads != (*activeThreads).end()) {
-        unsigned tid = *active_threads++;
+    while (threads != end) {
+        unsigned tid = *threads++;
 
         if (numStoresToWB(tid) > 0) {
             DPRINTF(Writeback,"[tid:%i] Writing back stores. %i stores "
@@ -353,10 +357,12 @@ bool
 LSQ<Impl>::violation()
 {
     /* Answers: Does Anybody Have a Violation?*/
-    std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (active_threads != (*activeThreads).end()) {
-        unsigned tid = *active_threads++;
+    while (threads != end) {
+        unsigned tid = *threads++;
+
         if (thread[tid].violation())
             return true;
     }
@@ -370,10 +376,12 @@ LSQ<Impl>::getCount()
 {
     unsigned total = 0;
 
-    std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (active_threads != (*activeThreads).end()) {
-        unsigned tid = *active_threads++;
+    while (threads != end) {
+        unsigned tid = *threads++;
+
         total += getCount(tid);
     }
@@ -386,10 +394,12 @@ LSQ<Impl>::numLoads()
 {
     unsigned total = 0;
 
-    std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (active_threads != (*activeThreads).end()) {
-        unsigned tid = *active_threads++;
+    while (threads != end) {
+        unsigned tid = *threads++;
+
         total += numLoads(tid);
     }
@@ -402,10 +412,12 @@ LSQ<Impl>::numStores()
 {
     unsigned total = 0;
 
-    std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (active_threads != (*activeThreads).end()) {
-        unsigned tid = *active_threads++;
+    while (threads != end) {
+        unsigned tid = *threads++;
+
         total += thread[tid].numStores();
     }
@@ -418,10 +430,12 @@ LSQ<Impl>::numLoadsReady()
 {
     unsigned total = 0;
 
-    std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (active_threads != (*activeThreads).end()) {
-        unsigned tid = *active_threads++;
+    while (threads != end) {
+        unsigned tid = *threads++;
+
         total += thread[tid].numLoadsReady();
     }
@@ -434,10 +448,12 @@ LSQ<Impl>::numFreeEntries()
 {
     unsigned total = 0;
 
-    std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (active_threads != (*activeThreads).end()) {
-        unsigned tid = *active_threads++;
+    while (threads != end) {
+        unsigned tid = *threads++;
+
         total += thread[tid].numFreeEntries();
     }
@@ -448,7 +464,7 @@ template<class Impl>
 unsigned
 LSQ<Impl>::numFreeEntries(unsigned tid)
 {
-    //if( lsqPolicy == Dynamic )
+    //if (lsqPolicy == Dynamic)
     //return numFreeEntries();
     //else
         return thread[tid].numFreeEntries();
@@ -458,11 +474,13 @@ template<class Impl>
 bool
 LSQ<Impl>::isFull()
 {
-    std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (active_threads != (*activeThreads).end()) {
-        unsigned tid = *active_threads++;
-        if (! (thread[tid].lqFull() || thread[tid].sqFull()) )
+    while (threads != end) {
+        unsigned tid = *threads++;
+
+        if (!(thread[tid].lqFull() || thread[tid].sqFull()))
             return false;
     }
@@ -475,7 +493,7 @@ LSQ<Impl>::isFull(unsigned tid)
 {
     //@todo: Change to Calculate All Entries for
     //Dynamic Policy
-    if( lsqPolicy == Dynamic )
+    if (lsqPolicy == Dynamic)
         return isFull();
     else
         return thread[tid].lqFull() || thread[tid].sqFull();
@@ -485,10 +503,12 @@ template<class Impl>
 bool
 LSQ<Impl>::lqFull()
 {
-    std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (active_threads != (*activeThreads).end()) {
-        unsigned tid = *active_threads++;
+    while (threads != end) {
+        unsigned tid = *threads++;
+
         if (!thread[tid].lqFull())
             return false;
     }
@@ -502,7 +522,7 @@ LSQ<Impl>::lqFull(unsigned tid)
 {
     //@todo: Change to Calculate All Entries for
     //Dynamic Policy
-    if( lsqPolicy == Dynamic )
+    if (lsqPolicy == Dynamic)
         return lqFull();
     else
         return thread[tid].lqFull();
@@ -512,10 +532,12 @@ template<class Impl>
 bool
 LSQ<Impl>::sqFull()
 {
-    std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
+    std::list<unsigned>::iterator threads = activeThreads->begin();
+    std::list<unsigned>::iterator end = activeThreads->end();
 
-    while (active_threads != (*activeThreads).end()) {
-        unsigned tid = *active_threads++;
+    while (threads != end) {
+        unsigned tid = *threads++;
+
         if (!sqFull(tid))
             return false;
     }
@@ -529,7 +551,7 @@ LSQ<Impl>::sqFull(unsigned tid)
 {
     //@todo: Change to Calculate All Entries for
     //Dynamic Policy
-    if( lsqPolicy == Dynamic )
+    if (lsqPolicy == Dynamic)
         return sqFull();
     else
         return thread[tid].sqFull();
@ -539,10 +561,12 @@ template<class Impl>
bool bool
LSQ<Impl>::isStalled() LSQ<Impl>::isStalled()
{ {
std::list<unsigned>::iterator active_threads = (*activeThreads).begin(); std::list<unsigned>::iterator threads = activeThreads->begin();
std::list<unsigned>::iterator end = activeThreads->end();
while (threads != end) {
unsigned tid = *threads++;
while (active_threads != (*activeThreads).end()) {
unsigned tid = *active_threads++;
if (!thread[tid].isStalled()) if (!thread[tid].isStalled())
return false; return false;
} }
@ -554,7 +578,7 @@ template<class Impl>
bool bool
LSQ<Impl>::isStalled(unsigned tid) LSQ<Impl>::isStalled(unsigned tid)
{ {
if( lsqPolicy == Dynamic ) if (lsqPolicy == Dynamic)
return isStalled(); return isStalled();
else else
return thread[tid].isStalled(); return thread[tid].isStalled();
@ -564,13 +588,15 @@ template<class Impl>
bool bool
LSQ<Impl>::hasStoresToWB() LSQ<Impl>::hasStoresToWB()
{ {
std::list<unsigned>::iterator active_threads = (*activeThreads).begin(); std::list<unsigned>::iterator threads = activeThreads->begin();
std::list<unsigned>::iterator end = activeThreads->end();
if ((*activeThreads).empty()) if (threads == end)
return false; return false;
while (active_threads != (*activeThreads).end()) { while (threads != end) {
unsigned tid = *active_threads++; unsigned tid = *threads++;
if (!hasStoresToWB(tid)) if (!hasStoresToWB(tid))
return false; return false;
} }
@ -582,10 +608,12 @@ template<class Impl>
bool bool
LSQ<Impl>::willWB() LSQ<Impl>::willWB()
{ {
std::list<unsigned>::iterator active_threads = (*activeThreads).begin(); std::list<unsigned>::iterator threads = activeThreads->begin();
std::list<unsigned>::iterator end = activeThreads->end();
while (threads != end) {
unsigned tid = *threads++;
while (active_threads != (*activeThreads).end()) {
unsigned tid = *active_threads++;
if (!willWB(tid)) if (!willWB(tid))
return false; return false;
} }
@ -597,10 +625,12 @@ template<class Impl>
void void
LSQ<Impl>::dumpInsts() LSQ<Impl>::dumpInsts()
{ {
std::list<unsigned>::iterator active_threads = (*activeThreads).begin(); std::list<unsigned>::iterator threads = activeThreads->begin();
std::list<unsigned>::iterator end = activeThreads->end();
while (threads != end) {
unsigned tid = *threads++;
while (active_threads != (*activeThreads).end()) {
unsigned tid = *active_threads++;
thread[tid].dumpInsts(); thread[tid].dumpInsts();
} }
} }
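The recurring change in this file caches activeThreads->end() once and reads the thread id through *it++ instead of re-evaluating (*activeThreads).end() on every pass. A minimal, self-contained sketch of that loop shape, using a plain std::list<unsigned> and a stand-in per-thread predicate (names here are illustrative, not the simulator's):

#include <iostream>
#include <list>

// Stand-in for the per-thread query the LSQ methods perform.
static bool lqFullFor(unsigned tid) { return tid != 2; }

// Returns true only if every active thread reports a full load queue,
// mirroring the refactored loop: cache end() once, advance via *it++.
static bool allLqFull(const std::list<unsigned> &activeThreads)
{
    std::list<unsigned>::const_iterator threads = activeThreads.begin();
    std::list<unsigned>::const_iterator end = activeThreads.end();

    while (threads != end) {
        unsigned tid = *threads++;
        if (!lqFullFor(tid))
            return false;
    }
    return true;
}

int main()
{
    std::list<unsigned> active = {0, 1, 2};
    std::cout << std::boolalpha << allLqFull(active) << '\n';  // false
    return 0;
}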

View file

@ -509,7 +509,6 @@ LSQUnit<Impl>::read(Request *req, T &data, int load_idx)
"storeHead: %i addr: %#x\n", "storeHead: %i addr: %#x\n",
load_idx, store_idx, storeHead, req->getPaddr()); load_idx, store_idx, storeHead, req->getPaddr());
#if FULL_SYSTEM
if (req->isLocked()) { if (req->isLocked()) {
// Disable recording the result temporarily. Writing to misc // Disable recording the result temporarily. Writing to misc
// regs normally updates the result, but this is not the // regs normally updates the result, but this is not the
@ -518,7 +517,6 @@ LSQUnit<Impl>::read(Request *req, T &data, int load_idx)
TheISA::handleLockedRead(load_inst.get(), req); TheISA::handleLockedRead(load_inst.get(), req);
load_inst->recordResult = true; load_inst->recordResult = true;
} }
#endif
while (store_idx != -1) { while (store_idx != -1) {
// End once we've reached the top of the LSQ // End once we've reached the top of the LSQ

View file

@ -429,10 +429,11 @@ DefaultRename<Impl>::tick()
sortInsts(); sortInsts();
std::list<unsigned>::iterator threads = (*activeThreads).begin(); std::list<unsigned>::iterator threads = activeThreads->begin();
std::list<unsigned>::iterator end = activeThreads->end();
// Check stall and squash signals. // Check stall and squash signals.
while (threads != (*activeThreads).end()) { while (threads != end) {
unsigned tid = *threads++; unsigned tid = *threads++;
DPRINTF(Rename, "Processing [tid:%i]\n", tid); DPRINTF(Rename, "Processing [tid:%i]\n", tid);
@ -451,9 +452,9 @@ DefaultRename<Impl>::tick()
cpu->activityThisCycle(); cpu->activityThisCycle();
} }
threads = (*activeThreads).begin(); threads = activeThreads->begin();
while (threads != (*activeThreads).end()) { while (threads != end) {
unsigned tid = *threads++; unsigned tid = *threads++;
// If we committed this cycle then doneSeqNum will be > 0 // If we committed this cycle then doneSeqNum will be > 0
@ -805,10 +806,13 @@ template<class Impl>
bool bool
DefaultRename<Impl>::skidsEmpty() DefaultRename<Impl>::skidsEmpty()
{ {
std::list<unsigned>::iterator threads = (*activeThreads).begin(); std::list<unsigned>::iterator threads = activeThreads->begin();
std::list<unsigned>::iterator end = activeThreads->end();
while (threads != (*activeThreads).end()) { while (threads != end) {
if (!skidBuffer[*threads++].empty()) unsigned tid = *threads++;
if (!skidBuffer[tid].empty())
return false; return false;
} }
@ -821,11 +825,10 @@ DefaultRename<Impl>::updateStatus()
{ {
bool any_unblocking = false; bool any_unblocking = false;
std::list<unsigned>::iterator threads = (*activeThreads).begin(); std::list<unsigned>::iterator threads = activeThreads->begin();
std::list<unsigned>::iterator end = activeThreads->end();
threads = (*activeThreads).begin(); while (threads != end) {
while (threads != (*activeThreads).end()) {
unsigned tid = *threads++; unsigned tid = *threads++;
if (renameStatus[tid] == Unblocking) { if (renameStatus[tid] == Unblocking) {

View file

@ -180,6 +180,8 @@ SimpleRenameMap::rename(RegIndex arch_reg)
// Subtract off the base offset for miscellaneous registers. // Subtract off the base offset for miscellaneous registers.
arch_reg = arch_reg - numLogicalRegs; arch_reg = arch_reg - numLogicalRegs;
DPRINTF(Rename, "Renamed misc reg %d\n", arch_reg);
// No renaming happens to the misc. registers. They are // No renaming happens to the misc. registers. They are
// simply the registers that come after all the physical // simply the registers that come after all the physical
// registers; thus take the base architected register and add // registers; thus take the base architected register and add
@ -194,6 +196,9 @@ SimpleRenameMap::rename(RegIndex arch_reg)
assert(renamed_reg < numPhysicalRegs + numMiscRegs); assert(renamed_reg < numPhysicalRegs + numMiscRegs);
} }
DPRINTF(Rename, "Renamed reg %d to physical reg %d old mapping was %d\n",
arch_reg, renamed_reg, prev_reg);
return RenameInfo(renamed_reg, prev_reg); return RenameInfo(renamed_reg, prev_reg);
} }

View file

@ -155,16 +155,18 @@ void
ROB<Impl>::resetEntries() ROB<Impl>::resetEntries()
{ {
if (robPolicy != Dynamic || numThreads > 1) { if (robPolicy != Dynamic || numThreads > 1) {
int active_threads = (*activeThreads).size(); int active_threads = activeThreads->size();
std::list<unsigned>::iterator threads = (*activeThreads).begin(); std::list<unsigned>::iterator threads = activeThreads->begin();
std::list<unsigned>::iterator list_end = (*activeThreads).end(); std::list<unsigned>::iterator end = activeThreads->end();
while (threads != end) {
unsigned tid = *threads++;
while (threads != list_end) {
if (robPolicy == Partitioned) { if (robPolicy == Partitioned) {
maxEntries[*threads++] = numEntries / active_threads; maxEntries[tid] = numEntries / active_threads;
} else if (robPolicy == Threshold && active_threads == 1) { } else if (robPolicy == Threshold && active_threads == 1) {
maxEntries[*threads++] = numEntries; maxEntries[tid] = numEntries;
} }
} }
} }
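The resetEntries() hunk above now reads the thread id once per iteration before applying the sharing policy: Partitioned divides the entries evenly across active threads, while Threshold hands a lone thread the whole structure. A hedged sketch of that arithmetic with stand-in types:

#include <cstdio>
#include <vector>

enum Policy { Dynamic, Partitioned, Threshold };

// Recompute per-thread ROB limits the way the refactored loop does:
// Partitioned splits entries evenly; Threshold gives a lone thread everything.
static void resetEntries(Policy policy, int numEntries,
                         const std::vector<unsigned> &activeThreads,
                         std::vector<int> &maxEntries)
{
    int active = static_cast<int>(activeThreads.size());
    for (unsigned tid : activeThreads) {
        if (policy == Partitioned)
            maxEntries[tid] = numEntries / active;
        else if (policy == Threshold && active == 1)
            maxEntries[tid] = numEntries;
    }
}

int main()
{
    std::vector<int> maxEntries(4, 0);
    resetEntries(Partitioned, 192, {0, 1, 2}, maxEntries);
    std::printf("%d %d %d\n", maxEntries[0], maxEntries[1], maxEntries[2]); // 64 64 64
    return 0;
}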
@ -318,9 +320,10 @@ bool
ROB<Impl>::canCommit() ROB<Impl>::canCommit()
{ {
//@todo: set ActiveThreads through ROB or CPU //@todo: set ActiveThreads through ROB or CPU
std::list<unsigned>::iterator threads = (*activeThreads).begin(); std::list<unsigned>::iterator threads = activeThreads->begin();
std::list<unsigned>::iterator end = activeThreads->end();
while (threads != (*activeThreads).end()) { while (threads != end) {
unsigned tid = *threads++; unsigned tid = *threads++;
if (isHeadReady(tid)) { if (isHeadReady(tid)) {
@ -432,22 +435,23 @@ ROB<Impl>::updateHead()
bool first_valid = true; bool first_valid = true;
// @todo: set ActiveThreads through ROB or CPU // @todo: set ActiveThreads through ROB or CPU
std::list<unsigned>::iterator threads = (*activeThreads).begin(); std::list<unsigned>::iterator threads = activeThreads->begin();
std::list<unsigned>::iterator end = activeThreads->end();
while (threads != (*activeThreads).end()) { while (threads != end) {
unsigned thread_num = *threads++; unsigned tid = *threads++;
if (instList[thread_num].empty()) if (instList[tid].empty())
continue; continue;
if (first_valid) { if (first_valid) {
head = instList[thread_num].begin(); head = instList[tid].begin();
lowest_num = (*head)->seqNum; lowest_num = (*head)->seqNum;
first_valid = false; first_valid = false;
continue; continue;
} }
InstIt head_thread = instList[thread_num].begin(); InstIt head_thread = instList[tid].begin();
DynInstPtr head_inst = (*head_thread); DynInstPtr head_inst = (*head_thread);
@ -472,9 +476,10 @@ ROB<Impl>::updateTail()
tail = instList[0].end(); tail = instList[0].end();
bool first_valid = true; bool first_valid = true;
std::list<unsigned>::iterator threads = (*activeThreads).begin(); std::list<unsigned>::iterator threads = activeThreads->begin();
std::list<unsigned>::iterator end = activeThreads->end();
while (threads != (*activeThreads).end()) { while (threads != end) {
unsigned tid = *threads++; unsigned tid = *threads++;
if (instList[tid].empty()) { if (instList[tid].empty()) {

View file

@ -342,16 +342,18 @@ void
InstQueue<Impl>::resetEntries() InstQueue<Impl>::resetEntries()
{ {
if (iqPolicy != Dynamic || numThreads > 1) { if (iqPolicy != Dynamic || numThreads > 1) {
int active_threads = (*activeThreads).size(); int active_threads = activeThreads->size();
list<unsigned>::iterator threads = (*activeThreads).begin(); std::list<unsigned>::iterator threads = activeThreads->begin();
list<unsigned>::iterator list_end = (*activeThreads).end(); std::list<unsigned>::iterator end = activeThreads->end();
while (threads != end) {
unsigned tid = *threads++;
while (threads != list_end) {
if (iqPolicy == Partitioned) { if (iqPolicy == Partitioned) {
maxEntries[*threads++] = numEntries / active_threads; maxEntries[tid] = numEntries / active_threads;
} else if(iqPolicy == Threshold && active_threads == 1) { } else if (iqPolicy == Threshold && active_threads == 1) {
maxEntries[*threads++] = numEntries; maxEntries[tid] = numEntries;
} }
} }
} }

View file

@ -88,6 +88,8 @@ TsunamiCChip::read(PacketPtr pkt)
switch (pkt->getSize()) { switch (pkt->getSize()) {
case sizeof(uint64_t): case sizeof(uint64_t):
pkt->set<uint64_t>(0);
if (daddr & TSDEV_CC_BDIMS) if (daddr & TSDEV_CC_BDIMS)
{ {
pkt->set(dim[(daddr >> 4) & 0x3F]); pkt->set(dim[(daddr >> 4) & 0x3F]);

View file

@ -692,6 +692,10 @@ class BaseCache : public MemObject
} }
return true; return true;
} }
virtual bool inCache(Addr addr) = 0;
virtual bool inMissQueue(Addr addr) = 0;
}; };
#endif //__BASE_CACHE_HH__ #endif //__BASE_CACHE_HH__
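These two pure virtuals are what lets the de-templated prefetchers ask the cache about occupancy without holding a typed TagStore pointer (the concrete cache implements them further down by delegating to tags->findBlock and missQueue->findMSHR). A small illustrative sketch of the same inversion, with toy types rather than the real class hierarchy:

#include <iostream>
#include <set>
#include <cstdint>

using Addr = std::uint64_t;

// Abstract interface a prefetcher can query without knowing the tag store type.
struct BaseCacheLike {
    virtual ~BaseCacheLike() = default;
    virtual bool inCache(Addr addr) = 0;
    virtual bool inMissQueue(Addr addr) = 0;
};

// One concrete cache backs the queries with simple sets, for illustration only.
struct ToyCache : BaseCacheLike {
    std::set<Addr> blocks, mshrs;
    bool inCache(Addr addr) override { return blocks.count(addr) != 0; }
    bool inMissQueue(Addr addr) override { return mshrs.count(addr) != 0; }
};

// A prefetcher only needs the base pointer to filter redundant prefetches.
static bool worthPrefetching(BaseCacheLike *cache, Addr addr)
{
    return !cache->inCache(addr) && !cache->inMissQueue(addr);
}

int main()
{
    ToyCache c;
    c.blocks.insert(0x1000);
    std::cout << worthPrefetching(&c, 0x1000) << ' '
              << worthPrefetching(&c, 0x1040) << '\n';  // 0 1
    return 0;
}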

View file

@ -38,8 +38,6 @@
#include "mem/config/cache.hh" #include "mem/config/cache.hh"
#include "mem/cache/tags/cache_tags.hh"
#if defined(USE_CACHE_LRU) #if defined(USE_CACHE_LRU)
#include "mem/cache/tags/lru.hh" #include "mem/cache/tags/lru.hh"
#endif #endif
@ -73,28 +71,28 @@
#if defined(USE_CACHE_FALRU) #if defined(USE_CACHE_FALRU)
template class Cache<CacheTags<FALRU>, SimpleCoherence>; template class Cache<FALRU, SimpleCoherence>;
template class Cache<CacheTags<FALRU>, UniCoherence>; template class Cache<FALRU, UniCoherence>;
#endif #endif
#if defined(USE_CACHE_IIC) #if defined(USE_CACHE_IIC)
template class Cache<CacheTags<IIC>, SimpleCoherence>; template class Cache<IIC, SimpleCoherence>;
template class Cache<CacheTags<IIC>, UniCoherence>; template class Cache<IIC, UniCoherence>;
#endif #endif
#if defined(USE_CACHE_LRU) #if defined(USE_CACHE_LRU)
template class Cache<CacheTags<LRU>, SimpleCoherence>; template class Cache<LRU, SimpleCoherence>;
template class Cache<CacheTags<LRU>, UniCoherence>; template class Cache<LRU, UniCoherence>;
#endif #endif
#if defined(USE_CACHE_SPLIT) #if defined(USE_CACHE_SPLIT)
template class Cache<CacheTags<Split>, SimpleCoherence>; template class Cache<Split, SimpleCoherence>;
template class Cache<CacheTags<Split>, UniCoherence>; template class Cache<Split, UniCoherence>;
#endif #endif
#if defined(USE_CACHE_SPLIT_LIFO) #if defined(USE_CACHE_SPLIT_LIFO)
template class Cache<CacheTags<SplitLIFO>, SimpleCoherence>; template class Cache<SplitLIFO, SimpleCoherence>;
template class Cache<CacheTags<SplitLIFO>, UniCoherence>; template class Cache<SplitLIFO, UniCoherence>;
#endif #endif
#endif //DOXYGEN_SHOULD_SKIP_THIS #endif //DOXYGEN_SHOULD_SKIP_THIS

src/mem/cache/cache.hh
View file

@ -38,16 +38,17 @@
#ifndef __CACHE_HH__ #ifndef __CACHE_HH__
#define __CACHE_HH__ #define __CACHE_HH__
#include "base/compression/base.hh"
#include "base/misc.hh" // fatal, panic, and warn #include "base/misc.hh" // fatal, panic, and warn
#include "cpu/smt.hh" // SMT_MAX_THREADS #include "cpu/smt.hh" // SMT_MAX_THREADS
#include "mem/cache/base_cache.hh" #include "mem/cache/base_cache.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/miss_buffer.hh" #include "mem/cache/miss/miss_buffer.hh"
#include "mem/cache/prefetch/prefetcher.hh"
//Forward declaration //Forward declaration
class MSHR; class MSHR;
class BasePrefetcher;
/** /**
* A template-policy based cache. The behavior of the cache can be altered by * A template-policy based cache. The behavior of the cache can be altered by
@ -62,6 +63,8 @@ class Cache : public BaseCache
public: public:
/** Define the type of cache block to use. */ /** Define the type of cache block to use. */
typedef typename TagStore::BlkType BlkType; typedef typename TagStore::BlkType BlkType;
/** A typedef for a list of BlkType pointers. */
typedef typename TagStore::BlkList BlkList;
bool prefetchAccess; bool prefetchAccess;
@ -115,7 +118,7 @@ class Cache : public BaseCache
Coherence *coherence; Coherence *coherence;
/** Prefetcher */ /** Prefetcher */
Prefetcher<TagStore> *prefetcher; BasePrefetcher *prefetcher;
/** /**
* The clock ratio of the outgoing bus. * The clock ratio of the outgoing bus.
@ -141,6 +144,141 @@ class Cache : public BaseCache
PacketPtr invalidatePkt; PacketPtr invalidatePkt;
Request *invalidateReq; Request *invalidateReq;
/**
* Policy class for performing compression.
*/
CompressionAlgorithm *compressionAlg;
/**
* The block size of this cache. Set to value in the Tags object.
*/
const int16_t blkSize;
/**
* Whether this cache should allocate a block on a line-sized write miss.
*/
const bool doFastWrites;
const bool prefetchMiss;
/**
* Whether the data can be stored in a compressed form.
*/
const bool storeCompressed;
/**
* Do we need to compress blocks on writebacks (i.e. because
* writeback bus is compressed but storage is not)?
*/
const bool compressOnWriteback;
/**
* The latency of a compression operation.
*/
const int16_t compLatency;
/**
* Should we use an adaptive compression scheme.
*/
const bool adaptiveCompression;
/**
* Do writebacks need to be compressed (i.e. because writeback bus
* is compressed), whether or not they're already compressed for
* storage.
*/
const bool writebackCompressed;
/**
* Compare the internal block data to the fast access block data.
* @param blk The cache block to check.
* @return True if the data is the same.
*/
bool verifyData(BlkType *blk);
/**
* Update the internal data of the block. The data to write is assumed to
* be in the fast access data.
* @param blk The block with the data to update.
* @param writebacks A list to store any generated writebacks.
* @param compress_block True if we should compress this block
*/
void updateData(BlkType *blk, PacketList &writebacks, bool compress_block);
/**
* Handle a replacement for the given request.
* @param blk A pointer to the block, usually NULL
* @param pkt The memory request to satisfy.
* @param new_state The new state of the block.
* @param writebacks A list to store any generated writebacks.
*/
BlkType* doReplacement(BlkType *blk, PacketPtr &pkt,
CacheBlk::State new_state, PacketList &writebacks);
/**
* Does all the processing necessary to perform the provided request.
* @param pkt The memory request to perform.
* @param lat The latency of the access.
* @param writebacks List for any writebacks that need to be performed.
* @param update True if the replacement data should be updated.
* @return Pointer to the cache block touched by the request. NULL if it
* was a miss.
*/
BlkType* handleAccess(PacketPtr &pkt, int & lat,
PacketList & writebacks, bool update = true);
/**
* Populates a cache block and handles all outstanding requests for the
* satisfied fill request. This version takes an MSHR pointer and uses its
* request to fill the cache block, while responding to its targets.
* @param blk The cache block if it already exists.
* @param mshr The MSHR that contains the fill data and targets to satisfy.
* @param new_state The state of the new cache block.
* @param writebacks List for any writebacks that need to be performed.
* @return Pointer to the new cache block.
*/
BlkType* handleFill(BlkType *blk, MSHR * mshr, CacheBlk::State new_state,
PacketList & writebacks, PacketPtr pkt);
/**
* Populates a cache block and handles all outstanding requests for the
* satisfied fill request. This version takes two memory requests. One
* contains the fill data, the other is an optional target to satisfy.
* Used for Cache::probe.
* @param blk The cache block if it already exists.
* @param pkt The memory request with the fill data.
* @param new_state The state of the new cache block.
* @param writebacks List for any writebacks that need to be performed.
* @param target The memory request to perform after the fill.
* @return Pointer to the new cache block.
*/
BlkType* handleFill(BlkType *blk, PacketPtr &pkt,
CacheBlk::State new_state,
PacketList & writebacks, PacketPtr target = NULL);
/**
* Sets the blk to the new state and handles the given request.
* @param blk The cache block being snooped.
* @param new_state The new coherence state for the block.
* @param pkt The request to satisfy
*/
void handleSnoop(BlkType *blk, CacheBlk::State new_state,
PacketPtr &pkt);
/**
* Sets the blk to the new state.
* @param blk The cache block being snooped.
* @param new_state The new coherence state for the block.
*/
void handleSnoop(BlkType *blk, CacheBlk::State new_state);
/**
* Create a writeback request for the given block.
* @param blk The block to writeback.
* @return The writeback request for the block.
*/
PacketPtr writebackBlk(BlkType *blk);
public: public:
class Params class Params
@ -150,18 +288,41 @@ class Cache : public BaseCache
MissBuffer *missQueue; MissBuffer *missQueue;
Coherence *coherence; Coherence *coherence;
BaseCache::Params baseParams; BaseCache::Params baseParams;
Prefetcher<TagStore> *prefetcher; BasePrefetcher*prefetcher;
bool prefetchAccess; bool prefetchAccess;
int hitLatency; int hitLatency;
CompressionAlgorithm *compressionAlg;
const int16_t blkSize;
const bool doFastWrites;
const bool prefetchMiss;
const bool storeCompressed;
const bool compressOnWriteback;
const int16_t compLatency;
const bool adaptiveCompression;
const bool writebackCompressed;
Params(TagStore *_tags, MissBuffer *mq, Coherence *coh, Params(TagStore *_tags, MissBuffer *mq, Coherence *coh,
BaseCache::Params params, BaseCache::Params params,
Prefetcher<TagStore> *_prefetcher, BasePrefetcher *_prefetcher,
bool prefetch_access, int hit_latency) bool prefetch_access, int hit_latency,
bool do_fast_writes,
bool store_compressed, bool adaptive_compression,
bool writeback_compressed,
CompressionAlgorithm *_compressionAlg, int comp_latency,
bool prefetch_miss)
: tags(_tags), missQueue(mq), coherence(coh), : tags(_tags), missQueue(mq), coherence(coh),
baseParams(params), baseParams(params),
prefetcher(_prefetcher), prefetchAccess(prefetch_access), prefetcher(_prefetcher), prefetchAccess(prefetch_access),
hitLatency(hit_latency) hitLatency(hit_latency),
compressionAlg(_compressionAlg),
blkSize(_tags->getBlockSize()),
doFastWrites(do_fast_writes),
prefetchMiss(prefetch_miss),
storeCompressed(store_compressed),
compressOnWriteback(!store_compressed && writeback_compressed),
compLatency(comp_latency),
adaptiveCompression(adaptive_compression),
writebackCompressed(writeback_compressed)
{ {
} }
}; };
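Among the new Params fields, compressOnWriteback is derived rather than passed through: it is set only when the writeback bus wants compressed data but blocks are stored uncompressed. A tiny sketch of just that flag derivation (names are illustrative):

#include <cstdio>

// Derive the writeback-compression behaviour from the two user-visible flags,
// matching the Params initializer: compress on writeback only when the bus
// wants compressed data but blocks are stored uncompressed.
struct CompressionConfig {
    bool storeCompressed;
    bool writebackCompressed;
    bool compressOnWriteback() const {
        return !storeCompressed && writebackCompressed;
    }
};

int main()
{
    CompressionConfig a{false, true};   // compressed bus, plain storage
    CompressionConfig b{true, true};    // already stored compressed
    std::printf("%d %d\n", a.compressOnWriteback(), b.compressOnWriteback()); // 1 0
    return 0;
}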
@ -222,14 +383,6 @@ class Cache : public BaseCache
void snoopResponse(PacketPtr &pkt); void snoopResponse(PacketPtr &pkt);
/**
* Invalidates the block containing address if found.
* @param addr The address to look for.
* @param asid The address space ID of the address.
* @todo Is this function necessary?
*/
void invalidateBlk(Addr addr);
/** /**
* Squash all requests associated with specified thread. * Squash all requests associated with specified thread.
* intended for use by I-cache. * intended for use by I-cache.
@ -273,6 +426,14 @@ class Cache : public BaseCache
* @return The estimated completion time. * @return The estimated completion time.
*/ */
Tick snoopProbe(PacketPtr &pkt); Tick snoopProbe(PacketPtr &pkt);
bool inCache(Addr addr) {
return (tags->findBlock(addr) != 0);
}
bool inMissQueue(Addr addr) {
return (missQueue->findMSHR(addr) != 0);
}
}; };
#endif // __CACHE_HH__ #endif // __CACHE_HH__

View file

@ -70,9 +70,6 @@
#include "base/compression/null_compression.hh" #include "base/compression/null_compression.hh"
#include "base/compression/lzss_compression.hh" #include "base/compression/lzss_compression.hh"
// CacheTags Templates
#include "mem/cache/tags/cache_tags.hh"
// MissQueue Templates // MissQueue Templates
#include "mem/cache/miss/miss_queue.hh" #include "mem/cache/miss/miss_queue.hh"
#include "mem/cache/miss/blocking_buffer.hh" #include "mem/cache/miss/blocking_buffer.hh"
@ -108,8 +105,6 @@ BEGIN_DECLARE_SIM_OBJECT_PARAMS(BaseCache)
Param<int> tgts_per_mshr; Param<int> tgts_per_mshr;
Param<int> write_buffers; Param<int> write_buffers;
Param<bool> prioritizeRequests; Param<bool> prioritizeRequests;
// SimObjectParam<Bus *> in_bus;
// SimObjectParam<Bus *> out_bus;
SimObjectParam<CoherenceProtocol *> protocol; SimObjectParam<CoherenceProtocol *> protocol;
Param<Addr> trace_addr; Param<Addr> trace_addr;
Param<int> hash_delay; Param<int> hash_delay;
@ -122,7 +117,6 @@ BEGIN_DECLARE_SIM_OBJECT_PARAMS(BaseCache)
Param<int> compression_latency; Param<int> compression_latency;
Param<int> subblock_size; Param<int> subblock_size;
Param<Counter> max_miss_count; Param<Counter> max_miss_count;
// SimObjectParam<HierParams *> hier;
VectorParam<Range<Addr> > addr_range; VectorParam<Range<Addr> > addr_range;
// SimObjectParam<MemTraceWriter *> mem_trace; // SimObjectParam<MemTraceWriter *> mem_trace;
Param<bool> split; Param<bool> split;
@ -156,9 +150,6 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache)
INIT_PARAM_DFLT(write_buffers, "number of write buffers", 8), INIT_PARAM_DFLT(write_buffers, "number of write buffers", 8),
INIT_PARAM_DFLT(prioritizeRequests, "always service demand misses first", INIT_PARAM_DFLT(prioritizeRequests, "always service demand misses first",
false), false),
/* INIT_PARAM_DFLT(in_bus, "incoming bus object", NULL),
INIT_PARAM(out_bus, "outgoing bus object"),
*/
INIT_PARAM_DFLT(protocol, "coherence protocol to use in the cache", NULL), INIT_PARAM_DFLT(protocol, "coherence protocol to use in the cache", NULL),
INIT_PARAM_DFLT(trace_addr, "address to trace", 0), INIT_PARAM_DFLT(trace_addr, "address to trace", 0),
@ -182,10 +173,6 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache)
INIT_PARAM_DFLT(max_miss_count, INIT_PARAM_DFLT(max_miss_count,
"The number of misses to handle before calling exit", "The number of misses to handle before calling exit",
0), 0),
/* INIT_PARAM_DFLT(hier,
"Hierarchy global variables",
&defaultHierParams),
*/
INIT_PARAM_DFLT(addr_range, "The address range in bytes", INIT_PARAM_DFLT(addr_range, "The address range in bytes",
vector<Range<Addr> >(1,RangeIn((Addr)0, MaxAddr))), vector<Range<Addr> >(1,RangeIn((Addr)0, MaxAddr))),
// INIT_PARAM_DFLT(mem_trace, "Memory trace to write accesses to", NULL), // INIT_PARAM_DFLT(mem_trace, "Memory trace to write accesses to", NULL),
@ -208,47 +195,47 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache)
END_INIT_SIM_OBJECT_PARAMS(BaseCache) END_INIT_SIM_OBJECT_PARAMS(BaseCache)
#define BUILD_CACHE(t, c) do { \ #define BUILD_CACHE(TAGS, tags, c) \
Prefetcher<CacheTags<t> > *pf; \ do { \
if (pf_policy == "tagged") { \ BasePrefetcher *pf; \
BUILD_TAGGED_PREFETCHER(t); \ if (pf_policy == "tagged") { \
} \ BUILD_TAGGED_PREFETCHER(TAGS); \
else if (pf_policy == "stride") { \ } \
BUILD_STRIDED_PREFETCHER(t); \ else if (pf_policy == "stride") { \
} \ BUILD_STRIDED_PREFETCHER(TAGS); \
else if (pf_policy == "ghb") { \ } \
BUILD_GHB_PREFETCHER(t); \ else if (pf_policy == "ghb") { \
} \ BUILD_GHB_PREFETCHER(TAGS); \
else { \ } \
BUILD_NULL_PREFETCHER(t); \ else { \
} \ BUILD_NULL_PREFETCHER(TAGS); \
Cache<CacheTags<t>, c>::Params params(tagStore, mq, coh, \ } \
base_params, \ Cache<TAGS, c>::Params params(tags, mq, coh, base_params, \
pf, \ pf, prefetch_access, hit_latency, \
prefetch_access, hit_latency); \ true, \
Cache<CacheTags<t>, c> *retval = \ store_compressed, \
new Cache<CacheTags<t>, c>(getInstanceName(), params); \ adaptive_compression, \
return retval; \ compressed_bus, \
compAlg, compression_latency, \
prefetch_miss); \
Cache<TAGS, c> *retval = \
new Cache<TAGS, c>(getInstanceName(), params); \
return retval; \
} while (0) } while (0)
#define BUILD_CACHE_PANIC(x) do { \ #define BUILD_CACHE_PANIC(x) do { \
panic("%s not compiled into M5", x); \ panic("%s not compiled into M5", x); \
} while (0) } while (0)
#define BUILD_COMPRESSED_CACHE(TAGS, tags, c) \ #define BUILD_COMPRESSED_CACHE(TAGS, tags, c) \
do { \ do { \
CompressionAlgorithm *compAlg; \ CompressionAlgorithm *compAlg; \
if (compressed_bus || store_compressed) { \ if (compressed_bus || store_compressed) { \
compAlg = new LZSSCompression(); \ compAlg = new LZSSCompression(); \
} else { \ } else { \
compAlg = new NullCompression(); \ compAlg = new NullCompression(); \
} \ } \
CacheTags<TAGS> *tagStore = \ BUILD_CACHE(TAGS, tags, c); \
new CacheTags<TAGS>(tags, compression_latency, true, \
store_compressed, adaptive_compression, \
compressed_bus, \
compAlg, prefetch_miss); \
BUILD_CACHE(TAGS, c); \
} while (0) } while (0)
#if defined(USE_CACHE_FALRU) #if defined(USE_CACHE_FALRU)
@ -327,55 +314,55 @@ return retval; \
} while (0) } while (0)
#if defined(USE_TAGGED) #if defined(USE_TAGGED)
#define BUILD_TAGGED_PREFETCHER(t) pf = new \ #define BUILD_TAGGED_PREFETCHER(t) \
TaggedPrefetcher<CacheTags<t> >(prefetcher_size, \ pf = new TaggedPrefetcher(prefetcher_size, \
!prefetch_past_page, \ !prefetch_past_page, \
prefetch_serial_squash, \ prefetch_serial_squash, \
prefetch_cache_check_push, \ prefetch_cache_check_push, \
prefetch_data_accesses_only, \ prefetch_data_accesses_only, \
prefetch_latency, \ prefetch_latency, \
prefetch_degree) prefetch_degree)
#else #else
#define BUILD_TAGGED_PREFETCHER(t) BUILD_CACHE_PANIC("Tagged Prefetcher") #define BUILD_TAGGED_PREFETCHER(t) BUILD_CACHE_PANIC("Tagged Prefetcher")
#endif #endif
#if defined(USE_STRIDED) #if defined(USE_STRIDED)
#define BUILD_STRIDED_PREFETCHER(t) pf = new \ #define BUILD_STRIDED_PREFETCHER(t) \
StridePrefetcher<CacheTags<t> >(prefetcher_size, \ pf = new StridePrefetcher(prefetcher_size, \
!prefetch_past_page, \ !prefetch_past_page, \
prefetch_serial_squash, \ prefetch_serial_squash, \
prefetch_cache_check_push, \ prefetch_cache_check_push, \
prefetch_data_accesses_only, \ prefetch_data_accesses_only, \
prefetch_latency, \ prefetch_latency, \
prefetch_degree, \ prefetch_degree, \
prefetch_use_cpu_id) prefetch_use_cpu_id)
#else #else
#define BUILD_STRIDED_PREFETCHER(t) BUILD_CACHE_PANIC("Stride Prefetcher") #define BUILD_STRIDED_PREFETCHER(t) BUILD_CACHE_PANIC("Stride Prefetcher")
#endif #endif
#if defined(USE_GHB) #if defined(USE_GHB)
#define BUILD_GHB_PREFETCHER(t) pf = new \ #define BUILD_GHB_PREFETCHER(t) \
GHBPrefetcher<CacheTags<t> >(prefetcher_size, \ pf = new GHBPrefetcher(prefetcher_size, \
!prefetch_past_page, \ !prefetch_past_page, \
prefetch_serial_squash, \ prefetch_serial_squash, \
prefetch_cache_check_push, \ prefetch_cache_check_push, \
prefetch_data_accesses_only, \ prefetch_data_accesses_only, \
prefetch_latency, \ prefetch_latency, \
prefetch_degree, \ prefetch_degree, \
prefetch_use_cpu_id) prefetch_use_cpu_id)
#else #else
#define BUILD_GHB_PREFETCHER(t) BUILD_CACHE_PANIC("GHB Prefetcher") #define BUILD_GHB_PREFETCHER(t) BUILD_CACHE_PANIC("GHB Prefetcher")
#endif #endif
#if defined(USE_TAGGED) #if defined(USE_TAGGED)
#define BUILD_NULL_PREFETCHER(t) pf = new \ #define BUILD_NULL_PREFETCHER(t) \
TaggedPrefetcher<CacheTags<t> >(prefetcher_size, \ pf = new TaggedPrefetcher(prefetcher_size, \
!prefetch_past_page, \ !prefetch_past_page, \
prefetch_serial_squash, \ prefetch_serial_squash, \
prefetch_cache_check_push, \ prefetch_cache_check_push, \
prefetch_data_accesses_only, \ prefetch_data_accesses_only, \
prefetch_latency, \ prefetch_latency, \
prefetch_degree) prefetch_degree)
#else #else
#define BUILD_NULL_PREFETCHER(t) BUILD_CACHE_PANIC("NULL Prefetcher (uses Tagged)") #define BUILD_NULL_PREFETCHER(t) BUILD_CACHE_PANIC("NULL Prefetcher (uses Tagged)")
#endif #endif
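The rewritten BUILD_*_PREFETCHER macros now construct untemplated prefetchers selected by the pf_policy string, with the tagged prefetcher doubling as the fallback for the null policy. A hedged sketch of the same dispatch written as an ordinary factory function (class names are stand-ins, not the real constructors or their argument lists):

#include <iostream>
#include <memory>
#include <string>

struct PrefetcherBase {
    virtual ~PrefetcherBase() = default;
    virtual const char *name() const = 0;
};
struct Tagged : PrefetcherBase { const char *name() const override { return "tagged"; } };
struct Stride : PrefetcherBase { const char *name() const override { return "stride"; } };
struct Ghb    : PrefetcherBase { const char *name() const override { return "ghb"; } };

// Mirror of the macro chain: tagged / stride / ghb, with tagged doubling as
// the fallback used when no explicit policy matches.
std::unique_ptr<PrefetcherBase> buildPrefetcher(const std::string &policy)
{
    if (policy == "tagged") return std::make_unique<Tagged>();
    if (policy == "stride") return std::make_unique<Stride>();
    if (policy == "ghb")    return std::make_unique<Ghb>();
    return std::make_unique<Tagged>();   // "none" falls back to tagged
}

int main()
{
    std::cout << buildPrefetcher("ghb")->name() << '\n';
    return 0;
}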

View file

@ -49,7 +49,7 @@
#include "mem/cache/cache.hh" #include "mem/cache/cache.hh"
#include "mem/cache/cache_blk.hh" #include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/mshr.hh" #include "mem/cache/miss/mshr.hh"
#include "mem/cache/prefetch/prefetcher.hh" #include "mem/cache/prefetch/base_prefetcher.hh"
#include "sim/sim_exit.hh" // for SimExitEvent #include "sim/sim_exit.hh" // for SimExitEvent
@ -72,16 +72,22 @@ Cache(const std::string &_name,
prefetchAccess(params.prefetchAccess), prefetchAccess(params.prefetchAccess),
tags(params.tags), missQueue(params.missQueue), tags(params.tags), missQueue(params.missQueue),
coherence(params.coherence), prefetcher(params.prefetcher), coherence(params.coherence), prefetcher(params.prefetcher),
hitLatency(params.hitLatency) hitLatency(params.hitLatency),
compressionAlg(params.compressionAlg),
blkSize(params.blkSize),
doFastWrites(params.doFastWrites),
prefetchMiss(params.prefetchMiss),
storeCompressed(params.storeCompressed),
compressOnWriteback(params.compressOnWriteback),
compLatency(params.compLatency),
adaptiveCompression(params.adaptiveCompression),
writebackCompressed(params.writebackCompressed)
{ {
tags->setCache(this); tags->setCache(this);
tags->setPrefetcher(prefetcher);
missQueue->setCache(this); missQueue->setCache(this);
missQueue->setPrefetcher(prefetcher); missQueue->setPrefetcher(prefetcher);
coherence->setCache(this); coherence->setCache(this);
prefetcher->setCache(this); prefetcher->setCache(this);
prefetcher->setTags(tags);
prefetcher->setBuffer(missQueue);
invalidateReq = new Request((Addr) NULL, blkSize, 0); invalidateReq = new Request((Addr) NULL, blkSize, 0);
invalidatePkt = new Packet(invalidateReq, Packet::InvalidateReq, 0); invalidatePkt = new Packet(invalidateReq, Packet::InvalidateReq, 0);
} }
@ -97,6 +103,433 @@ Cache<TagStore,Coherence>::regStats()
prefetcher->regStats(name()); prefetcher->regStats(name());
} }
template<class TagStore, class Coherence>
typename Cache<TagStore,Coherence>::BlkType*
Cache<TagStore,Coherence>::handleAccess(PacketPtr &pkt, int & lat,
PacketList & writebacks, bool update)
{
// Set the block offset here
int offset = tags->extractBlkOffset(pkt->getAddr());
BlkType *blk = NULL;
if (update) {
blk = tags->findBlock(pkt->getAddr(), lat);
} else {
blk = tags->findBlock(pkt->getAddr());
lat = 0;
}
if (blk != NULL) {
if (!update) {
if (pkt->isWrite()){
assert(offset < blkSize);
assert(pkt->getSize() <= blkSize);
assert(offset+pkt->getSize() <= blkSize);
memcpy(blk->data + offset, pkt->getPtr<uint8_t>(),
pkt->getSize());
} else if (!(pkt->flags & SATISFIED)) {
pkt->flags |= SATISFIED;
pkt->result = Packet::Success;
assert(offset < blkSize);
assert(pkt->getSize() <= blkSize);
assert(offset + pkt->getSize() <=blkSize);
memcpy(pkt->getPtr<uint8_t>(), blk->data + offset,
pkt->getSize());
}
return blk;
}
// Hit
if (blk->isPrefetch()) {
//Signal that this was a hit under prefetch (no need for the
//use-prefetch check; we can only get here if it was true)
DPRINTF(HWPrefetch, "Hit a block that was prefetched\n");
blk->status &= ~BlkHWPrefetched;
if (prefetchMiss) {
//If we are using the miss stream, signal the
//prefetcher otherwise the access stream would have
//already signaled this hit
prefetcher->handleMiss(pkt, curTick);
}
}
if ((pkt->isWrite() && blk->isWritable()) ||
(pkt->isRead() && blk->isValid())) {
// We are satisfying the request
pkt->flags |= SATISFIED;
if (blk->isCompressed()) {
// If the data is compressed, need to increase the latency
lat += (compLatency/4);
}
bool write_data = false;
assert(verifyData(blk));
assert(offset < blkSize);
assert(pkt->getSize() <= blkSize);
assert(offset+pkt->getSize() <= blkSize);
if (pkt->isWrite()) {
if (blk->checkWrite(pkt->req)) {
write_data = true;
blk->status |= BlkDirty;
memcpy(blk->data + offset, pkt->getPtr<uint8_t>(),
pkt->getSize());
}
} else {
assert(pkt->isRead());
if (pkt->req->isLocked()) {
blk->trackLoadLocked(pkt->req);
}
memcpy(pkt->getPtr<uint8_t>(), blk->data + offset,
pkt->getSize());
}
if (write_data ||
(adaptiveCompression && blk->isCompressed()))
{
// If we wrote data, need to update the internal block
// data.
updateData(blk, writebacks,
!(adaptiveCompression &&
blk->isReferenced()));
}
} else {
// permission violation, treat it as a miss
blk = NULL;
}
} else {
// complete miss (no matching block)
if (pkt->req->isLocked() && pkt->isWrite()) {
// miss on store conditional... just give up now
pkt->req->setScResult(0);
pkt->flags |= SATISFIED;
}
}
return blk;
}
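On a hit, the code above rewrites the stored block image when data was written or, under adaptive compression, whenever the block is currently held compressed; the flag passed to updateData then decides whether the rewritten copy may stay compressed (a re-referenced block is kept uncompressed). A compact restatement of that decision, with illustrative names:

#include <cstdio>

// Decide whether hit processing should rewrite the block's stored image,
// and if so whether the rewrite should try to keep it compressed.
struct HitUpdateDecision {
    bool rewrite;
    bool compress;
};

static HitUpdateDecision onHit(bool wroteData, bool adaptiveCompression,
                               bool blkCompressed, bool blkReferenced)
{
    HitUpdateDecision d;
    d.rewrite = wroteData || (adaptiveCompression && blkCompressed);
    // Under adaptive compression, a block that has been referenced again is
    // kept uncompressed to make the next hit cheap; otherwise compress.
    d.compress = !(adaptiveCompression && blkReferenced);
    return d;
}

int main()
{
    HitUpdateDecision d = onHit(false, true, true, true);
    std::printf("rewrite=%d compress=%d\n", d.rewrite, d.compress);  // 1 0
    return 0;
}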
template<class TagStore, class Coherence>
typename Cache<TagStore,Coherence>::BlkType*
Cache<TagStore,Coherence>::handleFill(BlkType *blk, PacketPtr &pkt,
CacheBlk::State new_state,
PacketList & writebacks,
PacketPtr target)
{
#ifndef NDEBUG
BlkType *tmp_blk = tags->findBlock(pkt->getAddr());
assert(tmp_blk == blk);
#endif
blk = doReplacement(blk, pkt, new_state, writebacks);
if (pkt->isRead()) {
memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
}
blk->whenReady = pkt->finishTime;
// Respond to target, if any
if (target) {
target->flags |= SATISFIED;
if (target->cmd == Packet::InvalidateReq) {
tags->invalidateBlk(blk);
blk = NULL;
}
if (blk && (target->isWrite() ? blk->isWritable() : blk->isValid())) {
assert(target->isWrite() || target->isRead());
assert(target->getOffset(blkSize) + target->getSize() <= blkSize);
if (target->isWrite()) {
if (blk->checkWrite(pkt->req)) {
blk->status |= BlkDirty;
memcpy(blk->data + target->getOffset(blkSize),
target->getPtr<uint8_t>(), target->getSize());
}
} else {
if (pkt->req->isLocked()) {
blk->trackLoadLocked(pkt->req);
}
memcpy(target->getPtr<uint8_t>(),
blk->data + target->getOffset(blkSize),
target->getSize());
}
}
}
if (blk) {
// Need to write the data into the block
updateData(blk, writebacks, !adaptiveCompression || true);
}
return blk;
}
template<class TagStore, class Coherence>
typename Cache<TagStore,Coherence>::BlkType*
Cache<TagStore,Coherence>::handleFill(BlkType *blk, MSHR * mshr,
CacheBlk::State new_state,
PacketList & writebacks, PacketPtr pkt)
{
/*
#ifndef NDEBUG
BlkType *tmp_blk = findBlock(mshr->pkt->getAddr());
assert(tmp_blk == blk);
#endif
PacketPtr pkt = mshr->pkt;*/
blk = doReplacement(blk, pkt, new_state, writebacks);
if (pkt->isRead()) {
memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
}
blk->whenReady = pkt->finishTime;
// respond to MSHR targets, if any
// First offset for critical word first calculations
int initial_offset = 0;
if (mshr->hasTargets()) {
initial_offset = mshr->getTarget()->getOffset(blkSize);
}
while (mshr->hasTargets()) {
PacketPtr target = mshr->getTarget();
target->flags |= SATISFIED;
// How many bytes past the first request this one is
int transfer_offset = target->getOffset(blkSize) - initial_offset;
if (transfer_offset < 0) {
transfer_offset += blkSize;
}
// If critical word (no offset) return first word time
Tick completion_time = tags->getHitLatency() +
transfer_offset ? pkt->finishTime : pkt->firstWordTime;
if (target->cmd == Packet::InvalidateReq) {
//Mark the blk as invalid now, if it hasn't been already
if (blk) {
tags->invalidateBlk(blk);
blk = NULL;
}
//Also get rid of the invalidate
mshr->popTarget();
DPRINTF(Cache, "Popping off a Invalidate for addr %x\n",
pkt->getAddr());
continue;
}
if (blk && (target->isWrite() ? blk->isWritable() : blk->isValid())) {
assert(target->isWrite() || target->isRead());
assert(target->getOffset(blkSize) + target->getSize() <= blkSize);
if (target->isWrite()) {
if (blk->checkWrite(pkt->req)) {
blk->status |= BlkDirty;
memcpy(blk->data + target->getOffset(blkSize),
target->getPtr<uint8_t>(), target->getSize());
}
} else {
if (pkt->req->isLocked()) {
blk->trackLoadLocked(pkt->req);
}
memcpy(target->getPtr<uint8_t>(),
blk->data + target->getOffset(blkSize),
target->getSize());
}
} else {
// Invalid access, need to do another request
// can occur if block is invalidated, or not correct
// permissions
// mshr->pkt = pkt;
break;
}
respondToMiss(target, completion_time);
mshr->popTarget();
}
if (blk) {
// Need to write the data into the block
updateData(blk, writebacks, !adaptiveCompression || true);
}
return blk;
}
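One subtlety in the fill path above: because '+' binds tighter than '?:', the completion-time expression as written folds the hit latency into the ternary's condition. A hedged sketch of the presumably intended critical-word-first calculation, with explicit parentheses and stand-in names:

#include <cstdio>
#include <cstdint>

using Tick = std::uint64_t;

// Critical-word-first timing: a target hitting the first-returned word can be
// answered at firstWordTime; anything at a later offset waits for finishTime.
// The parentheses make the intent explicit; without them, '+' binds tighter
// than '?:' and the hit latency just becomes part of the condition.
static Tick completionTime(Tick hitLatency, int transfer_offset,
                           Tick finishTime, Tick firstWordTime)
{
    return hitLatency + (transfer_offset ? finishTime : firstWordTime);
}

int main()
{
    std::printf("%llu %llu\n",
                (unsigned long long)completionTime(3, 0, 100, 60),   // 63
                (unsigned long long)completionTime(3, 16, 100, 60)); // 103
    return 0;
}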
template<class TagStore, class Coherence>
void
Cache<TagStore,Coherence>::handleSnoop(BlkType *blk,
CacheBlk::State new_state,
PacketPtr &pkt)
{
//Must have the block to supply
assert(blk);
// Can only supply data, and only if it hasn't already been supplied
assert(pkt->isRead());
assert(!(pkt->flags & SATISFIED));
pkt->flags |= SATISFIED;
Addr offset = pkt->getOffset(blkSize);
assert(offset < blkSize);
assert(pkt->getSize() <= blkSize);
assert(offset + pkt->getSize() <=blkSize);
memcpy(pkt->getPtr<uint8_t>(), blk->data + offset, pkt->getSize());
handleSnoop(blk, new_state);
}
template<class TagStore, class Coherence>
void
Cache<TagStore,Coherence>::handleSnoop(BlkType *blk,
CacheBlk::State new_state)
{
if (blk && blk->status != new_state) {
if ((new_state && BlkValid) == 0) {
tags->invalidateBlk(blk);
} else {
assert(new_state >= 0 && new_state < 128);
blk->status = new_state;
}
}
}
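The invalidate test in handleSnoop above uses '&&', which reduces new_state to a boolean before the comparison; the conventional check for "the new state drops the valid bit" uses bitwise '&'. A small sketch of that mask test (the bit values below are illustrative; the real encodings live in the simulator's block-state header):

#include <cstdio>

// Illustrative state bits, chosen for this example only.
enum : unsigned { BlkValid = 0x01, BlkWritable = 0x02, BlkDirty = 0x04 };

// True when the proposed new state no longer carries the valid bit,
// i.e. the snoop should invalidate the block.
static bool shouldInvalidate(unsigned new_state)
{
    return (new_state & BlkValid) == 0;
}

int main()
{
    std::printf("%d %d\n", shouldInvalidate(0),                     // 1
                           shouldInvalidate(BlkValid | BlkDirty));  // 0
    return 0;
}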
template<class TagStore, class Coherence>
PacketPtr
Cache<TagStore,Coherence>::writebackBlk(BlkType *blk)
{
assert(blk && blk->isValid() && blk->isModified());
int data_size = blkSize;
data_size = blk->size;
if (compressOnWriteback) {
// not already compressed
// need to compress to ship it
assert(data_size == blkSize);
uint8_t *tmp_data = new uint8_t[blkSize];
data_size = compressionAlg->compress(tmp_data,blk->data,
data_size);
delete [] tmp_data;
}
/* PacketPtr writeback =
buildWritebackReq(tags->regenerateBlkAddr(blk->tag, blk->set),
blk->asid, blkSize,
blk->data, data_size);
*/
Request *writebackReq =
new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);
PacketPtr writeback = new Packet(writebackReq, Packet::Writeback, -1);
writeback->allocate();
memcpy(writeback->getPtr<uint8_t>(),blk->data,blkSize);
blk->status &= ~BlkDirty;
return writeback;
}
template<class TagStore, class Coherence>
bool
Cache<TagStore,Coherence>::verifyData(BlkType *blk)
{
bool retval;
// The data stored in the blk
uint8_t *blk_data = new uint8_t[blkSize];
tags->readData(blk, blk_data);
// Pointer for uncompressed data, assumed uncompressed
uint8_t *tmp_data = blk_data;
// The size of the data being stored, assumed uncompressed
int data_size = blkSize;
// If the block is compressed need to uncompress to access
if (blk->isCompressed()){
// Allocate new storage for the data
tmp_data = new uint8_t[blkSize];
data_size = compressionAlg->uncompress(tmp_data,blk_data, blk->size);
assert(data_size == blkSize);
// Don't need to keep blk_data around
delete [] blk_data;
} else {
assert(blkSize == blk->size);
}
retval = memcmp(tmp_data, blk->data, blkSize) == 0;
delete [] tmp_data;
return retval;
}
template<class TagStore, class Coherence>
void
Cache<TagStore,Coherence>::updateData(BlkType *blk, PacketList &writebacks,
bool compress_block)
{
if (storeCompressed && compress_block) {
uint8_t *comp_data = new uint8_t[blkSize];
int new_size = compressionAlg->compress(comp_data, blk->data, blkSize);
if (new_size > (blkSize - tags->getSubBlockSize())){
// no benefit to storing it compressed
blk->status &= ~BlkCompressed;
tags->writeData(blk, blk->data, blkSize,
writebacks);
} else {
// Store the data compressed
blk->status |= BlkCompressed;
tags->writeData(blk, comp_data, new_size,
writebacks);
}
delete [] comp_data;
} else {
blk->status &= ~BlkCompressed;
tags->writeData(blk, blk->data, blkSize, writebacks);
}
}
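updateData above keeps the compressed image only when it saves at least one sub-block; otherwise the block is stored, and flagged, uncompressed. The size test in isolation, as a hedged sketch:

#include <cstdio>

// Keep the block compressed only if the compressed size leaves at least one
// sub-block of savings; otherwise fall back to the uncompressed layout.
static bool storeCompressedForm(int compressedSize, int blkSize, int subBlockSize)
{
    return compressedSize <= blkSize - subBlockSize;
}

int main()
{
    // 64-byte block, 8-byte sub-blocks: 56 bytes still qualifies, 60 does not.
    std::printf("%d %d\n", storeCompressedForm(56, 64, 8),
                           storeCompressedForm(60, 64, 8));  // 1 0
    return 0;
}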
template<class TagStore, class Coherence>
typename Cache<TagStore,Coherence>::BlkType*
Cache<TagStore,Coherence>::doReplacement(BlkType *blk, PacketPtr &pkt,
CacheBlk::State new_state,
PacketList &writebacks)
{
if (blk == NULL) {
// need to do a replacement
BlkList compress_list;
blk = tags->findReplacement(pkt, writebacks, compress_list);
while (adaptiveCompression && !compress_list.empty()) {
updateData(compress_list.front(), writebacks, true);
compress_list.pop_front();
}
if (blk->isValid()) {
DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
tags->regenerateBlkAddr(blk->tag,blk->set), pkt->getAddr(),
(blk->isModified()) ? "writeback" : "clean");
if (blk->isModified()) {
// Need to write the data back
writebacks.push_back(writebackBlk(blk));
}
}
blk->tag = tags->extractTag(pkt->getAddr(), blk);
} else {
// must be a status change
// assert(blk->status != new_state);
if (blk->status == new_state) warn("Changing state to same value\n");
}
blk->status = new_state;
return blk;
}
template<class TagStore, class Coherence> template<class TagStore, class Coherence>
bool bool
Cache<TagStore,Coherence>::access(PacketPtr &pkt) Cache<TagStore,Coherence>::access(PacketPtr &pkt)
@ -112,7 +545,7 @@ Cache<TagStore,Coherence>::access(PacketPtr &pkt)
prefetcher->handleMiss(pkt, curTick); prefetcher->handleMiss(pkt, curTick);
} }
if (!pkt->req->isUncacheable()) { if (!pkt->req->isUncacheable()) {
blk = tags->handleAccess(pkt, lat, writebacks); blk = handleAccess(pkt, lat, writebacks);
} else { } else {
size = pkt->getSize(); size = pkt->getSize();
} }
@ -130,7 +563,7 @@ Cache<TagStore,Coherence>::access(PacketPtr &pkt)
warn("WriteInv doing a fastallocate" warn("WriteInv doing a fastallocate"
"with an outstanding miss to the same address\n"); "with an outstanding miss to the same address\n");
} }
blk = tags->handleFill(NULL, pkt, BlkValid | BlkWritable, blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
writebacks); writebacks);
++fastWrites; ++fastWrites;
} }
@ -195,7 +628,7 @@ Cache<TagStore,Coherence>::getPacket()
if (!pkt->req->isUncacheable()) { if (!pkt->req->isUncacheable()) {
if (pkt->cmd == Packet::HardPFReq) if (pkt->cmd == Packet::HardPFReq)
misses[Packet::HardPFReq][0/*pkt->req->getThreadNum()*/]++; misses[Packet::HardPFReq][0/*pkt->req->getThreadNum()*/]++;
BlkType *blk = tags->findBlock(pkt); BlkType *blk = tags->findBlock(pkt->getAddr());
Packet::Command cmd = coherence->getBusCmd(pkt->cmd, Packet::Command cmd = coherence->getBusCmd(pkt->cmd,
(blk)? blk->status : 0); (blk)? blk->status : 0);
missQueue->setBusCmd(pkt, cmd); missQueue->setBusCmd(pkt, cmd);
@ -224,7 +657,7 @@ Cache<TagStore,Coherence>::sendResult(PacketPtr &pkt, MSHR* mshr,
if (upgrade) { if (upgrade) {
assert(pkt); //Upgrades need to be fixed assert(pkt); //Upgrades need to be fixed
pkt->flags &= ~CACHE_LINE_FILL; pkt->flags &= ~CACHE_LINE_FILL;
BlkType *blk = tags->findBlock(pkt); BlkType *blk = tags->findBlock(pkt->getAddr());
CacheBlk::State old_state = (blk) ? blk->status : 0; CacheBlk::State old_state = (blk) ? blk->status : 0;
CacheBlk::State new_state = coherence->getNewState(pkt,old_state); CacheBlk::State new_state = coherence->getNewState(pkt,old_state);
if (old_state != new_state) if (old_state != new_state)
@ -233,7 +666,7 @@ Cache<TagStore,Coherence>::sendResult(PacketPtr &pkt, MSHR* mshr,
//Set the state on the upgrade //Set the state on the upgrade
memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize); memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize);
PacketList writebacks; PacketList writebacks;
tags->handleFill(blk, mshr, new_state, writebacks, pkt); handleFill(blk, mshr, new_state, writebacks, pkt);
assert(writebacks.empty()); assert(writebacks.empty());
missQueue->handleResponse(pkt, curTick + hitLatency); missQueue->handleResponse(pkt, curTick + hitLatency);
} }
@ -275,7 +708,7 @@ Cache<TagStore,Coherence>::handleResponse(PacketPtr &pkt)
if (pkt->isCacheFill() && !pkt->isNoAllocate()) { if (pkt->isCacheFill() && !pkt->isNoAllocate()) {
DPRINTF(Cache, "Block for addr %x being updated in Cache\n", DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
pkt->getAddr()); pkt->getAddr());
blk = tags->findBlock(pkt); blk = tags->findBlock(pkt->getAddr());
CacheBlk::State old_state = (blk) ? blk->status : 0; CacheBlk::State old_state = (blk) ? blk->status : 0;
PacketList writebacks; PacketList writebacks;
CacheBlk::State new_state = coherence->getNewState(pkt,old_state); CacheBlk::State new_state = coherence->getNewState(pkt,old_state);
@ -284,7 +717,7 @@ Cache<TagStore,Coherence>::handleResponse(PacketPtr &pkt)
"state %i to %i\n", "state %i to %i\n",
pkt->getAddr(), pkt->getAddr(),
old_state, new_state); old_state, new_state);
blk = tags->handleFill(blk, (MSHR*)pkt->senderState, blk = handleFill(blk, (MSHR*)pkt->senderState,
new_state, writebacks, pkt); new_state, writebacks, pkt);
while (!writebacks.empty()) { while (!writebacks.empty()) {
missQueue->doWriteback(writebacks.front()); missQueue->doWriteback(writebacks.front());
@ -332,7 +765,7 @@ Cache<TagStore,Coherence>::snoop(PacketPtr &pkt)
} }
Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1)); Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
BlkType *blk = tags->findBlock(pkt); BlkType *blk = tags->findBlock(pkt->getAddr());
MSHR *mshr = missQueue->findMSHR(blk_addr); MSHR *mshr = missQueue->findMSHR(blk_addr);
if (coherence->hasProtocol() || pkt->isInvalidate()) { if (coherence->hasProtocol() || pkt->isInvalidate()) {
//@todo Move this into handle bus req //@todo Move this into handle bus req
@ -435,7 +868,7 @@ Cache<TagStore,Coherence>::snoop(PacketPtr &pkt)
"now supplying data, new state is %i\n", "now supplying data, new state is %i\n",
pkt->cmdString(), blk_addr, new_state); pkt->cmdString(), blk_addr, new_state);
tags->handleSnoop(blk, new_state, pkt); handleSnoop(blk, new_state, pkt);
respondToSnoop(pkt, curTick + hitLatency); respondToSnoop(pkt, curTick + hitLatency);
return; return;
} }
@ -443,7 +876,7 @@ Cache<TagStore,Coherence>::snoop(PacketPtr &pkt)
DPRINTF(Cache, "Cache snooped a %s request for addr %x, " DPRINTF(Cache, "Cache snooped a %s request for addr %x, "
"new state is %i\n", pkt->cmdString(), blk_addr, new_state); "new state is %i\n", pkt->cmdString(), blk_addr, new_state);
tags->handleSnoop(blk, new_state); handleSnoop(blk, new_state);
} }
template<class TagStore, class Coherence> template<class TagStore, class Coherence>
@ -465,13 +898,6 @@ Cache<TagStore,Coherence>::snoopResponse(PacketPtr &pkt)
} }
} }
template<class TagStore, class Coherence>
void
Cache<TagStore,Coherence>::invalidateBlk(Addr addr)
{
tags->invalidateBlk(addr);
}
/** /**
* @todo Fix to not assume write allocate * @todo Fix to not assume write allocate
@ -501,7 +927,7 @@ Cache<TagStore,Coherence>::probe(PacketPtr &pkt, bool update,
PacketList writebacks; PacketList writebacks;
int lat; int lat;
BlkType *blk = tags->handleAccess(pkt, lat, writebacks, update); BlkType *blk = handleAccess(pkt, lat, writebacks, update);
DPRINTF(Cache, "%s %x %s\n", pkt->cmdString(), DPRINTF(Cache, "%s %x %s\n", pkt->cmdString(),
pkt->getAddr(), (blk) ? "hit" : "miss"); pkt->getAddr(), (blk) ? "hit" : "miss");
@ -557,7 +983,7 @@ Cache<TagStore,Coherence>::probe(PacketPtr &pkt, bool update,
if (!pkt->req->isUncacheable() /*Uncacheables just go through*/ if (!pkt->req->isUncacheable() /*Uncacheables just go through*/
&& (pkt->cmd != Packet::Writeback)/*Writebacks on miss fall through*/) { && (pkt->cmd != Packet::Writeback)/*Writebacks on miss fall through*/) {
// Fetch the cache block to fill // Fetch the cache block to fill
BlkType *blk = tags->findBlock(pkt); BlkType *blk = tags->findBlock(pkt->getAddr());
Packet::Command temp_cmd = coherence->getBusCmd(pkt->cmd, Packet::Command temp_cmd = coherence->getBusCmd(pkt->cmd,
(blk)? blk->status : 0); (blk)? blk->status : 0);
@ -593,7 +1019,7 @@ return 0;
DPRINTF(Cache, "Block for blk addr %x moving from state " DPRINTF(Cache, "Block for blk addr %x moving from state "
"%i to %i\n", busPkt->getAddr(), old_state, new_state); "%i to %i\n", busPkt->getAddr(), old_state, new_state);
tags->handleFill(blk, busPkt, new_state, writebacks, pkt); handleFill(blk, busPkt, new_state, writebacks, pkt);
//Free the packet //Free the packet
delete busPkt; delete busPkt;
@ -639,7 +1065,7 @@ Cache<TagStore,Coherence>::snoopProbe(PacketPtr &pkt)
} }
Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1)); Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
BlkType *blk = tags->findBlock(pkt); BlkType *blk = tags->findBlock(pkt->getAddr());
MSHR *mshr = missQueue->findMSHR(blk_addr); MSHR *mshr = missQueue->findMSHR(blk_addr);
CacheBlk::State new_state = 0; CacheBlk::State new_state = 0;
bool satisfy = coherence->handleBusRequest(pkt,blk,mshr, new_state); bool satisfy = coherence->handleBusRequest(pkt,blk,mshr, new_state);
@ -648,14 +1074,14 @@ Cache<TagStore,Coherence>::snoopProbe(PacketPtr &pkt)
"now supplying data, new state is %i\n", "now supplying data, new state is %i\n",
pkt->cmdString(), blk_addr, new_state); pkt->cmdString(), blk_addr, new_state);
tags->handleSnoop(blk, new_state, pkt); handleSnoop(blk, new_state, pkt);
return hitLatency; return hitLatency;
} }
if (blk) if (blk)
DPRINTF(Cache, "Cache snooped a %s request for addr %x, " DPRINTF(Cache, "Cache snooped a %s request for addr %x, "
"new state is %i\n", "new state is %i\n",
pkt->cmdString(), blk_addr, new_state); pkt->cmdString(), blk_addr, new_state);
tags->handleSnoop(blk, new_state); handleSnoop(blk, new_state);
return 0; return 0;
} }

View file

@ -102,6 +102,26 @@ BasePrefetcher::regStats(const std::string &name)
; ;
} }
inline bool
BasePrefetcher::inCache(Addr addr)
{
if (cache->inCache(addr)) {
pfCacheHit++;
return true;
}
return false;
}
inline bool
BasePrefetcher::inMissQueue(Addr addr)
{
if (cache->inMissQueue(addr)) {
pfMSHRHit++;
return true;
}
return false;
}
PacketPtr PacketPtr
BasePrefetcher::getPacket() BasePrefetcher::getPacket()
{ {
@ -118,7 +138,7 @@ BasePrefetcher::getPacket()
pkt = *pf.begin(); pkt = *pf.begin();
pf.pop_front(); pf.pop_front();
if (!cacheCheckPush) { if (!cacheCheckPush) {
keepTrying = inCache(pkt); keepTrying = cache->inCache(pkt->getAddr());
} }
if (pf.empty()) { if (pf.empty()) {
cache->clearMasterRequest(Request_PF); cache->clearMasterRequest(Request_PF);
@ -190,7 +210,7 @@ BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time)
//Check if it is already in the cache //Check if it is already in the cache
if (cacheCheckPush) { if (cacheCheckPush) {
if (inCache(prefetch)) { if (cache->inCache(prefetch->getAddr())) {
addr++; addr++;
delay++; delay++;
continue; continue;
@ -198,7 +218,7 @@ BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time)
} }
//Check if it is already in the miss_queue //Check if it is already in the miss_queue
if (inMissQueue(prefetch->getAddr())) { if (cache->inMissQueue(prefetch->getAddr())) {
addr++; addr++;
delay++; delay++;
continue; continue;

View file

@ -36,10 +36,13 @@
#ifndef __MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__ #ifndef __MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__
#define __MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__ #define __MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__
#include "mem/packet.hh"
#include <list> #include <list>
#include "base/statistics.hh"
#include "mem/packet.hh"
class BaseCache; class BaseCache;
class BasePrefetcher class BasePrefetcher
{ {
protected: protected:
@ -95,6 +98,10 @@ class BasePrefetcher
void handleMiss(PacketPtr &pkt, Tick time); void handleMiss(PacketPtr &pkt, Tick time);
bool inCache(Addr addr);
bool inMissQueue(Addr addr);
PacketPtr getPacket(); PacketPtr getPacket();
bool havePending() bool havePending()
@ -106,10 +113,6 @@ class BasePrefetcher
std::list<Addr> &addresses, std::list<Addr> &addresses,
std::list<Tick> &delays) = 0; std::list<Tick> &delays) = 0;
virtual bool inCache(PacketPtr &pkt) = 0;
virtual bool inMissQueue(Addr address) = 0;
std::list<PacketPtr>::iterator inPrefetch(Addr address); std::list<PacketPtr>::iterator inPrefetch(Addr address);
}; };

View file

@ -31,18 +31,44 @@
/** /**
* @file * @file
* GHB Prefetcher template instantiations. * GHB Prefetcher implementation.
*/ */
#include "mem/cache/tags/cache_tags.hh"
#include "mem/cache/tags/lru.hh"
#include "mem/cache/prefetch/ghb_prefetcher.hh" #include "mem/cache/prefetch/ghb_prefetcher.hh"
#include "arch/isa_traits.hh"
// Template Instantiations void
#ifndef DOXYGEN_SHOULD_SKIP_THIS GHBPrefetcher::calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
std::list<Tick> &delays)
{
Addr blkAddr = pkt->getAddr() & ~(Addr)(this->blkSize-1);
int cpuID = pkt->req->getCpuNum();
if (!useCPUId) cpuID = 0;
template class GHBPrefetcher<CacheTags<LRU> >;
#endif //DOXYGEN_SHOULD_SKIP_THIS int new_stride = blkAddr - last_miss_addr[cpuID];
int old_stride = last_miss_addr[cpuID] -
second_last_miss_addr[cpuID];
second_last_miss_addr[cpuID] = last_miss_addr[cpuID];
last_miss_addr[cpuID] = blkAddr;
if (new_stride == old_stride) {
for (int d=1; d <= degree; d++) {
Addr newAddr = blkAddr + d * new_stride;
if (this->pageStop &&
(blkAddr & ~(TheISA::VMPageSize - 1)) !=
(newAddr & ~(TheISA::VMPageSize - 1)))
{
//Spanned the page, so now stop
this->pfSpanPage += degree - d + 1;
return;
}
else
{
addresses.push_back(newAddr);
delays.push_back(latency);
}
}
}
}
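The new calculatePrefetch body issues up to 'degree' strided prefetches once two consecutive miss strides agree, and stops at a page boundary. A self-contained sketch of the same stride detection with a short worked run (the page size, degree, and extra zero-stride guard are choices made for this example, not taken from the file):

#include <cstdio>
#include <cstdint>
#include <vector>

using Addr = std::uint64_t;

// Last/second-last miss addresses, as the prefetcher keeps per CPU; one CPU here.
struct StrideState { Addr last = 0, secondLast = 0; };

// Emit up to 'degree' addresses when the new stride repeats the previous one,
// never crossing the (illustrative) 8 KB page containing the miss.
static std::vector<Addr> ghbLike(StrideState &s, Addr blkAddr,
                                 int degree, Addr pageSize = 8 * 1024)
{
    std::vector<Addr> out;
    std::int64_t newStride = blkAddr - s.last;
    std::int64_t oldStride = s.last - s.secondLast;
    s.secondLast = s.last;
    s.last = blkAddr;

    // The != 0 guard is an addition in this sketch to skip degenerate repeats.
    if (newStride == oldStride && newStride != 0) {
        for (int d = 1; d <= degree; d++) {
            Addr next = blkAddr + d * newStride;
            if ((blkAddr & ~(pageSize - 1)) != (next & ~(pageSize - 1)))
                break;                       // spanned the page, stop
            out.push_back(next);
        }
    }
    return out;
}

int main()
{
    StrideState s;
    ghbLike(s, 0x10000, 2);                  // first miss, no history yet
    ghbLike(s, 0x10040, 2);                  // establishes a 0x40 stride
    for (Addr a : ghbLike(s, 0x10080, 2))    // stride repeats: 0x100c0, 0x10100
        std::printf("0x%llx\n", (unsigned long long)a);
    return 0;
}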

View file

@ -30,31 +30,18 @@
/** /**
* @file * @file
* Describes a ghb prefetcher based on template policies. * Describes a ghb prefetcher.
*/ */
#ifndef __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__ #ifndef __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__
#define __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__ #define __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__
#include "base/misc.hh" // fatal, panic, and warn #include "mem/cache/prefetch/base_prefetcher.hh"
#include "mem/cache/prefetch/prefetcher.hh" class GHBPrefetcher : public BasePrefetcher
/**
* A template-policy based cache. The behavior of the cache can be altered by
* supplying different template policies. TagStore handles all tag and data
* storage @sa TagStore. MissBuffer handles all misses and writes/writebacks
* @sa MissQueue. Coherence handles all coherence policy details @sa
* UniCoherence, SimpleMultiCoherence.
*/
template <class TagStore>
class GHBPrefetcher : public Prefetcher<TagStore>
{ {
protected: protected:
MissBuffer* mq;
TagStore* tags;
Addr second_last_miss_addr[64/*MAX_CPUS*/]; Addr second_last_miss_addr[64/*MAX_CPUS*/];
Addr last_miss_addr[64/*MAX_CPUS*/]; Addr last_miss_addr[64/*MAX_CPUS*/];
@ -67,48 +54,16 @@ class GHBPrefetcher : public Prefetcher<TagStore>
GHBPrefetcher(int size, bool pageStop, bool serialSquash, GHBPrefetcher(int size, bool pageStop, bool serialSquash,
bool cacheCheckPush, bool onlyData, bool cacheCheckPush, bool onlyData,
Tick latency, int degree, bool useCPUId) Tick latency, int degree, bool useCPUId)
:Prefetcher<TagStore>(size, pageStop, serialSquash, : BasePrefetcher(size, pageStop, serialSquash,
cacheCheckPush, onlyData), cacheCheckPush, onlyData),
latency(latency), degree(degree), useCPUId(useCPUId) latency(latency), degree(degree), useCPUId(useCPUId)
{ {
} }
~GHBPrefetcher() {} ~GHBPrefetcher() {}
void calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses, void calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
std::list<Tick> &delays) std::list<Tick> &delays);
{
Addr blkAddr = pkt->getAddr() & ~(Addr)(this->blkSize-1);
int cpuID = pkt->req->getCpuNum();
if (!useCPUId) cpuID = 0;
int new_stride = blkAddr - last_miss_addr[cpuID];
int old_stride = last_miss_addr[cpuID] -
second_last_miss_addr[cpuID];
second_last_miss_addr[cpuID] = last_miss_addr[cpuID];
last_miss_addr[cpuID] = blkAddr;
if (new_stride == old_stride) {
for (int d=1; d <= degree; d++) {
Addr newAddr = blkAddr + d * new_stride;
if (this->pageStop &&
(blkAddr & ~(TheISA::VMPageSize - 1)) !=
(newAddr & ~(TheISA::VMPageSize - 1)))
{
//Spanned the page, so now stop
this->pfSpanPage += degree - d + 1;
return;
}
else
{
addresses.push_back(newAddr);
delays.push_back(latency);
}
}
}
}
}; };
#endif // __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__ #endif // __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__

View file

@ -34,15 +34,59 @@
* Stride Prefetcher template instantiations. * Stride Prefetcher template instantiations.
*/ */
#include "mem/cache/tags/cache_tags.hh"
#include "mem/cache/tags/lru.hh"
#include "mem/cache/prefetch/stride_prefetcher.hh" #include "mem/cache/prefetch/stride_prefetcher.hh"
// Template Instantiations void
#ifndef DOXYGEN_SHOULD_SKIP_THIS StridePrefetcher::calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
std::list<Tick> &delays)
{
// Addr blkAddr = pkt->paddr & ~(Addr)(this->blkSize-1);
int cpuID = pkt->req->getCpuNum();
if (!useCPUId) cpuID = 0;
template class StridePrefetcher<CacheTags<LRU> >; /* Scan Table for IAddr Match */
/* std::list<strideEntry*>::iterator iter;
for (iter=table[cpuID].begin();
iter !=table[cpuID].end();
iter++) {
if ((*iter)->IAddr == pkt->pc) break;
}
#endif //DOXYGEN_SHOULD_SKIP_THIS if (iter != table[cpuID].end()) {
//Hit in table
int newStride = blkAddr - (*iter)->MAddr;
if (newStride == (*iter)->stride) {
(*iter)->confidence++;
}
else {
(*iter)->stride = newStride;
(*iter)->confidence--;
}
(*iter)->MAddr = blkAddr;
for (int d=1; d <= degree; d++) {
Addr newAddr = blkAddr + d * newStride;
if (this->pageStop &&
(blkAddr & ~(TheISA::VMPageSize - 1)) !=
(newAddr & ~(TheISA::VMPageSize - 1)))
{
//Spanned the page, so now stop
this->pfSpanPage += degree - d + 1;
return;
}
else
{
addresses.push_back(newAddr);
delays.push_back(latency);
}
}
}
else {
//Miss in table
//Find lowest confidence and replace
}
*/
}
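The table-driven logic sketched in the comments above is still disabled in this commit. For orientation, a hedged standalone version of the per-PC stride table those comments describe (the map, the eviction note, and the zero-stride guard are illustrative choices, not the file's contents):

#include <cstdint>
#include <cstdio>
#include <unordered_map>
#include <vector>

using Addr = std::uint64_t;

// One entry per load PC: last miss address, current stride guess, confidence.
struct StrideEntry {
    Addr MAddr = 0;
    std::int64_t stride = 0;
    int confidence = 0;
};

// On a miss, update (or allocate) the PC's entry; when the PC is already
// tracked, suggest up to 'degree' addresses along the newly observed stride.
static std::vector<Addr> strideLookup(std::unordered_map<Addr, StrideEntry> &table,
                                      Addr pc, Addr blkAddr, int degree)
{
    std::vector<Addr> out;
    auto it = table.find(pc);
    if (it == table.end()) {
        // Miss in table: allocate an entry (a fixed-size table would instead
        // evict the lowest-confidence entry here).
        table[pc].MAddr = blkAddr;
        return out;
    }

    StrideEntry &e = it->second;
    std::int64_t newStride = blkAddr - e.MAddr;
    if (newStride == e.stride)
        e.confidence++;          // same stride again: trust it more
    else {
        e.stride = newStride;    // new pattern: retrain, lose confidence
        e.confidence--;
    }
    e.MAddr = blkAddr;

    if (newStride != 0)
        for (int d = 1; d <= degree; d++)
            out.push_back(blkAddr + d * newStride);
    return out;
}

int main()
{
    std::unordered_map<Addr, StrideEntry> table;
    strideLookup(table, 0x400, 0x1000, 2);   // table miss: allocate, no prefetch
    strideLookup(table, 0x400, 0x1040, 2);   // trains a 0x40 stride (output ignored)
    for (Addr a : strideLookup(table, 0x400, 0x1080, 2))   // 0x10c0, 0x1100
        std::printf("0x%llx\n", (unsigned long long)a);
    return 0;
}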

View file

@ -30,31 +30,18 @@
/** /**
* @file * @file
* Describes a strided prefetcher based on template policies. * Describes a strided prefetcher.
*/ */
#ifndef __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__ #ifndef __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__
#define __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__ #define __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__
#include "base/misc.hh" // fatal, panic, and warn #include "mem/cache/prefetch/base_prefetcher.hh"
#include "mem/cache/prefetch/prefetcher.hh" class StridePrefetcher : public BasePrefetcher
/**
* A template-policy based cache. The behavior of the cache can be altered by
* supplying different template policies. TagStore handles all tag and data
* storage @sa TagStore. MissBuffer handles all misses and writes/writebacks
* @sa MissQueue. Coherence handles all coherence policy details @sa
* UniCoherence, SimpleMultiCoherence.
*/
template <class TagStore>
class StridePrefetcher : public Prefetcher<TagStore>
{ {
protected: protected:
MissBuffer* mq;
TagStore* tags;
class strideEntry class strideEntry
{ {
public: public:
@ -84,66 +71,16 @@ class StridePrefetcher : public Prefetcher<TagStore>
StridePrefetcher(int size, bool pageStop, bool serialSquash, StridePrefetcher(int size, bool pageStop, bool serialSquash,
bool cacheCheckPush, bool onlyData, bool cacheCheckPush, bool onlyData,
Tick latency, int degree, bool useCPUId) Tick latency, int degree, bool useCPUId)
:Prefetcher<TagStore>(size, pageStop, serialSquash, : BasePrefetcher(size, pageStop, serialSquash,
cacheCheckPush, onlyData), cacheCheckPush, onlyData),
latency(latency), degree(degree), useCPUId(useCPUId) latency(latency), degree(degree), useCPUId(useCPUId)
{ {
} }
~StridePrefetcher() {} ~StridePrefetcher() {}
void calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses, void calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
std::list<Tick> &delays) std::list<Tick> &delays);
{
// Addr blkAddr = pkt->paddr & ~(Addr)(this->blkSize-1);
int cpuID = pkt->req->getCpuNum();
if (!useCPUId) cpuID = 0;
/* Scan Table for IAddr Match */
/* std::list<strideEntry*>::iterator iter;
for (iter=table[cpuID].begin();
iter !=table[cpuID].end();
iter++) {
if ((*iter)->IAddr == pkt->pc) break;
}
if (iter != table[cpuID].end()) {
//Hit in table
int newStride = blkAddr - (*iter)->MAddr;
if (newStride == (*iter)->stride) {
(*iter)->confidence++;
}
else {
(*iter)->stride = newStride;
(*iter)->confidence--;
}
(*iter)->MAddr = blkAddr;
for (int d=1; d <= degree; d++) {
Addr newAddr = blkAddr + d * newStride;
if (this->pageStop &&
(blkAddr & ~(TheISA::VMPageSize - 1)) !=
(newAddr & ~(TheISA::VMPageSize - 1)))
{
//Spanned the page, so now stop
this->pfSpanPage += degree - d + 1;
return;
}
else
{
addresses.push_back(newAddr);
delays.push_back(latency);
}
}
}
else {
//Miss in table
//Find lowest confidence and replace
}
*/ }
}; };
#endif // __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__ #endif // __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__

View file

@ -36,20 +36,18 @@
#include "arch/isa_traits.hh" #include "arch/isa_traits.hh"
#include "mem/cache/prefetch/tagged_prefetcher.hh" #include "mem/cache/prefetch/tagged_prefetcher.hh"
template <class TagStore> TaggedPrefetcher::
TaggedPrefetcher<TagStore>::
TaggedPrefetcher(int size, bool pageStop, bool serialSquash, TaggedPrefetcher(int size, bool pageStop, bool serialSquash,
bool cacheCheckPush, bool onlyData, bool cacheCheckPush, bool onlyData,
Tick latency, int degree) Tick latency, int degree)
:Prefetcher<TagStore>(size, pageStop, serialSquash, : BasePrefetcher(size, pageStop, serialSquash,
cacheCheckPush, onlyData), cacheCheckPush, onlyData),
latency(latency), degree(degree) latency(latency), degree(degree)
{ {
} }
template <class TagStore>
void void
TaggedPrefetcher<TagStore>:: TaggedPrefetcher::
calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses, calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
std::list<Tick> &delays) std::list<Tick> &delays)
{ {

View file

@ -30,29 +30,18 @@
/** /**
* @file * @file
* Describes a tagged prefetcher based on template policies. * Describes a tagged prefetcher.
*/ */
#ifndef __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__ #ifndef __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__
#define __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__ #define __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__
#include "mem/cache/prefetch/prefetcher.hh" #include "mem/cache/prefetch/base_prefetcher.hh"
/** class TaggedPrefetcher : public BasePrefetcher
* A template-policy based cache. The behavior of the cache can be altered by
* supplying different template policies. TagStore handles all tag and data
* storage @sa TagStore. MissBuffer handles all misses and writes/writebacks
* @sa MissQueue. Coherence handles all coherence policy details @sa
* UniCoherence, SimpleMultiCoherence.
*/
template <class TagStore>
class TaggedPrefetcher : public Prefetcher<TagStore>
{ {
protected: protected:
MissBuffer* mq;
TagStore* tags;
Tick latency; Tick latency;
int degree; int degree;

View file

@ -153,12 +153,9 @@ FALRU::probe(Addr addr) const
} }
void void
FALRU::invalidateBlk(Addr addr) FALRU::invalidateBlk(FALRU::BlkType *blk)
{ {
Addr blkAddr = blkAlign(addr);
FALRUBlk* blk = (*tagHash.find(blkAddr)).second;
if (blk) { if (blk) {
assert(blk->tag == blkAddr);
blk->status = 0; blk->status = 0;
blk->isTouched = false; blk->isTouched = false;
tagsInUse--; tagsInUse--;
@ -202,44 +199,6 @@ FALRU::findBlock(Addr addr, int &lat, int *inCache)
return blk; return blk;
} }
FALRUBlk*
FALRU::findBlock(PacketPtr &pkt, int &lat, int *inCache)
{
Addr addr = pkt->getAddr();
accesses++;
int tmp_in_cache = 0;
Addr blkAddr = blkAlign(addr);
FALRUBlk* blk = hashLookup(blkAddr);
if (blk && blk->isValid()) {
assert(blk->tag == blkAddr);
tmp_in_cache = blk->inCache;
for (int i = 0; i < numCaches; i++) {
if (1<<i & blk->inCache) {
hits[i]++;
} else {
misses[i]++;
}
}
hits[numCaches]++;
if (blk != head){
moveToHead(blk);
}
} else {
blk = NULL;
for (int i = 0; i < numCaches+1; ++i) {
misses[i]++;
}
}
if (inCache) {
*inCache = tmp_in_cache;
}
lat = hitLatency;
//assert(check());
return blk;
}
FALRUBlk* FALRUBlk*
FALRU::findBlock(Addr addr) const FALRU::findBlock(Addr addr) const

View file

@ -173,11 +173,10 @@ public:
bool probe(Addr addr) const; bool probe(Addr addr) const;
/** /**
* Invalidate the cache block that contains the given addr. * Invalidate a cache block.
* @param asid The address space ID. * @param blk The block to invalidate.
* @param addr The address to invalidate.
*/ */
void invalidateBlk(Addr addr); void invalidateBlk(BlkType *blk);
/** /**
* Find the block in the cache and update the replacement data. Returns * Find the block in the cache and update the replacement data. Returns
@ -190,16 +189,6 @@ public:
*/ */
FALRUBlk* findBlock(Addr addr, int &lat, int *inCache = 0); FALRUBlk* findBlock(Addr addr, int &lat, int *inCache = 0);
/**
* Find the block in the cache and update the replacement data. Returns
* the access latency and the in cache flags as a side effect
* @param pkt The req whose block to find
* @param lat The latency of the access.
* @param inCache The FALRUBlk::inCache flags.
* @return Pointer to the cache block.
*/
FALRUBlk* findBlock(PacketPtr &pkt, int &lat, int *inCache = 0);
/** /**
* Find the block in the cache, do not update the replacement data. * Find the block in the cache, do not update the replacement data.
* @param addr The address to look for. * @param addr The address to look for.

View file

@ -284,65 +284,6 @@ IIC::findBlock(Addr addr, int &lat)
return tag_ptr; return tag_ptr;
} }
IICTag*
IIC::findBlock(PacketPtr &pkt, int &lat)
{
Addr addr = pkt->getAddr();
Addr tag = extractTag(addr);
unsigned set = hash(addr);
int set_lat;
unsigned long chain_ptr;
if (PROFILE_IIC)
setAccess.sample(set);
IICTag *tag_ptr = sets[set].findTag(tag, chain_ptr);
set_lat = 1;
if (tag_ptr == NULL && chain_ptr != tagNull) {
int secondary_depth;
tag_ptr = secondaryChain(tag, chain_ptr, &secondary_depth);
set_lat += secondary_depth;
// set depth for statistics fix this later!!! egh
sets[set].depth = set_lat;
if (tag_ptr != NULL) {
/* need to move tag into primary table */
// need to preserve chain: fix this egh
sets[set].tags[assoc-1]->chain_ptr = tag_ptr->chain_ptr;
tagSwap(tag_ptr - tagStore, sets[set].tags[assoc-1] - tagStore);
tag_ptr = sets[set].findTag(tag, chain_ptr);
assert(tag_ptr!=NULL);
}
}
set_lat = set_lat * hashDelay + hitLatency;
if (tag_ptr != NULL) {
// IIC replacement: if this is not the first element of
// list, reorder
sets[set].moveToHead(tag_ptr);
hitHashDepth.sample(sets[set].depth);
hashHit++;
hitDepthTotal += sets[set].depth;
tag_ptr->status |= BlkReferenced;
lat = set_lat;
if (tag_ptr->whenReady > curTick && tag_ptr->whenReady - curTick > set_lat) {
lat = tag_ptr->whenReady - curTick;
}
tag_ptr->refCount += 1;
}
else {
// fall through: cache block not found, not a hit...
missHashDepth.sample(sets[set].depth);
hashMiss++;
missDepthTotal += sets[set].depth;
lat = set_lat;
}
return tag_ptr;
}
IICTag* IICTag*
IIC::findBlock(Addr addr) const IIC::findBlock(Addr addr) const
@ -695,9 +636,8 @@ IIC::compressBlock(unsigned long index)
} }
void void
IIC::invalidateBlk(Addr addr) IIC::invalidateBlk(IIC::BlkType *tag_ptr)
{ {
IICTag* tag_ptr = findBlock(addr);
if (tag_ptr) { if (tag_ptr) {
for (int i = 0; i < tag_ptr->numData; ++i) { for (int i = 0; i < tag_ptr->numData; ++i) {
dataReferenceCount[tag_ptr->data_ptr[i]]--; dataReferenceCount[tag_ptr->data_ptr[i]]--;

View file

@ -435,11 +435,10 @@ class IIC : public BaseTags
void compressBlock(unsigned long index); void compressBlock(unsigned long index);
/** /**
* Invalidate the block containing the address. * Invalidate a block.
* @param asid The address space ID. * @param blk The block to invalidate.
* @param addr The address to invalidate.
*/ */
void invalidateBlk(Addr addr); void invalidateBlk(BlkType *blk);
/** /**
* Find the block and update the replacement data. This call also returns * Find the block and update the replacement data. This call also returns
@ -451,15 +450,6 @@ class IIC : public BaseTags
*/ */
IICTag* findBlock(Addr addr, int &lat); IICTag* findBlock(Addr addr, int &lat);
/**
* Find the block and update the replacement data. This call also returns
* the access latency as a side effect.
* @param pkt The req whose block to find
* @param lat The access latency.
* @return A pointer to the block found, if any.
*/
IICTag* findBlock(PacketPtr &pkt, int &lat);
/** /**
* Find the block, do not update the replacement data. * Find the block, do not update the replacement data.
* @param addr The address to find. * @param addr The address to find.

View file

@ -183,27 +183,6 @@ LRU::findBlock(Addr addr, int &lat)
return blk; return blk;
} }
LRUBlk*
LRU::findBlock(PacketPtr &pkt, int &lat)
{
Addr addr = pkt->getAddr();
Addr tag = extractTag(addr);
unsigned set = extractSet(addr);
LRUBlk *blk = sets[set].findBlk(tag);
lat = hitLatency;
if (blk != NULL) {
// move this block to head of the MRU list
sets[set].moveToHead(blk);
if (blk->whenReady > curTick
&& blk->whenReady - curTick > hitLatency) {
lat = blk->whenReady - curTick;
}
blk->refCount += 1;
}
return blk;
}
LRUBlk* LRUBlk*
LRU::findBlock(Addr addr) const LRU::findBlock(Addr addr) const
@ -240,9 +219,8 @@ LRU::findReplacement(PacketPtr &pkt, PacketList &writebacks,
} }
void void
LRU::invalidateBlk(Addr addr) LRU::invalidateBlk(LRU::BlkType *blk)
{ {
LRUBlk *blk = findBlock(addr);
if (blk) { if (blk) {
blk->status = 0; blk->status = 0;
blk->isTouched = false; blk->isTouched = false;

View file

@ -161,20 +161,10 @@ public:
bool probe(Addr addr) const; bool probe(Addr addr) const;
/** /**
* Invalidate the block containing the given address. * Invalidate the given block.
* @param asid The address space ID. * @param blk The block to invalidate.
* @param addr The address to invalidate.
*/ */
void invalidateBlk(Addr addr); void invalidateBlk(BlkType *blk);
/**
* Finds the given address in the cache and update replacement data.
* Returns the access latency as a side effect.
* @param pkt The request whose block to find.
* @param lat The access latency.
* @return Pointer to the cache block if found.
*/
LRUBlk* findBlock(PacketPtr &pkt, int &lat);
/** /**
* Finds the given address in the cache and update replacement data. * Finds the given address in the cache and update replacement data.
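The same interface change runs through all of the tag classes in this diff (FALRU, IIC, LRU, Split, SplitLIFO, SplitLRU): invalidateBlk now takes a block pointer instead of an address, so the caller performs the tag lookup once and passes the result in. A toy sketch of that calling pattern, with hypothetical ToyTags/ToyBlk names standing in for the real tag classes:

// Illustrative sketch only -- not part of this change.
#include <cstdint>
#include <unordered_map>

typedef std::uint64_t Addr;

struct ToyBlk { bool valid = true; };

class ToyTags {
    std::unordered_map<Addr, ToyBlk> blks;
  public:
    ToyBlk *findBlock(Addr addr) {
        std::unordered_map<Addr, ToyBlk>::iterator it = blks.find(addr);
        return it == blks.end() ? 0 : &it->second;
    }
    // New-style interface: takes the block, not the address, so the
    // caller does the lookup once and can reuse the pointer.
    void invalidateBlk(ToyBlk *blk) {
        if (blk)
            blk->valid = false;
    }
    void insert(Addr addr) { blks[addr] = ToyBlk(); }
};

int main()
{
    ToyTags tags;
    tags.insert(0x1000);
    ToyBlk *blk = tags.findBlock(0x1000);  // caller-side lookup
    tags.invalidateBlk(blk);               // pass the block pointer
    return 0;
}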

View file

@ -266,58 +266,6 @@ Split::probe(Addr addr) const
return success; return success;
} }
SplitBlk*
Split::findBlock(PacketPtr &pkt, int &lat)
{
Addr aligned = blkAlign(pkt->getAddr());
if (memHash.count(aligned)) {
memHash[aligned]++;
} else if (pkt->nic_pkt()) {
memHash[aligned] = 1;
}
SplitBlk *blk = lru->findBlock(pkt->getAddr(), lat);
if (blk) {
if (pkt->nic_pkt()) {
NR_CP_hits++;
} else {
CR_CP_hits++;
}
} else {
if (lifo && lifo_net) {
blk = lifo_net->findBlock(pkt->getAddr(), lat);
} else if (lru_net) {
blk = lru_net->findBlock(pkt->getAddr(), lat);
}
if (blk) {
if (pkt->nic_pkt()) {
NR_NP_hits++;
} else {
CR_NP_hits++;
}
}
}
if (blk) {
Tick latency = curTick - blk->ts;
if (blk->isNIC) {
if (!blk->isUsed && !pkt->nic_pkt()) {
useByCPUCycleDist.sample(latency);
nicUseByCPUCycleTotal += latency;
nicBlksUsedByCPU++;
}
}
blk->isUsed = true;
if (pkt->nic_pkt()) {
DPRINTF(Split, "found block in partition %d\n", blk->part);
}
}
return blk;
}
SplitBlk* SplitBlk*
Split::findBlock(Addr addr, int &lat) Split::findBlock(Addr addr, int &lat)
@ -403,14 +351,16 @@ Split::findReplacement(PacketPtr &pkt, PacketList &writebacks,
} }
void void
Split::invalidateBlk(Addr addr) Split::invalidateBlk(Split::BlkType *blk)
{ {
SplitBlk *blk = lru->findBlock(addr);
if (!blk) { if (!blk) {
fatal("FIXME!\n");
#if 0
if (lifo && lifo_net) if (lifo && lifo_net)
blk = lifo_net->findBlock(addr); blk = lifo_net->findBlock(addr);
else if (lru_net) else if (lru_net)
blk = lru_net->findBlock(addr); blk = lru_net->findBlock(addr);
#endif
if (!blk) if (!blk)
return; return;

View file

@ -184,11 +184,10 @@ class Split : public BaseTags
bool probe(Addr addr) const; bool probe(Addr addr) const;
/** /**
* Invalidate the block containing the given address. * Invalidate the given block.
* @param asid The address space ID. * @param blk The block to invalidate.
* @param addr The address to invalidate.
*/ */
void invalidateBlk(Addr addr); void invalidateBlk(BlkType *blk);
/** /**
* Finds the given address in the cache and update replacement data. * Finds the given address in the cache and update replacement data.
@ -200,15 +199,6 @@ class Split : public BaseTags
*/ */
SplitBlk* findBlock(Addr addr, int &lat); SplitBlk* findBlock(Addr addr, int &lat);
/**
* Finds the given address in the cache and update replacement data.
* Returns the access latency as a side effect.
* @param pkt The memory request whose block to find
* @param lat The access latency.
* @return Pointer to the cache block if found.
*/
SplitBlk* findBlock(PacketPtr &pkt, int &lat);
/** /**
* Finds the given address in the cache, do not update replacement data. * Finds the given address in the cache, do not update replacement data.
* @param addr The address to find. * @param addr The address to find.

View file

@ -254,31 +254,6 @@ SplitLIFO::findBlock(Addr addr, int &lat)
return blk; return blk;
} }
SplitBlk*
SplitLIFO::findBlock(PacketPtr &pkt, int &lat)
{
Addr addr = pkt->getAddr();
Addr tag = extractTag(addr);
unsigned set = extractSet(addr);
SplitBlk *blk = sets[set].findBlk(tag);
if (blk) {
DPRINTF(Split, "Found LIFO blk %#x in set %d, with tag %#x\n",
addr, set, tag);
hits++;
if (twoQueue) {
blk->isUsed = true;
sets[set].moveToFirstIn(blk);
} else {
sets[set].moveToLastIn(blk);
}
}
lat = hitLatency;
return blk;
}
SplitBlk* SplitBlk*
SplitLIFO::findBlock(Addr addr) const SplitLIFO::findBlock(Addr addr) const
@ -335,9 +310,8 @@ SplitLIFO::findReplacement(PacketPtr &pkt, PacketList &writebacks,
} }
void void
SplitLIFO::invalidateBlk(Addr addr) SplitLIFO::invalidateBlk(SplitLIFO::BlkType *blk)
{ {
SplitBlk *blk = findBlock(addr);
if (blk) { if (blk) {
blk->status = 0; blk->status = 0;
blk->isTouched = false; blk->isTouched = false;

View file

@ -184,11 +184,10 @@ public:
bool probe( Addr addr) const; bool probe( Addr addr) const;
/** /**
* Invalidate the block containing the given address. * Invalidate the given block.
* @param asid The address space ID. * @param blk The block to invalidate.
* @param addr The address to invalidate.
*/ */
void invalidateBlk(Addr addr); void invalidateBlk(BlkType *blk);
/** /**
* Finds the given address in the cache and update replacement data. * Finds the given address in the cache and update replacement data.
@ -200,15 +199,6 @@ public:
*/ */
SplitBlk* findBlock(Addr addr, int &lat); SplitBlk* findBlock(Addr addr, int &lat);
/**
* Finds the given address in the cache and update replacement data.
* Returns the access latency as a side effect.
* @param pkt The req whose block to find
* @param lat The access latency.
* @return Pointer to the cache block if found.
*/
SplitBlk* findBlock(PacketPtr &pkt, int &lat);
/** /**
* Finds the given address in the cache, do not update replacement data. * Finds the given address in the cache, do not update replacement data.
* @param addr The address to find. * @param addr The address to find.

View file

@ -202,27 +202,6 @@ SplitLRU::findBlock(Addr addr, int &lat)
return blk; return blk;
} }
SplitBlk*
SplitLRU::findBlock(PacketPtr &pkt, int &lat)
{
Addr addr = pkt->getAddr();
Addr tag = extractTag(addr);
unsigned set = extractSet(addr);
SplitBlk *blk = sets[set].findBlk(tag);
lat = hitLatency;
if (blk != NULL) {
// move this block to head of the MRU list
sets[set].moveToHead(blk);
if (blk->whenReady > curTick && blk->whenReady - curTick > hitLatency){
lat = blk->whenReady - curTick;
}
blk->refCount += 1;
hits++;
}
return blk;
}
SplitBlk* SplitBlk*
SplitLRU::findBlock(Addr addr) const SplitLRU::findBlock(Addr addr) const
@ -261,9 +240,8 @@ SplitLRU::findReplacement(PacketPtr &pkt, PacketList &writebacks,
} }
void void
SplitLRU::invalidateBlk(Addr addr) SplitLRU::invalidateBlk(SplitLRU::BlkType *blk)
{ {
SplitBlk *blk = findBlock(addr);
if (blk) { if (blk) {
blk->status = 0; blk->status = 0;
blk->isTouched = false; blk->isTouched = false;

View file

@ -167,11 +167,10 @@ public:
bool probe(Addr addr) const; bool probe(Addr addr) const;
/** /**
* Invalidate the block containing the given address. * Invalidate the given block.
* @param asid The address space ID. * @param blk The block to invalidate.
* @param addr The address to invalidate.
*/ */
void invalidateBlk(Addr addr); void invalidateBlk(BlkType *blk);
/** /**
* Finds the given address in the cache and update replacement data. * Finds the given address in the cache and update replacement data.
@ -183,15 +182,6 @@ public:
*/ */
SplitBlk* findBlock(Addr addr, int &lat); SplitBlk* findBlock(Addr addr, int &lat);
/**
* Finds the given address in the cache and update replacement data.
* Returns the access latency as a side effect.
* @param pkt The req whose block to find.
* @param lat The access latency.
* @return Pointer to the cache block if found.
*/
SplitBlk* findBlock(PacketPtr &pkt, int &lat);
/** /**
* Finds the given address in the cache, do not update replacement data. * Finds the given address in the cache, do not update replacement data.
* @param addr The address to find. * @param addr The address to find.

View file

@ -42,6 +42,7 @@
#include "arch/isa_traits.hh" #include "arch/isa_traits.hh"
#include "base/misc.hh" #include "base/misc.hh"
#include "config/full_system.hh" #include "config/full_system.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh" #include "mem/physical.hh"
#include "sim/builder.hh" #include "sim/builder.hh"
#include "sim/eventq.hh" #include "sim/eventq.hh"
@ -203,18 +204,60 @@ PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
if (pkt->req->isLocked()) { if (pkt->req->isLocked()) {
trackLoadLocked(pkt->req); trackLoadLocked(pkt->req);
} }
DPRINTF(MemoryAccess, "Performing Read of size %i on address 0x%x\n",
pkt->getSize(), pkt->getAddr());
memcpy(pkt->getPtr<uint8_t>(), memcpy(pkt->getPtr<uint8_t>(),
pmemAddr + pkt->getAddr() - params()->addrRange.start, pmemAddr + pkt->getAddr() - params()->addrRange.start,
pkt->getSize()); pkt->getSize());
#if TRACING_ON
switch (pkt->getSize()) {
case sizeof(uint64_t):
DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint64_t>());
break;
case sizeof(uint32_t):
DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint32_t>());
break;
case sizeof(uint16_t):
DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint16_t>());
break;
case sizeof(uint8_t):
DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint8_t>());
break;
default:
DPRINTF(MemoryAccess, "Read of size %i on address 0x%x\n",
pkt->getSize(), pkt->getAddr());
}
#endif
} }
else if (pkt->isWrite()) { else if (pkt->isWrite()) {
if (writeOK(pkt->req)) { if (writeOK(pkt->req)) {
DPRINTF(MemoryAccess, "Performing Write of size %i on address 0x%x\n",
pkt->getSize(), pkt->getAddr());
memcpy(pmemAddr + pkt->getAddr() - params()->addrRange.start, memcpy(pmemAddr + pkt->getAddr() - params()->addrRange.start,
pkt->getPtr<uint8_t>(), pkt->getSize()); pkt->getPtr<uint8_t>(), pkt->getSize());
#if TRACING_ON
switch (pkt->getSize()) {
case sizeof(uint64_t):
DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint64_t>());
break;
case sizeof(uint32_t):
DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint32_t>());
break;
case sizeof(uint16_t):
DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint16_t>());
break;
case sizeof(uint8_t):
DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n",
pkt->getSize(), pkt->getAddr(),pkt->get<uint8_t>());
break;
default:
DPRINTF(MemoryAccess, "Write of size %i on address 0x%x\n",
pkt->getSize(), pkt->getAddr());
}
#endif
} }
} }
else if (pkt->isInvalidate()) { else if (pkt->isInvalidate()) {
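The new TRACING_ON blocks above switch on pkt->getSize() so the traced data can be read at its natural width via pkt->get<T>(). Below is a standalone sketch of the same size-dispatch pattern applied to a raw byte buffer; dumpAccess and readAs are illustrative names, and plain printf stands in for DPRINTF.

// Illustrative sketch only -- not part of this change.
#include <cstdint>
#include <cstdio>
#include <cstring>

template <typename T>
static std::uint64_t readAs(const std::uint8_t *p)
{
    T v;
    std::memcpy(&v, p, sizeof(T));  // read the value as stored in the buffer
    return v;
}

static void dumpAccess(const char *op, std::uint64_t addr,
                       const std::uint8_t *data, std::size_t size)
{
    switch (size) {
      case sizeof(std::uint64_t):
        std::printf("%s of size %zu on address 0x%llx data 0x%llx\n", op,
                    size, (unsigned long long)addr,
                    (unsigned long long)readAs<std::uint64_t>(data));
        break;
      case sizeof(std::uint32_t):
        std::printf("%s of size %zu on address 0x%llx data 0x%llx\n", op,
                    size, (unsigned long long)addr,
                    (unsigned long long)readAs<std::uint32_t>(data));
        break;
      case sizeof(std::uint16_t):
        std::printf("%s of size %zu on address 0x%llx data 0x%llx\n", op,
                    size, (unsigned long long)addr,
                    (unsigned long long)readAs<std::uint16_t>(data));
        break;
      case sizeof(std::uint8_t):
        std::printf("%s of size %zu on address 0x%llx data 0x%llx\n", op,
                    size, (unsigned long long)addr,
                    (unsigned long long)data[0]);
        break;
      default:  // odd sizes: report only address and size
        std::printf("%s of size %zu on address 0x%llx\n", op, size,
                    (unsigned long long)addr);
    }
}

int main()
{
    std::uint8_t buf[8] = {0xef, 0xbe, 0xad, 0xde, 0, 0, 0, 0};
    dumpAccess("Read", 0x2000, buf, sizeof(std::uint32_t));
    return 0;
}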

View file

@ -98,18 +98,16 @@ pyzip_files.append('m5/defines.py')
pyzip_files.append('m5/info.py') pyzip_files.append('m5/info.py')
pyzip_files.append(join(env['ROOT'], 'util/pbs/jobfile.py')) pyzip_files.append(join(env['ROOT'], 'util/pbs/jobfile.py'))
env.Command(['swig/debug_wrap.cc', 'm5/internal/debug.py'], def swig_it(basename):
'swig/debug.i', env.Command(['swig/%s_wrap.cc' % basename, 'm5/internal/%s.py' % basename],
'$SWIG $SWIGFLAGS -outdir ${TARGETS[1].dir} ' 'swig/%s.i' % basename,
'-o ${TARGETS[0]} $SOURCES') '$SWIG $SWIGFLAGS -outdir ${TARGETS[1].dir} '
'-o ${TARGETS[0]} $SOURCES')
pyzip_dep_files.append('m5/internal/%s.py' % basename)
env.Command(['swig/main_wrap.cc', 'm5/internal/main.py'], swig_it('main')
'swig/main.i', swig_it('debug')
'$SWIG $SWIGFLAGS -outdir ${TARGETS[1].dir} ' swig_it('event')
'-o ${TARGETS[0]} $SOURCES')
pyzip_dep_files.append('m5/internal/debug.py')
pyzip_dep_files.append('m5/internal/main.py')
# Action function to build the zip archive. Uses the PyZipFile module # Action function to build the zip archive. Uses the PyZipFile module
# included in the standard Python library. # included in the standard Python library.

View file

@ -41,7 +41,8 @@ class BaseCPU(SimObject):
"terminate when all threads have reached this load count") "terminate when all threads have reached this load count")
max_loads_any_thread = Param.Counter(0, max_loads_any_thread = Param.Counter(0,
"terminate when any thread reaches this load count") "terminate when any thread reaches this load count")
progress_interval = Param.Tick(0, "interval to print out the progress message") progress_interval = Param.Tick(0,
"interval to print out the progress message")
defer_registration = Param.Bool(False, defer_registration = Param.Bool(False,
"defer registration with system (for sampling)") "defer registration with system (for sampling)")

View file

@ -11,4 +11,4 @@ class SparcDTB(SparcTLB):
class SparcITB(SparcTLB): class SparcITB(SparcTLB):
type = 'SparcITB' type = 'SparcITB'
size = 48 size = 64

View file

@ -1,3 +1,33 @@
/*
* Copyright (c) 2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Nathan Binkert
*/
%module debug %module debug
%{ %{

src/python/swig/event.i Normal file
View file

@ -0,0 +1,54 @@
/*
* Copyright (c) 2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Nathan Binkert
*/
%module event
%{
#include "python/swig/pyevent.hh"
inline void
create(PyObject *object, Tick when)
{
new PythonEvent(object, when);
}
%}
%include "stdint.i"
%include "sim/host.hh"
%inline %{
extern void create(PyObject *object, Tick when);
%}
%wrapper %{
// fix up module name to reflect the fact that it's inside the m5 package
#undef SWIG_name
#define SWIG_name "m5.internal._event"
%}

src/python/swig/init.hh Normal file
View file

@ -0,0 +1,36 @@
/*
* Copyright (c) 2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Nathan Binkert
*/
#ifndef __PYTHON_SWIG_INIT_HH__
#define __PYTHON_SWIG_INIT_HH__
void init_swig();
#endif // __PYTHON_SWIG_INIT_HH__

View file

@ -0,0 +1,66 @@
/*
* Copyright (c) 2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Nathan Binkert
*/
#include <Python.h>
#include "python/swig/pyevent.hh"
PythonEvent::PythonEvent(PyObject *obj, Tick when, Priority priority)
: Event(&mainEventQueue, priority), object(obj)
{
if (object == NULL)
panic("Passed in invalid object");
Py_INCREF(object);
setFlags(AutoDelete);
schedule(when);
}
PythonEvent::~PythonEvent()
{
Py_DECREF(object);
}
void
PythonEvent::process()
{
PyObject *result;
result = PyObject_CallMethod(object, "process", "");
if (result) {
// Nothing to do, just decrement the reference count
Py_DECREF(result);
} else {
// Something should be done to signal back to the main interpreter
// that there's been an exception.
}
}
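PythonEvent keeps the scheduled Python object alive with Py_INCREF in its constructor, releases it with Py_DECREF in its destructor, and process() simply calls the object's process() method and drops the returned reference. A minimal standalone sketch of that call-and-refcount pattern using the CPython C API, detached from m5's event queue; callProcess and the embedded Ev class are for illustration only, and unlike the code above the sketch reports a Python exception instead of silently ignoring it.

// Illustrative sketch only -- not part of this change.
#include <Python.h>

static void
callProcess(PyObject *object)
{
    // Mirrors PythonEvent::process(): call obj.process() and drop the
    // reference to the result.
    PyObject *result = PyObject_CallMethod(object, "process", "");
    if (result)
        Py_DECREF(result);
    else
        PyErr_Print();
}

int
main()
{
    Py_Initialize();
    PyRun_SimpleString(
        "class Ev:\n"
        "    def process(self):\n"
        "        print('event fired')\n");
    PyObject *mod = PyImport_AddModule("__main__");     // borrowed reference
    PyObject *cls = PyObject_GetAttrString(mod, "Ev");  // new reference
    PyObject *obj = PyObject_CallObject(cls, NULL);     // new reference

    // PythonEvent's constructor would Py_INCREF(obj) here and its
    // destructor would Py_DECREF(obj); we just use the reference we own.
    callProcess(obj);

    Py_DECREF(obj);
    Py_DECREF(cls);
    Py_Finalize();
    return 0;
}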

View file

@ -0,0 +1,48 @@
/*
* Copyright (c) 2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Nathan Binkert
*/
#ifndef __PYTHON_SWIG_PYEVENT_HH__
#define __PYTHON_SWIG_PYEVENT_HH__
#include "sim/eventq.hh"
class PythonEvent : public Event
{
private:
PyObject *object;
public:
PythonEvent(PyObject *obj, Tick when, Priority priority = Default_Pri);
~PythonEvent();
virtual void process();
};
#endif // __PYTHON_SWIG_PYEVENT_HH__

View file

@ -60,6 +60,7 @@
#include "cpu/smt.hh" #include "cpu/smt.hh"
#include "mem/mem_object.hh" #include "mem/mem_object.hh"
#include "mem/port.hh" #include "mem/port.hh"
#include "python/swig/init.hh"
#include "sim/async.hh" #include "sim/async.hh"
#include "sim/builder.hh" #include "sim/builder.hh"
#include "sim/host.hh" #include "sim/host.hh"
@ -117,11 +118,6 @@ abortHandler(int sigtype)
#endif #endif
} }
extern "C" {
void init_main();
void init_debug();
}
int int
main(int argc, char **argv) main(int argc, char **argv)
{ {
@ -159,8 +155,7 @@ main(int argc, char **argv)
PySys_SetArgv(argc, argv); PySys_SetArgv(argc, argv);
// initialize SWIG modules // initialize SWIG modules
init_main(); init_swig();
init_debug();
PyRun_SimpleString("import m5.main"); PyRun_SimpleString("import m5.main");
PyRun_SimpleString("m5.main.main()"); PyRun_SimpleString("m5.main.main()");

View file

@ -1,4 +1,4 @@
# Copyright (c) 2006 The Regents of The University of Michigan # Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved. # All rights reserved.
# #
# Redistribution and use in source and binary forms, with or without # Redistribution and use in source and binary forms, with or without
@ -47,48 +47,48 @@ base/traceflags.cc base/traceflags.hh: $(SRCDIR)/base/traceflags.py
cd base; \ cd base; \
$(PYTHON) $< $(PYTHON) $<
bitvectest: test/bitvectest.cc bitvectest: unittest/bitvectest.cc
$(CXX) $(CCFLAGS) -o $@ $^ $(CXX) $(CCFLAGS) -o $@ $^
circletest: test/circletest.cc base/circlebuf.cc circletest: unittest/circletest.cc base/circlebuf.cc
$(CXX) $(CCFLAGS) -o $@ $^ $(CXX) $(CCFLAGS) -o $@ $^
cprintftest: test/cprintftest.cc base/cprintf.cc cprintftest: unittest/cprintftest.cc base/cprintf.cc
$(CXX) $(CCFLAGS) -o $@ $^ $(CXX) $(CCFLAGS) -o $@ $^
initest: test/initest.cc base/str.cc base/inifile.cc base/cprintf.cc initest: unittest/initest.cc base/str.cc base/inifile.cc base/cprintf.cc
$(CXX) $(CCFLAGS) -o $@ $^ $(CXX) $(CCFLAGS) -o $@ $^
lrutest: test/lru_test.cc lrutest: unittest/lru_test.cc
$(CXX) $(CCFLAGS) -o $@ $^ $(CXX) $(CCFLAGS) -o $@ $^
nmtest: test/nmtest.cc base/output.cc base/hostinfo.cc base/cprintf.cc base/misc.cc base/loader/object_file.cc base/loader/symtab.cc base/misc.cc base/str.cc base/loader/aout_object.cc base/loader/ecoff_object.cc base/loader/elf_object.cc nmtest: unittest/nmtest.cc base/output.cc base/hostinfo.cc base/cprintf.cc base/misc.cc base/loader/object_file.cc base/loader/symtab.cc base/misc.cc base/str.cc base/loader/aout_object.cc base/loader/ecoff_object.cc base/loader/elf_object.cc
$(CXX) $(CCFLAGS) -I/n/ziff/z/binkertn/build/work/ALPHA_FS -lelf -o $@ $^ $(CXX) $(CCFLAGS) -I/n/ziff/z/binkertn/build/work/ALPHA_FS -lelf -o $@ $^
offtest: test/offtest.cc offtest: unittest/offtest.cc
$(CXX) $(CCFLAGS) -o $@ $^ $(CXX) $(CCFLAGS) -o $@ $^
rangetest: test/rangetest.cc base/range.cc base/str.cc rangetest: unittest/rangetest.cc base/range.cc base/str.cc
$(CXX) $(CCFLAGS) -o $@ $^ $(CXX) $(CCFLAGS) -o $@ $^
STATTEST+= base/cprintf.cc base/hostinfo.cc base/misc.cc base/mysql.cc STATTEST+= base/cprintf.cc base/hostinfo.cc base/misc.cc base/mysql.cc
STATTEST+= base/python.cc base/str.cc base/time.cc STATTEST+= base/python.cc base/str.cc base/time.cc
STATTEST+= base/statistics.cc base/stats/mysql.cc base/stats/python.cc STATTEST+= base/statistics.cc base/stats/mysql.cc base/stats/python.cc
STATTEST+= base/stats/statdb.cc base/stats/text.cc base/stats/visit.cc STATTEST+= base/stats/statdb.cc base/stats/text.cc base/stats/visit.cc
STATTEST+= test/stattest.cc STATTEST+= unittest/stattest.cc
stattest: $(STATTEST) stattest: $(STATTEST)
$(CXX) $(CCFLAGS) $(MYSQL) -o $@ $^ $(CXX) $(CCFLAGS) $(MYSQL) -o $@ $^
strnumtest: test/strnumtest.cc base/str.cc strnumtest: unittest/strnumtest.cc base/str.cc
$(CXX) $(CCFLAGS) -o $@ $^ $(CXX) $(CCFLAGS) -o $@ $^
symtest: test/symtest.cc base/misc.cc base/symtab.cc base/str.cc symtest: unittest/symtest.cc base/misc.cc base/symtab.cc base/str.cc
$(CXX) $(CCFLAGS) -o $@ $^ $(CXX) $(CCFLAGS) -o $@ $^
tokentest: test/tokentest.cc base/str.cc tokentest: unittest/tokentest.cc base/str.cc
$(CXX) $(CCFLAGS) -o $@ $^ $(CXX) $(CCFLAGS) -o $@ $^
TRACE+=test/tracetest.cc base/trace.cc base/trace_flags.cc base/cprintf.cc TRACE+=unittest/tracetest.cc base/trace.cc base/trace_flags.cc base/cprintf.cc
TRACE+=base/str.cc base/misc.cc TRACE+=base/str.cc base/misc.cc
tracetest: $(TRACE) tracetest: $(TRACE)
$(CXX) $(CCFLAGS) -o $@ $^ $(CXX) $(CCFLAGS) -o $@ $^

View file

@ -43,13 +43,11 @@ main()
char foo[9]; char foo[9];
cprintf("%s\n", foo); cprintf("%s\n", foo);
cprintf("%d\n", 'A');
cprintf("%shits%%s + %smisses%%s\n", "test", "test"); cprintf("%shits%%s + %smisses%%s\n", "test", "test");
cprintf("%%s%-10s %c he went home \'\"%d %#o %#x %1.5f %1.2E\n", cprintf("%%s%-10s %c he went home \'\"%d %#o %#x %1.5f %1.2E\n",
"hello", 'A', 1, 0xff, 0xfffffffffffffULL, 3.141592653589, 1.1e10); "hello", 'A', 1, 0xff, 0xfffffffffffffULL, 3.141592653589, 1.1e10);
cout << cformat("%s %#x %s\n") << "hello" << 0 << "foo 0\n";
cerr << cformat("%s %#x\n") << "hello" << 1 << "foo 1\n";
cprintf("another test\n"); cprintf("another test\n");
stringstream buffer; stringstream buffer;

util/chkformat Executable file
View file

@ -0,0 +1,168 @@
#!/usr/bin/env python
# Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from getopt import getopt, GetoptError
import re
import sys
tabsize = 8
lead = re.compile(r'^([ \t])+')
trail = re.compile(r'[ \t]+$')
any_control = re.compile(r'\b(if|while|for)[ \t]*[(]')
good_control = re.compile(r'\b(if|while|for) [(]')
def linelen(line):
tabs = line.count('\t')
if not tabs:
return len(line)
count = 0
for c in line:
if c == '\t':
count += tabsize - count % tabsize
else:
count += 1
return count
toolong = 0
toolong80 = 0
leadtabs = 0
trailwhite = 0
badcontrol = 0
cret = 0
def validate(filename, verbose, code):
global toolong, toolong80, leadtabs, trailwhite, badcontrol, cret
def msg(lineno, line, message):
print '%s:%d>' % (filename, lineno + 1), message
if verbose > 2:
print line
def bad():
if code is not None:
sys.exit(code)
cpp = filename.endswith('.cc') or filename.endswith('.hh')
py = filename.endswith('.py')
if py + cpp != 1:
raise AttributeError, \
"I don't know how to deal with the file %s" % filename
try:
f = file(filename, 'r')
except OSError:
if verbose > 0:
print 'could not open file %s' % filename
bad()
return
for i,line in enumerate(f):
line = line.rstrip('\n')
# no carriage returns
if line.find('\r') != -1:
cret += 1
if verbose > 1:
msg(i, line, 'carriage return found')
bad()
# lines max out at 79 chars
llen = linelen(line)
if llen > 79:
toolong += 1
if llen == 80:
toolong80 += 1
if verbose > 1:
msg(i, line, 'line too long (%d chars)' % llen)
bad()
# no tabs used to indent
match = lead.search(line)
if match and match.group(1).find('\t') != -1:
leadtabs += 1
if verbose > 1:
msg(i, line, 'using tabs to indent')
bad()
# no trailing whitespace
if trail.search(line):
trailwhite +=1
if verbose > 1:
msg(i, line, 'trailing whitespace')
bad()
# for c++, exactly one space between if/while/for and (
if cpp:
match = any_control.search(line)
if match and not good_control.search(line):
badcontrol += 1
if verbose > 1:
msg(i, line, 'improper spacing after %s' % match.group(1))
bad()
if __name__ == '__main__':
progname = sys.argv[0]
def usage(code=None):
print >>sys.stderr, '''%s [-n] [-q] [-v] <filenames>''' % progname
if code is not None:
sys.exit(code)
try:
opts, args = getopt(sys.argv[1:], '-nqv')
except GetoptError:
usage(2)
code = 1
verbose = 1
for opt,arg in opts:
if opt == '-n':
code = None
if opt == '-q':
verbose -= 1
if opt == '-v':
verbose += 1
for filename in args:
validate(filename, verbose=verbose, code=code)
if verbose > 0:
print '''\
%d violations of lines over 79 chars. %d of which are 80 chars exactly.
%d cases of whitespace at the end of a line.
%d cases of tabs to indent.
%d bad parens after if/while/for.
%d carriage returns found.
''' % (toolong, toolong80, trailwhite, leadtabs, badcontrol, cret)

util/compile Executable file
View file

@ -0,0 +1,323 @@
#!/usr/bin/env python
# Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import os, re, sys
from os.path import isdir, isfile, join as joinpath
homedir = os.environ['HOME']
def do_compile():
#
# Find SCons
#
search_dirs = [ joinpath(homedir, 'local/lib'), '/opt/local/lib',
'/usr/local/lib', '/usr/lib' ]
if os.environ.has_key("SCONS_LIB_DIR"):
search_dirs.append(os.environ["SCONS_LIB_DIR"])
local = re.compile(r'^scons-local-([0-9]*)\.([0-9]*)\.([0-9]*)$')
standard = re.compile(r'^scons-([0-9]*)\.([0-9]*)\.([0-9]*)$')
scons_dirs = []
for dir in search_dirs:
if not isdir(dir):
continue
entries = os.listdir(dir)
for entry in entries:
if not entry.startswith('scons'):
continue
version = (0,0,0)
path = joinpath(dir, entry)
match = local.search(entry)
if not match:
match = standard.search(entry)
if match:
version = match.group(1), match.group(2), match.group(3)
scons_dirs.append((version, path))
scons_dirs.sort()
scons_dirs.reverse()
if not scons_dirs:
print >>sys.stderr, \
"could not find scons in the following dirs: %s" % search_dirs
sys.exit(1)
sys.path = [ scons_dirs[0][1] ] + sys.path
# invoke SCons
import SCons.Script
SCons.Script.main()
#
# do argument parsing
#
progname = sys.argv[0]
import optparse
usage = '''%prog [compile options] <version> [SCons options]
%prog assumes that the user has a directory called ~/m5/<version> where
the source tree resides, and a directory called ~/build, where %prog
will create ~/build/<version> if it does not exist and build the resulting
simulators there.
If ~/build is set up in such a way that it points to a local disk on
each host, compiles will be very efficient. For example:
~/build -> /z/<username>/.build (Assuming that /z is a local disk and
not NFS mounted, whereas your home directory is NFS mounted).
'''
version = '%prog 0.1'
parser = optparse.OptionParser(usage=usage, version=version,
formatter=optparse.TitledHelpFormatter())
parser.disable_interspersed_args()
# current option group
group = None
def set_group(*args, **kwargs):
'''set the current option group'''
global group
if not args and not kwargs:
group = None
else:
group = parser.add_option_group(*args, **kwargs)
def add_option(*args, **kwargs):
if group:
return group.add_option(*args, **kwargs)
else:
return parser.add_option(*args, **kwargs)
def bool_option(name, default, help):
'''add a boolean option called --name and --no-name.
Display help depending on which is the default'''
tname = '--%s' % name
fname = '--no-%s' % name
dest = name.replace('-', '_')
if default:
thelp = optparse.SUPPRESS_HELP
fhelp = help
else:
thelp = help
fhelp = optparse.SUPPRESS_HELP
add_option(tname, action="store_true", default=default, help=thelp)
add_option(fname, action="store_false", dest=dest, help=fhelp)
add_option('-n', '--no-compile', default=False, action='store_true',
help="don't actually compile, just echo SCons command line")
add_option('--everything', default=False, action='store_true',
help="compile everything that can be compiled")
add_option('-E', "--experimental", action='store_true', default=False,
help="enable experimental builds")
add_option('-v', "--verbose", default=False, action='store_true',
help="be verbose")
set_group("Output binary types")
bool_option("debug", default=False, help="compile debug binaries")
bool_option("opt", default=False, help="compile opt binaries")
bool_option("fast", default=False, help="compile fast binaries")
bool_option("prof", default=False, help="compile profile binaries")
add_option('-a', "--all-bin", default=False, action='store_true',
help="compile debug, opt, and fast binaries")
set_group("ISA options")
bool_option("alpha", default=False, help="compile Alpha")
bool_option("mips", default=False, help="compile MIPS")
bool_option("sparc", default=False, help="compile SPARC")
add_option('-i', "--all-isa", default=False, action='store_true',
help="compile all ISAs")
set_group("Emulation options")
bool_option("syscall", default=True,
help="Do not compile System Call Emulation mode")
bool_option("fullsys", default=True,
help="Do not compile Full System mode")
def usage(exitcode=None):
parser.print_help()
if exitcode is not None:
sys.exit(exitcode)
(options, args) = parser.parse_args()
if options.everything:
options.all_bin = True
options.prof = True
options.all_isa = True
if options.all_bin:
options.debug = True
options.opt = True
options.fast = True
binaries = []
if options.debug:
binaries.append('m5.debug')
if options.opt:
binaries.append('m5.opt')
if options.fast:
binaries.append('m5.fast')
if options.prof:
binaries.append('m5.prof')
if not binaries:
binaries.append('m5.debug')
if options.all_isa:
options.alpha = True
options.mips = True
options.sparc = True
isas = []
if options.alpha:
isas.append('alpha')
if options.mips:
isas.append('mips')
if options.sparc:
isas.append('sparc')
if not isas:
isas.append('alpha')
modes = []
if options.syscall:
modes.append('syscall')
if options.fullsys:
modes.append('fullsys')
if not modes:
sys.exit("must specify at least one mode")
#
# Convert options into SCons command line arguments
#
# valid combinations of ISA and emulation mode
valid = { ('alpha', 'syscall') : 'ALPHA_SE',
('alpha', 'fullsys') : 'ALPHA_FS',
('mips', 'syscall') : 'MIPS_SE',
('sparc', 'syscall') : 'SPARC_SE' }
# experimental combinations of ISA and emulation mode
experiment = { ('mips', 'fullsys') : 'MIPS_FS',
('sparc', 'fullsys') : 'SPARC_FS' }
if options.experimental:
valid.update(experiment)
builds = []
for isa in isas:
for mode in modes:
try:
build = valid[(isa, mode)]
builds.append(build)
except KeyError:
pass
if not builds:
sys.exit("must specify at least one valid combination of ISA and mode")
if not args:
usage(2)
version = args[0]
del args[0]
for bin in binaries:
for build in builds:
args.append('%s/%s' % (build, bin))
#
# set up compile
#
build_base = joinpath(homedir, 'build')
m5_base = joinpath(homedir, 'm5')
if not isdir(build_base):
sys.exit('build directory %s not found' % build_base)
if not isdir(m5_base):
sys.exit('m5 base directory %s not found' % m5_base)
m5_dir = joinpath(m5_base, version)
if not isdir(m5_dir):
sys.exit('source directory %s not found' % m5_dir)
# support M5 1.x
oldstyle = isfile(joinpath(m5_dir, 'SConscript'))
if oldstyle:
ext_dir = joinpath(m5_base, 'ext')
test_dir = joinpath(m5_base, 'test.' + version)
if not isdir(ext_dir):
sys.exit('ext directory not found at %s' % ext_dir)
if not isdir(test_dir):
sys.exit('test directory not found at %s' % test_dir)
build_dir = joinpath(build_base, version)
if not isdir(build_dir):
os.mkdir(build_dir)
# need some symlinks for m5 1.x
if oldstyle:
os.symlink(m5_dir, joinpath(build_dir, 'm5'))
os.symlink(ext_dir, joinpath(build_dir, 'ext'))
os.symlink(test_dir, joinpath(build_dir, 'test'))
os.symlink(joinpath(m5_dir, 'build', 'SConstruct'),
joinpath(build_dir, 'SConstruct'))
os.symlink(joinpath(m5_dir, 'build', 'default_options'),
joinpath(build_dir, 'default_options'))
sys.argv = [ progname ]
if oldstyle:
os.chdir(build_dir)
sys.argv.extend(args)
else:
os.chdir(m5_dir)
for arg in args:
if not arg.startswith('-') and '=' not in arg:
arg = joinpath(build_dir, 'build', arg)
sys.argv.append(arg)
if options.no_compile or options.verbose:
for arg in sys.argv[1:]:
print arg
if not options.no_compile:
do_compile()

util/fixwhite Executable file
View file

@ -0,0 +1,83 @@
#! /usr/bin/env python
# Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import re
import os
import sys
from getopt import getopt, GetoptError
tabs = re.compile(r'^[ \t]+')
def fixwhite(filename, tabsize):
try:
f = file(filename, 'r+')
except OSError, msg:
print 'could not open file %s: %s' % (filename, msg)
return
lines = list(f)
f.seek(0)
f.truncate()
for line in lines:
if tabs.search(line):
newline = ''
for i,c in enumerate(line):
if c == ' ':
newline += ' '
elif c == '\t':
newline += ' ' * (tabsize - len(newline) % tabsize)
else:
newline += line[i:]
break
line = newline
print >>f, line.rstrip()
if __name__ == '__main__':
progname = sys.argv[0]
def usage(code=None):
print >>sys.stderr, '''%s [-t <tabsize>] <filenames>''' % progname
if code is not None:
sys.exit(code)
try:
opts, args = getopt(sys.argv[1:], '-t:')
except GetoptError:
usage(2)
tabsize = 8
for opt,arg in opts:
if opt == '-t':
tabsize = int(arg)
for filename in args:
fixwhite(filename, tabsize)