Merge zizzer:/bk/newmem

into  zazzer.eecs.umich.edu:/z/rdreslin/m5bk/newmemcleanest

src/mem/tport.cc:
    Merge PacketPtr changes

--HG--
extra : convert_revision : 0329c5803a3df67af3dda89bd9d4753fd1a286d1
Ron Dreslinski 2006-10-20 13:04:59 -04:00
commit 54ed57cc4c
7 changed files with 17 additions and 98 deletions


@@ -113,7 +113,7 @@ MemTest::MemTest(const string &name,
// PhysicalMemory *check_mem,
unsigned _memorySize,
unsigned _percentReads,
-// unsigned _percentCopies,
+unsigned _percentFunctional,
unsigned _percentUncacheable,
unsigned _progressInterval,
unsigned _percentSourceUnaligned,
@@ -130,7 +130,7 @@ MemTest::MemTest(const string &name,
// checkMem(check_mem),
size(_memorySize),
percentReads(_percentReads),
-// percentCopies(_percentCopies),
+percentFunctional(_percentFunctional),
percentUncacheable(_percentUncacheable),
progressInterval(_progressInterval),
nextProgressMessage(_progressInterval),
@@ -345,7 +345,7 @@ MemTest::tick()
} else {
paddr = ((base) ? baseAddr1 : baseAddr2) + offset;
}
-bool probe = (random() % 2 == 1) && !(flags & UNCACHEABLE);
+bool probe = (random() % 100 < percentFunctional) && !(flags & UNCACHEABLE);
//bool probe = false;
paddr &= ~((1 << access_size) - 1);
@@ -501,7 +501,7 @@ BEGIN_DECLARE_SIM_OBJECT_PARAMS(MemTest)
// SimObjectParam<PhysicalMemory *> check_mem;
Param<unsigned> memory_size;
Param<unsigned> percent_reads;
-// Param<unsigned> percent_copies;
+Param<unsigned> percent_functional;
Param<unsigned> percent_uncacheable;
Param<unsigned> progress_interval;
Param<unsigned> percent_source_unaligned;
@@ -520,7 +520,7 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(MemTest)
// INIT_PARAM(check_mem, "check memory"),
INIT_PARAM(memory_size, "memory size"),
INIT_PARAM(percent_reads, "target read percentage"),
-// INIT_PARAM(percent_copies, "target copy percentage"),
+INIT_PARAM(percent_functional, "percentage of access that are functional"),
INIT_PARAM(percent_uncacheable, "target uncacheable percentage"),
INIT_PARAM(progress_interval, "progress report interval (in accesses)"),
INIT_PARAM(percent_source_unaligned,
@@ -537,7 +537,7 @@ END_INIT_SIM_OBJECT_PARAMS(MemTest)
CREATE_SIM_OBJECT(MemTest)
{
return new MemTest(getInstanceName(), /*cache->getInterface(),*/ /*main_mem,*/
-/*check_mem,*/ memory_size, percent_reads, /*percent_copies,*/
+/*check_mem,*/ memory_size, percent_reads, percent_functional,
percent_uncacheable, progress_interval,
percent_source_unaligned, percent_dest_unaligned,
trace_addr, max_loads, atomic);
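
The MemTest changes above drop the long-disabled percent_copies plumbing and introduce percent_functional: rather than deciding on a fixed coin flip (random() % 2), the tester now draws a value in [0, 100) and compares it against the configured percentage before issuing a functional probe. A standalone sketch of that selection pattern (not gem5 code; the constant is a stand-in for the _percentFunctional parameter):

#include <cstdio>
#include <cstdlib>

int main()
{
    const int percentFunctional = 50;   // stand-in for the configured percentage
    const int trials = 100000;
    int probes = 0;
    for (int i = 0; i < trials; ++i) {
        // Same pattern as the new MemTest::tick() line: roughly
        // percentFunctional% of accesses become functional probes.
        bool probe = (std::rand() % 100) < percentFunctional;
        if (probe)
            ++probes;
    }
    std::printf("functional probes: %.1f%%\n", 100.0 * probes / trials);
    return 0;
}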


@@ -55,7 +55,7 @@ class MemTest : public MemObject
// PhysicalMemory *check_mem,
unsigned _memorySize,
unsigned _percentReads,
-// unsigned _percentCopies,
+unsigned _percentFunctional,
unsigned _percentUncacheable,
unsigned _progressInterval,
unsigned _percentSourceUnaligned,
@@ -144,7 +144,7 @@ class MemTest : public MemObject
unsigned size; // size of testing memory region
unsigned percentReads; // target percentage of read accesses
-// unsigned percentCopies; // target percentage of copy accesses
+unsigned percentFunctional; // target percentage of functional accesses
unsigned percentUncacheable;
int id;


@@ -115,32 +115,7 @@ BaseCache::CachePort::recvFunctional(PacketPtr pkt)
// If the target contains data, and it overlaps the
// probed request, need to update data
if (target->intersect(pkt)) {
-uint8_t* pkt_data;
-uint8_t* write_data;
-int data_size;
-if (target->getAddr() < pkt->getAddr()) {
-int offset = pkt->getAddr() - target->getAddr();
-pkt_data = pkt->getPtr<uint8_t>();
-write_data = target->getPtr<uint8_t>() + offset;
-data_size = target->getSize() - offset;
-assert(data_size > 0);
-if (data_size > pkt->getSize())
-data_size = pkt->getSize();
-} else {
-int offset = target->getAddr() - pkt->getAddr();
-pkt_data = pkt->getPtr<uint8_t>() + offset;
-write_data = target->getPtr<uint8_t>();
-data_size = pkt->getSize() - offset;
-assert(data_size >= pkt->getSize());
-if (data_size > target->getSize())
-data_size = target->getSize();
-}
-if (pkt->isWrite()) {
-memcpy(pkt_data, write_data, data_size);
-} else {
-memcpy(write_data, pkt_data, data_size);
-}
+fixPacket(pkt, target);
}
}
cache->doFunctionalAccess(pkt, isCpuSide);


@@ -560,7 +560,6 @@ Cache<TagStore,Buffering,Coherence>::probe(PacketPtr &pkt, bool update,
if (!update) {
// Check for data in MSHR and writebuffer.
if (mshr) {
warn("Found outstanding miss on an non-update probe");
MSHR::TargetList *targets = mshr->getTargetList();
MSHR::TargetList::iterator i = targets->begin();
MSHR::TargetList::iterator end = targets->end();
@@ -568,71 +567,15 @@
PacketPtr target = *i;
// If the target contains data, and it overlaps the
// probed request, need to update data
-if (target->isWrite() && target->intersect(pkt)) {
-uint8_t* pkt_data;
-uint8_t* write_data;
-int data_size;
-if (target->getAddr() < pkt->getAddr()) {
-int offset = pkt->getAddr() - target->getAddr();
-pkt_data = pkt->getPtr<uint8_t>();
-write_data = target->getPtr<uint8_t>() + offset;
-data_size = target->getSize() - offset;
-assert(data_size > 0);
-if (data_size > pkt->getSize())
-data_size = pkt->getSize();
-} else {
-int offset = target->getAddr() - pkt->getAddr();
-pkt_data = pkt->getPtr<uint8_t>() + offset;
-write_data = target->getPtr<uint8_t>();
-data_size = pkt->getSize() - offset;
-assert(data_size >= pkt->getSize());
-if (data_size > target->getSize())
-data_size = target->getSize();
-}
-if (pkt->isWrite()) {
-memcpy(pkt_data, write_data, data_size);
-} else {
-pkt->flags |= SATISFIED;
-pkt->result = Packet::Success;
-memcpy(write_data, pkt_data, data_size);
-}
+if (target->intersect(pkt)) {
+fixPacket(pkt, target);
+}
}
}
for (int i = 0; i < writes.size(); ++i) {
PacketPtr write = writes[i]->pkt;
if (write->intersect(pkt)) {
warn("Found outstanding write on an non-update probe");
uint8_t* pkt_data;
uint8_t* write_data;
int data_size;
if (write->getAddr() < pkt->getAddr()) {
int offset = pkt->getAddr() - write->getAddr();
pkt_data = pkt->getPtr<uint8_t>();
write_data = write->getPtr<uint8_t>() + offset;
data_size = write->getSize() - offset;
assert(data_size > 0);
if (data_size > pkt->getSize())
data_size = pkt->getSize();
} else {
int offset = write->getAddr() - pkt->getAddr();
pkt_data = pkt->getPtr<uint8_t>() + offset;
write_data = write->getPtr<uint8_t>();
data_size = pkt->getSize() - offset;
assert(data_size >= pkt->getSize());
if (data_size > write->getSize())
data_size = write->getSize();
}
if (pkt->isWrite()) {
memcpy(pkt_data, write_data, data_size);
} else {
pkt->flags |= SATISFIED;
pkt->result = Packet::Success;
memcpy(write_data, pkt_data, data_size);
}
fixPacket(pkt, write);
}
}
if (pkt->isRead()
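
The blocks deleted in BaseCache::CachePort::recvFunctional and Cache::probe above were near-identical copies of the same overlap handling: find the byte range where the functional packet and a queued or in-flight packet intersect, memcpy the data in the appropriate direction, and mark a satisfied read with SATISFIED / Packet::Success. This merge consolidates all of that behind fixPacket(pkt, target). A minimal sketch of what such a helper does, assuming the conventional semantics (a functional read pulls data out of the queued write and is marked successful, a functional write pushes data into it); the struct and names below are illustrative stand-ins, not the actual gem5 fixPacket:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>

// Hypothetical stand-in modeling only the fields the deleted code touched.
struct Pkt {
    uint64_t addr;
    int size;
    uint8_t *data;
    bool write;        // is this packet a write?
    bool satisfied;    // stands in for the SATISFIED flag / Packet::Success
};

// Copy the overlapping bytes between a functional access `func` and a
// queued/in-flight packet `timing`, in the direction implied by the
// functional packet's command.
void fixPacketSketch(Pkt *func, Pkt *timing)
{
    uint64_t funcStart = func->addr, funcEnd = func->addr + func->size - 1;
    uint64_t timStart = timing->addr, timEnd = timing->addr + timing->size - 1;
    assert(!(funcStart > timEnd || timStart > funcEnd));   // ranges must overlap

    uint64_t start = std::max(funcStart, timStart);
    uint64_t end = std::min(funcEnd, timEnd);
    int len = int(end - start + 1);

    uint8_t *funcPtr = func->data + (start - funcStart);
    uint8_t *timPtr = timing->data + (start - timStart);

    if (func->write) {
        std::memcpy(timPtr, funcPtr, len);   // functional write updates queued data
    } else {
        std::memcpy(funcPtr, timPtr, len);   // functional read is serviced from it
        func->satisfied = true;              // what the deleted code expressed as
                                             // SATISFIED / Packet::Success
    }
}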


@@ -151,7 +151,7 @@ fixPacket(PacketPtr func, PacketPtr timing)
Addr timingStart = timing->getAddr();
Addr timingEnd = timing->getAddr() + timing->getSize() - 1;
-assert(!(funcStart > timingEnd || timingStart < funcEnd));
+assert(!(funcStart > timingEnd || timingStart > funcEnd));
if (DTRACE(FunctionalAccess)) {
DebugOut() << func;
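
The corrected assertion is the standard closed-interval overlap test: [funcStart, funcEnd] and [timingStart, timingEnd] intersect exactly when neither range lies entirely beyond the other, i.e. !(funcStart > timingEnd || timingStart > funcEnd). With the old '<', any timing packet that merely started before the functional packet's last byte tripped the assert, even on perfectly valid overlaps. A small self-contained check with hypothetical addresses (not values from the source):

#include <cassert>
#include <cstdint>

int main()
{
    // An 8-byte functional access at 0x100 against a 64-byte queued write
    // at 0x100: the ranges clearly overlap.
    uint64_t funcStart = 0x100, funcEnd = 0x107;
    uint64_t timingStart = 0x100, timingEnd = 0x13f;

    // Fixed predicate: holds for this overlap, as it should.
    assert(!(funcStart > timingEnd || timingStart > funcEnd));

    // Old predicate: timingStart (0x100) < funcEnd (0x107) is true, so the
    // whole expression is false and the old assert would have fired here.
    bool oldPredicate = !(funcStart > timingEnd || timingStart < funcEnd);
    assert(!oldPredicate);
    return 0;
}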


@@ -33,12 +33,11 @@
void
SimpleTimingPort::recvFunctional(PacketPtr pkt)
{
//First check queued events
std::list<PacketPtr>::iterator i = transmitList.begin();
std::list<PacketPtr>::iterator end = transmitList.end();
-bool cont = true;
-while (i != end && cont) {
+while (i != end) {
PacketPtr target = *i;
// If the target contains data, and it overlaps the
// probed request, need to update data
@@ -46,8 +45,9 @@ SimpleTimingPort::recvFunctional(PacketPtr pkt)
fixPacket(pkt, target);
}
//Then just do an atomic access and throw away the returned latency
-if (cont)
+if (pkt->result != Packet::Success)
recvAtomic(pkt);
}
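
With the duplicated copy logic gone, SimpleTimingPort::recvFunctional no longer needs its own cont flag: it lets fixPacket patch any queued packet that intersects the request and then falls through to recvAtomic only when the packet was not marked successful along the way. A toy illustration of that gating (the types are made-up stand-ins, not the gem5 Packet or port classes):

#include <cstdio>
#include <list>

struct Pkt {
    bool success = false;   // stands in for pkt->result == Packet::Success
};

// Pretend the queued packet fully satisfied the functional request.
static void fixPacketStub(Pkt &func) { func.success = true; }

static void recvFunctionalSketch(Pkt &pkt, std::list<Pkt> &transmitList)
{
    for (Pkt &target : transmitList) {
        (void)target;            // a real port would test for intersection
        fixPacketStub(pkt);      // copy overlapping data, maybe mark success
    }
    if (!pkt.success)
        std::puts("falling back to the atomic access");   // recvAtomic(pkt)
    else
        std::puts("satisfied from the transmit queue");
}

int main()
{
    std::list<Pkt> emptyQueue;
    std::list<Pkt> oneQueuedWrite(1);
    Pkt a, b;
    recvFunctionalSketch(a, emptyQueue);      // prints the atomic-fallback path
    recvFunctionalSketch(b, oneQueuedWrite);  // prints the satisfied path
    return 0;
}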


@@ -13,6 +13,7 @@ class MemTest(SimObject):
percent_reads = Param.Percent(65, "target read percentage")
percent_source_unaligned = Param.Percent(50,
"percent of copy source address that are unaligned")
+percent_functional = Param.Percent(50, "percent of access that are functional")
percent_uncacheable = Param.Percent(10,
"target uncacheable percentage")
progress_interval = Param.Counter(1000000,