ruby: continue style pass

Nathan Binkert 2010-03-23 22:49:43 -07:00
parent d2eb589675
commit a2652a048a
12 changed files with 1507 additions and 1509 deletions

View file: mem/ruby/profiler/AccessTraceForAddress.cc

@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@ -27,13 +26,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
#include "mem/ruby/profiler/AccessTraceForAddress.hh"
#include "mem/ruby/common/Histogram.hh"
#include "mem/ruby/profiler/AccessTraceForAddress.hh"
AccessTraceForAddress::AccessTraceForAddress()
{
@ -60,7 +54,8 @@ AccessTraceForAddress::~AccessTraceForAddress()
}
}
void AccessTraceForAddress::print(ostream& out) const
void
AccessTraceForAddress::print(ostream& out) const
{
out << m_addr;
@ -79,7 +74,10 @@ void AccessTraceForAddress::print(ostream& out) const
}
}
void AccessTraceForAddress::update(CacheRequestType type, AccessModeType access_mode, NodeID cpu, bool sharing_miss)
void
AccessTraceForAddress::update(CacheRequestType type,
AccessModeType access_mode, NodeID cpu,
bool sharing_miss)
{
m_touched_by.add(cpu);
m_total++;
@ -102,7 +100,8 @@ void AccessTraceForAddress::update(CacheRequestType type, AccessModeType access_
}
}
int AccessTraceForAddress::getTotal() const
int
AccessTraceForAddress::getTotal() const
{
if (m_histogram_ptr == NULL) {
return m_total;
@ -111,7 +110,8 @@ int AccessTraceForAddress::getTotal() const
}
}
void AccessTraceForAddress::addSample(int value)
void
AccessTraceForAddress::addSample(int value)
{
assert(m_total == 0);
if (m_histogram_ptr == NULL) {
@ -119,8 +119,3 @@ void AccessTraceForAddress::addSample(int value)
}
m_histogram_ptr->add(value);
}
bool node_less_then_eq(const AccessTraceForAddress* n1, const AccessTraceForAddress* n2)
{
return (n1->getTotal() > n2->getTotal());
}

View file: mem/ruby/profiler/AccessTraceForAddress.hh

@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@ -27,36 +26,27 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
* Description:
*
*/
#ifndef __MEM_RUBY_PROFILER_ACCESSTRACEFORADDRESS_HH__
#define __MEM_RUBY_PROFILER_ACCESSTRACEFORADDRESS_HH__
#ifndef ACCESSTRACEFORADDRESS_H
#define ACCESSTRACEFORADDRESS_H
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/protocol/CacheRequestType.hh"
#include "mem/protocol/AccessModeType.hh"
#include "mem/ruby/system/NodeID.hh"
#include "mem/protocol/CacheRequestType.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Set.hh"
#include "mem/ruby/system/NodeID.hh"
class Histogram;
class AccessTraceForAddress {
class AccessTraceForAddress
{
public:
// Constructors
AccessTraceForAddress();
explicit AccessTraceForAddress(const Address& addr);
// Destructor
~AccessTraceForAddress();
// Public Methods
void update(CacheRequestType type, AccessModeType access_mode, NodeID cpu, bool sharing_miss);
void update(CacheRequestType type, AccessModeType access_mode, NodeID cpu,
bool sharing_miss);
int getTotal() const;
int getSharing() const { return m_sharing; }
int getTouchedBy() const { return m_touched_by.count(); }
@ -64,15 +54,8 @@ public:
void addSample(int value);
void print(ostream& out) const;
private:
// Private Methods
// Private copy constructor and assignment operator
// AccessTraceForAddress(const AccessTraceForAddress& obj);
// AccessTraceForAddress& operator=(const AccessTraceForAddress& obj);
// Data Members (m_ prefix)
Address m_addr;
uint64 m_loads;
uint64 m_stores;
@ -84,20 +67,19 @@ private:
Histogram* m_histogram_ptr;
};
bool node_less_then_eq(const AccessTraceForAddress* n1, const AccessTraceForAddress* n2);
inline bool
node_less_then_eq(const AccessTraceForAddress* n1,
const AccessTraceForAddress* n2)
{
return n1->getTotal() > n2->getTotal();
}
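For reference: despite the "less_then_eq" name, the comparator above compares with '>', so ordering records by it puts the most frequently accessed addresses first; this is what lets the PrioHeap drained by printSorted() in AddressProfiler.cc return the hottest records first. A minimal illustration, not part of this commit, using std::sort in place of the gems_common PrioHeap:

    #include <algorithm>
    #include <vector>
    #include "mem/ruby/profiler/AccessTraceForAddress.hh"

    // Sorting with the inverted comparator puts the record with the
    // largest getTotal() at the front, mirroring what extractMin()
    // hands back first in printSorted().
    void
    printHottestFirst(std::vector<AccessTraceForAddress*>& records)
    {
        std::sort(records.begin(), records.end(), node_less_then_eq);
        // records[0] now has the highest access total
    }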
// Output operator declaration
ostream& operator<<(ostream& out, const AccessTraceForAddress& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
ostream& operator<<(ostream& out, const AccessTraceForAddress& obj)
inline ostream&
operator<<(ostream& out, const AccessTraceForAddress& obj)
{
obj.print(out);
out << flush;
return out;
}
#endif //ACCESSTRACEFORADDRESS_H
#endif // __MEM_RUBY_PROFILER_ACCESSTRACEFORADDRESS_HH__

View file: mem/ruby/profiler/AddressProfiler.cc

@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@ -27,201 +26,28 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* AddressProfiler.cc
*
* Description: See AddressProfiler.hh
*
* $Id$
*
*/
#include "mem/ruby/profiler/AddressProfiler.hh"
#include "mem/gems_common/Map.hh"
#include "mem/gems_common/PrioHeap.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/ruby/profiler/AccessTraceForAddress.hh"
#include "mem/gems_common/PrioHeap.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/profiler/AddressProfiler.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/System.hh"
typedef AddressProfiler::AddressMap AddressMap;
// Helper functions
static AccessTraceForAddress& lookupTraceForAddress(const Address& addr,
Map<Address,
AccessTraceForAddress>* record_map);
static void printSorted(ostream& out,
int num_of_sequencers,
const Map<Address, AccessTraceForAddress>* record_map,
string description);
AddressProfiler::AddressProfiler(int num_of_sequencers)
AccessTraceForAddress&
lookupTraceForAddress(const Address& addr, AddressMap* record_map)
{
m_dataAccessTrace = new Map<Address, AccessTraceForAddress>;
m_macroBlockAccessTrace = new Map<Address, AccessTraceForAddress>;
m_programCounterAccessTrace = new Map<Address, AccessTraceForAddress>;
m_retryProfileMap = new Map<Address, AccessTraceForAddress>;
m_num_of_sequencers = num_of_sequencers;
clearStats();
if (!record_map->exist(addr)) {
record_map->add(addr, AccessTraceForAddress(addr));
}
return record_map->lookup(addr);
}
AddressProfiler::~AddressProfiler()
{
delete m_dataAccessTrace;
delete m_macroBlockAccessTrace;
delete m_programCounterAccessTrace;
delete m_retryProfileMap;
}
void AddressProfiler::setHotLines(bool hot_lines){
m_hot_lines = hot_lines;
}
void AddressProfiler::setAllInstructions(bool all_instructions){
m_all_instructions = all_instructions;
}
void AddressProfiler::printStats(ostream& out) const
{
if (m_hot_lines) {
out << endl;
out << "AddressProfiler Stats" << endl;
out << "---------------------" << endl;
out << endl;
out << "sharing_misses: " << m_sharing_miss_counter << endl;
out << "getx_sharing_histogram: " << m_getx_sharing_histogram << endl;
out << "gets_sharing_histogram: " << m_gets_sharing_histogram << endl;
out << endl;
out << "Hot Data Blocks" << endl;
out << "---------------" << endl;
out << endl;
printSorted(out, m_num_of_sequencers, m_dataAccessTrace, "block_address");
out << endl;
out << "Hot MacroData Blocks" << endl;
out << "--------------------" << endl;
out << endl;
printSorted(out, m_num_of_sequencers, m_macroBlockAccessTrace, "macroblock_address");
out << "Hot Instructions" << endl;
out << "----------------" << endl;
out << endl;
printSorted(out, m_num_of_sequencers, m_programCounterAccessTrace, "pc_address");
}
if (m_all_instructions){
out << endl;
out << "All Instructions Profile:" << endl;
out << "-------------------------" << endl;
out << endl;
printSorted(out, m_num_of_sequencers, m_programCounterAccessTrace, "pc_address");
out << endl;
}
if (m_retryProfileHisto.size() > 0) {
out << "Retry Profile" << endl;
out << "-------------" << endl;
out << endl;
out << "retry_histogram_absolute: " << m_retryProfileHisto << endl;
out << "retry_histogram_write: " << m_retryProfileHistoWrite << endl;
out << "retry_histogram_read: " << m_retryProfileHistoRead << endl;
out << "retry_histogram_percent: ";
m_retryProfileHisto.printPercent(out);
out << endl;
printSorted(out, m_num_of_sequencers, m_retryProfileMap, "block_address");
out << endl;
}
}
void AddressProfiler::clearStats()
{
// Clear the maps
m_sharing_miss_counter = 0;
m_dataAccessTrace->clear();
m_macroBlockAccessTrace->clear();
m_programCounterAccessTrace->clear();
m_retryProfileMap->clear();
m_retryProfileHisto.clear();
m_retryProfileHistoRead.clear();
m_retryProfileHistoWrite.clear();
m_getx_sharing_histogram.clear();
m_gets_sharing_histogram.clear();
}
void AddressProfiler::profileGetX(const Address& datablock, const Address& PC, const Set& owner, const Set& sharers, NodeID requestor)
{
Set indirection_set;
indirection_set.addSet(sharers);
indirection_set.addSet(owner);
indirection_set.remove(requestor);
int num_indirections = indirection_set.count();
m_getx_sharing_histogram.add(num_indirections);
bool indirection_miss = (num_indirections > 0);
addTraceSample(datablock, PC, CacheRequestType_ST, AccessModeType(0), requestor, indirection_miss);
}
void AddressProfiler::profileGetS(const Address& datablock, const Address& PC, const Set& owner, const Set& sharers, NodeID requestor)
{
Set indirection_set;
indirection_set.addSet(owner);
indirection_set.remove(requestor);
int num_indirections = indirection_set.count();
m_gets_sharing_histogram.add(num_indirections);
bool indirection_miss = (num_indirections > 0);
addTraceSample(datablock, PC, CacheRequestType_LD, AccessModeType(0), requestor, indirection_miss);
}
void AddressProfiler::addTraceSample(Address data_addr, Address pc_addr, CacheRequestType type, AccessModeType access_mode, NodeID id, bool sharing_miss)
{
if (m_all_instructions) {
if (sharing_miss) {
m_sharing_miss_counter++;
}
// record data address trace info
data_addr.makeLineAddress();
lookupTraceForAddress(data_addr, m_dataAccessTrace).update(type, access_mode, id, sharing_miss);
// record macro data address trace info
Address macro_addr(data_addr.maskLowOrderBits(10)); // 6 for datablock, 4 to make it 16x more coarse
lookupTraceForAddress(macro_addr, m_macroBlockAccessTrace).update(type, access_mode, id, sharing_miss);
// record program counter address trace info
lookupTraceForAddress(pc_addr, m_programCounterAccessTrace).update(type, access_mode, id, sharing_miss);
}
if (m_all_instructions) {
// This code is used if the address profiler is an all-instructions profiler
// record program counter address trace info
lookupTraceForAddress(pc_addr, m_programCounterAccessTrace).update(type, access_mode, id, sharing_miss);
}
}
void AddressProfiler::profileRetry(const Address& data_addr, AccessType type, int count)
{
m_retryProfileHisto.add(count);
if (type == AccessType_Read) {
m_retryProfileHistoRead.add(count);
} else {
m_retryProfileHistoWrite.add(count);
}
if (count > 1) {
lookupTraceForAddress(data_addr, m_retryProfileMap).addSample(count);
}
}
// ***** Normal Functions ******
static void printSorted(ostream& out,
int num_of_sequencers,
const Map<Address, AccessTraceForAddress>* record_map,
void
printSorted(ostream& out, int num_of_sequencers, const AddressMap* record_map,
string description)
{
const int records_printed = 100;
@ -241,7 +67,8 @@ static void printSorted(ostream& out,
else
out << "Total_data_misses_" << description << ": " << misses << endl;
out << "total | load store atomic | user supervisor | sharing | touched-by" << endl;
out << "total | load store atomic | user supervisor | sharing | touched-by"
<< endl;
Histogram remaining_records(1, 100);
Histogram all_records(1, 100);
@ -259,7 +86,7 @@ static void printSorted(ostream& out,
}
int counter = 0;
while((heap.size() > 0) && (counter < records_printed)) {
while (heap.size() > 0 && counter < records_printed) {
AccessTraceForAddress* record = heap.extractMin();
double percent = 100.0 * (record->getTotal() / double(misses));
out << description << " | " << percent << " % " << *record << endl;
@ -280,19 +107,212 @@ static void printSorted(ostream& out,
m_touched_weighted_vec[record->getTouchedBy()] += record->getTotal();
}
out << endl;
out << "all_records_" << description << ": " << all_records << endl;
out << "all_records_log_" << description << ": " << all_records_log << endl;
out << "remaining_records_" << description << ": " << remaining_records << endl;
out << "remaining_records_log_" << description << ": " << remaining_records_log << endl;
out << "touched_by_" << description << ": " << m_touched_vec << endl;
out << "touched_by_weighted_" << description << ": " << m_touched_weighted_vec << endl;
out << "all_records_" << description << ": "
<< all_records << endl
<< "all_records_log_" << description << ": "
<< all_records_log << endl
<< "remaining_records_" << description << ": "
<< remaining_records << endl
<< "remaining_records_log_" << description << ": "
<< remaining_records_log << endl
<< "touched_by_" << description << ": "
<< m_touched_vec << endl
<< "touched_by_weighted_" << description << ": "
<< m_touched_weighted_vec << endl
<< endl;
}
AddressProfiler::AddressProfiler(int num_of_sequencers)
{
m_dataAccessTrace = new AddressMap;
m_macroBlockAccessTrace = new AddressMap;
m_programCounterAccessTrace = new AddressMap;
m_retryProfileMap = new AddressMap;
m_num_of_sequencers = num_of_sequencers;
clearStats();
}
AddressProfiler::~AddressProfiler()
{
delete m_dataAccessTrace;
delete m_macroBlockAccessTrace;
delete m_programCounterAccessTrace;
delete m_retryProfileMap;
}
void
AddressProfiler::setHotLines(bool hot_lines)
{
m_hot_lines = hot_lines;
}
void
AddressProfiler::setAllInstructions(bool all_instructions)
{
m_all_instructions = all_instructions;
}
void
AddressProfiler::printStats(ostream& out) const
{
if (m_hot_lines) {
out << endl;
out << "AddressProfiler Stats" << endl;
out << "---------------------" << endl;
out << endl;
out << "sharing_misses: " << m_sharing_miss_counter << endl;
out << "getx_sharing_histogram: " << m_getx_sharing_histogram << endl;
out << "gets_sharing_histogram: " << m_gets_sharing_histogram << endl;
out << endl;
out << "Hot Data Blocks" << endl;
out << "---------------" << endl;
out << endl;
printSorted(out, m_num_of_sequencers, m_dataAccessTrace,
"block_address");
out << endl;
out << "Hot MacroData Blocks" << endl;
out << "--------------------" << endl;
out << endl;
printSorted(out, m_num_of_sequencers, m_macroBlockAccessTrace,
"macroblock_address");
out << "Hot Instructions" << endl;
out << "----------------" << endl;
out << endl;
printSorted(out, m_num_of_sequencers, m_programCounterAccessTrace,
"pc_address");
}
if (m_all_instructions) {
out << endl;
out << "All Instructions Profile:" << endl;
out << "-------------------------" << endl;
out << endl;
printSorted(out, m_num_of_sequencers, m_programCounterAccessTrace,
"pc_address");
out << endl;
}
static AccessTraceForAddress& lookupTraceForAddress(const Address& addr, Map<Address, AccessTraceForAddress>* record_map)
if (m_retryProfileHisto.size() > 0) {
out << "Retry Profile" << endl;
out << "-------------" << endl;
out << endl;
out << "retry_histogram_absolute: " << m_retryProfileHisto << endl;
out << "retry_histogram_write: " << m_retryProfileHistoWrite << endl;
out << "retry_histogram_read: " << m_retryProfileHistoRead << endl;
out << "retry_histogram_percent: ";
m_retryProfileHisto.printPercent(out);
out << endl;
printSorted(out, m_num_of_sequencers, m_retryProfileMap,
"block_address");
out << endl;
}
}
void
AddressProfiler::clearStats()
{
if(record_map->exist(addr) == false){
record_map->add(addr, AccessTraceForAddress(addr));
// Clear the maps
m_sharing_miss_counter = 0;
m_dataAccessTrace->clear();
m_macroBlockAccessTrace->clear();
m_programCounterAccessTrace->clear();
m_retryProfileMap->clear();
m_retryProfileHisto.clear();
m_retryProfileHistoRead.clear();
m_retryProfileHistoWrite.clear();
m_getx_sharing_histogram.clear();
m_gets_sharing_histogram.clear();
}
void
AddressProfiler::profileGetX(const Address& datablock, const Address& PC,
const Set& owner, const Set& sharers,
NodeID requestor)
{
Set indirection_set;
indirection_set.addSet(sharers);
indirection_set.addSet(owner);
indirection_set.remove(requestor);
int num_indirections = indirection_set.count();
m_getx_sharing_histogram.add(num_indirections);
bool indirection_miss = (num_indirections > 0);
addTraceSample(datablock, PC, CacheRequestType_ST, AccessModeType(0),
requestor, indirection_miss);
}
void
AddressProfiler::profileGetS(const Address& datablock, const Address& PC,
const Set& owner, const Set& sharers,
NodeID requestor)
{
Set indirection_set;
indirection_set.addSet(owner);
indirection_set.remove(requestor);
int num_indirections = indirection_set.count();
m_gets_sharing_histogram.add(num_indirections);
bool indirection_miss = (num_indirections > 0);
addTraceSample(datablock, PC, CacheRequestType_LD, AccessModeType(0),
requestor, indirection_miss);
}
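A worked example of the indirection arithmetic above (not part of this commit): the indirection set is the sharers plus the owner for a GETX, or just the owner for a GETS, minus the requestor itself, and any non-empty set is profiled as a sharing miss. std::set stands in here for ruby's Set bitset:

    #include <set>

    // GETX by node 1 on a line owned by node 3 and shared by {1, 2}.
    int
    exampleGetxIndirections()
    {
        std::set<int> indirection;
        indirection.insert(1);     // sharer (the requestor itself)
        indirection.insert(2);     // sharer
        indirection.insert(3);     // owner
        indirection.erase(1);      // drop the requestor
        // {2, 3} remains: two other caches must be contacted, so the
        // request counts as an indirection (sharing) miss.
        return static_cast<int>(indirection.size());   // 2
    }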
void
AddressProfiler::addTraceSample(Address data_addr, Address pc_addr,
CacheRequestType type,
AccessModeType access_mode, NodeID id,
bool sharing_miss)
{
if (m_all_instructions) {
if (sharing_miss) {
m_sharing_miss_counter++;
}
// record data address trace info
data_addr.makeLineAddress();
lookupTraceForAddress(data_addr, m_dataAccessTrace).
update(type, access_mode, id, sharing_miss);
// record macro data address trace info
// 6 for datablock, 4 to make it 16x more coarse
Address macro_addr(data_addr.maskLowOrderBits(10));
lookupTraceForAddress(macro_addr, m_macroBlockAccessTrace).
update(type, access_mode, id, sharing_miss);
// record program counter address trace info
lookupTraceForAddress(pc_addr, m_programCounterAccessTrace).
update(type, access_mode, id, sharing_miss);
}
if (m_all_instructions) {
// This code is used if the address profiler is an
// all-instructions profiler record program counter address
// trace info
lookupTraceForAddress(pc_addr, m_programCounterAccessTrace).
update(type, access_mode, id, sharing_miss);
}
}
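The macro-block grouping above hinges on Address::maskLowOrderBits(10): with a 64-byte data block (6 address bits), the extra 4 bits fold 2^4 = 16 consecutive lines into one macro block. A standalone sketch of the same arithmetic on a raw address, assuming maskLowOrderBits(n) simply zeroes the low n bits:

    #include <stdint.h>

    // Clear the low 6 + 4 = 10 address bits so that sixteen 64-byte
    // lines map to the same macro-block address.
    uint64_t
    macroBlockOf(uint64_t addr)
    {
        const int line_bits = 6;    // 64-byte data block
        const int extra_bits = 4;   // 16x coarser
        uint64_t mask = ~((uint64_t(1) << (line_bits + extra_bits)) - 1);
        return addr & mask;         // e.g. 0x12345 -> 0x12000
    }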
void
AddressProfiler::profileRetry(const Address& data_addr, AccessType type,
int count)
{
m_retryProfileHisto.add(count);
if (type == AccessType_Read) {
m_retryProfileHistoRead.add(count);
} else {
m_retryProfileHistoWrite.add(count);
}
if (count > 1) {
lookupTraceForAddress(data_addr, m_retryProfileMap).addSample(count);
}
return record_map->lookup(addr);
}

View file: mem/ruby/profiler/AddressProfiler.hh

@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@ -27,70 +26,64 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* AddressProfiler.hh
*
* Description:
*
* $Id$
*
*/
#ifndef __MEM_RUBY_PROFILER_ADDRESSPROFILER_HH__
#define __MEM_RUBY_PROFILER_ADDRESSPROFILER_HH__
#ifndef ADDRESSPROFILER_H
#define ADDRESSPROFILER_H
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/NodeID.hh"
#include "mem/ruby/common/Histogram.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/AccessType.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Histogram.hh"
#include "mem/ruby/system/NodeID.hh"
class AccessTraceForAddress;
class Set;
template <class KEY_TYPE, class VALUE_TYPE> class Map;
class AddressProfiler {
class AddressProfiler
{
public:
// Constructors
AddressProfiler(int num_of_sequencers);
typedef Map<Address, AccessTraceForAddress> AddressMap;
// Destructor
public:
AddressProfiler(int num_of_sequencers);
~AddressProfiler();
// Public Methods
void printStats(ostream& out) const;
void clearStats();
void addTraceSample(Address data_addr, Address pc_addr, CacheRequestType type, AccessModeType access_mode, NodeID id, bool sharing_miss);
void addTraceSample(Address data_addr, Address pc_addr,
CacheRequestType type, AccessModeType access_mode,
NodeID id, bool sharing_miss);
void profileRetry(const Address& data_addr, AccessType type, int count);
void profileGetX(const Address& datablock, const Address& PC, const Set& owner, const Set& sharers, NodeID requestor);
void profileGetS(const Address& datablock, const Address& PC, const Set& owner, const Set& sharers, NodeID requestor);
void profileGetX(const Address& datablock, const Address& PC,
const Set& owner, const Set& sharers, NodeID requestor);
void profileGetS(const Address& datablock, const Address& PC,
const Set& owner, const Set& sharers, NodeID requestor);
void print(ostream& out) const;
//added by SS
void setHotLines(bool hot_lines);
void setAllInstructions(bool all_instructions);
private:
// Private Methods
private:
// Private copy constructor and assignment operator
AddressProfiler(const AddressProfiler& obj);
AddressProfiler& operator=(const AddressProfiler& obj);
// Data Members (m_ prefix)
int64 m_sharing_miss_counter;
Map<Address, AccessTraceForAddress>* m_dataAccessTrace;
Map<Address, AccessTraceForAddress>* m_macroBlockAccessTrace;
Map<Address, AccessTraceForAddress>* m_programCounterAccessTrace;
Map<Address, AccessTraceForAddress>* m_retryProfileMap;
AddressMap* m_dataAccessTrace;
AddressMap* m_macroBlockAccessTrace;
AddressMap* m_programCounterAccessTrace;
AddressMap* m_retryProfileMap;
Histogram m_retryProfileHisto;
Histogram m_retryProfileHistoWrite;
Histogram m_retryProfileHistoRead;
Histogram m_getx_sharing_histogram;
Histogram m_gets_sharing_histogram;
//added by SS
bool m_hot_lines;
bool m_all_instructions;
@ -98,18 +91,12 @@ private:
int m_num_of_sequencers;
};
// Output operator declaration
ostream& operator<<(ostream& out, const AddressProfiler& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
ostream& operator<<(ostream& out, const AddressProfiler& obj)
inline ostream&
operator<<(ostream& out, const AddressProfiler& obj)
{
obj.print(out);
out << flush;
return out;
}
#endif //ADDRESSPROFILER_H
#endif // __MEM_RUBY_PROFILER_ADDRESSPROFILER_HH__

View file: mem/ruby/profiler/CacheProfiler.cc

@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@ -27,21 +26,12 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* CacheProfiler.C
*
* Description: See CacheProfiler.hh
*
* $Id$
*
*/
#include "mem/ruby/profiler/CacheProfiler.hh"
#include "mem/ruby/profiler/AccessTraceForAddress.hh"
#include "mem/gems_common/PrioHeap.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/gems_common/Vector.hh"
#include "mem/ruby/profiler/AccessTraceForAddress.hh"
#include "mem/ruby/profiler/CacheProfiler.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/System.hh"
CacheProfiler::CacheProfiler(const string& description)
{
@ -57,7 +47,8 @@ CacheProfiler::~CacheProfiler()
delete m_requestTypeVec_ptr;
}
void CacheProfiler::printStats(ostream& out) const
void
CacheProfiler::printStats(ostream& out) const
{
out << "Cache Stats: " << m_description << endl;
string description = " " + m_description;
@ -80,8 +71,11 @@ void CacheProfiler::printStats(ostream& out) const
if (requests > 0) {
for (int i = 0; i < int(CacheRequestType_NUM); i++) {
if (m_requestTypeVec_ptr->ref(i) > 0) {
out << description << "_request_type_" << CacheRequestType_to_string(CacheRequestType(i)) << ": "
<< (100.0 * double((m_requestTypeVec_ptr->ref(i)))) / double(requests)
out << description << "_request_type_"
<< CacheRequestType_to_string(CacheRequestType(i))
<< ": "
<< 100.0 * (double)m_requestTypeVec_ptr->ref(i) /
(double)requests
<< "%" << endl;
}
}
@ -90,18 +84,21 @@ void CacheProfiler::printStats(ostream& out) const
for (int i = 0; i < AccessModeType_NUM; i++){
if (m_accessModeTypeHistogram[i] > 0) {
out << description << "_access_mode_type_" << (AccessModeType) i << ": " << m_accessModeTypeHistogram[i]
<< " " << (100.0 * m_accessModeTypeHistogram[i]) / requests << "%" << endl;
out << description << "_access_mode_type_"
<< (AccessModeType) i << ": "
<< m_accessModeTypeHistogram[i] << " "
<< 100.0 * m_accessModeTypeHistogram[i] / requests
<< "%" << endl;
}
}
}
out << description << "_request_size: " << m_requestSize << endl;
out << endl;
}
void CacheProfiler::clearStats()
void
CacheProfiler::clearStats()
{
for (int i = 0; i < int(CacheRequestType_NUM); i++) {
m_requestTypeVec_ptr->ref(i) = 0;
@ -117,7 +114,10 @@ void CacheProfiler::clearStats()
}
}
void CacheProfiler::addStatSample(CacheRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit)
void
CacheProfiler::addStatSample(CacheRequestType requestType,
AccessModeType type, int msgSize,
PrefetchBit pfBit)
{
m_misses++;
@ -130,7 +130,8 @@ void CacheProfiler::addStatSample(CacheRequestType requestType, AccessModeType t
} else if (pfBit == PrefetchBit_Yes) {
m_prefetches++;
m_sw_prefetches++;
} else { // must be L1_HW || L2_HW prefetch
} else {
// must be L1_HW || L2_HW prefetch
m_prefetches++;
m_hw_prefetches++;
}

View file: mem/ruby/profiler/CacheProfiler.hh

@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@ -27,53 +26,40 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* CacheProfiler.hh
*
* Description:
*
* $Id$
*
*/
#ifndef CACHEPROFILER_H
#define CACHEPROFILER_H
#ifndef __MEM_RUBY_PROFILER_CACHEPROFILER_HH__
#define __MEM_RUBY_PROFILER_CACHEPROFILER_HH__
#include <iostream>
#include <string>
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/NodeID.hh"
#include "mem/ruby/common/Histogram.hh"
#include "mem/protocol/AccessModeType.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/CacheRequestType.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Histogram.hh"
#include "mem/ruby/system/NodeID.hh"
template <class TYPE> class Vector;
class CacheProfiler {
class CacheProfiler
{
public:
// Constructors
CacheProfiler(const std::string& description);
// Destructor
~CacheProfiler();
// Public Methods
void printStats(std::ostream& out) const;
void clearStats();
void addStatSample(CacheRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit);
void addStatSample(CacheRequestType requestType, AccessModeType type,
int msgSize, PrefetchBit pfBit);
void print(std::ostream& out) const;
private:
// Private Methods
private:
// Private copy constructor and assignment operator
CacheProfiler(const CacheProfiler& obj);
CacheProfiler& operator=(const CacheProfiler& obj);
// Data Members (m_ prefix)
std::string m_description;
Histogram m_requestSize;
int64 m_misses;
@ -86,18 +72,12 @@ private:
Vector <int>* m_requestTypeVec_ptr;
};
// Output operator declaration
std::ostream& operator<<(std::ostream& out, const CacheProfiler& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
std::ostream& operator<<(std::ostream& out, const CacheProfiler& obj)
inline std::ostream&
operator<<(std::ostream& out, const CacheProfiler& obj)
{
obj.print(out);
out << std::flush;
return out;
}
#endif //CACHEPROFILER_H
#endif // __MEM_RUBY_PROFILER_CACHEPROFILER_HH__

View file: mem/ruby/profiler/MemCntrlProfiler.cc

@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@ -32,19 +31,14 @@
using namespace std;
MemCntrlProfiler::MemCntrlProfiler(const string& description,
int banks_per_rank,
int ranks_per_dimm,
int dimms_per_channel)
int banks_per_rank, int ranks_per_dimm, int dimms_per_channel)
{
m_description = description;
m_banks_per_rank = banks_per_rank;
m_ranks_per_dimm = ranks_per_dimm;
m_dimms_per_channel = dimms_per_channel;
int totalBanks = banks_per_rank *
ranks_per_dimm *
dimms_per_channel;
int totalBanks = banks_per_rank * ranks_per_dimm * dimms_per_channel;
m_memBankCount.setSize(totalBanks);
clearStats();
@ -54,50 +48,65 @@ MemCntrlProfiler::~MemCntrlProfiler()
{
}
void MemCntrlProfiler::printStats(ostream& out) const
void
MemCntrlProfiler::printStats(ostream& out) const
{
if (m_memReq || m_memRefresh) { // if there's a memory controller at all
if (!m_memReq && !m_memRefresh) {
out << "Memory Controller: " << m_description
<< " no stats recorded." << endl
<< endl
<< endl;
return;
}
// if there's a memory controller at all
uint64 total_stalls = m_memInputQ + m_memBankQ + m_memWaitCycles;
double stallsPerReq = total_stalls * 1.0 / m_memReq;
out << "Memory controller: " << m_description << ":" << endl;
out << " memory_total_requests: " << m_memReq << endl; // does not include refreshes
// does not include refreshes
out << " memory_total_requests: " << m_memReq << endl;
out << " memory_reads: " << m_memRead << endl;
out << " memory_writes: " << m_memWrite << endl;
out << " memory_refreshes: " << m_memRefresh << endl;
out << " memory_total_request_delays: " << total_stalls << endl;
out << " memory_delays_per_request: " << stallsPerReq << endl;
out << " memory_delays_in_input_queue: " << m_memInputQ << endl;
out << " memory_delays_behind_head_of_bank_queue: " << m_memBankQ << endl;
out << " memory_delays_stalled_at_head_of_bank_queue: " << m_memWaitCycles << endl;
// Note: The following "memory stalls" entries are a breakdown of the
// cycles which already showed up in m_memWaitCycles. The order is
// significant; it is the priority of attributing the cycles.
// For example, bank_busy is before arbitration because if the bank was
// busy, we didn't even check arbitration.
// Note: "not old enough" means that since we grouped waiting heads-of-queues
// into batches to avoid starvation, a request in a newer batch
// didn't try to arbitrate yet because there are older requests waiting.
out << " memory_delays_behind_head_of_bank_queue: "
<< m_memBankQ << endl;
out << " memory_delays_stalled_at_head_of_bank_queue: "
<< m_memWaitCycles << endl;
// Note: The following "memory stalls" entries are a breakdown of
// the cycles which already showed up in m_memWaitCycles. The
// order is significant; it is the priority of attributing the
// cycles. For example, bank_busy is before arbitration because
// if the bank was busy, we didn't even check arbitration.
// Note: "not old enough" means that since we grouped waiting
// heads-of-queues into batches to avoid starvation, a request in
// a newer batch didn't try to arbitrate yet because there are
// older requests waiting.
out << " memory_stalls_for_bank_busy: " << m_memBankBusy << endl;
out << " memory_stalls_for_random_busy: " << m_memRandBusy << endl;
out << " memory_stalls_for_anti_starvation: " << m_memNotOld << endl;
out << " memory_stalls_for_arbitration: " << m_memArbWait << endl;
out << " memory_stalls_for_bus: " << m_memBusBusy << endl;
out << " memory_stalls_for_tfaw: " << m_memTfawBusy << endl;
out << " memory_stalls_for_read_write_turnaround: " << m_memReadWriteBusy << endl;
out << " memory_stalls_for_read_read_turnaround: " << m_memDataBusBusy << endl;
out << " memory_stalls_for_read_write_turnaround: "
<< m_memReadWriteBusy << endl;
out << " memory_stalls_for_read_read_turnaround: "
<< m_memDataBusBusy << endl;
out << " accesses_per_bank: ";
for (int bank = 0; bank < m_memBankCount.size(); bank++) {
out << m_memBankCount[bank] << " ";
}
} else {
out << "Memory Controller: " << m_description
<< " no stats recorded." << endl;
}
out << endl;
out << endl;
}
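The note above pins down the priority with which a stalled cycle is charged to a cause. A hypothetical sketch of that attribution, not taken from this commit: the real decision is made in the memory controller model that calls this profiler, the flag names below are invented for illustration, and the check order simply follows the order the stats are printed above (only bank-busy-before-arbitration is stated explicitly in the note):

    #include "mem/ruby/profiler/MemCntrlProfiler.hh"

    // Charge each stalled head-of-bank-queue cycle to exactly one
    // cause; a busy bank masks everything else, so arbitration is only
    // examined when the bank was free.
    void
    attributeStallCycle(MemCntrlProfiler& prof, bool bank_busy,
                        bool rand_busy, bool not_old_enough, bool lost_arb)
    {
        if (bank_busy)
            prof.profileMemBankBusy();
        else if (rand_busy)
            prof.profileMemRandBusy();
        else if (not_old_enough)
            prof.profileMemNotOld();    // anti-starvation batching
        else if (lost_arb)
            prof.profileMemArbWait(1);  // one cycle lost to arbitration
    }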
void MemCntrlProfiler::clearStats()
void
MemCntrlProfiler::clearStats()
{
m_memReq = 0;
m_memBankBusy = 0;
@ -115,71 +124,99 @@ void MemCntrlProfiler::clearStats()
m_memRandBusy = 0;
m_memNotOld = 0;
for (int bank=0;
bank < m_memBankCount.size();
bank++) {
for (int bank = 0; bank < m_memBankCount.size(); bank++) {
m_memBankCount[bank] = 0;
}
}
void MemCntrlProfiler::profileMemReq(int bank) {
void
MemCntrlProfiler::profileMemReq(int bank)
{
m_memReq++;
m_memBankCount[bank]++;
}
void MemCntrlProfiler::profileMemBankBusy() {
void
MemCntrlProfiler::profileMemBankBusy()
{
m_memBankBusy++;
}
void MemCntrlProfiler::profileMemBusBusy() {
void
MemCntrlProfiler::profileMemBusBusy()
{
m_memBusBusy++;
}
void MemCntrlProfiler::profileMemReadWriteBusy() {
void
MemCntrlProfiler::profileMemReadWriteBusy()
{
m_memReadWriteBusy++;
}
void MemCntrlProfiler::profileMemDataBusBusy() {
void
MemCntrlProfiler::profileMemDataBusBusy()
{
m_memDataBusBusy++;
}
void MemCntrlProfiler::profileMemTfawBusy() {
void
MemCntrlProfiler::profileMemTfawBusy()
{
m_memTfawBusy++;
}
void MemCntrlProfiler::profileMemRefresh() {
void
MemCntrlProfiler::profileMemRefresh()
{
m_memRefresh++;
}
void MemCntrlProfiler::profileMemRead() {
void
MemCntrlProfiler::profileMemRead()
{
m_memRead++;
}
void MemCntrlProfiler::profileMemWrite() {
void
MemCntrlProfiler::profileMemWrite()
{
m_memWrite++;
}
void MemCntrlProfiler::profileMemWaitCycles(int cycles) {
void
MemCntrlProfiler::profileMemWaitCycles(int cycles)
{
m_memWaitCycles += cycles;
}
void MemCntrlProfiler::profileMemInputQ(int cycles) {
void
MemCntrlProfiler::profileMemInputQ(int cycles)
{
m_memInputQ += cycles;
}
void MemCntrlProfiler::profileMemBankQ(int cycles) {
void
MemCntrlProfiler::profileMemBankQ(int cycles)
{
m_memBankQ += cycles;
}
void MemCntrlProfiler::profileMemArbWait(int cycles) {
void
MemCntrlProfiler::profileMemArbWait(int cycles)
{
m_memArbWait += cycles;
}
void MemCntrlProfiler::profileMemRandBusy() {
void
MemCntrlProfiler::profileMemRandBusy()
{
m_memRandBusy++;
}
void MemCntrlProfiler::profileMemNotOld() {
void
MemCntrlProfiler::profileMemNotOld()
{
m_memNotOld++;
}

View file: mem/ruby/profiler/MemCntrlProfiler.hh

@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@ -27,17 +26,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* MemCntrlProfiler.hh
*
* Description:
*
* $Id$
*
*/
#ifndef MEM_CNTRL_PROFILER_H
#define MEM_CNTRL_PROFILER_H
#ifndef __MEM_RUBY_PROFILER_MEMCNTRLPROFILER_HH__
#define __MEM_RUBY_PROFILER_MEMCNTRLPROFILER_HH__
#include <iostream>
#include <string>
@ -47,18 +37,13 @@
template <class TYPE> class Vector;
class MemCntrlProfiler {
class MemCntrlProfiler
{
public:
// Constructors
MemCntrlProfiler(const std::string& description,
int banks_per_rank,
int ranks_per_dimm,
int dimms_per_channel);
// Destructor
MemCntrlProfiler(const std::string& description, int banks_per_rank,
int ranks_per_dimm, int dimms_per_channel);
~MemCntrlProfiler();
// Public Methods
void printStats(std::ostream& out) const;
void clearStats();
@ -79,14 +64,12 @@ public:
void profileMemNotOld();
void print(std::ostream& out) const;
private:
// Private Methods
private:
// Private copy constructor and assignment operator
MemCntrlProfiler(const MemCntrlProfiler& obj);
MemCntrlProfiler& operator=(const MemCntrlProfiler& obj);
// Data Members (m_ prefix)
std::string m_description;
uint64 m_memReq;
uint64 m_memBankBusy;
@ -109,18 +92,12 @@ private:
int m_dimms_per_channel;
};
// Output operator declaration
std::ostream& operator<<(std::ostream& out, const MemCntrlProfiler& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
std::ostream& operator<<(std::ostream& out, const MemCntrlProfiler& obj)
inline std::ostream&
operator<<(std::ostream& out, const MemCntrlProfiler& obj)
{
obj.print(out);
out << std::flush;
return out;
}
#endif //MEM_CNTRL_PROFILER_H
#endif // __MEM_RUBY_PROFILER_MEMCNTRLPROFILER_HH__

View file: mem/ruby/profiler/Profiler.cc

@ -42,31 +42,21 @@
----------------------------------------------------------------------
*/
/*
* Profiler.cc
*
* Description: See Profiler.hh
*
* $Id$
*
*/
// Allows use of times() library call, which determines virtual runtime
#include <sys/resource.h>
#include <sys/times.h>
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/profiler/AddressProfiler.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/gems_common/PrioHeap.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/gems_common/util.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Debug.hh"
#include "mem/gems_common/PrioHeap.hh"
#include "mem/gems_common/util.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/MachineType.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/common/Debug.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/profiler/AddressProfiler.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/system/System.hh"
extern std::ostream* debug_cout_ptr;
@ -114,7 +104,8 @@ Profiler::~Profiler()
delete m_requestProfileMap_ptr;
}
void Profiler::wakeup()
void
Profiler::wakeup()
{
// FIXME - avoid the repeated code
@ -122,43 +113,36 @@ void Profiler::wakeup()
perProcCycleCount.setSize(m_num_of_sequencers);
for (int i = 0; i < m_num_of_sequencers; i++) {
perProcCycleCount[i] = g_system_ptr->getCycleCount(i) - m_cycles_executed_at_start[i] + 1;
perProcCycleCount[i] =
g_system_ptr->getCycleCount(i) - m_cycles_executed_at_start[i] + 1;
// The +1 allows us to avoid division by zero
}
(*m_periodic_output_file_ptr) << "ruby_cycles: "
<< g_eventQueue_ptr->getTime()-m_ruby_start
<< endl;
ostream &out = *m_periodic_output_file_ptr;
(*m_periodic_output_file_ptr) << "mbytes_resident: "
<< process_memory_resident()
<< endl;
(*m_periodic_output_file_ptr) << "mbytes_total: "
<< process_memory_total()
<< endl;
out << "ruby_cycles: " << g_eventQueue_ptr->getTime()-m_ruby_start << endl
<< "mbytes_resident: " << process_memory_resident() << endl
<< "mbytes_total: " << process_memory_total() << endl;
if (process_memory_total() > 0) {
(*m_periodic_output_file_ptr) << "resident_ratio: "
<< process_memory_resident()/process_memory_total()
<< endl;
out << "resident_ratio: "
<< process_memory_resident() / process_memory_total() << endl;
}
(*m_periodic_output_file_ptr) << "miss_latency: "
<< m_allMissLatencyHistogram
<< endl;
out << "miss_latency: " << m_allMissLatencyHistogram << endl;
*m_periodic_output_file_ptr << endl;
out << endl;
if (m_all_instructions) {
m_inst_profiler_ptr->printStats(*m_periodic_output_file_ptr);
m_inst_profiler_ptr->printStats(out);
}
//g_system_ptr->getNetwork()->printStats(*m_periodic_output_file_ptr);
//g_system_ptr->getNetwork()->printStats(out);
g_eventQueue_ptr->scheduleEvent(this, m_stats_period);
}
void Profiler::setPeriodicStatsFile(const string& filename)
void
Profiler::setPeriodicStatsFile(const string& filename)
{
cout << "Recording periodic statistics to file '" << filename << "' every "
<< m_stats_period << " Ruby cycles" << endl;
@ -171,7 +155,8 @@ void Profiler::setPeriodicStatsFile(const string& filename)
g_eventQueue_ptr->scheduleEvent(this, 1);
}
void Profiler::setPeriodicStatsInterval(integer_t period)
void
Profiler::setPeriodicStatsInterval(integer_t period)
{
cout << "Recording periodic statistics every " << m_stats_period
<< " Ruby cycles" << endl;
@ -180,7 +165,8 @@ void Profiler::setPeriodicStatsInterval(integer_t period)
g_eventQueue_ptr->scheduleEvent(this, 1);
}
void Profiler::printConfig(ostream& out) const
void
Profiler::printConfig(ostream& out) const
{
out << endl;
out << "Profiler Configuration" << endl;
@ -188,12 +174,14 @@ void Profiler::printConfig(ostream& out) const
out << "periodic_stats_period: " << m_stats_period << endl;
}
void Profiler::print(ostream& out) const
void
Profiler::print(ostream& out) const
{
out << "[Profiler]";
}
void Profiler::printStats(ostream& out, bool short_stats)
void
Profiler::printStats(ostream& out, bool short_stats)
{
out << endl;
if (short_stats) {
@ -243,14 +231,14 @@ void Profiler::printStats(ostream& out, bool short_stats)
<< process_memory_resident()/process_memory_total() << endl;
}
out << endl;
}
Vector<integer_t> perProcCycleCount;
perProcCycleCount.setSize(m_num_of_sequencers);
for (int i = 0; i < m_num_of_sequencers; i++) {
perProcCycleCount[i] = g_system_ptr->getCycleCount(i) - m_cycles_executed_at_start[i] + 1;
perProcCycleCount[i] =
g_system_ptr->getCycleCount(i) - m_cycles_executed_at_start[i] + 1;
// The +1 allows us to avoid division by zero
}
@ -261,7 +249,8 @@ void Profiler::printStats(ostream& out, bool short_stats)
if (!short_stats) {
out << "Busy Controller Counts:" << endl;
for (int i = 0; i < MachineType_NUM; i++) {
for(int j=0; j < MachineType_base_count((MachineType)i); j++) {
int size = MachineType_base_count((MachineType)i);
for (int j = 0; j < size; j++) {
MachineID machID;
machID.type = (MachineType)i;
machID.num = j;
@ -277,7 +266,8 @@ void Profiler::printStats(ostream& out, bool short_stats)
out << "Busy Bank Count:" << m_busyBankCount << endl;
out << endl;
out << "sequencer_requests_outstanding: " << m_sequencer_requests << endl;
out << "sequencer_requests_outstanding: "
<< m_sequencer_requests << endl;
out << endl;
}
@ -287,12 +277,14 @@ void Profiler::printStats(ostream& out, bool short_stats)
out << "miss_latency: " << m_allMissLatencyHistogram << endl;
for (int i = 0; i < m_missLatencyHistograms.size(); i++) {
if (m_missLatencyHistograms[i].size() > 0) {
out << "miss_latency_" << RubyRequestType(i) << ": " << m_missLatencyHistograms[i] << endl;
out << "miss_latency_" << RubyRequestType(i) << ": "
<< m_missLatencyHistograms[i] << endl;
}
}
for (int i = 0; i < m_machLatencyHistograms.size(); i++) {
if (m_machLatencyHistograms[i].size() > 0) {
out << "miss_latency_" << GenericMachineType(i) << ": " << m_machLatencyHistograms[i] << endl;
out << "miss_latency_" << GenericMachineType(i) << ": "
<< m_machLatencyHistograms[i] << endl;
}
}
@ -303,36 +295,53 @@ void Profiler::printStats(ostream& out, bool short_stats)
out << "prefetch_latency: " << m_allSWPrefetchLatencyHistogram << endl;
for (int i = 0; i < m_SWPrefetchLatencyHistograms.size(); i++) {
if (m_SWPrefetchLatencyHistograms[i].size() > 0) {
out << "prefetch_latency_" << CacheRequestType(i) << ": " << m_SWPrefetchLatencyHistograms[i] << endl;
out << "prefetch_latency_" << CacheRequestType(i) << ": "
<< m_SWPrefetchLatencyHistograms[i] << endl;
}
}
for (int i = 0; i < m_SWPrefetchMachLatencyHistograms.size(); i++) {
if (m_SWPrefetchMachLatencyHistograms[i].size() > 0) {
out << "prefetch_latency_" << GenericMachineType(i) << ": " << m_SWPrefetchMachLatencyHistograms[i] << endl;
out << "prefetch_latency_" << GenericMachineType(i) << ": "
<< m_SWPrefetchMachLatencyHistograms[i] << endl;
}
}
out << "prefetch_latency_L2Miss:" << m_SWPrefetchL2MissLatencyHistogram << endl;
out << "prefetch_latency_L2Miss:"
<< m_SWPrefetchL2MissLatencyHistogram << endl;
if (m_all_sharing_histogram.size() > 0) {
out << "all_sharing: " << m_all_sharing_histogram << endl;
out << "read_sharing: " << m_read_sharing_histogram << endl;
out << "write_sharing: " << m_write_sharing_histogram << endl;
out << "all_sharing_percent: "; m_all_sharing_histogram.printPercent(out); out << endl;
out << "read_sharing_percent: "; m_read_sharing_histogram.printPercent(out); out << endl;
out << "write_sharing_percent: "; m_write_sharing_histogram.printPercent(out); out << endl;
out << "all_sharing_percent: ";
m_all_sharing_histogram.printPercent(out);
out << endl;
out << "read_sharing_percent: ";
m_read_sharing_histogram.printPercent(out);
out << endl;
out << "write_sharing_percent: ";
m_write_sharing_histogram.printPercent(out);
out << endl;
int64 total_miss = m_cache_to_cache + m_memory_to_cache;
out << "all_misses: " << total_miss << endl;
out << "cache_to_cache_misses: " << m_cache_to_cache << endl;
out << "memory_to_cache_misses: " << m_memory_to_cache << endl;
out << "cache_to_cache_percent: " << 100.0 * (double(m_cache_to_cache) / double(total_miss)) << endl;
out << "memory_to_cache_percent: " << 100.0 * (double(m_memory_to_cache) / double(total_miss)) << endl;
out << "cache_to_cache_percent: "
<< 100.0 * (double(m_cache_to_cache) / double(total_miss))
<< endl;
out << "memory_to_cache_percent: "
<< 100.0 * (double(m_memory_to_cache) / double(total_miss))
<< endl;
out << endl;
}
if (m_outstanding_requests.size() > 0) {
out << "outstanding_requests: "; m_outstanding_requests.printPercent(out); out << endl;
out << "outstanding_requests: ";
m_outstanding_requests.printPercent(out);
out << endl;
out << endl;
}
}
@ -346,7 +355,8 @@ void Profiler::printStats(ostream& out, bool short_stats)
requestProfileKeys.sortVector();
for (int i = 0; i < requestProfileKeys.size(); i++) {
int temp_int = m_requestProfileMap_ptr->lookup(requestProfileKeys[i]);
int temp_int =
m_requestProfileMap_ptr->lookup(requestProfileKeys[i]);
double percent = (100.0 * double(temp_int)) / double(m_requests);
while (requestProfileKeys[i] != "") {
out << setw(10) << string_split(requestProfileKeys[i], ':');
@ -370,17 +380,19 @@ void Profiler::printStats(ostream& out, bool short_stats)
out << "Message Delayed Cycles" << endl;
out << "----------------------" << endl;
out << "Total_delay_cycles: " << m_delayedCyclesHistogram << endl;
out << "Total_nonPF_delay_cycles: " << m_delayedCyclesNonPFHistogram << endl;
out << "Total_nonPF_delay_cycles: "
<< m_delayedCyclesNonPFHistogram << endl;
for (int i = 0; i < m_delayedCyclesVCHistograms.size(); i++) {
out << " virtual_network_" << i << "_delay_cycles: " << m_delayedCyclesVCHistograms[i] << endl;
out << " virtual_network_" << i << "_delay_cycles: "
<< m_delayedCyclesVCHistograms[i] << endl;
}
printResourceUsage(out);
}
}
void Profiler::printResourceUsage(ostream& out) const
void
Profiler::printResourceUsage(ostream& out) const
{
out << endl;
out << "Resource Usage" << endl;
@ -401,7 +413,8 @@ void Profiler::printResourceUsage(ostream& out) const
out << "block_outputs: " << usage.ru_oublock << endl;
}
void Profiler::clearStats()
void
Profiler::clearStats()
{
m_ruby_start = g_eventQueue_ptr->getTime();
@ -416,8 +429,9 @@ void Profiler::clearStats()
m_busyControllerCount.setSize(MachineType_NUM); // all machines
for (int i = 0; i < MachineType_NUM; i++) {
m_busyControllerCount[i].setSize(MachineType_base_count((MachineType)i));
for(int j=0; j < MachineType_base_count((MachineType)i); j++) {
int size = MachineType_base_count((MachineType)i);
m_busyControllerCount[i].setSize(size);
for (int j = 0; j < size; j++) {
m_busyControllerCount[i][j] = 0;
}
}
@ -425,8 +439,9 @@ void Profiler::clearStats()
m_delayedCyclesHistogram.clear();
m_delayedCyclesNonPFHistogram.clear();
m_delayedCyclesVCHistograms.setSize(RubySystem::getNetwork()->getNumberOfVirtualNetworks());
for (int i = 0; i < RubySystem::getNetwork()->getNumberOfVirtualNetworks(); i++) {
int size = RubySystem::getNetwork()->getNumberOfVirtualNetworks();
m_delayedCyclesVCHistograms.setSize(size);
for (int i = 0; i < size; i++) {
m_delayedCyclesVCHistograms[i].clear();
}
@ -466,26 +481,34 @@ void Profiler::clearStats()
m_outstanding_requests.clear();
m_outstanding_persistent_requests.clear();
// Flush the prefetches through the system - used so that there are no outstanding requests after stats are cleared
// Flush the prefetches through the system - used so that there
// are no outstanding requests after stats are cleared
//g_eventQueue_ptr->triggerAllEvents();
// update the start time
m_ruby_start = g_eventQueue_ptr->getTime();
}
void Profiler::addAddressTraceSample(const CacheMsg& msg, NodeID id)
void
Profiler::addAddressTraceSample(const CacheMsg& msg, NodeID id)
{
if (msg.getType() != CacheRequestType_IFETCH) {
// Note: The following line should be commented out if you
// want to use the special profiling that is part of the GS320
// protocol
// Note: The following line should be commented out if you want to
// use the special profiling that is part of the GS320 protocol
// NOTE: Unless PROFILE_HOT_LINES is enabled, nothing will be profiled by the AddressProfiler
m_address_profiler_ptr->addTraceSample(msg.getLineAddress(), msg.getProgramCounter(), msg.getType(), msg.getAccessMode(), id, false);
// NOTE: Unless PROFILE_HOT_LINES is enabled, nothing will be
// profiled by the AddressProfiler
m_address_profiler_ptr->
addTraceSample(msg.getLineAddress(), msg.getProgramCounter(),
msg.getType(), msg.getAccessMode(), id, false);
}
}
void Profiler::profileSharing(const Address& addr, AccessType type, NodeID requestor, const Set& sharers, const Set& owner)
void
Profiler::profileSharing(const Address& addr, AccessType type,
NodeID requestor, const Set& sharers,
const Set& owner)
{
Set set_contacted(owner);
if (type == AccessType_Write) {
@ -506,10 +529,11 @@ void Profiler::profileSharing(const Address& addr, AccessType type, NodeID reque
} else {
m_cache_to_cache++;
}
}
void Profiler::profileMsgDelay(int virtualNetwork, int delayCycles) {
void
Profiler::profileMsgDelay(int virtualNetwork, int delayCycles)
{
assert(virtualNetwork < m_delayedCyclesVCHistograms.size());
m_delayedCyclesHistogram.add(delayCycles);
m_delayedCyclesVCHistograms[virtualNetwork].add(delayCycles);
@ -519,7 +543,8 @@ void Profiler::profileMsgDelay(int virtualNetwork, int delayCycles) {
}
// profiles original cache requests including PUTs
void Profiler::profileRequest(const string& requestStr)
void
Profiler::profileRequest(const string& requestStr)
{
m_requests++;
@ -530,41 +555,49 @@ void Profiler::profileRequest(const string& requestStr)
}
}
void Profiler::controllerBusy(MachineID machID)
void
Profiler::controllerBusy(MachineID machID)
{
m_busyControllerCount[(int)machID.type][(int)machID.num]++;
}
void Profiler::profilePFWait(Time waitTime)
void
Profiler::profilePFWait(Time waitTime)
{
m_prefetchWaitHistogram.add(waitTime);
}
void Profiler::bankBusy()
void
Profiler::bankBusy()
{
m_busyBankCount++;
}
// non-zero cycle demand request
void Profiler::missLatency(Time t, RubyRequestType type)
void
Profiler::missLatency(Time t, RubyRequestType type)
{
m_allMissLatencyHistogram.add(t);
m_missLatencyHistograms[type].add(t);
}
// non-zero cycle prefetch request
void Profiler::swPrefetchLatency(Time t, CacheRequestType type, GenericMachineType respondingMach)
void
Profiler::swPrefetchLatency(Time t, CacheRequestType type,
GenericMachineType respondingMach)
{
m_allSWPrefetchLatencyHistogram.add(t);
m_SWPrefetchLatencyHistograms[type].add(t);
m_SWPrefetchMachLatencyHistograms[respondingMach].add(t);
if(respondingMach == GenericMachineType_Directory || respondingMach == GenericMachineType_NUM) {
if (respondingMach == GenericMachineType_Directory ||
respondingMach == GenericMachineType_NUM) {
m_SWPrefetchL2MissLatencyHistogram.add(t);
}
}
void Profiler::profileTransition(const string& component, NodeID version, Address addr,
const string& state, const string& event,
void
Profiler::profileTransition(const string& component, NodeID version,
Address addr, const string& state, const string& event,
const string& next_state, const string& note)
{
const int EVENT_SPACES = 20;
@ -573,30 +606,34 @@ void Profiler::profileTransition(const string& component, NodeID version, Addres
const int COMP_SPACES = 10;
const int STATE_SPACES = 6;
if ((g_debug_ptr->getDebugTime() > 0) &&
(g_eventQueue_ptr->getTime() >= g_debug_ptr->getDebugTime())) {
(* debug_cout_ptr).flags(ios::right);
(* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
(* debug_cout_ptr) << setw(ID_SPACES) << version << " ";
(* debug_cout_ptr) << setw(COMP_SPACES) << component;
(* debug_cout_ptr) << setw(EVENT_SPACES) << event << " ";
if (g_debug_ptr->getDebugTime() <= 0 ||
g_eventQueue_ptr->getTime() < g_debug_ptr->getDebugTime())
return;
(* debug_cout_ptr).flags(ios::right);
(* debug_cout_ptr) << setw(STATE_SPACES) << state;
(* debug_cout_ptr) << ">";
(* debug_cout_ptr).flags(ios::left);
(* debug_cout_ptr) << setw(STATE_SPACES) << next_state;
ostream &out = *debug_cout_ptr;
out.flags(ios::right);
out << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
out << setw(ID_SPACES) << version << " ";
out << setw(COMP_SPACES) << component;
out << setw(EVENT_SPACES) << event << " ";
(* debug_cout_ptr) << " " << addr << " " << note;
out.flags(ios::right);
out << setw(STATE_SPACES) << state;
out << ">";
out.flags(ios::left);
out << setw(STATE_SPACES) << next_state;
(* debug_cout_ptr) << endl;
}
out << " " << addr << " " << note;
out << endl;
}
// Helper function
static double process_memory_total()
static double
process_memory_total()
{
const double MULTIPLIER = 4096.0/(1024.0*1024.0); // 4kB page size, 1024*1024 bytes per MB,
// 4kB page size, 1024*1024 bytes per MB,
const double MULTIPLIER = 4096.0 / (1024.0 * 1024.0);
ifstream proc_file;
proc_file.open("/proc/self/statm");
int total_size_in_pages = 0;
@ -606,9 +643,11 @@ static double process_memory_total()
return double(total_size_in_pages) * MULTIPLIER; // size in megabytes
}
static double process_memory_resident()
static double
process_memory_resident()
{
const double MULTIPLIER = 4096.0/(1024.0*1024.0); // 4kB page size, 1024*1024 bytes per MB,
// 4kB page size, 1024*1024 bytes per MB,
const double MULTIPLIER = 4096.0 / (1024.0 * 1024.0);
ifstream proc_file;
proc_file.open("/proc/self/statm");
int total_size_in_pages = 0;
@ -618,25 +657,29 @@ static double process_memory_resident()
return double(res_size_in_pages) * MULTIPLIER; // size in megabytes
}
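As a worked example of the MULTIPLIER conversion: /proc/self/statm reports sizes in 4 kB pages, so a resident size of 25600 pages is 25600 * 4096 / (1024 * 1024) = 100 megabytes, the value that then appears in the mbytes_resident statistic.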
void Profiler::rubyWatch(int id){
void
Profiler::rubyWatch(int id)
{
uint64 tr = 0;
Address watch_address = Address(tr);
const int ID_SPACES = 3;
const int TIME_SPACES = 7;
(* debug_cout_ptr).flags(ios::right);
(* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
(* debug_cout_ptr) << setw(ID_SPACES) << id << " "
<< "RUBY WATCH "
<< watch_address
<< endl;
ostream &out = *debug_cout_ptr;
out.flags(ios::right);
out << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
out << setw(ID_SPACES) << id << " "
<< "RUBY WATCH " << watch_address << endl;
if (!m_watch_address_list_ptr->exist(watch_address)) {
m_watch_address_list_ptr->add(watch_address, 1);
}
}
bool Profiler::watchAddress(Address addr){
bool
Profiler::watchAddress(Address addr)
{
if (m_watch_address_list_ptr->exist(addr))
return true;
else

View file: mem/ruby/profiler/Profiler.hh

@ -42,35 +42,24 @@
----------------------------------------------------------------------
*/
/*
* Profiler.hh
*
* Description:
*
* $Id$
*
*/
#ifndef __MEM_RUBY_PROFILER_PROFILER_HH__
#define __MEM_RUBY_PROFILER_PROFILER_HH__
#ifndef PROFILER_H
#define PROFILER_H
#include "mem/ruby/libruby.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/protocol/GenericMachineType.hh"
#include "mem/ruby/common/Histogram.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/protocol/AccessModeType.hh"
#include "mem/protocol/AccessType.hh"
#include "mem/ruby/system/NodeID.hh"
#include "mem/ruby/system/MachineID.hh"
#include "mem/protocol/CacheRequestType.hh"
#include "mem/protocol/GenericMachineType.hh"
#include "mem/protocol/GenericRequestType.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Histogram.hh"
#include "mem/ruby/common/Set.hh"
#include "mem/protocol/CacheRequestType.hh"
#include "mem/protocol/GenericRequestType.hh"
#include "mem/ruby/libruby.hh"
#include "mem/ruby/system/MachineID.hh"
#include "mem/ruby/system/MemoryControl.hh"
#include "mem/ruby/system/NodeID.hh"
#include "params/RubyProfiler.hh"
#include "sim/sim_object.hh"
@ -79,16 +68,13 @@ class AddressProfiler;
template <class KEY_TYPE, class VALUE_TYPE> class Map;
class Profiler : public SimObject, public Consumer {
class Profiler : public SimObject, public Consumer
{
public:
// Constructors
typedef RubyProfilerParams Params;
Profiler(const Params *);
// Destructor
~Profiler();
// Public Methods
void wakeup();
void setPeriodicStatsFile(const string& filename);
@ -107,16 +93,33 @@ public:
void addAddressTraceSample(const CacheMsg& msg, NodeID id);
void profileRequest(const string& requestStr);
void profileSharing(const Address& addr, AccessType type, NodeID requestor, const Set& sharers, const Set& owner);
void profileSharing(const Address& addr, AccessType type,
NodeID requestor, const Set& sharers,
const Set& owner);
void profileMulticastRetry(const Address& addr, int count);
void profileFilterAction(int action);
void profileConflictingRequests(const Address& addr);
void profileOutstandingRequest(int outstanding) { m_outstanding_requests.add(outstanding); }
void profileOutstandingPersistentRequest(int outstanding) { m_outstanding_persistent_requests.add(outstanding); }
void profileAverageLatencyEstimate(int latency) { m_average_latency_estimate.add(latency); }
void
profileOutstandingRequest(int outstanding)
{
m_outstanding_requests.add(outstanding);
}
void
profileOutstandingPersistentRequest(int outstanding)
{
m_outstanding_persistent_requests.add(outstanding);
}
void
profileAverageLatencyEstimate(int latency)
{
m_average_latency_estimate.add(latency);
}
void recordPrediction(bool wasGood, bool wasPredicted);
@ -127,12 +130,14 @@ public:
void controllerBusy(MachineID machID);
void bankBusy();
void missLatency(Time t, RubyRequestType type);
void swPrefetchLatency(Time t, CacheRequestType type, GenericMachineType respondingMach);
void swPrefetchLatency(Time t, CacheRequestType type,
GenericMachineType respondingMach);
void sequencerRequests(int num) { m_sequencer_requests.add(num); }
void profileTransition(const string& component, NodeID version, Address addr,
const string& state, const string& event,
const string& next_state, const string& note);
void profileTransition(const string& component, NodeID version,
Address addr, const string& state,
const string& event, const string& next_state,
const string& note);
void profileMsgDelay(int virtualNetwork, int delayCycles);
void print(ostream& out) const;
@ -141,7 +146,9 @@ public:
bool watchAddress(Address addr);
// return Ruby's start time
Time getRubyStartTime(){
Time
getRubyStartTime()
{
return m_ruby_start;
}
@ -150,12 +157,10 @@ public:
bool getAllInstructions() { return m_all_instructions; }
private:
// Private copy constructor and assignment operator
Profiler(const Profiler& obj);
Profiler& operator=(const Profiler& obj);
// Data Members (m_ prefix)
AddressProfiler* m_address_profiler_ptr;
AddressProfiler* m_inst_profiler_ptr;
@ -214,20 +219,14 @@ private:
int m_num_of_sequencers;
};
// Output operator declaration
ostream& operator<<(ostream& out, const Profiler& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
ostream& operator<<(ostream& out, const Profiler& obj)
inline ostream&
operator<<(ostream& out, const Profiler& obj)
{
obj.print(out);
out << flush;
return out;
}
#endif //PROFILER_H
#endif // __MEM_RUBY_PROFILER_PROFILER_HH__

View file: mem/ruby/profiler/StoreTrace.cc

@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@ -27,28 +26,28 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
#include "mem/ruby/profiler/StoreTrace.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/profiler/StoreTrace.hh"
bool StoreTrace::s_init = false; // Total number of store lifetimes of all lines
int64 StoreTrace::s_total_samples = 0; // Total number of store lifetimes of all lines
bool StoreTrace::s_init = false; // Has the static summary been initialized?
int64 StoreTrace::s_total_samples = 0; // Total number of store
// lifetimes of all lines
Histogram* StoreTrace::s_store_count_ptr = NULL;
Histogram* StoreTrace::s_store_first_to_stolen_ptr = NULL;
Histogram* StoreTrace::s_store_last_to_stolen_ptr = NULL;
Histogram* StoreTrace::s_store_first_to_last_ptr = NULL;
StoreTrace::StoreTrace(const Address& addr) :
m_store_count(-1), m_store_first_to_stolen(-1), m_store_last_to_stolen(-1), m_store_first_to_last(-1)
StoreTrace::StoreTrace(const Address& addr)
: m_store_count(-1), m_store_first_to_stolen(-1),
m_store_last_to_stolen(-1), m_store_first_to_last(-1)
{
StoreTrace::initSummary();
m_addr = addr;
m_total_samples = 0;
m_last_writer = -1; // Really -1 isn't valid, so this will trigger the initilization code
// Really -1 isn't valid, so this will trigger the initialization code
m_last_writer = -1;
m_stores_this_interval = 0;
}
@ -56,18 +55,19 @@ StoreTrace::~StoreTrace()
{
}
void StoreTrace::print(ostream& out) const
void
StoreTrace::print(ostream& out) const
{
out << m_addr;
out << " total_samples: " << m_total_samples << endl;
out << "store_count: " << m_store_count << endl;
out << "store_first_to_stolen: " << m_store_first_to_stolen << endl;
out << "store_last_to_stolen: " << m_store_last_to_stolen << endl;
out << "store_first_to_last: " << m_store_first_to_last << endl;
out << m_addr
<< " total_samples: " << m_total_samples << endl
<< "store_count: " << m_store_count << endl
<< "store_first_to_stolen: " << m_store_first_to_stolen << endl
<< "store_last_to_stolen: " << m_store_last_to_stolen << endl
<< "store_first_to_last: " << m_store_first_to_last << endl;
}
// Class method
void StoreTrace::initSummary()
void
StoreTrace::initSummary()
{
if (!s_init) {
s_total_samples = 0;
@ -79,8 +79,8 @@ void StoreTrace::initSummary()
s_init = true;
}
// Class method
void StoreTrace::printSummary(ostream& out)
void
StoreTrace::printSummary(ostream& out)
{
out << "total_samples: " << s_total_samples << endl;
out << "store_count: " << (*s_store_count_ptr) << endl;
@ -89,8 +89,8 @@ void StoreTrace::printSummary(ostream& out)
out << "store_first_to_last: " << (*s_store_first_to_last_ptr) << endl;
}
// Class method
void StoreTrace::clearSummary()
void
StoreTrace::clearSummary()
{
StoreTrace::initSummary();
s_total_samples = 0;
@ -100,7 +100,8 @@ void StoreTrace::clearSummary()
s_store_first_to_last_ptr->clear();
}
void StoreTrace::store(NodeID node)
void
StoreTrace::store(NodeID node)
{
Time current = g_eventQueue_ptr->getTime();
@ -120,7 +121,8 @@ void StoreTrace::store(NodeID node)
m_stores_this_interval++;
}
void StoreTrace::downgrade(NodeID node)
void
StoreTrace::downgrade(NodeID node)
{
if (node == m_last_writer) {
Time current = g_eventQueue_ptr->getTime();
@ -151,8 +153,3 @@ void StoreTrace::downgrade(NodeID node)
m_last_writer = -1;
}
}
bool node_less_then_eq(const StoreTrace* n1, const StoreTrace* n2)
{
return (n1->getTotal() > n2->getTotal());
}
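For context on the interval bookkeeping above, a hypothetical usage sketch (not from this commit): the first store() from a node opens a store run on the line, later store() calls extend it, and a downgrade() of that node closes the run and feeds the interval lengths into the static summary histograms:

    #include <iostream>
    #include "mem/ruby/profiler/StoreTrace.hh"

    void
    exampleStoreRun(StoreTrace& trace)
    {
        trace.store(2);       // node 2 opens a store run on this line
        trace.store(2);       // further stores by the same node extend it
        trace.store(2);
        trace.downgrade(2);   // line stolen from node 2: the run of
                              // three stores is sampled
        StoreTrace::printSummary(std::cout);
    }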

View file: mem/ruby/profiler/StoreTrace.hh

@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@ -27,30 +26,20 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
* Description:
*
*/
#ifndef __MEM_RUBY_PROFILER_STORETRACE_HH__
#define __MEM_RUBY_PROFILER_STORETRACE_HH__
#ifndef StoreTrace_H
#define StoreTrace_H
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Histogram.hh"
class StoreTrace {
class StoreTrace
{
public:
// Constructors
StoreTrace() { }
explicit StoreTrace(const Address& addr);
// Destructor
~StoreTrace();
// Public Methods
void store(NodeID node);
void downgrade(NodeID node);
int getTotal() const { return m_total_samples; }
@ -59,23 +48,16 @@ public:
static void clearSummary();
void print(ostream& out) const;
private:
// Private Methods
// Private copy constructor and assignment operator
// StoreTrace(const StoreTrace& obj);
// StoreTrace& operator=(const StoreTrace& obj);
// Class Members (s_ prefix)
static bool s_init;
static int64 s_total_samples; // Total number of store lifetimes of all lines
static int64 s_total_samples; // Total number of store lifetimes
// of all lines
static Histogram* s_store_count_ptr;
static Histogram* s_store_first_to_stolen_ptr;
static Histogram* s_store_last_to_stolen_ptr;
static Histogram* s_store_first_to_last_ptr;
// Data Members (m_ prefix)
Address m_addr;
NodeID m_last_writer;
Time m_first_store;
@ -89,20 +71,18 @@ private:
Histogram m_store_first_to_last;
};
bool node_less_then_eq(const StoreTrace* n1, const StoreTrace* n2);
inline bool
node_less_then_eq(const StoreTrace* n1, const StoreTrace* n2)
{
return n1->getTotal() > n2->getTotal();
}
// Output operator declaration
ostream& operator<<(ostream& out, const StoreTrace& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
ostream& operator<<(ostream& out, const StoreTrace& obj)
inline ostream&
operator<<(ostream& out, const StoreTrace& obj)
{
obj.print(out);
out << flush;
return out;
}
#endif //StoreTrace_H
#endif // __MEM_RUBY_PROFILER_STORETRACE_HH__