ruby: remove extra whitespace and correct misspelled words
parent a74c446e7d
commit 9eda4bdc5a
18 changed files with 52 additions and 51 deletions

@@ -44,12 +44,12 @@ def define_options(parser):
    return

def create_system(options, full_system, system, dma_ports, ruby_system):

    if buildEnv['PROTOCOL'] != 'MI_example':
        panic("This script requires the MI_example protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes must be

@@ -143,7 +143,7 @@ def create_system(options, full_system, system, dma_ports, ruby_system):
        #
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,

@@ -55,7 +55,7 @@ def create_system(options, full_system, system, dma_ports, ruby_system):
        panic("This script requires the MOESI_CMP_directory protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes must be

@@ -129,7 +129,7 @@ def create_system(options, full_system, system, dma_ports, ruby_system):
                                      L2cache = l2_cache,
                                      transitions_per_cycle = options.ports,
                                      ruby_system = ruby_system)

        exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
        l2_cntrl_nodes.append(l2_cntrl)

@@ -182,7 +182,7 @@ def create_system(options, full_system, system, dma_ports, ruby_system):
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system,
                               slave = dma_port)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,

@@ -55,9 +55,9 @@ def define_options(parser):
          help="Token_CMP: disable dyanimc timeouts, use fixed latency instead")
    parser.add_option("--allow-atomic-migration", action="store_true",
          help="allow migratory sharing for atomic only accessed blocks")

def create_system(options, full_system, system, dma_ports, ruby_system):

    if buildEnv['PROTOCOL'] != 'MOESI_CMP_token':
        panic("This script requires the MOESI_CMP_token protocol to be built.")

@@ -68,7 +68,7 @@ def create_system(options, full_system, system, dma_ports, ruby_system):
    n_tokens = options.num_cpus + 1

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes must be

@@ -85,7 +85,7 @@ def create_system(options, full_system, system, dma_ports, ruby_system):
    #
    l2_bits = int(math.log(options.num_l2caches, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))

    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu

@@ -153,7 +153,7 @@ def create_system(options, full_system, system, dma_ports, ruby_system):
                                      N_tokens = n_tokens,
                                      transitions_per_cycle = options.ports,
                                      ruby_system = ruby_system)

        exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
        l2_cntrl_nodes.append(l2_cntrl)

@@ -212,7 +212,7 @@ def create_system(options, full_system, system, dma_ports, ruby_system):
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system,
                               slave = dma_port)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,

@@ -66,7 +66,7 @@ def create_system(options, full_system, system, dma_ports, ruby_system):
        panic("This script requires the MOESI_hammer protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes must be

@@ -209,7 +209,7 @@ def create_system(options, full_system, system, dma_ports, ruby_system):
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system,
                               slave = dma_port)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,

@@ -43,7 +43,7 @@ def define_options(parser):
    return

def create_system(options, full_system, system, dma_ports, ruby_system):

    if buildEnv['PROTOCOL'] != 'Network_test':
        panic("This script requires the Network_test protocol to be built.")

@@ -53,7 +53,7 @@ def create_system(options, full_system, system, dma_ports, ruby_system):
    # The Garnet tester protocol does not support fs nor dma
    #
    assert(dma_ports == [])

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes must be

@@ -180,7 +180,7 @@ class MessageBuffer
    Cycles m_time_last_time_size_checked;
    unsigned int m_size_last_time_size_checked;

-    // variables used so enqueues appear to happen imediately, while
+    // variables used so enqueues appear to happen immediately, while
    // pop happen the next cycle
    Cycles m_time_last_time_enqueue;
    Tick m_time_last_time_pop;
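
The comment kept above describes the MessageBuffer timing rule: an enqueue becomes visible in the cycle it happens, but the message can only be popped from the next cycle on. A minimal self-contained sketch of that rule, with invented names (this is not the gem5 MessageBuffer itself):

#include <cassert>
#include <deque>

typedef unsigned long Cycles;   // stand-in for gem5's Cycles type

struct TimedMsg { int payload; Cycles enqueue_cycle; };

class TinyMsgBuffer {
  public:
    // The message is visible (e.g. to a peek) in the cycle it is enqueued...
    void enqueue(int payload, Cycles now) { q.push_back({payload, now}); }

    // ...but it only becomes poppable from the following cycle onwards.
    bool isReady(Cycles now) const {
        return !q.empty() && q.front().enqueue_cycle < now;
    }

    int pop(Cycles now) {
        assert(isReady(now));
        int p = q.front().payload;
        q.pop_front();
        return p;
    }

  private:
    std::deque<TimedMsg> q;
};

int main() {
    TinyMsgBuffer buf;
    buf.enqueue(42, 10);
    assert(!buf.isReady(10));   // not poppable in the cycle it was enqueued
    assert(buf.isReady(11));    // poppable one cycle later
    return buf.pop(11) == 42 ? 0 : 1;
}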

@@ -61,7 +61,7 @@ class Network : public ClockedObject
    typedef RubyNetworkParams Params;
    Network(const Params *p);
    const Params * params() const
-    { return dynamic_cast<const Params *>(_params);}
+    { return dynamic_cast<const Params *>(_params); }

    virtual ~Network();
    virtual void init();

@@ -41,16 +41,16 @@
#include "mem/ruby/network/fault_model/FaultModel.hh"
#include "params/BaseGarnetNetwork.hh"

-class BaseGarnetNetwork : public Network
+class BaseGarnetNetwork : public Network
{
  public:
    typedef BaseGarnetNetworkParams Params;
    BaseGarnetNetwork(const Params *p);

    void init();
-    int getNiFlitSize() {return m_ni_flit_size; }
-    int getVCsPerVnet() {return m_vcs_per_vnet; }
-    bool isFaultModelEnabled() {return m_enable_fault_model;}
+    int getNiFlitSize() { return m_ni_flit_size; }
+    int getVCsPerVnet() { return m_vcs_per_vnet; }
+    bool isFaultModelEnabled() { return m_enable_fault_model; }
    FaultModel* fault_model;

    void increment_injected_flits(int vnet) { m_flits_injected[vnet]++; }

@@ -54,8 +54,8 @@ class GarnetNetwork_d : public BaseGarnetNetwork
    ~GarnetNetwork_d();
    void init();

-    int getBuffersPerDataVC() {return m_buffers_per_data_vc; }
-    int getBuffersPerCtrlVC() {return m_buffers_per_ctrl_vc; }
+    int getBuffersPerDataVC() { return m_buffers_per_data_vc; }
+    int getBuffersPerCtrlVC() { return m_buffers_per_ctrl_vc; }

    void collateStats();
    void regStats();

@@ -69,7 +69,7 @@ class GarnetNetwork_d : public BaseGarnetNetwork
    }

    // Methods used by Topology to setup the network
-    void makeOutLink(SwitchID src, NodeID dest, BasicLink* link,
+    void makeOutLink(SwitchID src, NodeID dest, BasicLink* link,
                     LinkDirection direction,
                     const NetDest& routing_table_entry);
    void makeInLink(NodeID src, SwitchID dest, BasicLink* link,

@@ -94,8 +94,8 @@ GarnetNetwork::~GarnetNetwork()
}

void
-GarnetNetwork::makeInLink(NodeID src, SwitchID dest, BasicLink* link,
-                          LinkDirection direction,
+GarnetNetwork::makeInLink(NodeID src, SwitchID dest, BasicLink* link,
+                          LinkDirection direction,
                          const NetDest& routing_table_entry)
{
    assert(src < m_nodes);

@@ -110,8 +110,8 @@ GarnetNetwork::makeInLink(NodeID src, SwitchID dest, BasicLink* link,
}

void
-GarnetNetwork::makeOutLink(SwitchID src, NodeID dest, BasicLink* link,
-                           LinkDirection direction,
+GarnetNetwork::makeOutLink(SwitchID src, NodeID dest, BasicLink* link,
+                           LinkDirection direction,
                           const NetDest& routing_table_entry)
{
    assert(dest < m_nodes);

@@ -130,7 +130,7 @@ GarnetNetwork::makeOutLink(SwitchID src, NodeID dest, BasicLink* link,

void
GarnetNetwork::makeInternalLink(SwitchID src, SwitchID dest, BasicLink* link,
-                                LinkDirection direction,
+                                LinkDirection direction,
                                const NetDest& routing_table_entry)
{
    GarnetIntLink* garnet_link = safe_cast<GarnetIntLink*>(link);

@@ -83,8 +83,8 @@ SimpleNetwork::~SimpleNetwork()

// From a switch to an endpoint node
void
-SimpleNetwork::makeOutLink(SwitchID src, NodeID dest, BasicLink* link,
-                           LinkDirection direction,
+SimpleNetwork::makeOutLink(SwitchID src, NodeID dest, BasicLink* link,
+                           LinkDirection direction,
                           const NetDest& routing_table_entry)
{
    assert(dest < m_nodes);

@@ -102,8 +102,8 @@ SimpleNetwork::makeOutLink(SwitchID src, NodeID dest, BasicLink* link,

// From an endpoint node to a switch
void
-SimpleNetwork::makeInLink(NodeID src, SwitchID dest, BasicLink* link,
-                          LinkDirection direction,
+SimpleNetwork::makeInLink(NodeID src, SwitchID dest, BasicLink* link,
+                          LinkDirection direction,
                          const NetDest& routing_table_entry)
{
    assert(src < m_nodes);

@@ -112,8 +112,8 @@ SimpleNetwork::makeInLink(NodeID src, SwitchID dest, BasicLink* link,

// From a switch to a switch
void
-SimpleNetwork::makeInternalLink(SwitchID src, SwitchID dest, BasicLink* link,
-                                LinkDirection direction,
+SimpleNetwork::makeInternalLink(SwitchID src, SwitchID dest, BasicLink* link,
+                                LinkDirection direction,
                                const NetDest& routing_table_entry)
{
    // Create a set of new MessageBuffers

@@ -61,7 +61,8 @@ class BankedArray
    unsigned int mapIndexToBank(int64 idx);

  public:
-    BankedArray(unsigned int banks, Cycles accessLatency, unsigned int startIndexBit);
+    BankedArray(unsigned int banks, Cycles accessLatency,
+                unsigned int startIndexBit);

    // Note: We try the access based on the cache index, not the address
    // This is so we don't get aliasing on blocks being replaced
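
The note kept above says the availability check is keyed on the cache index rather than the address, so a victim block and the block replacing it (same set, different addresses) contend for the same bank. A rough sketch of such a banked busy-until model, using invented names rather than the real BankedArray interface:

#include <cstdint>
#include <vector>

// Each bank is busy for `latency` cycles after accepting an access; the bank
// is chosen from the cache set index, so an evicted block and the block that
// replaces it (same index) serialize on the same bank.
class SimpleBankedArray {
  public:
    SimpleBankedArray(unsigned banks, uint64_t accessLatency)
        : latency(accessLatency), busyUntil(banks, 0) {}

    unsigned mapIndexToBank(uint64_t idx) const {
        return idx % busyUntil.size();
    }

    // Returns true and reserves the bank if it is idle in this cycle.
    bool tryAccess(uint64_t idx, uint64_t currentCycle) {
        unsigned bank = mapIndexToBank(idx);
        if (busyUntil[bank] > currentCycle)
            return false;                         // bank conflict
        busyUntil[bank] = currentCycle + latency;
        return true;
    }

  private:
    uint64_t latency;
    std::vector<uint64_t> busyUntil;
};

int main() {
    SimpleBankedArray banks(4, 2);
    bool first  = banks.tryAccess(5, 100);   // idle bank: access accepted
    bool second = banks.tryAccess(5, 100);   // same index, same cycle: conflict
    return (first && !second) ? 0 : 1;
}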

@@ -304,7 +304,7 @@ DMASequencer::issueNext()
    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg);
    active_request.bytes_issued += msg->getLen();
-    DPRINTF(RubyDma,
+    DPRINTF(RubyDma,
            "DMA request bytes issued %d, bytes completed %d, total len %d\n",
            active_request.bytes_issued, active_request.bytes_completed,
            active_request.len);
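
The DPRINTF above reports a DMA transfer's progress as bytes issued versus bytes completed against the total length. A toy sketch of that chunked-issue bookkeeping, with a made-up structure in place of the real DMASequencer state:

#include <algorithm>
#include <cstdio>

// Toy model of issuing one DMA request in fixed-size chunks while tracking
// how much has been handed to the memory system vs. how much has completed.
struct DmaRequestState {
    unsigned len = 0;              // total transfer length in bytes
    unsigned bytes_issued = 0;     // handed to the memory system so far
    unsigned bytes_completed = 0;  // acknowledged by the memory system
};

void issueNextChunk(DmaRequestState &req, unsigned chunkBytes)
{
    unsigned msgLen = std::min(chunkBytes, req.len - req.bytes_issued);
    req.bytes_issued += msgLen;    // the enqueue of this chunk would happen here
    std::printf("DMA request bytes issued %u, bytes completed %u, total len %u\n",
                req.bytes_issued, req.bytes_completed, req.len);
}

int main()
{
    DmaRequestState req;
    req.len = 4096;
    while (req.bytes_issued < req.len)
        issueNextChunk(req, 1024);
    return 0;
}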

@@ -363,7 +363,7 @@ RubyPort::ruby_hit_callback(PacketPtr pkt)
    if (!retryList.empty()) {
        //
        // Record the current list of ports to retry on a temporary list before
-        // calling sendRetry on those ports. sendRetry will cause an
+        // calling sendRetry on those ports. sendRetry will cause an
        // immediate retry, which may result in the ports being put back on the
        // list. Therefore we want to clear the retryList before calling
        // sendRetry.

@@ -422,7 +422,7 @@ RubyPort::MemSlavePort::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

-    // Unless specified at configuraiton, all responses except failed SC
+    // Unless specified at configuraiton, all responses except failed SC
    // and Flush operations access M5 physical memory.
    bool accessPhysMem = access_backing_store;
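
The retry comment above explains why the list of waiting ports is copied to a temporary list and cleared before sendRetry is called: a retried port may immediately fail again and re-add itself. A small stand-alone sketch of that pattern (plain callbacks instead of gem5 ports):

#include <functional>
#include <iostream>
#include <vector>

// Move the pending callbacks into a local list and clear the member first,
// so a callback that fails again can re-register itself without disturbing
// the ongoing iteration.
struct Retrier {
    std::vector<std::function<void()>> retryList;

    void drainRetries() {
        std::vector<std::function<void()>> cur;
        cur.swap(retryList);                // member list is now empty
        for (auto &retry : cur)
            retry();                        // may append to retryList again
    }
};

int main() {
    Retrier r;
    r.retryList.push_back([&r] {
        std::cout << "retried, still blocked, re-registering\n";
        r.retryList.push_back([] { std::cout << "retried again\n"; });
    });
    r.drainRetries();   // safe: iterates only the local copy
    r.drainRetries();
    return 0;
}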

@@ -347,7 +347,7 @@ Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
    } else {
        //
        // For successful SC requests, indicate the success to the cpu by
-        // setting the extra data to one.
+        // setting the extra data to one.
        //
        request->pkt->req->setExtraData(1);
    }
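
The comment above states the convention for store-conditional results: success is signalled to the CPU by writing one into the request's extra data (a failed SC would carry zero). A simplified sketch of that convention with illustrative types, not the actual gem5 Sequencer or Request classes:

#include <cstdint>
#include <iostream>

// The memory side reports SC success/failure to the CPU by writing 1 or 0
// into the request's "extra data" field.
struct FakeRequest { uint64_t extraData = 0; };

bool handleStoreConditional(bool lineStillLocked, FakeRequest &req)
{
    if (!lineStillLocked) {
        req.extraData = 0;   // SC failed: another writer intervened
        return false;
    }
    req.extraData = 1;       // SC succeeded
    return true;
}

int main()
{
    FakeRequest req;
    handleStoreConditional(true, req);
    std::cout << "extra data = " << req.extraData << "\n";  // prints 1
    return 0;
}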

@@ -86,16 +86,16 @@ RubySystem::RubySystem(const Params *p)
void
RubySystem::registerNetwork(Network* network_ptr)
{
-    m_network = network_ptr;
+    m_network = network_ptr;
}

void
RubySystem::registerAbstractController(AbstractController* cntrl)
{
-    m_abs_cntrl_vec.push_back(cntrl);
+    m_abs_cntrl_vec.push_back(cntrl);

-    MachineID id = cntrl->getMachineID();
-    g_abs_controls[id.getType()][id.getNum()] = cntrl;
+    MachineID id = cntrl->getMachineID();
+    g_abs_controls[id.getType()][id.getNum()] = cntrl;
}

RubySystem::~RubySystem()

@@ -189,7 +189,7 @@ RubySystem::serializeOld(CheckpointOut &cp)
    // Restore curTick
    setCurTick(curtick_original);

-    // Aggergate the trace entries together into a single array
+    // Aggregate the trace entries together into a single array
    uint8_t *raw_data = new uint8_t[4096];
    uint64 cache_trace_size = m_cache_recorder->aggregateRecords(&raw_data,
                                                                 4096);

@@ -225,7 +225,7 @@ class $py_ident(RubyController):
                  "in StateMachine.py", param.type_ast.type.c_ident)
        code.dedent()
        code.write(path, '%s.py' % py_ident)

    def printControllerHH(self, path):
        '''Output the method declarations for the class declaration'''

@@ -509,7 +509,7 @@ $c_ident::$c_ident(const Params *p)

                if re.compile("sequencer").search(param.ident):
                    code('m_${{param.ident}}_ptr->setController(this);')

        for var in self.objects:
            if var.ident.find("mandatoryQueue") >= 0:
                code('''

@@ -1501,7 +1501,7 @@ if (!checkResourceAvailable(%s_RequestType_%s, addr)) {
</TR>
''')
        code('''
-<!- Column footer->
+<!- Column footer->
<TR>
<TH> </TH>
''')

@@ -470,8 +470,8 @@ enum ${{self.c_ident}} {
        # For each field
        for i,(ident,enum) in enumerate(self.enums.iteritems()):
            desc = enum.get("desc", "No description avaliable")
-            if i == 0:
-                init = ' = %s_FIRST' % self.c_ident
+            if i == 0:
+                init = ' = %s_FIRST' % self.c_ident
            else:
                init = ''
            code('${{self.c_ident}}_${{enum.ident}}$init, /**< $desc */')