style: eliminate equality tests with true and false
Using '== true' in a boolean expression is totally redundant, and using '== false' is pretty verbose (and arguably less readable in most cases) compared to '!'. It's somewhat of a pet peeve, perhaps, but I had some time waiting for some tests to run and decided to clean these up. Unfortunately, SLICC appears not to have the '!' operator, so I had to leave the '== false' tests in the SLICC code.
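For illustration only, here is a minimal standalone C++ sketch of the pattern this change targets (the names are invented for the example, not taken from the gem5 sources): comparing a bool against 'true' adds nothing, and comparing against 'false' is just a longer spelling of '!'. In the SLICC sources only the '== true' comparisons go away, since SLICC has no '!' to replace '== false'.

    #include <cassert>

    // Hypothetical example, not gem5 code: the same predicate written with and
    // without the redundant comparisons.
    bool ready_verbose(bool valid, bool blocked)
    {
        return (valid == true) && (blocked == false);   // style this commit removes
    }

    bool ready_clean(bool valid, bool blocked)
    {
        return valid && !blocked;                       // equivalent, shorter form
    }

    int main()
    {
        // The two forms agree for every combination of inputs.
        for (int v = 0; v < 2; ++v)
            for (int b = 0; b < 2; ++b)
                assert(ready_verbose(v, b) == ready_clean(v, b));
        return 0;
    }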
parent 2a8088f5ae
commit 0be64ffe2f
24 changed files with 49 additions and 50 deletions
@@ -184,7 +184,7 @@ TLB::insertAt(PTE &pte, unsigned Index, int _smallPages)
  (pte.D0 << 2) | (pte.V0 <<1) | pte.G),
  ((pte.PFN1 <<6) | (pte.C1 << 3) |
  (pte.D1 << 2) | (pte.V1 <<1) | pte.G));
- if (table[Index].V0 == true || table[Index].V1 == true) {
+ if (table[Index].V0 || table[Index].V1) {
  // Previous entry is valid
  PageTable::iterator i = lookupTable.find(table[Index].VPN);
  lookupTable.erase(i);
@@ -165,7 +165,7 @@ TLB::insertAt(PowerISA::PTE &pte, unsigned Index, int _smallPages)
  } else {
  // Update TLB
- if (table[Index].V0 == true || table[Index].V1 == true) {
+ if (table[Index].V0 || table[Index].V1) {
  // Previous entry is valid
  PageTable::iterator i = lookupTable.find(table[Index].VPN);
@@ -290,7 +290,7 @@ TLB::demapContext(int partition_id, int context_id)
  for (int x = 0; x < size; x++) {
  if (tlb[x].range.contextId == context_id &&
  tlb[x].range.partitionId == partition_id) {
- if (tlb[x].valid == true) {
+ if (tlb[x].valid) {
  freeList.push_front(&tlb[x]);
  }
  tlb[x].valid = false;
@@ -329,7 +329,7 @@ TLB::flushAll()
  lookupTable.clear();
  for (int x = 0; x < size; x++) {
- if (tlb[x].valid == true)
+ if (tlb[x].valid)
  freeList.push_back(&tlb[x]);
  tlb[x].valid = false;
  tlb[x].used = false;
@@ -233,7 +233,7 @@ Fault
  Walker::WalkerState::startWalk()
  {
  Fault fault = NoFault;
- assert(started == false);
+ assert(!started);
  started = true;
  setupWalk(req->getVaddr());
  if (timing) {
@@ -262,7 +262,7 @@ Fault
  Walker::WalkerState::startFunctional(Addr &addr, unsigned &logBytes)
  {
  Fault fault = NoFault;
- assert(started == false);
+ assert(!started);
  started = true;
  setupWalk(addr);
@@ -104,7 +104,7 @@ HexFile::parseLine(char *Str, Addr *A, uint32_t *D)
  } else if (Str[i] == ' ' || Str[i] == '\n') {
  if (Number == 0)
  return;
- if (Flag == false) {
+ if (!Flag) {
  *A = Number;
  Number = 0;
  Flag = true;
@@ -125,7 +125,7 @@ HexFile::parseLine(char *Str, Addr *A, uint32_t *D)
  i++;
  }
- if (Flag != true) {
+ if (!Flag) {
  *A = 0;
  *D = 0;
  } else {
@@ -89,7 +89,7 @@ ObjectMatch::domatch(const string &name) const
  }
  }
- if (match == true)
+ if (match)
  return true;
  }
@@ -115,7 +115,7 @@ Trace::ExeTracerRecord::traceInst(StaticInstPtr inst, bool ran)
  outs << Enums::OpClassStrings[inst->opClass()] << " : ";
  }
- if (Debug::ExecResult && predicate == false) {
+ if (Debug::ExecResult && !predicate) {
  outs << "Predicated False";
  }
@@ -1763,7 +1763,7 @@ InOrderCPU::cleanUpRemovedInsts()
  // Clear if Non-Speculative
  if (inst->staticInst &&
  inst->seqNum == nonSpecSeqNum[tid] &&
- nonSpecInstActive[tid] == true) {
+ nonSpecInstActive[tid]) {
  nonSpecInstActive[tid] = false;
  }
@@ -248,19 +248,19 @@ void
  PipelineStage::removeStalls(ThreadID tid)
  {
  for (int st_num = 0; st_num < NumStages; st_num++) {
- if (stalls[tid].stage[st_num] == true) {
+ if (stalls[tid].stage[st_num]) {
  DPRINTF(InOrderStage, "Removing stall from stage %i.\n",
  st_num);
  stalls[tid].stage[st_num] = false;
  }
- if (toPrevStages->stageBlock[st_num][tid] == true) {
+ if (toPrevStages->stageBlock[st_num][tid]) {
  DPRINTF(InOrderStage, "Removing pending block from stage %i.\n",
  st_num);
  toPrevStages->stageBlock[st_num][tid] = false;
  }
- if (fromNextStages->stageBlock[st_num][tid] == true) {
+ if (fromNextStages->stageBlock[st_num][tid]) {
  DPRINTF(InOrderStage, "Removing pending block from stage %i.\n",
  st_num);
  fromNextStages->stageBlock[st_num][tid] = false;
@@ -191,7 +191,7 @@ UseDefUnit::execute(int slot_idx)
  // If there is a non-speculative instruction
  // in the pipeline then stall instructions here
  // ---
- if (*nonSpecInstActive[tid] == true && seq_num > *nonSpecSeqNum[tid]) {
+ if (*nonSpecInstActive[tid] && seq_num > *nonSpecSeqNum[tid]) {
  DPRINTF(InOrderUseDef, "[tid:%i]: [sn:%i] cannot execute because"
  "there is non-speculative instruction [sn:%i] has not "
  "graduated.\n", tid, seq_num, *nonSpecSeqNum[tid]);
@@ -843,10 +843,10 @@ DefaultCommit<Impl>::commit()
  // Not sure which one takes priority. I think if we have
  // both, that's a bad sign.
- if (trapSquash[tid] == true) {
+ if (trapSquash[tid]) {
  assert(!tcSquash[tid]);
  squashFromTrap(tid);
- } else if (tcSquash[tid] == true) {
+ } else if (tcSquash[tid]) {
  assert(commitStatus[tid] != TrapPending);
  squashFromTC(tid);
  } else if (commitStatus[tid] == SquashAfterPending) {
@@ -885,7 +885,7 @@ DefaultCommit<Impl>::commit()
  // then use one older sequence number.
  InstSeqNum squashed_inst = fromIEW->squashedSeqNum[tid];
- if (fromIEW->includeSquashInst[tid] == true) {
+ if (fromIEW->includeSquashInst[tid]) {
  squashed_inst--;
  }
@@ -430,8 +430,8 @@ DefaultFetch<Impl>::drainSanityCheck() const
  assert(isDrained());
  assert(retryPkt == NULL);
  assert(retryTid == InvalidThreadID);
- assert(cacheBlocked == false);
- assert(interruptPending == false);
+ assert(!cacheBlocked);
+ assert(!interruptPending);
  for (ThreadID i = 0; i < numThreads; ++i) {
  assert(!memReq[i]);
@@ -487,7 +487,7 @@ DefaultIEW<Impl>::squashDueToBranch(DynInstPtr &inst, ThreadID tid)
  DPRINTF(IEW, "[tid:%i]: Squashing from a specific instruction, PC: %s "
  "[sn:%i].\n", tid, inst->pcState(), inst->seqNum);
- if (toCommit->squash[tid] == false ||
+ if (!toCommit->squash[tid] ||
  inst->seqNum < toCommit->squashedSeqNum[tid]) {
  toCommit->squash[tid] = true;
  toCommit->squashedSeqNum[tid] = inst->seqNum;
@@ -517,7 +517,7 @@ DefaultIEW<Impl>::squashDueToMemOrder(DynInstPtr &inst, ThreadID tid)
  // case the memory violator should take precedence over the branch
  // misprediction because it requires the violator itself to be included in
  // the squash.
- if (toCommit->squash[tid] == false ||
+ if (!toCommit->squash[tid] ||
  inst->seqNum <= toCommit->squashedSeqNum[tid]) {
  toCommit->squash[tid] = true;
@@ -538,7 +538,7 @@ DefaultIEW<Impl>::squashDueToMemBlocked(DynInstPtr &inst, ThreadID tid)
  {
  DPRINTF(IEW, "[tid:%i]: Memory blocked, squashing load and younger insts, "
  "PC: %s [sn:%i].\n", tid, inst->pcState(), inst->seqNum);
- if (toCommit->squash[tid] == false ||
+ if (!toCommit->squash[tid] ||
  inst->seqNum < toCommit->squashedSeqNum[tid]) {
  toCommit->squash[tid] = true;
@@ -1314,7 +1314,7 @@ DefaultIEW<Impl>::executeInsts()
  }
  // If the store had a fault then it may not have a mem req
- if (fault != NoFault || inst->readPredicate() == false ||
+ if (fault != NoFault || !inst->readPredicate() ||
  !inst->isStoreConditional()) {
  // If the instruction faulted, then we need to send it along
  // to commit without the instruction completing.
@@ -1339,7 +1339,7 @@ DefaultIEW<Impl>::executeInsts()
  // will be replaced and we will lose it.
  if (inst->getFault() == NoFault) {
  inst->execute();
- if (inst->readPredicate() == false)
+ if (!inst->readPredicate())
  inst->forwardOldRegs();
  }
@@ -1262,7 +1262,7 @@ InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
  // it be added to the dependency graph.
  if (src_reg >= numPhysRegs) {
  continue;
- } else if (regScoreboard[src_reg] == false) {
+ } else if (!regScoreboard[src_reg]) {
  DPRINTF(IQ, "Instruction PC %s has src reg %i that "
  "is being added to the dependency chain.\n",
  new_inst->pcState(), src_reg);
@@ -612,12 +612,12 @@ LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
  // If the instruction faulted or predicated false, then we need to send it
  // along to commit without the instruction completing.
- if (load_fault != NoFault || inst->readPredicate() == false) {
+ if (load_fault != NoFault || !inst->readPredicate()) {
  // Send this instruction to commit, also make sure iew stage
  // realizes there is activity.
  // Mark it as executed unless it is an uncached load that
  // needs to hit the head of commit.
- if (inst->readPredicate() == false)
+ if (!inst->readPredicate())
  inst->forwardOldRegs();
  DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
  inst->seqNum,
@@ -665,7 +665,7 @@ LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
  store_fault == NoFault)
  return store_fault;
- if (store_inst->readPredicate() == false)
+ if (!store_inst->readPredicate())
  store_inst->forwardOldRegs();
  if (storeQueue[store_idx].size == 0) {
@@ -673,7 +673,7 @@ LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
  store_inst->pcState(), store_inst->seqNum);
  return store_fault;
- } else if (store_inst->readPredicate() == false) {
+ } else if (!store_inst->readPredicate()) {
  DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
  store_inst->seqNum);
  return store_fault;
@@ -519,7 +519,7 @@ ROB<Impl>::readHeadInst(ThreadID tid)
  if (threadEntries[tid] != 0) {
  InstIt head_thread = instList[tid].begin();
- assert((*head_thread)->isInROB()==true);
+ assert((*head_thread)->isInROB());
  return *head_thread;
  } else {
@@ -1101,7 +1101,7 @@ InstQueue<Impl>::addToDependents(DynInstPtr &new_inst)
  // it be added to the dependency graph.
  if (src_reg >= numPhysRegs) {
  continue;
- } else if (regScoreboard[src_reg] == false) {
+ } else if (!regScoreboard[src_reg]) {
  DPRINTF(IQ, "Instruction PC %#x has src reg %i that "
  "is being added to the dependency chain.\n",
  new_inst->readPC(), src_reg);
@@ -261,7 +261,7 @@ machine(L1Cache, "Token protocol")
  } else if (is_valid(cache_entry)) {
  return cache_entry.CacheState;
  } else {
- if ((persistentTable.isLocked(addr) == true) && (persistentTable.findSmallest(addr) != machineID)) {
+ if (persistentTable.isLocked(addr) && (persistentTable.findSmallest(addr) != machineID)) {
  // Not in cache, in persistent table, but this processor isn't highest priority
  return State:I_L;
  } else {
@@ -1401,7 +1401,7 @@ machine(L1Cache, "Token protocol")
  assert(is_valid(tbe));
  if (tbe.WentPersistent) {
- // assert(starving == true);
+ // assert(starving);
  outstandingRequests := outstandingRequests - 1;
  enqueue(persistentNetwork_out, PersistentMsg, l1_request_latency) {
  out_msg.Addr := address;
@@ -1428,7 +1428,7 @@ machine(L1Cache, "Token protocol")
  // Update average latency
  if (tbe.IssueCount <= 1) {
- if (tbe.ExternalResponse == true) {
+ if (tbe.ExternalResponse) {
  updateAverageLatencyEstimate(curCycle() - tbe.IssueTime);
  }
  }
@@ -165,7 +165,7 @@ machine(L2Cache, "Token protocol")
  State getState(Entry cache_entry, Address addr) {
  if (is_valid(cache_entry)) {
  return cache_entry.CacheState;
- } else if (persistentTable.isLocked(addr) == true) {
+ } else if (persistentTable.isLocked(addr)) {
  return State:I_L;
  } else {
  return State:NP;
@@ -250,7 +250,7 @@ machine(L2Cache, "Token protocol")
  bool exclusiveExists(Address addr) {
  if (localDirectory.isTagPresent(addr)) {
- if (localDirectory[addr].exclusive == true) {
+ if (localDirectory[addr].exclusive) {
  return true;
  }
  else {
@@ -285,7 +285,7 @@ machine(L2Cache, "Token protocol")
  }
  void clearExclusiveBitIfExists(Address addr) {
- if (localDirectory.isTagPresent(addr) == true) {
+ if (localDirectory.isTagPresent(addr)) {
  localDirectory[addr].exclusive := false;
  }
  }
@@ -761,7 +761,7 @@ machine(L2Cache, "Token protocol")
  action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
  peek(requestNetwork_in, RequestMsg) {
- if (filtering_enabled == true && in_msg.RetryNum == 0 && sharersExist(in_msg.Addr) == false) {
+ if (filtering_enabled && in_msg.RetryNum == 0 && sharersExist(in_msg.Addr) == false) {
  //profile_filter_action(1);
  DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
  in_msg.RetryNum);
@@ -708,7 +708,7 @@ machine(Directory, "Token protocol")
  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
  if (tbe.WentPersistent) {
- assert(starving == true);
+ assert(starving);
  enqueue(persistentNetwork_out, PersistentMsg, 1) {
  out_msg.Addr := address;
@@ -160,7 +160,7 @@ MessageBuffer::enqueue(MsgPtr message, Cycles delta)
  Tick current_time = m_sender->clockEdge();
  Tick arrival_time = 0;
- if (!RubySystem::getRandomization() || (m_randomization == false)) {
+ if (!RubySystem::getRandomization() || !m_randomization) {
  // No randomization
  arrival_time = current_time + delta * m_sender->clockPeriod();
  } else {
@@ -60,7 +60,7 @@ class NetworkMessage : public Message
  const NetDest&
  getInternalDestination() const
  {
- if (m_internal_dest_valid == false)
+ if (!m_internal_dest_valid)
  return getDestination();
  return m_internal_dest;
@@ -69,7 +69,7 @@ class NetworkMessage : public Message
  NetDest&
  getInternalDestination()
  {
- if (m_internal_dest_valid == false) {
+ if (!m_internal_dest_valid) {
  m_internal_dest = getDestination();
  m_internal_dest_valid = true;
  }
@@ -94,7 +94,7 @@ DMASequencer::makeRequest(PacketPtr pkt)
  void
  DMASequencer::issueNext()
  {
- assert(m_is_busy == true);
+ assert(m_is_busy);
  active_request.bytes_completed = active_request.bytes_issued;
  if (active_request.len == active_request.bytes_completed) {
  //
@@ -144,12 +144,12 @@ DMASequencer::issueNext()
  void
  DMASequencer::dataCallback(const DataBlock & dblk)
  {
- assert(m_is_busy == true);
+ assert(m_is_busy);
  int len = active_request.bytes_issued - active_request.bytes_completed;
  int offset = 0;
  if (active_request.bytes_completed == 0)
  offset = active_request.start_paddr & m_data_block_mask;
- assert(active_request.write == false);
+ assert(!active_request.write);
  if (active_request.data != NULL) {
  memcpy(&active_request.data[active_request.bytes_completed],
  dblk.getData(offset, len), len);
@@ -68,12 +68,11 @@ class PeekStatementAST(StatementAST):
  if self.pairs.has_key("block_on"):
  address_field = self.pairs['block_on']
  code('''
- if ( (m_is_blocking == true) &&
- (m_block_map.count(in_msg_ptr->m_$address_field) == 1) ) {
- if (m_block_map[in_msg_ptr->m_$address_field] != &$qcode) {
+ if (m_is_blocking &&
+ (m_block_map.count(in_msg_ptr->m_$address_field) == 1) &&
+ (m_block_map[in_msg_ptr->m_$address_field] != &$qcode)) {
  $qcode.delayHead();
  continue;
- }
  }
  ''')