misc: Fix a bunch of minor issues identified by static analysis
Add some missing initialisation, and fix a handful of benign resource leaks (including some false positives).
This commit is contained in:
parent 71d5f03175
commit de62aedabc
11 changed files with 37 additions and 30 deletions
@@ -255,6 +255,8 @@ OutputDirectory::remove(const string &name, bool recursive)
                 de = readdir(subdir);
             }
+
+            closedir(subdir);
         }

         // try to force recognition that we deleted the files in the directory

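For reference, a minimal standalone sketch (not gem5 code) of the opendir()/readdir()/closedir() pattern the hunk above completes; without the final closedir() the directory handle and its descriptor leak on every call:

    #include <dirent.h>

    // Count the entries in a directory; a hypothetical helper, shown only to
    // illustrate that every successful opendir() needs a matching closedir().
    static int
    countEntries(const char *path)
    {
        DIR *dir = opendir(path);
        if (!dir)
            return -1;

        int n = 0;
        for (struct dirent *de = readdir(dir); de != NULL; de = readdir(dir))
            ++n;

        closedir(dir);   // without this, the handle and its descriptor leak
        return n;
    }
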
@@ -94,8 +94,9 @@ ListenSocket::listen(int port, bool reuse)
     struct sockaddr_in sockaddr;
     sockaddr.sin_family = PF_INET;
     sockaddr.sin_addr.s_addr = INADDR_ANY;
-
     sockaddr.sin_port = htons(port);
+    // finally clear sin_zero
+    memset(&sockaddr.sin_zero, 0, sizeof(sockaddr.sin_zero));
     int ret = ::bind(fd, (struct sockaddr *)&sockaddr, sizeof (sockaddr));
     if (ret != 0) {
         if (ret == -1 && errno != EADDRINUSE)

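The same warning about indeterminate bytes can also be silenced by clearing the whole address structure before filling it in; a minimal sketch under that assumption, not the change made here:

    #include <arpa/inet.h>
    #include <cstdint>
    #include <cstring>
    #include <netinet/in.h>

    // Build a fully initialised sockaddr_in: memset clears every byte,
    // including the sin_zero padding, before the real fields are set.
    static struct sockaddr_in
    makeAnyAddr(uint16_t port)
    {
        struct sockaddr_in addr;
        std::memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(port);
        return addr;
    }
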
@@ -126,7 +127,9 @@ ListenSocket::accept(bool nodelay)
     int sfd = ::accept(fd, (struct sockaddr *)&sockaddr, &slen);
     if (sfd != -1 && nodelay) {
         int i = 1;
-        ::setsockopt(sfd, IPPROTO_TCP, TCP_NODELAY, (char *)&i, sizeof(i));
+        if (::setsockopt(sfd, IPPROTO_TCP, TCP_NODELAY, (char *)&i,
+                         sizeof(i)) < 0)
+            warn("ListenSocket(accept): setsockopt() TCP_NODELAY failed!");
     }

     return sfd;

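Outside gem5, the same check looks like the sketch below; perror() stands in for gem5's warn() and is an assumption, not part of the commit:

    #include <cstdio>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    // Disable Nagle's algorithm on an accepted socket and report a failure
    // instead of silently ignoring it.
    static void
    setNoDelay(int sfd)
    {
        int one = 1;
        if (::setsockopt(sfd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) < 0)
            std::perror("setsockopt(TCP_NODELAY)");
    }
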
@@ -52,9 +52,12 @@
 RubyTester::RubyTester(const Params *p)
   : MemObject(p), checkStartEvent(this),
     _masterId(p->system->getMasterId(name())),
+    m_checkTable_ptr(nullptr),
     m_num_cpus(p->num_cpus),
     m_checks_to_complete(p->checks_to_complete),
     m_deadlock_threshold(p->deadlock_threshold),
+    m_num_writers(0),
+    m_num_readers(0),
     m_wakeup_frequency(p->wakeup_frequency),
     m_check_flush(p->check_flush),
     m_num_inst_ports(p->port_cpuInstPort_connection_count)

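The added entries follow the usual rule of giving every member a defined value in the constructor's initialiser list, even the ones that only get their real value later. A small sketch of the idea with invented names:

    #include <cstdint>

    class CheckCounter
    {
      public:
        // Give every member a defined value up front; m_table only gets its
        // real value later, so it starts out as nullptr rather than garbage.
        explicit CheckCounter(uint32_t limit)
            : m_limit(limit), m_table(nullptr), m_reads(0), m_writes(0)
        {}

      private:
        uint32_t m_limit;
        int *m_table;
        uint64_t m_reads;
        uint64_t m_writes;
    };
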
src/mem/cache/cache_impl.hh

@@ -917,6 +917,9 @@ Cache<TagStore>::recvAtomic(PacketPtr pkt)
     if (pkt->cmd == MemCmd::WriteInvalidateReq) {
         memSidePort->sendAtomic(pkt); // complete writeback
         if (isTopLevel) {
+            // @todo Static analysis suggests this can actually happen
+            assert(blk);
+
             // top level caches allocate and write the data
             assert(blk->isDirty());
             assert(!blk->isWritable());

@@ -105,6 +105,12 @@ CommMonitor::CommMonitor(Params* params)
             name(), samplePeriodTicks, samplePeriod.msec());
 }

+CommMonitor::~CommMonitor()
+{
+    // if not already done, close the stream
+    closeStreams();
+}
+
 void
 CommMonitor::closeStreams()
 {

@@ -77,7 +77,7 @@ class CommMonitor : public MemObject
     CommMonitor(Params* params);

     /** Destructor */
-    ~CommMonitor() {}
+    ~CommMonitor();

     /**
      * Callback to flush and close all open output streams on exit. If

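Together, these two hunks move CommMonitor from an empty inline destructor to one defined out of line that closes any open output streams. A generic sketch of that pattern; the class, member, and stream names below are illustrative, not CommMonitor's actual ones:

    #include <fstream>

    // Declared in the header, defined out of line so the destructor can
    // flush and close whatever streams are still open.
    class Monitor
    {
      public:
        ~Monitor();
        void closeStreams();

      private:
        std::ofstream traceStream;
    };

    Monitor::~Monitor()
    {
        // if not already done, close the stream
        closeStreams();
    }

    void
    Monitor::closeStreams()
    {
        if (traceStream.is_open())
            traceStream.close();
    }
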
@@ -601,7 +601,7 @@ class Packet : public Printable
      */
     Packet(Request *_req, MemCmd _cmd)
         :  cmd(_cmd), req(_req), data(nullptr), addr(0), _isSecure(false),
-           src(InvalidPortID), dest(InvalidPortID),
+           size(0), src(InvalidPortID), dest(InvalidPortID),
            bytesValidStart(0), bytesValidEnd(0),
            firstWordDelay(0), lastWordDelay(0),
            senderState(NULL)

@@ -307,16 +307,9 @@ PhysicalMemory::serializeStore(ostream& os, unsigned int store_id,

     // write memory file
     string filepath = Checkpoint::dir() + "/" + filename.c_str();
-    int fd = creat(filepath.c_str(), 0664);
-    if (fd < 0) {
-        perror("creat");
-        fatal("Can't open physical memory checkpoint file '%s'\n",
-              filename);
-    }
-
-    gzFile compressed_mem = gzdopen(fd, "wb");
+    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
     if (compressed_mem == NULL)
-        fatal("Insufficient memory to allocate compression state for %s\n",
+        fatal("Can't open physical memory checkpoint file '%s'\n",
               filename);

     uint64_t pass_size = 0;

@@ -380,16 +373,9 @@ PhysicalMemory::unserializeStore(Checkpoint* cp, const string& section)
     string filepath = cp->cptDir + "/" + filename;

     // mmap memoryfile
-    int fd = open(filepath.c_str(), O_RDONLY);
-    if (fd < 0) {
-        perror("open");
-        fatal("Can't open physical memory checkpoint file '%s'", filename);
-    }
-
-    gzFile compressed_mem = gzdopen(fd, "rb");
+    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
     if (compressed_mem == NULL)
-        fatal("Insufficient memory to allocate compression state for %s\n",
-              filename);
+        fatal("Can't open physical memory checkpoint file '%s'", filename);

     // we've already got the actual backing store mapped
     uint8_t* pmem = backingStore[store_id].second;

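In both hunks the creat()/open() plus gzdopen() pair is replaced by a single gzopen(), and the error message is corrected to describe the actual failure. One plausible motivation, consistent with the commit message, is that the raw descriptor would be reported as leaked if gzdopen() failed, whereas gzopen() owns the file for the whole lifetime of the stream. A minimal sketch, not gem5 code:

    #include <cstdio>
    #include <zlib.h>

    // Open a gzip stream for writing in one step: if this fails there is no
    // separately created file descriptor left behind to leak.
    static gzFile
    openCompressed(const char *path)
    {
        gzFile gz = gzopen(path, "wb");
        if (gz == NULL)
            std::fprintf(stderr, "can't open '%s'\n", path);
        return gz;
    }
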
@@ -205,7 +205,11 @@ RubyPort::PioSlavePort::recvTimingReq(PacketPtr pkt)
         AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
         for (auto it = l.begin(); it != l.end(); ++it) {
             if (it->contains(pkt->getAddr())) {
-                ruby_port->master_ports[i]->sendTimingReq(pkt);
+                // generally it is not safe to assume success here as
+                // the port could be blocked
+                bool M5_VAR_USED success =
+                    ruby_port->master_ports[i]->sendTimingReq(pkt);
+                assert(success);
                 return true;
             }
         }

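M5_VAR_USED marks the variable as intentionally unused so it does not warn in builds where assert() compiles away; the generic sketch below uses the standard [[maybe_unused]] attribute for the same purpose (the sendTiming callback is invented, not RubyPort's API):

    #include <cassert>

    // A call whose result must be true; the variable exists only for the
    // assert, and the attribute keeps -Wunused-variable quiet when NDEBUG
    // compiles the assert away.
    static void
    deliver(bool (*sendTiming)(int), int pkt)
    {
        [[maybe_unused]] bool success = sendTiming(pkt);
        assert(success);
    }
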
@@ -460,29 +460,28 @@ Event::dump() const
 }

 EventQueue::EventQueue(const string &n)
-    : objName(n), head(NULL), _curTick(0),
-      async_queue_mutex(new std::mutex())
+    : objName(n), head(NULL), _curTick(0)
 {
 }

 void
 EventQueue::asyncInsert(Event *event)
 {
-    async_queue_mutex->lock();
+    async_queue_mutex.lock();
     async_queue.push_back(event);
-    async_queue_mutex->unlock();
+    async_queue_mutex.unlock();
 }

 void
 EventQueue::handleAsyncInsertions()
 {
     assert(this == curEventQueue());
-    async_queue_mutex->lock();
+    async_queue_mutex.lock();

     while (!async_queue.empty()) {
         insert(async_queue.front());
         async_queue.pop_front();
     }

-    async_queue_mutex->unlock();
+    async_queue_mutex.unlock();
 }

@@ -42,6 +42,7 @@
 #include <cassert>
 #include <climits>
 #include <iosfwd>
 #include <memory>
+#include <mutex>
 #include <string>

@@ -448,7 +449,7 @@ class EventQueue : public Serializable
     Tick _curTick;

     //! Mutex to protect async queue.
-    std::mutex *async_queue_mutex;
+    std::mutex async_queue_mutex;

     //! List of events added by other threads to this event queue.
     std::list<Event*> async_queue;

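Holding the std::mutex by value removes the heap allocation and the never-freed pointer that the analyser flagged. A small sketch of the same structure under that assumption; the std::lock_guard is a stylistic alternative to the explicit lock()/unlock() calls above, not part of the commit:

    #include <list>
    #include <mutex>

    class AsyncQueue
    {
      public:
        void asyncInsert(int event)
        {
            // lock_guard unlocks automatically when the scope ends
            std::lock_guard<std::mutex> guard(queueMutex);
            pending.push_back(event);
        }

      private:
        std::mutex queueMutex;    // held by value, no new/delete required
        std::list<int> pending;
    };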