mem: Tidy up CacheBlk class

This patch modernises and tidies up the CacheBlk class, removing dead code.
This commit is contained in:
Andreas Hansson 2015-07-30 03:41:39 -04:00
parent 41b39b22cd
commit 5902e29e84

71
src/mem/cache/blk.hh vendored
View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2012-2014 ARM Limited * Copyright (c) 2012-2015 ARM Limited
* All rights reserved. * All rights reserved.
* *
* The license below extends only to copyright in the software and shall * The license below extends only to copyright in the software and shall
@ -45,20 +45,19 @@
* Definitions of a simple cache block class. * Definitions of a simple cache block class.
*/ */
#ifndef __CACHE_BLK_HH__ #ifndef __MEM_CACHE_BLK_HH__
#define __CACHE_BLK_HH__ #define __MEM_CACHE_BLK_HH__
#include <list> #include <list>
#include "base/printable.hh" #include "base/printable.hh"
#include "mem/packet.hh" #include "mem/packet.hh"
#include "mem/request.hh" #include "mem/request.hh"
#include "sim/core.hh" // for Tick
/** /**
* Cache block status bit assignments * Cache block status bit assignments
*/ */
enum CacheBlkStatusBits { enum CacheBlkStatusBits : unsigned {
/** valid, readable */ /** valid, readable */
BlkValid = 0x01, BlkValid = 0x01,
/** write permission */ /** write permission */
@ -67,8 +66,6 @@ enum CacheBlkStatusBits {
BlkReadable = 0x04, BlkReadable = 0x04,
/** dirty (modified) */ /** dirty (modified) */
BlkDirty = 0x08, BlkDirty = 0x08,
/** block was referenced */
BlkReferenced = 0x10,
/** block was a hardware prefetch yet unaccessed */ /** block was a hardware prefetch yet unaccessed */
BlkHWPrefetched = 0x20, BlkHWPrefetched = 0x20,
/** block holds data from the secure memory space */ /** block holds data from the secure memory space */
@ -98,7 +95,7 @@ class CacheBlk
*/ */
uint8_t *data; uint8_t *data;
/** the number of bytes stored in this block. */ /** the number of bytes stored in this block. */
int size; unsigned size;
/** block state: OR of CacheBlkStatusBit */ /** block state: OR of CacheBlkStatusBit */
typedef unsigned State; typedef unsigned State;
@ -119,7 +116,7 @@ class CacheBlk
bool isTouched; bool isTouched;
/** Number of references to this block since it was brought in. */ /** Number of references to this block since it was brought in. */
int refCount; unsigned refCount;
/** holds the source requestor ID for this block. */ /** holds the source requestor ID for this block. */
int srcMasterId; int srcMasterId;
@ -138,7 +135,7 @@ class CacheBlk
Addr highAddr; // high address of lock range Addr highAddr; // high address of lock range
// check for matching execution context // check for matching execution context
bool matchesContext(Request *req) bool matchesContext(const RequestPtr req) const
{ {
Addr req_low = req->getPaddr(); Addr req_low = req->getPaddr();
Addr req_high = req_low + req->getSize() -1; Addr req_high = req_low + req->getSize() -1;
@ -146,7 +143,7 @@ class CacheBlk
(req_low >= lowAddr) && (req_high <= highAddr); (req_low >= lowAddr) && (req_high <= highAddr);
} }
bool overlapping(Request *req) bool overlapping(const RequestPtr req) const
{ {
Addr req_low = req->getPaddr(); Addr req_low = req->getPaddr();
Addr req_high = req_low + req->getSize() - 1; Addr req_high = req_low + req->getSize() - 1;
@ -154,7 +151,7 @@ class CacheBlk
return (req_low <= highAddr) && (req_high >= lowAddr); return (req_low <= highAddr) && (req_high >= lowAddr);
} }
Lock(Request *req) Lock(const RequestPtr req)
: contextId(req->contextId()), : contextId(req->contextId()),
lowAddr(req->getPaddr()), lowAddr(req->getPaddr()),
highAddr(lowAddr + req->getSize() - 1) highAddr(lowAddr + req->getSize() - 1)
@ -176,24 +173,8 @@ class CacheBlk
tickInserted(0) tickInserted(0)
{} {}
/** CacheBlk(const CacheBlk&) = delete;
* Copy the state of the given block into this one. CacheBlk& operator=(const CacheBlk&) = delete;
* @param rhs The block to copy.
* @return a const reference to this block.
*/
const CacheBlk& operator=(const CacheBlk& rhs)
{
asid = rhs.asid;
tag = rhs.tag;
data = rhs.data;
size = rhs.size;
status = rhs.status;
whenReady = rhs.whenReady;
set = rhs.set;
refCount = rhs.refCount;
task_id = rhs.task_id;
return *this;
}
/** /**
* Checks the write permissions of this block. * Checks the write permissions of this block.
@ -245,15 +226,6 @@ class CacheBlk
return (status & BlkDirty) != 0; return (status & BlkDirty) != 0;
} }
/**
* Check if this block has been referenced.
* @return True if the block has been referenced.
*/
bool isReferenced() const
{
return (status & BlkReferenced) != 0;
}
/** /**
* Check if this block was the result of a hardware prefetch, yet to * Check if this block was the result of a hardware prefetch, yet to
* be touched. * be touched.
@ -282,21 +254,21 @@ class CacheBlk
void trackLoadLocked(PacketPtr pkt) void trackLoadLocked(PacketPtr pkt)
{ {
assert(pkt->isLLSC()); assert(pkt->isLLSC());
lockList.push_front(Lock(pkt->req)); lockList.emplace_front(pkt->req);
} }
/** /**
* Clear the list of valid load locks. Should be called whenever * Clear the list of valid load locks. Should be called whenever
* block is written to or invalidated. * block is written to or invalidated.
*/ */
void clearLoadLocks(Request *req = NULL) void clearLoadLocks(RequestPtr req = nullptr)
{ {
if (!req) { if (!req) {
// No request, invalidate all locks to this line // No request, invalidate all locks to this line
lockList.clear(); lockList.clear();
} else { } else {
// Only invalidate locks that overlap with this request // Only invalidate locks that overlap with this request
std::list<Lock>::iterator lock_itr = lockList.begin(); auto lock_itr = lockList.begin();
while (lock_itr != lockList.end()) { while (lock_itr != lockList.end()) {
if (lock_itr->overlapping(req)) { if (lock_itr->overlapping(req)) {
lock_itr = lockList.erase(lock_itr); lock_itr = lockList.erase(lock_itr);
@ -350,16 +322,19 @@ class CacheBlk
*/ */
bool checkWrite(PacketPtr pkt) bool checkWrite(PacketPtr pkt)
{ {
Request *req = pkt->req; // common case
if (!pkt->isLLSC() && lockList.empty())
return true;
RequestPtr req = pkt->req;
if (pkt->isLLSC()) { if (pkt->isLLSC()) {
// it's a store conditional... have to check for matching // it's a store conditional... have to check for matching
// load locked. // load locked.
bool success = false; bool success = false;
for (std::list<Lock>::iterator i = lockList.begin(); for (const auto& l : lockList) {
i != lockList.end(); ++i) if (l.matchesContext(req)) {
{
if (i->matchesContext(req)) {
// it's a store conditional, and as far as the memory // it's a store conditional, and as far as the memory
// system can tell, the requesting context's lock is // system can tell, the requesting context's lock is
// still valid. // still valid.
@ -412,4 +387,4 @@ class CacheBlkVisitor
virtual bool operator()(CacheBlk &blk) = 0; virtual bool operator()(CacheBlk &blk) = 0;
}; };
#endif //__CACHE_BLK_HH__ #endif //__MEM_CACHE_BLK_HH__