base: Fix address range granularity calculations
This patch fixes a bug in the address range granularity calculations. Previously it incorrectly used the high bit to establish the size of the regions created, when it should really be looking at the low bit.
This commit is contained in:
parent
92e973b310
commit
82f600e02d
1 changed file with 4 additions and 1 deletion
|
@ -140,7 +140,10 @@ class AddrRange
|
 *
 * @return The size of the regions created by the interleaving bits
 */
uint64_t granularity() const { return ULL(1) << intlvHighBit; }
|
uint64_t granularity() const
|
||||||
|
{
|
||||||
|
return ULL(1) << (intlvHighBit - intlvBits);
|
||||||
|
}
|
/**
 * Determine the number of interleaved address stripes this range
|
|
Loading…
Reference in a new issue