Inlining 64bit integer handling functions

- if gcc/clang is used, the C functions that use long long can be
  inlined to get better performance (yes, it makes a difference)
This commit is contained in:
Tomas Hruby 2010-11-18 16:37:40 +00:00
parent 3ff6f21b51
commit 504abf4b34
3 changed files with 162 additions and 176 deletions

View file

@ -9,6 +9,8 @@
#include <minix/types.h>
#endif
#if !defined(__LONG_LONG_SUPPORTED)
u64_t add64(u64_t i, u64_t j);
u64_t add64u(u64_t i, unsigned j);
u64_t add64ul(u64_t i, unsigned long j);
@ -40,7 +42,6 @@ u64_t xor64(u64_t a, u64_t b);
u64_t and64(u64_t a, u64_t b);
u64_t not64(u64_t a);
#if !defined(__LONG_LONG_SUPPORTED)
#define is_zero64(i) ((i).lo == 0 && (i).hi == 0)
#define make_zero64(i) do { (i).lo = (i).hi = 0; } while(0)
@ -50,9 +51,169 @@ u64_t not64(u64_t a);
(i) = add64u((i), 1); \
} while(0)
#else
#include <limits.h>
#define is_zero64(i) ((i) == 0)
#define make_zero64(i) ((i) = 0)
#define neg64(i) ((i) = -(i))
static inline u64_t add64(u64_t i, u64_t j)
{
return i + j;
}
static inline u64_t add64u(u64_t i, unsigned j)
{
return i + j;
}
static inline u64_t add64ul(u64_t i, unsigned long j)
{
return i + j;
}
static inline int bsr64(u64_t i)
{
int index;
u64_t mask;
for (index = 63, mask = 1ULL << 63; index >= 0; --index, mask >>= 1) {
if (i & mask)
return index;
}
return -1;
}
static inline int cmp64(u64_t i, u64_t j)
{
if (i > j)
return 1;
else if (i < j)
return -1;
else /* (i == j) */
return 0;
}
static inline int cmp64u(u64_t i, unsigned j)
{
if (i > j)
return 1;
else if (i < j)
return -1;
else /* (i == j) */
return 0;
}
static inline int cmp64ul(u64_t i, unsigned long j)
{
if (i > j)
return 1;
else if (i < j)
return -1;
else /* (i == j) */
return 0;
}
static inline unsigned cv64u(u64_t i)
{
/* return ULONG_MAX if really big */
if (i>>32)
return ULONG_MAX;
return (unsigned)i;
}
static inline unsigned long cv64ul(u64_t i)
{
/* return ULONG_MAX if really big */
if (i>>32)
return ULONG_MAX;
return (unsigned long)i;
}
static inline u64_t cvu64(unsigned i)
{
return i;
}
static inline u64_t cvul64(unsigned long i)
{
return i;
}
static inline unsigned diff64(u64_t i, u64_t j)
{
return (unsigned)(i - j);
}
static inline u64_t div64(u64_t i, u64_t j)
{
return i / j;
}
static inline u64_t rem64(u64_t i, u64_t j)
{
return i % j;
}
static inline unsigned long div64u(u64_t i, unsigned j)
{
return (unsigned long)(i / j);
}
static inline u64_t div64u64(u64_t i, unsigned j)
{
return i / j;
}
static inline unsigned rem64u(u64_t i, unsigned j)
{
return (unsigned)(i % j);
}
static inline unsigned long ex64lo(u64_t i)
{
return (unsigned long)i;
}
static inline unsigned long ex64hi(u64_t i)
{
return (unsigned long)(i>>32);
}
static inline u64_t make64(unsigned long lo, unsigned long hi)
{
return ((u64_t)hi << 32) | (u64_t)lo;
}
static inline u64_t mul64(u64_t i, u64_t j)
{
return i * j;
}
static inline u64_t mul64u(unsigned long i, unsigned j)
{
return (u64_t)i * j;
}
static inline u64_t sub64(u64_t i, u64_t j)
{
return i - j;
}
static inline u64_t sub64u(u64_t i, unsigned j)
{
return i - j;
}
static inline u64_t sub64ul(u64_t i, unsigned long j)
{
return i - j;
}
#endif
#endif /* _MINIX__U64_H */

View file

@ -1,7 +1,6 @@
# int64 sources
.PATH: ${.CURDIR}/arch/${ARCH}/int64
.if ${COMPILER_TYPE} == "ack"
SRCS+= \
add64.S \
add64u.S \
@ -18,6 +17,3 @@ SRCS+= \
mul64u.S \
sub64.S \
sub64u.S
.elif ${COMPILER_TYPE} == "gnu"
SRCS+= int64.c
.endif

View file

@ -1,171 +0,0 @@
/*
 * This file implements 64-bit arithmetic functions. These functions will
 * go away once clang is ready.
 *
 * It will only work with GCC and clang.
 *
 */
#include <minix/u64.h>
#include <limits.h>
#if !defined(__LONG_LONG_SUPPORTED)
#error "ERROR: This file requires long long support"
#endif
u64_t add64(u64_t i, u64_t j)
{
return i + j;
}
u64_t add64u(u64_t i, unsigned j)
{
return i + j;
}
u64_t add64ul(u64_t i, unsigned long j)
{
return i + j;
}
int bsr64(u64_t i)
{
int index;
u64_t mask;
for (index = 63, mask = 1ULL << 63; index >= 0; --index, mask >>= 1) {
if (i & mask)
return index;
}
return -1;
}
int cmp64(u64_t i, u64_t j)
{
if (i > j)
return 1;
else if (i < j)
return -1;
else /* (i == j) */
return 0;
}
int cmp64u(u64_t i, unsigned j)
{
if (i > j)
return 1;
else if (i < j)
return -1;
else /* (i == j) */
return 0;
}
int cmp64ul(u64_t i, unsigned long j)
{
if (i > j)
return 1;
else if (i < j)
return -1;
else /* (i == j) */
return 0;
}
unsigned cv64u(u64_t i)
{
/* return ULONG_MAX if really big */
if (i>>32)
return ULONG_MAX;
return (unsigned)i;
}
unsigned long cv64ul(u64_t i)
{
/* return ULONG_MAX if really big */
if (i>>32)
return ULONG_MAX;
return (unsigned long)i;
}
u64_t cvu64(unsigned i)
{
return i;
}
u64_t cvul64(unsigned long i)
{
return i;
}
unsigned diff64(u64_t i, u64_t j)
{
return (unsigned)(i - j);
}
u64_t div64(u64_t i, u64_t j)
{
return i / j;
}
u64_t rem64(u64_t i, u64_t j)
{
return i % j;
}
unsigned long div64u(u64_t i, unsigned j)
{
return (unsigned long)(i / j);
}
u64_t div64u64(u64_t i, unsigned j)
{
return i / j;
}
unsigned rem64u(u64_t i, unsigned j)
{
return (unsigned)(i % j);
}
unsigned long ex64lo(u64_t i)
{
return (unsigned long)i;
}
unsigned long ex64hi(u64_t i)
{
return (unsigned long)(i>>32);
}
u64_t make64(unsigned long lo, unsigned long hi)
{
return ((u64_t)hi << 32) | (u64_t)lo;
}
u64_t mul64(u64_t i, u64_t j)
{
return i * j;
}
u64_t mul64u(unsigned long i, unsigned j)
{
return (u64_t)i * j;
}
u64_t sub64(u64_t i, u64_t j)
{
return i - j;
}
u64_t sub64u(u64_t i, unsigned j)
{
return i - j;
}
u64_t sub64ul(u64_t i, unsigned long j)
{
return i - j;
}