Fix some data types and add "inline" keyword for __reg_op() function.

Obtained from:	kmacy @
MFC after:	1 week
Sponsored by:	Mellanox Technologies

Hans Petter Selasky 2016-05-23 13:18:15 +00:00
parent 899466f91e
commit 425da8eb61
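
For context, here is a minimal caller sketch (illustrative only, not part of the commit; the include path, the function name and the bitmap array are assumptions) showing the Linux-style prototypes the header matches after this change: the find_*_bit() routines take const pointers, and test_and_set_bit()/test_and_clear_bit() take a volatile unsigned long * and return int.

/*
 * Illustrative sketch only -- not from the commit.
 * The include path below is the assumed LinuxKPI compat path.
 */
#include <linux/bitops.h>

static void
bitops_example(void)
{
	unsigned long bits[2] = { 0, 0 };	/* hypothetical bitmap */
	const unsigned long *ro = bits;		/* read-only view */
	unsigned long first;
	int was_set;

	/* test_and_set_bit() takes a volatile unsigned long * and returns int. */
	was_set = test_and_set_bit(3, bits);

	/* find_first_bit() and friends accept a const pointer. */
	first = find_first_bit(ro, 2 * BITS_PER_LONG);

	(void)was_set;
	(void)first;
}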

@@ -93,7 +93,7 @@ static inline int get_count_order(unsigned int count)
 }
 
 static inline unsigned long
-find_first_bit(unsigned long *addr, unsigned long size)
+find_first_bit(const unsigned long *addr, unsigned long size)
 {
 	long mask;
 	int bit;
@@ -115,7 +115,7 @@ find_first_bit(unsigned long *addr, unsigned long size)
 }
 
 static inline unsigned long
-find_first_zero_bit(unsigned long *addr, unsigned long size)
+find_first_zero_bit(const unsigned long *addr, unsigned long size)
 {
 	long mask;
 	int bit;
@@ -137,7 +137,7 @@ find_first_zero_bit(unsigned long *addr, unsigned long size)
 }
 
 static inline unsigned long
-find_last_bit(unsigned long *addr, unsigned long size)
+find_last_bit(const unsigned long *addr, unsigned long size)
 {
 	long mask;
 	int offs;
@@ -163,7 +163,7 @@ find_last_bit(unsigned long *addr, unsigned long size)
 }
 
 static inline unsigned long
-find_next_bit(unsigned long *addr, unsigned long size, unsigned long offset)
+find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
 {
 	long mask;
 	int offs;
@@ -202,7 +202,7 @@ find_next_bit(unsigned long *addr, unsigned long size, unsigned long offset)
 }
 
 static inline unsigned long
-find_next_zero_bit(unsigned long *addr, unsigned long size,
     unsigned long offset)
+find_next_zero_bit(const unsigned long *addr, unsigned long size,
     unsigned long offset)
 {
 	long mask;
@@ -306,23 +306,23 @@ bitmap_empty(unsigned long *addr, int size)
 }
 
 #define __set_bit(i, a) \
-    atomic_set_long(&((volatile long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
 
 #define set_bit(i, a) \
-    atomic_set_long(&((volatile long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
 
 #define __clear_bit(i, a) \
-    atomic_clear_long(&((volatile long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
 
 #define clear_bit(i, a) \
-    atomic_clear_long(&((volatile long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
 
 #define test_bit(i, a) \
-    !!(atomic_load_acq_long(&((volatile long *)(a))[BIT_WORD(i)]) & \
+    !!(atomic_load_acq_long(&((volatile unsigned long *)(a))[BIT_WORD(i)]) & \
	BIT_MASK(i))
 
-static inline long
-test_and_clear_bit(long bit, long *var)
+static inline int
+test_and_clear_bit(long bit, volatile unsigned long *var)
 {
 	long val;
 
@@ -330,14 +330,14 @@ test_and_clear_bit(long bit, long *var)
 	bit %= BITS_PER_LONG;
 	bit = (1UL << bit);
 	do {
-		val = *(volatile long *)var;
+		val = *var;
 	} while (atomic_cmpset_long(var, val, val & ~bit) == 0);
 
 	return !!(val & bit);
 }
 
-static inline long
-test_and_set_bit(long bit, long *var)
+static inline int
+test_and_set_bit(long bit, volatile unsigned long *var)
 {
 	long val;
 
@@ -345,7 +345,7 @@ test_and_set_bit(long bit, long *var)
 	bit %= BITS_PER_LONG;
 	bit = (1UL << bit);
 	do {
-		val = *(volatile long *)var;
+		val = *var;
 	} while (atomic_cmpset_long(var, val, val | bit) == 0);
 
 	return !!(val & bit);
@@ -399,7 +399,8 @@ enum {
 	REG_OP_RELEASE,
 };
 
-static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
+static inline int
+__reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
 {
 	int nbits_reg;
 	int index;
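
On the last hunk: __reg_op() is defined in a header, so the "inline" keyword matters beyond style. A plain static function defined in a header can draw unused-function warnings in translation units that include the header without calling it, while "static inline" avoids that and lets the compiler fold the body into callers. A generic sketch of the pattern (the helper name is made up, not from the commit):

/*
 * Illustrative pattern only: a helper defined in a shared header is
 * normally declared "static inline" so that files which include the
 * header but never call it do not warn about an unused function.
 */
static inline int
header_helper(int x)		/* hypothetical name */
{
	return (x * 2);
}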