MFC: Convert the atomic pointer operations to operate on uintptr_t rather
than void *.

Approved by:	re (kensmith)

parent 5312e7b17e
commit 0ef1901fa9
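
The conversion is mechanical but wide: every machine-dependent atomic_*_ptr
operation now takes a volatile uintptr_t * and uintptr_t operands instead of
void *, and every caller casts accordingly. As orientation before the diff, a
minimal before/after sketch of a typical call site (the list head `head` and
the `push()` helper are hypothetical, not from this commit):

	#include <machine/atomic.h>

	struct foo *head;		/* hypothetical lock-free list head */

	int
	push(struct foo *p)
	{
		/* Before: atomic_cmpset_ptr(&head, NULL, p) took void * args. */
		/* After: the pointer is updated as an integer-sized word. */
		return (atomic_cmpset_ptr((volatile uintptr_t *)&head,
		    (uintptr_t)NULL, (uintptr_t)p));
	}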
@@ -180,7 +180,9 @@ _lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
 	atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq);
 
 	if (lu->lu_watchreq->lr_locked != 0) {
-		atomic_store_rel_ptr(&lu->lu_watchreq->lr_watcher, lu);
+		atomic_store_rel_ptr
+		    ((volatile uintptr_t *)&lu->lu_watchreq->lr_watcher,
+		    (uintptr_t)lu);
 		if ((lck->l_wait == NULL) ||
 		    ((lck->l_type & LCK_ADAPTIVE) == 0)) {
 			while (lu->lu_watchreq->lr_locked != 0)
@@ -250,14 +252,19 @@ _lock_release(struct lock *lck, struct lockuser *lu)
 
 	/* Update tail if our request is last. */
 	if (lu->lu_watchreq->lr_owner == NULL) {
-		atomic_store_rel_ptr(&lck->l_tail, lu->lu_myreq);
-		atomic_store_rel_ptr(&lu->lu_myreq->lr_owner, NULL);
+		atomic_store_rel_ptr((volatile uintptr_t *)&lck->l_tail,
+		    (uintptr_t)lu->lu_myreq);
+		atomic_store_rel_ptr
+		    ((volatile uintptr_t *)&lu->lu_myreq->lr_owner,
+		    (uintptr_t)NULL);
 	} else {
 		/* Remove ourselves from the list. */
-		atomic_store_rel_ptr(&lu->lu_myreq->lr_owner,
-		    lu->lu_watchreq->lr_owner);
-		atomic_store_rel_ptr(
-		    &lu->lu_watchreq->lr_owner->lu_myreq, lu->lu_myreq);
+		atomic_store_rel_ptr((volatile uintptr_t *)
+		    &lu->lu_myreq->lr_owner,
+		    (uintptr_t)lu->lu_watchreq->lr_owner);
+		atomic_store_rel_ptr((volatile uintptr_t *)
+		    &lu->lu_watchreq->lr_owner->lu_myreq,
+		    (uintptr_t)lu->lu_myreq);
 	}
 	/*
 	 * The watch request now becomes our own because we've
@@ -47,30 +47,36 @@ _thr_umtx_init(volatile umtx_t *mtx)
 static inline int
 _thr_umtx_trylock(volatile umtx_t *mtx, long id)
 {
-	return umtx_trylock((struct umtx *)mtx, id);
+	if (atomic_cmpset_acq_ptr((volatile uintptr_t *)mtx,
+	    (uintptr_t)UMTX_UNOWNED, (uintptr_t)id))
+		return (0);
+	return (EBUSY);
 }
 
 static inline int
 _thr_umtx_lock(volatile umtx_t *mtx, long id)
 {
-	if (atomic_cmpset_acq_ptr(mtx, (void *)UMTX_UNOWNED, (void *)id))
+	if (atomic_cmpset_acq_ptr((volatile uintptr_t *)mtx,
+	    (uintptr_t)UMTX_UNOWNED, (uintptr_t)id))
 		return (0);
-	return __thr_umtx_lock(mtx, id);
+	return (__thr_umtx_lock(mtx, id));
 }
 
 static inline int
 _thr_umtx_timedlock(volatile umtx_t *mtx, long id,
 	const struct timespec *timeout)
 {
-	if (atomic_cmpset_acq_ptr(mtx, (void *)UMTX_UNOWNED, (void *)id))
+	if (atomic_cmpset_acq_ptr((volatile uintptr_t *)mtx,
+	    (uintptr_t)UMTX_UNOWNED, (uintptr_t)id))
 		return (0);
-	return __thr_umtx_timedlock(mtx, id, timeout);
+	return (__thr_umtx_timedlock(mtx, id, timeout));
 }
 
 static inline int
 _thr_umtx_unlock(volatile umtx_t *mtx, long id)
 {
-	if (atomic_cmpset_rel_ptr(mtx, (void *)id, (void *)UMTX_UNOWNED))
+	if (atomic_cmpset_rel_ptr((volatile uintptr_t *)mtx,
+	    (uintptr_t)id, (uintptr_t)UMTX_UNOWNED))
 		return (0);
 	return __thr_umtx_unlock(mtx, id);
 }
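
The converted _thr_umtx_trylock()/_thr_umtx_unlock() above are plain
compare-and-swap operations on the lock word: swap UMTX_UNOWNED for the
caller's id with acquire semantics to lock, and swap it back with release
semantics to unlock. A self-contained sketch of the same pattern, with
stand-in names (lockword_t, LW_UNOWNED) rather than the real
umtx_t/UMTX_UNOWNED:

	#include <errno.h>
	#include <stdint.h>
	#include <machine/atomic.h>

	typedef volatile uintptr_t lockword_t;	/* stand-in for umtx_t */
	#define	LW_UNOWNED	((uintptr_t)0)	/* stand-in for UMTX_UNOWNED */

	static int
	lockword_trylock(lockword_t *lw, uintptr_t id)
	{
		/* Acquire: on success, later loads cannot be reordered above. */
		if (atomic_cmpset_acq_ptr(lw, LW_UNOWNED, id))
			return (0);
		return (EBUSY);
	}

	static int
	lockword_unlock(lockword_t *lw, uintptr_t id)
	{
		/* Release: earlier stores are visible before the word clears. */
		if (atomic_cmpset_rel_ptr(lw, id, LW_UNOWNED))
			return (0);
		return (EINVAL);	/* caller was not the owner */
	}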
@@ -434,66 +434,23 @@ atomic_cmpset_rel_64(volatile u_int64_t *p, u_int64_t cmpval, u_int64_t newval)
 #define	atomic_readandclear_long	atomic_readandclear_64
 
 /* Operations on pointers. */
-static __inline int
-atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
-{
-
-	return (atomic_cmpset_long((volatile u_long *)dst, (u_long)exp,
-	    (u_long)src));
-}
-
-static __inline int
-atomic_cmpset_acq_ptr(volatile void *dst, void *exp, void *src)
-{
-
-	return (atomic_cmpset_acq_long((volatile u_long *)dst, (u_long)exp,
-	    (u_long)src));
-}
-
-static __inline int
-atomic_cmpset_rel_ptr(volatile void *dst, void *exp, void *src)
-{
-
-	return (atomic_cmpset_rel_long((volatile u_long *)dst, (u_long)exp,
-	    (u_long)src));
-}
-
-static __inline void *
-atomic_load_acq_ptr(volatile void *p)
-{
-	return (void *)atomic_load_acq_long((volatile u_long *)p);
-}
-
-static __inline void
-atomic_store_rel_ptr(volatile void *p, void *v)
-{
-	atomic_store_rel_long((volatile u_long *)p, (u_long)v);
-}
-
-#define	ATOMIC_PTR(NAME)				\
-static __inline void					\
-atomic_##NAME##_ptr(volatile void *p, uintptr_t v)	\
-{							\
-	atomic_##NAME##_long((volatile u_long *)p, v);	\
-}							\
-							\
-static __inline void					\
-atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)	\
-{							\
-	atomic_##NAME##_acq_long((volatile u_long *)p, v);\
-}							\
-							\
-static __inline void					\
-atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)	\
-{							\
-	atomic_##NAME##_rel_long((volatile u_long *)p, v);\
-}
-
-ATOMIC_PTR(set)
-ATOMIC_PTR(clear)
-ATOMIC_PTR(add)
-ATOMIC_PTR(subtract)
-
-#undef ATOMIC_PTR
+#define	atomic_set_ptr		atomic_set_64
+#define	atomic_set_acq_ptr	atomic_set_acq_64
+#define	atomic_set_rel_ptr	atomic_set_rel_64
+#define	atomic_clear_ptr	atomic_clear_64
+#define	atomic_clear_acq_ptr	atomic_clear_acq_64
+#define	atomic_clear_rel_ptr	atomic_clear_rel_64
+#define	atomic_add_ptr		atomic_add_64
+#define	atomic_add_acq_ptr	atomic_add_acq_64
+#define	atomic_add_rel_ptr	atomic_add_rel_64
+#define	atomic_subtract_ptr	atomic_subtract_64
+#define	atomic_subtract_acq_ptr	atomic_subtract_acq_64
+#define	atomic_subtract_rel_ptr	atomic_subtract_rel_64
+#define	atomic_cmpset_ptr	atomic_cmpset_64
+#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_64
+#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_64
+#define	atomic_load_acq_ptr	atomic_load_acq_64
+#define	atomic_store_rel_ptr	atomic_store_rel_64
+#define	atomic_readandclear_ptr	atomic_readandclear_64
 
 #endif /* ! _MACHINE_ATOMIC_H_ */
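
The inline wrappers removed above could become one-line aliases because, with
uintptr_t operands, a _ptr operation is bit-for-bit the same operation as the
_long/_64 one on an LP64 machine; no void * conversion remains to hide. An
illustrative compile-time check of that width assumption (not part of the
commit; _Static_assert is a later C11 spelling):

	#include <stdint.h>

	_Static_assert(sizeof(uintptr_t) == sizeof(unsigned long),
	    "ptr ops may alias long ops only when the widths match");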
@@ -292,9 +292,6 @@ u_long	atomic_readandclear_long(volatile u_long *);
 #define	atomic_cmpset_acq_long	atomic_cmpset_long
 #define	atomic_cmpset_rel_long	atomic_cmpset_long
 
-#define	atomic_cmpset_acq_ptr	atomic_cmpset_ptr
-#define	atomic_cmpset_rel_ptr	atomic_cmpset_ptr
-
 /* Operations on 8-bit bytes. */
 #define	atomic_set_8		atomic_set_char
 #define	atomic_set_acq_8	atomic_set_acq_char
@@ -348,55 +345,24 @@ u_long	atomic_readandclear_long(volatile u_long *);
 #define	atomic_readandclear_32	atomic_readandclear_int
 
 /* Operations on pointers. */
-static __inline int
-atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
-{
-
-	return (atomic_cmpset_long((volatile u_long *)dst,
-	    (u_long)exp, (u_long)src));
-}
-
-static __inline void *
-atomic_load_acq_ptr(volatile void *p)
-{
-	/*
-	 * The apparently-bogus cast to intptr_t in the following is to
-	 * avoid a warning from "gcc -Wbad-function-cast".
-	 */
-	return ((void *)(intptr_t)atomic_load_acq_long((volatile u_long *)p));
-}
-
-static __inline void
-atomic_store_rel_ptr(volatile void *p, void *v)
-{
-	atomic_store_rel_long((volatile u_long *)p, (u_long)v);
-}
-
-#define	ATOMIC_PTR(NAME)				\
-static __inline void					\
-atomic_##NAME##_ptr(volatile void *p, uintptr_t v)	\
-{							\
-	atomic_##NAME##_long((volatile u_long *)p, v);	\
-}							\
-							\
-static __inline void					\
-atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)	\
-{							\
-	atomic_##NAME##_acq_long((volatile u_long *)p, v);\
-}							\
-							\
-static __inline void					\
-atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)	\
-{							\
-	atomic_##NAME##_rel_long((volatile u_long *)p, v);\
-}
-
-ATOMIC_PTR(set)
-ATOMIC_PTR(clear)
-ATOMIC_PTR(add)
-ATOMIC_PTR(subtract)
-
-#undef ATOMIC_PTR
+#define	atomic_set_ptr		atomic_set_long
+#define	atomic_set_acq_ptr	atomic_set_acq_long
+#define	atomic_set_rel_ptr	atomic_set_rel_long
+#define	atomic_clear_ptr	atomic_clear_long
+#define	atomic_clear_acq_ptr	atomic_clear_acq_long
+#define	atomic_clear_rel_ptr	atomic_clear_rel_long
+#define	atomic_add_ptr		atomic_add_long
+#define	atomic_add_acq_ptr	atomic_add_acq_long
+#define	atomic_add_rel_ptr	atomic_add_rel_long
+#define	atomic_subtract_ptr	atomic_subtract_long
+#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
+#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
+#define	atomic_load_acq_ptr	atomic_load_acq_long
+#define	atomic_store_rel_ptr	atomic_store_rel_long
+#define	atomic_cmpset_ptr	atomic_cmpset_long
+#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
+#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
+#define	atomic_readandclear_ptr	atomic_readandclear_long
 
 #endif /* !defined(WANT_FUNCTIONS) */
 #endif /* ! _MACHINE_ATOMIC_H_ */
@@ -285,13 +285,9 @@ atomic_readandclear_32(volatile u_int32_t *p)
 #define	atomic_cmpset_rel_32	atomic_cmpset_32
 #define	atomic_cmpset_rel_ptr	atomic_cmpset_ptr
 #define	atomic_load_acq_int	atomic_load_32
-#define	atomic_clear_ptr(ptr, bit)	atomic_clear_32( \
-    (volatile uint32_t *)ptr, (uint32_t)bit)
-#define	atomic_store_ptr(ptr, bit)	atomic_store_32( \
-    (volatile uint32_t *)ptr, (uint32_t)bit)
-#define	atomic_cmpset_ptr(dst, exp, s)	atomic_cmpset_32( \
-    (volatile uint32_t *)dst, (uint32_t)exp, (uint32_t)s)
-#define	atomic_set_ptr(ptr, src)	atomic_set_32( \
-    (volatile uint32_t *)ptr, (uint32_t)src)
+#define	atomic_clear_ptr	atomic_clear_32
+#define	atomic_store_ptr	atomic_store_32
+#define	atomic_cmpset_ptr	atomic_cmpset_32
+#define	atomic_set_ptr		atomic_set_32
 
 #endif /* _MACHINE_ATOMIC_H_ */
@@ -115,7 +115,8 @@ hatm_ext_free(struct mbufx_free **list, struct mbufx_free *buf)
 {
 	for (;;) {
 		buf->link = *list;
-		if (atomic_cmpset_ptr(list, buf->link, buf))
+		if (atomic_cmpset_ptr((uintptr_t *)list, (uintptr_t)buf->link,
+		    (uintptr_t)buf))
 			break;
 	}
 }
@@ -128,7 +129,8 @@ hatm_ext_alloc(struct hatm_softc *sc, u_int g)
 	for (;;) {
 		if ((buf = sc->mbuf_list[g]) == NULL)
 			break;
-		if (atomic_cmpset_ptr(&sc->mbuf_list[g], buf, buf->link))
+		if (atomic_cmpset_ptr((uintptr_t *)&sc->mbuf_list[g],
+		    (uintptr_t)buf, (uintptr_t)buf->link))
 			break;
 	}
 	if (buf == NULL) {
@@ -136,7 +138,8 @@ hatm_ext_alloc(struct hatm_softc *sc, u_int g)
 		for (;;) {
 			if ((buf = sc->mbuf_list[g]) == NULL)
 				break;
-			if (atomic_cmpset_ptr(&sc->mbuf_list[g], buf, buf->link))
+			if (atomic_cmpset_ptr((uintptr_t *)&sc->mbuf_list[g],
+			    (uintptr_t)buf, (uintptr_t)buf->link))
 				break;
 		}
 	}
@@ -771,7 +771,8 @@ pmc_link_target_process(struct pmc *pm, struct pmc_process *pp)
 
 	LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next);
 
-	atomic_store_rel_ptr(&pp->pp_pmcs[ri].pp_pmc, pm);
+	atomic_store_rel_ptr((uintptr_t *)&pp->pp_pmcs[ri].pp_pmc,
+	    (uintptr_t)pm);
 
 	if (pm->pm_owner->po_owner == pp->pp_proc)
 		pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER;
@@ -336,9 +336,6 @@ u_long	atomic_readandclear_long(volatile u_long *);
 #define	atomic_cmpset_acq_long	atomic_cmpset_long
 #define	atomic_cmpset_rel_long	atomic_cmpset_long
 
-#define	atomic_cmpset_acq_ptr	atomic_cmpset_ptr
-#define	atomic_cmpset_rel_ptr	atomic_cmpset_ptr
-
 /* Operations on 8-bit bytes. */
 #define	atomic_set_8		atomic_set_char
 #define	atomic_set_acq_8	atomic_set_acq_char
@@ -392,55 +389,24 @@ u_long	atomic_readandclear_long(volatile u_long *);
 #define	atomic_readandclear_32	atomic_readandclear_int
 
 /* Operations on pointers. */
-static __inline int
-atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
-{
-
-	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)exp,
-	    (u_int)src));
-}
-
-static __inline void *
-atomic_load_acq_ptr(volatile void *p)
-{
-	/*
-	 * The apparently-bogus cast to intptr_t in the following is to
-	 * avoid a warning from "gcc -Wbad-function-cast".
-	 */
-	return ((void *)(intptr_t)atomic_load_acq_int((volatile u_int *)p));
-}
-
-static __inline void
-atomic_store_rel_ptr(volatile void *p, void *v)
-{
-	atomic_store_rel_int((volatile u_int *)p, (u_int)v);
-}
-
-#define	ATOMIC_PTR(NAME)				\
-static __inline void					\
-atomic_##NAME##_ptr(volatile void *p, uintptr_t v)	\
-{							\
-	atomic_##NAME##_int((volatile u_int *)p, v);	\
-}							\
-							\
-static __inline void					\
-atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)	\
-{							\
-	atomic_##NAME##_acq_int((volatile u_int *)p, v);\
-}							\
-							\
-static __inline void					\
-atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)	\
-{							\
-	atomic_##NAME##_rel_int((volatile u_int *)p, v);\
-}
-
-ATOMIC_PTR(set)
-ATOMIC_PTR(clear)
-ATOMIC_PTR(add)
-ATOMIC_PTR(subtract)
-
-#undef ATOMIC_PTR
+#define	atomic_set_ptr		atomic_set_int
+#define	atomic_set_acq_ptr	atomic_set_acq_int
+#define	atomic_set_rel_ptr	atomic_set_rel_int
+#define	atomic_clear_ptr	atomic_clear_int
+#define	atomic_clear_acq_ptr	atomic_clear_acq_int
+#define	atomic_clear_rel_ptr	atomic_clear_rel_int
+#define	atomic_add_ptr		atomic_add_int
+#define	atomic_add_acq_ptr	atomic_add_acq_int
+#define	atomic_add_rel_ptr	atomic_add_rel_int
+#define	atomic_subtract_ptr	atomic_subtract_int
+#define	atomic_subtract_acq_ptr	atomic_subtract_acq_int
+#define	atomic_subtract_rel_ptr	atomic_subtract_rel_int
+#define	atomic_load_acq_ptr	atomic_load_acq_int
+#define	atomic_store_rel_ptr	atomic_store_rel_int
+#define	atomic_cmpset_ptr	atomic_cmpset_int
+#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_int
+#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_int
+#define	atomic_readandclear_ptr	atomic_readandclear_int
 
 #endif /* !defined(WANT_FUNCTIONS) */
 #endif /* ! _MACHINE_ATOMIC_H_ */
@@ -138,6 +138,9 @@ ATOMIC_STORE_LOAD(long, 64, "8")
 
 #undef ATOMIC_STORE_LOAD
 
+#define	atomic_load_acq_ptr	atomic_load_acq_64
+#define	atomic_store_rel_ptr	atomic_store_rel_64
+
 #define	IA64_ATOMIC(sz, type, name, width, op)				\
 static __inline type							\
 atomic_##name##_acq_##width(volatile type *p, type v)			\
@@ -255,6 +258,19 @@ IA64_ATOMIC(8, uint64_t, subtract, 64, -)
 #define	atomic_add_rel_long	atomic_add_rel_64
 #define	atomic_subtract_rel_long	atomic_subtract_rel_64
 
+#define	atomic_set_ptr		atomic_set_64
+#define	atomic_clear_ptr	atomic_clear_64
+#define	atomic_add_ptr		atomic_add_64
+#define	atomic_subtract_ptr	atomic_subtract_64
+#define	atomic_set_acq_ptr	atomic_set_acq_64
+#define	atomic_clear_acq_ptr	atomic_clear_acq_64
+#define	atomic_add_acq_ptr	atomic_add_acq_64
+#define	atomic_subtract_acq_ptr	atomic_subtract_acq_64
+#define	atomic_set_rel_ptr	atomic_set_rel_64
+#define	atomic_clear_rel_ptr	atomic_clear_rel_64
+#define	atomic_add_rel_ptr	atomic_add_rel_64
+#define	atomic_subtract_rel_ptr	atomic_subtract_rel_64
+
 #undef IA64_CMPXCHG
 
 /*
@@ -295,68 +311,13 @@ atomic_cmpset_rel_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
 #define	atomic_cmpset_64	atomic_cmpset_acq_64
 #define	atomic_cmpset_int	atomic_cmpset_32
 #define	atomic_cmpset_long	atomic_cmpset_64
+#define	atomic_cmpset_ptr	atomic_cmpset_64
 #define	atomic_cmpset_acq_int	atomic_cmpset_acq_32
 #define	atomic_cmpset_rel_int	atomic_cmpset_rel_32
 #define	atomic_cmpset_acq_long	atomic_cmpset_acq_64
 #define	atomic_cmpset_rel_long	atomic_cmpset_rel_64
-
-static __inline int
-atomic_cmpset_acq_ptr(volatile void *dst, void *exp, void *src)
-{
-	int ret;
-	ret = atomic_cmpset_acq_long((volatile u_long *)dst, (u_long)exp,
-	    (u_long)src);
-	return (ret);
-}
-
-static __inline int
-atomic_cmpset_rel_ptr(volatile void *dst, void *exp, void *src)
-{
-	int ret;
-	ret = atomic_cmpset_rel_long((volatile u_long *)dst, (u_long)exp,
-	    (u_long)src);
-	return (ret);
-}
-
-#define	atomic_cmpset_ptr	atomic_cmpset_acq_ptr
-
-static __inline void *
-atomic_load_acq_ptr(volatile void *p)
-{
-	return ((void *)atomic_load_acq_long((volatile u_long *)p));
-}
-
-static __inline void
-atomic_store_rel_ptr(volatile void *p, void *v)
-{
-	atomic_store_rel_long((volatile u_long *)p, (u_long)v);
-}
-
-#define	ATOMIC_PTR(NAME)					\
-static __inline void						\
-atomic_##NAME##_ptr(volatile void *p, uintptr_t v)		\
-{								\
-	atomic_##NAME##_long((volatile u_long *)p, v);		\
-}								\
-								\
-static __inline void						\
-atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)		\
-{								\
-	atomic_##NAME##_acq_long((volatile u_long *)p, v);	\
-}								\
-								\
-static __inline void						\
-atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)		\
-{								\
-	atomic_##NAME##_rel_long((volatile u_long *)p, v);	\
-}
-
-ATOMIC_PTR(set)
-ATOMIC_PTR(clear)
-ATOMIC_PTR(add)
-ATOMIC_PTR(subtract)
-
-#undef ATOMIC_PTR
+#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_64
+#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_64
 
 static __inline uint32_t
 atomic_readandclear_32(volatile uint32_t* p)
@@ -412,7 +412,7 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
 		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
 		rval = 1;
 	} else
-		rval = _obtain_lock(m, curthread);
+		rval = _obtain_lock(m, (uintptr_t)curthread);
 
 	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
 	if (rval)
@@ -429,7 +429,7 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
  * sleep waiting for it), or if we need to recurse on it.
  */
 void
-_mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
+_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
     int line)
 {
 #if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
@@ -462,7 +462,7 @@ _mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
 #ifdef MUTEX_PROFILING
 	contested = 0;
 #endif
-	while (!_obtain_lock(m, td)) {
+	while (!_obtain_lock(m, tid)) {
 #ifdef MUTEX_PROFILING
 		contested = 1;
 		atomic_add_int(&m->mtx_contest_holding, 1);
@@ -490,7 +490,7 @@ _mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
 		 * necessary.
 		 */
 		if (v == MTX_CONTESTED) {
-			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
+			m->mtx_lock = tid | MTX_CONTESTED;
 			turnstile_claim(&m->mtx_object);
 			break;
 		}
@@ -502,8 +502,7 @@ _mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
 		 * or the state of the MTX_RECURSED bit changed.
 		 */
 		if ((v & MTX_CONTESTED) == 0 &&
-		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
-		    (void *)(v | MTX_CONTESTED))) {
+		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
 			turnstile_release(&m->mtx_object);
 			cpu_spinwait();
 			continue;
@@ -537,7 +536,7 @@ _mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
 		if (!cont_logged) {
 			CTR6(KTR_CONTENTION,
 			    "contention: %p at %s:%d wants %s, taken by %s:%d",
-			    td, file, line, m->mtx_object.lo_name,
+			    (void *)tid, file, line, m->mtx_object.lo_name,
 			    WITNESS_FILE(&m->mtx_object),
 			    WITNESS_LINE(&m->mtx_object));
 			cont_logged = 1;
@@ -554,7 +553,7 @@ _mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
 	if (cont_logged) {
 		CTR4(KTR_CONTENTION,
 		    "contention end: %s acquired by %p at %s:%d",
-		    m->mtx_object.lo_name, td, file, line);
+		    m->mtx_object.lo_name, (void *)tid, file, line);
 	}
 #endif
 #ifdef MUTEX_PROFILING
@@ -573,7 +572,7 @@ _mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
  * is handled inline.
  */
 void
-_mtx_lock_spin(struct mtx *m, struct thread *td, int opts, const char *file,
+_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
     int line)
 {
 	int i = 0;
@@ -582,7 +581,7 @@ _mtx_lock_spin(struct mtx *m, struct thread *td, int opts, const char *file,
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
 
 	for (;;) {
-		if (_obtain_lock(m, td))
+		if (_obtain_lock(m, tid))
 			break;
 
 		/* Give interrupts a chance while we spin. */
@@ -651,7 +651,8 @@ kseq_assign(struct kseq *kseq)
 
 	do {
 		*(volatile struct kse **)&ke = kseq->ksq_assigned;
-	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
+	} while(!atomic_cmpset_ptr((volatile uintptr_t *)&kseq->ksq_assigned,
+	    (uintptr_t)ke, (uintptr_t)NULL));
 	for (; ke != NULL; ke = nke) {
 		nke = ke->ke_assign;
 		kseq->ksq_group->ksg_load--;
@@ -688,7 +689,8 @@ kseq_notify(struct kse *ke, int cpu)
 	 */
 	do {
 		*(volatile struct kse **)&ke->ke_assign = kseq->ksq_assigned;
-	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
+	} while(!atomic_cmpset_ptr((volatile uintptr_t *)&kseq->ksq_assigned,
+	    (uintptr_t)ke->ke_assign, (uintptr_t)ke));
 	/*
 	 * Without sched_lock we could lose a race where we set NEEDRESCHED
 	 * on a thread that is switched out before the IPI is delivered. This
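
hatm_ext_free()/hatm_ext_alloc() and kseq_assign()/kseq_notify() above all use
the same lock-free list idiom: read the head, prepare the new link, then retry
the compare-and-swap until no other CPU has raced in between. A standalone
sketch of both directions, with hypothetical names (struct node, list_head):

	#include <stdint.h>
	#include <machine/atomic.h>

	struct node {				/* stands in for struct kse */
		struct node	*link;
	};

	static struct node *volatile list_head;

	/* Push one node, as hatm_ext_free() and kseq_notify() do. */
	static void
	list_push(struct node *n)
	{
		do {
			n->link = list_head;
		} while (!atomic_cmpset_ptr((volatile uintptr_t *)&list_head,
		    (uintptr_t)n->link, (uintptr_t)n));
	}

	/* Detach the whole list atomically, as kseq_assign() does. */
	static struct node *
	list_take_all(void)
	{
		struct node *n;

		do {
			n = list_head;
		} while (!atomic_cmpset_ptr((volatile uintptr_t *)&list_head,
		    (uintptr_t)n, (uintptr_t)NULL));
		return (n);
	}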
@@ -230,6 +230,11 @@ atomic_readandclear_64(volatile u_int64_t *addr)
 #define	atomic_subtract_long(p, v)	atomic_subtract_32((uint32_t *)p, (uint32_t)v)
 #define	atomic_readandclear_long	atomic_readandclear_32
 
+#define	atomic_set_ptr		atomic_set_32
+#define	atomic_clear_ptr	atomic_clear_32
+#define	atomic_add_ptr		atomic_add_32
+#define	atomic_subtract_ptr	atomic_subtract_32
+
 #if 0
 
 /* See above. */
@@ -293,6 +298,15 @@ ATOMIC_ACQ_REL(subtract, 32, int)
 #define	atomic_subtract_acq_long	atomic_subtract_acq_32
 #define	atomic_subtract_rel_long	atomic_subtract_rel_32
 
+#define	atomic_set_acq_ptr	atomic_set_acq_32
+#define	atomic_set_rel_ptr	atomic_set_rel_32
+#define	atomic_clear_acq_ptr	atomic_clear_acq_32
+#define	atomic_clear_rel_ptr	atomic_clear_rel_32
+#define	atomic_add_acq_ptr	atomic_add_acq_32
+#define	atomic_add_rel_ptr	atomic_add_rel_32
+#define	atomic_subtract_acq_ptr	atomic_subtract_acq_32
+#define	atomic_subtract_rel_ptr	atomic_subtract_rel_32
+
 #undef ATOMIC_ACQ_REL
 
 /*
@@ -340,6 +354,9 @@ ATOMIC_STORE_LOAD(int, 32)
 #define	atomic_load_acq_long	atomic_load_acq_32
 #define	atomic_store_rel_long	atomic_store_rel_32
 
+#define	atomic_load_acq_ptr	atomic_load_acq_32
+#define	atomic_store_rel_ptr	atomic_store_rel_32
+
 #undef ATOMIC_STORE_LOAD
 
 /*
@@ -397,19 +414,12 @@ atomic_cmpset_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
 
 #define	atomic_cmpset_int	atomic_cmpset_32
 #define	atomic_cmpset_long	atomic_cmpset_32
+#define	atomic_cmpset_ptr	atomic_cmpset_32
 
 #if 0
 #define	atomic_cmpset_long_long	atomic_cmpset_64
 #endif /* 0 */
 
-static __inline int
-atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
-{
-
-	return (atomic_cmpset_32((volatile uint32_t *)dst, (uint32_t)exp,
-	    (uint32_t)src));
-}
-
 static __inline uint32_t
 atomic_cmpset_acq_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
 {
@@ -431,60 +441,7 @@ atomic_cmpset_rel_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
 #define	atomic_cmpset_rel_int	atomic_cmpset_rel_32
 #define	atomic_cmpset_acq_long	atomic_cmpset_acq_32
 #define	atomic_cmpset_rel_long	atomic_cmpset_rel_32
+#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_32
+#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_32
 
-static __inline int
-atomic_cmpset_acq_ptr(volatile void *dst, void *exp, void *src)
-{
-
-	return (atomic_cmpset_acq_32((volatile uint32_t *)dst,
-	    (uint32_t)exp, (uint32_t)src));
-}
-
-static __inline int
-atomic_cmpset_rel_ptr(volatile void *dst, void *exp, void *src)
-{
-
-	return (atomic_cmpset_rel_32((volatile uint32_t *)dst,
-	    (uint32_t)exp, (uint32_t)src));
-}
-
-static __inline void *
-atomic_load_acq_ptr(volatile void *p)
-{
-
-	return (void *)atomic_load_acq_32((volatile uint32_t *)p);
-}
-
-static __inline void
-atomic_store_rel_ptr(volatile void *p, void *v)
-{
-
-	atomic_store_rel_32((volatile uint32_t *)p, (uint32_t)v);
-}
-
-#define	ATOMIC_PTR(NAME)					\
-static __inline void						\
-atomic_##NAME##_ptr(volatile void *p, uintptr_t v)		\
-{								\
-	atomic_##NAME##_32((volatile uint32_t *)p, v);		\
-}								\
-								\
-static __inline void						\
-atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)		\
-{								\
-	atomic_##NAME##_acq_32((volatile uint32_t *)p, v);	\
-}								\
-								\
-static __inline void						\
-atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)		\
-{								\
-	atomic_##NAME##_rel_32((volatile uint32_t *)p, v);	\
-}
-
-ATOMIC_PTR(set)
-ATOMIC_PTR(clear)
-ATOMIC_PTR(add)
-ATOMIC_PTR(subtract)
-
-#undef ATOMIC_PTR
 #endif /* ! _MACHINE_ATOMIC_H_ */
@@ -275,7 +275,7 @@ ATOMIC_GEN(32, uint32_t *, uint32_t, uint32_t, 32);
 ATOMIC_GEN(long, u_long *, u_long, u_long, 64);
 ATOMIC_GEN(64, uint64_t *, uint64_t, uint64_t, 64);
 
-ATOMIC_GEN(ptr, void *, void *, uintptr_t, 64);
+ATOMIC_GEN(ptr, uintptr_t *, uintptr_t, uintptr_t, 64);
 
 #undef ATOMIC_GEN
 #undef atomic_cas
@@ -100,11 +100,11 @@ void	mtx_init(struct mtx *m, const char *name, const char *type, int opts);
 void	mtx_destroy(struct mtx *m);
 void	mtx_sysinit(void *arg);
 void	mutex_init(void);
-void	_mtx_lock_sleep(struct mtx *m, struct thread *td, int opts,
+void	_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts,
 	    const char *file, int line);
 void	_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
 #ifdef SMP
-void	_mtx_lock_spin(struct mtx *m, struct thread *td, int opts,
+void	_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts,
 	    const char *file, int line);
 #endif
 void	_mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
@@ -127,19 +127,19 @@ void	_mtx_assert(struct mtx *m, int what, const char *file, int line);
 /* Try to obtain mtx_lock once. */
 #ifndef _obtain_lock
 #define	_obtain_lock(mp, tid)						\
-	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
+	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, MTX_UNOWNED, (tid))
 #endif
 
 /* Try to release mtx_lock if it is unrecursed and uncontested. */
 #ifndef _release_lock
 #define	_release_lock(mp, tid)						\
-	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
+	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED)
 #endif
 
 /* Release mtx_lock quickly, assuming we own it. */
 #ifndef _release_lock_quick
 #define	_release_lock_quick(mp)						\
-	atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
+	atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED)
 #endif
 
 /*
@@ -148,7 +148,7 @@ void	_mtx_assert(struct mtx *m, int what, const char *file, int line);
  */
 #ifndef _get_sleep_lock
 #define	_get_sleep_lock(mp, tid, opts, file, line) do {			\
-	struct thread *_tid = (tid);					\
+	uintptr_t _tid = (uintptr_t)(tid);				\
 									\
 	if (!_obtain_lock((mp), _tid))					\
 		_mtx_lock_sleep((mp), _tid, (opts), (file), (line));	\
@@ -165,11 +165,11 @@ void	_mtx_assert(struct mtx *m, int what, const char *file, int line);
 #ifndef _get_spin_lock
 #ifdef SMP
 #define	_get_spin_lock(mp, tid, opts, file, line) do {			\
-	struct thread *_tid = (tid);					\
+	uintptr_t _tid = (uintptr_t)(tid);				\
 									\
 	spinlock_enter();						\
 	if (!_obtain_lock((mp), _tid)) {				\
-		if ((mp)->mtx_lock == (uintptr_t)_tid)			\
+		if ((mp)->mtx_lock == _tid)				\
 			(mp)->mtx_recurse++;				\
 		else							\
 			_mtx_lock_spin((mp), _tid, (opts), (file), (line)); \
@@ -177,14 +177,14 @@ void	_mtx_assert(struct mtx *m, int what, const char *file, int line);
 } while (0)
 #else /* SMP */
 #define	_get_spin_lock(mp, tid, opts, file, line) do {			\
-	struct thread *_tid = (tid);					\
+	uintptr_t _tid = (uintptr_t)(tid);				\
 									\
 	spinlock_enter();						\
-	if ((mp)->mtx_lock == (uintptr_t)_tid)				\
+	if ((mp)->mtx_lock == _tid)					\
 		(mp)->mtx_recurse++;					\
 	else {								\
 		KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \
-		(mp)->mtx_lock = (uintptr_t)_tid;			\
+		(mp)->mtx_lock = _tid;					\
 	}								\
 } while (0)
 #endif /* SMP */
@@ -196,7 +196,9 @@ void	_mtx_assert(struct mtx *m, int what, const char *file, int line);
  */
 #ifndef _rel_sleep_lock
 #define	_rel_sleep_lock(mp, tid, opts, file, line) do {			\
-	if (!_release_lock((mp), (tid)))				\
+	uintptr_t _tid = (uintptr_t)(tid);				\
+									\
+	if (!_release_lock((mp), _tid))					\
 		_mtx_unlock_sleep((mp), (opts), (file), (line));	\
 } while (0)
 #endif
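
Net effect on the mutex(9) internals above: mtx_lock now carries the owning
thread pointer as a uintptr_t "tid", so the unowned sentinel and flag bits
such as MTX_CONTESTED are ordinary integer constants and the (void *) casts
disappear from _obtain_lock/_release_lock. A minimal sketch of that fast path
under assumed names (my_lock, MY_UNOWNED; the slow paths that handle
contention are omitted):

	#include <stdint.h>
	#include <machine/atomic.h>

	#define	MY_UNOWNED	((uintptr_t)0)	/* assumed sentinel, like MTX_UNOWNED */

	static volatile uintptr_t my_lock = MY_UNOWNED;

	/* Fast-path acquire: install our tid if the word is unowned. */
	static int
	my_trylock(uintptr_t tid)
	{
		return (atomic_cmpset_acq_ptr(&my_lock, MY_UNOWNED, tid));
	}

	/* Fast-path release: clear the word only if we still own it outright. */
	static int
	my_unlock(uintptr_t tid)
	{
		return (atomic_cmpset_rel_ptr(&my_lock, tid, MY_UNOWNED));
	}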