wg: Use atomic(9) instead of concurrency-kit atomics.

Kernel sanitizers only support atomic(9) operations.

Reviewed by:	kevans, markj, emaste
Reported by:	markj
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D36910
John Baldwin 2022-10-28 13:36:13 -07:00
parent 744bfb2131
commit e32e1a160e
2 changed files with 65 additions and 63 deletions
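The change is mechanical: each concurrency-kit ck_pr_*() call becomes the corresponding atomic(9) primitive, which the kernel sanitizers can instrument. A rough, illustrative sketch of the mapping, using hypothetical variables rather than the driver's own fields, might look like this (assuming kernel context, and that relaxed ordering suffices because the driver's locks and the net epoch provide the ordering, as the hunks below show):

/*
 * Illustrative sketch only: example_enabled and example_so are
 * hypothetical stand-ins, not fields of the wg driver.  Both the old
 * ck_pr(3) calls and their atomic(9) replacements are plain (relaxed)
 * loads and stores; any ordering the driver needs still comes from its
 * locks and the net epoch.
 */
#include <sys/param.h>
#include <machine/atomic.h>

static bool example_enabled;    /* cf. peer->p_enabled */
static void *example_so;        /* cf. so->so_so4 */

static void
example_update(void *new_so)
{
        void *old_so;

        /* was: old_so = ck_pr_load_ptr(&example_so); */
        old_so = atomic_load_ptr(&example_so);
        /* was: ck_pr_store_ptr(&example_so, new_so); */
        atomic_store_ptr(&example_so, new_so);
        /* was: ck_pr_store_bool(&example_enabled, old_so != NULL); */
        atomic_store_bool(&example_enabled, old_so != NULL);
}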

sys/dev/wg/if_wg.c

@@ -769,10 +769,10 @@ wg_socket_set(struct wg_softc *sc, struct socket *new_so4, struct socket *new_so
 sx_assert(&sc->sc_lock, SX_XLOCKED);
-so4 = ck_pr_load_ptr(&so->so_so4);
-so6 = ck_pr_load_ptr(&so->so_so6);
-ck_pr_store_ptr(&so->so_so4, new_so4);
-ck_pr_store_ptr(&so->so_so6, new_so6);
+so4 = atomic_load_ptr(&so->so_so4);
+so6 = atomic_load_ptr(&so->so_so6);
+atomic_store_ptr(&so->so_so4, new_so4);
+atomic_store_ptr(&so->so_so6, new_so6);
 if (!so4 && !so6)
 return;
@@ -877,8 +877,8 @@ wg_send(struct wg_softc *sc, struct wg_endpoint *e, struct mbuf *m)
 sa = &e->e_remote.r_sa;
 NET_EPOCH_ENTER(et);
-so4 = ck_pr_load_ptr(&so->so_so4);
-so6 = ck_pr_load_ptr(&so->so_so6);
+so4 = atomic_load_ptr(&so->so_so4);
+so6 = atomic_load_ptr(&so->so_so6);
 if (e->e_remote.r_sa.sa_family == AF_INET && so4 != NULL)
 ret = sosend(so4, sa, NULL, m, control, 0, curthread);
 else if (e->e_remote.r_sa.sa_family == AF_INET6 && so6 != NULL)
@@ -931,7 +931,7 @@ wg_send_buf(struct wg_softc *sc, struct wg_endpoint *e, uint8_t *buf, size_t len
 static void
 wg_timers_enable(struct wg_peer *peer)
 {
-ck_pr_store_bool(&peer->p_enabled, true);
+atomic_store_bool(&peer->p_enabled, true);
 wg_timers_run_persistent_keepalive(peer);
 }
@@ -950,9 +950,9 @@ wg_timers_disable(struct wg_peer *peer)
 *
 * We should also pull NET_EPOCH_WAIT out of the FOREACH(peer) loops, but the
 * performance impact is acceptable for the time being. */
-ck_pr_store_bool(&peer->p_enabled, false);
+atomic_store_bool(&peer->p_enabled, false);
 NET_EPOCH_WAIT();
-ck_pr_store_bool(&peer->p_need_another_keepalive, false);
+atomic_store_bool(&peer->p_need_another_keepalive, false);
 callout_stop(&peer->p_new_handshake);
 callout_stop(&peer->p_send_keepalive);
@@ -966,9 +966,9 @@ wg_timers_set_persistent_keepalive(struct wg_peer *peer, uint16_t interval)
 {
 struct epoch_tracker et;
 if (interval != peer->p_persistent_keepalive_interval) {
-ck_pr_store_16(&peer->p_persistent_keepalive_interval, interval);
+atomic_store_16(&peer->p_persistent_keepalive_interval, interval);
 NET_EPOCH_ENTER(et);
-if (ck_pr_load_bool(&peer->p_enabled))
+if (atomic_load_bool(&peer->p_enabled))
 wg_timers_run_persistent_keepalive(peer);
 NET_EPOCH_EXIT(et);
 }
@@ -988,7 +988,8 @@ wg_timers_event_data_sent(struct wg_peer *peer)
 {
 struct epoch_tracker et;
 NET_EPOCH_ENTER(et);
-if (ck_pr_load_bool(&peer->p_enabled) && !callout_pending(&peer->p_new_handshake))
+if (atomic_load_bool(&peer->p_enabled) &&
+!callout_pending(&peer->p_new_handshake))
 callout_reset(&peer->p_new_handshake, MSEC_2_TICKS(
 NEW_HANDSHAKE_TIMEOUT * 1000 +
 arc4random_uniform(REKEY_TIMEOUT_JITTER)),
@@ -1001,13 +1002,14 @@ wg_timers_event_data_received(struct wg_peer *peer)
 {
 struct epoch_tracker et;
 NET_EPOCH_ENTER(et);
-if (ck_pr_load_bool(&peer->p_enabled)) {
+if (atomic_load_bool(&peer->p_enabled)) {
 if (!callout_pending(&peer->p_send_keepalive))
 callout_reset(&peer->p_send_keepalive,
 MSEC_2_TICKS(KEEPALIVE_TIMEOUT * 1000),
 wg_timers_run_send_keepalive, peer);
 else
-ck_pr_store_bool(&peer->p_need_another_keepalive, true);
+atomic_store_bool(&peer->p_need_another_keepalive,
+true);
 }
 NET_EPOCH_EXIT(et);
 }
@@ -1030,8 +1032,8 @@ wg_timers_event_any_authenticated_packet_traversal(struct wg_peer *peer)
 struct epoch_tracker et;
 uint16_t interval;
 NET_EPOCH_ENTER(et);
-interval = ck_pr_load_16(&peer->p_persistent_keepalive_interval);
-if (ck_pr_load_bool(&peer->p_enabled) && interval > 0)
+interval = atomic_load_16(&peer->p_persistent_keepalive_interval);
+if (atomic_load_bool(&peer->p_enabled) && interval > 0)
 callout_reset(&peer->p_persistent_keepalive,
 MSEC_2_TICKS(interval * 1000),
 wg_timers_run_persistent_keepalive, peer);
@@ -1043,7 +1045,7 @@ wg_timers_event_handshake_initiated(struct wg_peer *peer)
 {
 struct epoch_tracker et;
 NET_EPOCH_ENTER(et);
-if (ck_pr_load_bool(&peer->p_enabled))
+if (atomic_load_bool(&peer->p_enabled))
 callout_reset(&peer->p_retry_handshake, MSEC_2_TICKS(
 REKEY_TIMEOUT * 1000 +
 arc4random_uniform(REKEY_TIMEOUT_JITTER)),
@@ -1056,7 +1058,7 @@ wg_timers_event_handshake_complete(struct wg_peer *peer)
 {
 struct epoch_tracker et;
 NET_EPOCH_ENTER(et);
-if (ck_pr_load_bool(&peer->p_enabled)) {
+if (atomic_load_bool(&peer->p_enabled)) {
 mtx_lock(&peer->p_handshake_mtx);
 callout_stop(&peer->p_retry_handshake);
 peer->p_handshake_retries = 0;
@@ -1072,7 +1074,7 @@ wg_timers_event_session_derived(struct wg_peer *peer)
 {
 struct epoch_tracker et;
 NET_EPOCH_ENTER(et);
-if (ck_pr_load_bool(&peer->p_enabled))
+if (atomic_load_bool(&peer->p_enabled))
 callout_reset(&peer->p_zero_key_material,
 MSEC_2_TICKS(REJECT_AFTER_TIME * 3 * 1000),
 wg_timers_run_zero_key_material, peer);
@@ -1084,7 +1086,7 @@ wg_timers_event_want_initiation(struct wg_peer *peer)
 {
 struct epoch_tracker et;
 NET_EPOCH_ENTER(et);
-if (ck_pr_load_bool(&peer->p_enabled))
+if (atomic_load_bool(&peer->p_enabled))
 wg_timers_run_send_initiation(peer, false);
 NET_EPOCH_EXIT(et);
 }
@@ -1124,7 +1126,7 @@ wg_timers_run_retry_handshake(void *_peer)
 callout_stop(&peer->p_send_keepalive);
 wg_queue_purge(&peer->p_stage_queue);
 NET_EPOCH_ENTER(et);
-if (ck_pr_load_bool(&peer->p_enabled) &&
+if (atomic_load_bool(&peer->p_enabled) &&
 !callout_pending(&peer->p_zero_key_material))
 callout_reset(&peer->p_zero_key_material,
 MSEC_2_TICKS(REJECT_AFTER_TIME * 3 * 1000),
@@ -1141,9 +1143,9 @@ wg_timers_run_send_keepalive(void *_peer)
 wg_send_keepalive(peer);
 NET_EPOCH_ENTER(et);
-if (ck_pr_load_bool(&peer->p_enabled) &&
-ck_pr_load_bool(&peer->p_need_another_keepalive)) {
-ck_pr_store_bool(&peer->p_need_another_keepalive, false);
+if (atomic_load_bool(&peer->p_enabled) &&
+atomic_load_bool(&peer->p_need_another_keepalive)) {
+atomic_store_bool(&peer->p_need_another_keepalive, false);
 callout_reset(&peer->p_send_keepalive,
 MSEC_2_TICKS(KEEPALIVE_TIMEOUT * 1000),
 wg_timers_run_send_keepalive, peer);
@@ -1180,7 +1182,7 @@ wg_timers_run_persistent_keepalive(void *_peer)
 {
 struct wg_peer *peer = _peer;
-if (ck_pr_load_16(&peer->p_persistent_keepalive_interval) > 0)
+if (atomic_load_16(&peer->p_persistent_keepalive_interval) > 0)
 wg_send_keepalive(peer);
 }
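
The wg_noise.c hunks that follow keep the driver's existing split for its 64-bit nonce counters: LP64 kernels use the 64-bit atomics (atomic_fetchadd_64(), atomic_load_64() and atomic_store_64() in place of the ck_pr equivalents), while 32-bit kernels, which may not provide 64-bit atomics, keep taking the kp_nonce_lock rwlock. A rough sketch of that pattern, with a hypothetical nonce_state structure rather than the driver's struct noise_keypair, could look like:

/*
 * Illustrative sketch only: struct nonce_state and nonce_next() are
 * hypothetical stand-ins for the keypair's send-nonce handling.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <machine/atomic.h>

struct nonce_state {
        uint64_t ns_send;
#ifndef __LP64__
        struct rwlock ns_lock;  /* rw_init()'d elsewhere; guards ns_send */
#endif
};

static uint64_t
nonce_next(struct nonce_state *ns)
{
        uint64_t send;

#ifdef __LP64__
        /* was ck_pr_faa_64(); atomic(9) spells it atomic_fetchadd_64() */
        send = atomic_fetchadd_64(&ns->ns_send, 1);
#else
        /* 64-bit atomics may be unavailable on 32-bit kernels */
        rw_wlock(&ns->ns_lock);
        send = ns->ns_send++;
        rw_wunlock(&ns->ns_lock);
#endif
        return (send);
}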

sys/dev/wg/wg_noise.c

@@ -564,16 +564,16 @@ noise_remote_keypairs_clear(struct noise_remote *r)
 struct noise_keypair *kp;
 mtx_lock(&r->r_keypair_mtx);
-kp = ck_pr_load_ptr(&r->r_next);
-ck_pr_store_ptr(&r->r_next, NULL);
+kp = atomic_load_ptr(&r->r_next);
+atomic_store_ptr(&r->r_next, NULL);
 noise_keypair_drop(kp);
-kp = ck_pr_load_ptr(&r->r_current);
-ck_pr_store_ptr(&r->r_current, NULL);
+kp = atomic_load_ptr(&r->r_current);
+atomic_store_ptr(&r->r_current, NULL);
 noise_keypair_drop(kp);
-kp = ck_pr_load_ptr(&r->r_previous);
-ck_pr_store_ptr(&r->r_previous, NULL);
+kp = atomic_load_ptr(&r->r_previous);
+atomic_store_ptr(&r->r_previous, NULL);
 noise_keypair_drop(kp);
 mtx_unlock(&r->r_keypair_mtx);
 }
@@ -587,12 +587,12 @@ noise_remote_expire_current(struct noise_remote *r)
 noise_remote_handshake_clear(r);
 NET_EPOCH_ENTER(et);
-kp = ck_pr_load_ptr(&r->r_next);
+kp = atomic_load_ptr(&r->r_next);
 if (kp != NULL)
-ck_pr_store_bool(&kp->kp_can_send, false);
-kp = ck_pr_load_ptr(&r->r_current);
+atomic_store_bool(&kp->kp_can_send, false);
+kp = atomic_load_ptr(&r->r_current);
 if (kp != NULL)
-ck_pr_store_bool(&kp->kp_can_send, false);
+atomic_store_bool(&kp->kp_can_send, false);
 NET_EPOCH_EXIT(et);
 }
@@ -606,24 +606,24 @@ noise_add_new_keypair(struct noise_local *l, struct noise_remote *r,
 /* Insert into the keypair table */
 mtx_lock(&r->r_keypair_mtx);
-next = ck_pr_load_ptr(&r->r_next);
-current = ck_pr_load_ptr(&r->r_current);
-previous = ck_pr_load_ptr(&r->r_previous);
+next = atomic_load_ptr(&r->r_next);
+current = atomic_load_ptr(&r->r_current);
+previous = atomic_load_ptr(&r->r_previous);
 if (kp->kp_is_initiator) {
 if (next != NULL) {
-ck_pr_store_ptr(&r->r_next, NULL);
-ck_pr_store_ptr(&r->r_previous, next);
+atomic_store_ptr(&r->r_next, NULL);
+atomic_store_ptr(&r->r_previous, next);
 noise_keypair_drop(current);
 } else {
-ck_pr_store_ptr(&r->r_previous, current);
+atomic_store_ptr(&r->r_previous, current);
 }
 noise_keypair_drop(previous);
-ck_pr_store_ptr(&r->r_current, kp);
+atomic_store_ptr(&r->r_current, kp);
 } else {
-ck_pr_store_ptr(&r->r_next, kp);
+atomic_store_ptr(&r->r_next, kp);
 noise_keypair_drop(next);
-ck_pr_store_ptr(&r->r_previous, NULL);
+atomic_store_ptr(&r->r_previous, NULL);
 noise_keypair_drop(previous);
 }
@@ -704,10 +704,10 @@ noise_keypair_current(struct noise_remote *r)
 struct noise_keypair *kp, *ret = NULL;
 NET_EPOCH_ENTER(et);
-kp = ck_pr_load_ptr(&r->r_current);
-if (kp != NULL && ck_pr_load_bool(&kp->kp_can_send)) {
+kp = atomic_load_ptr(&r->r_current);
+if (kp != NULL && atomic_load_bool(&kp->kp_can_send)) {
 if (noise_timer_expired(kp->kp_birthdate, REJECT_AFTER_TIME, 0))
-ck_pr_store_bool(&kp->kp_can_send, false);
+atomic_store_bool(&kp->kp_can_send, false);
 else if (refcount_acquire_if_not_zero(&kp->kp_refcnt))
 ret = kp;
 }
@@ -728,20 +728,20 @@ noise_keypair_received_with(struct noise_keypair *kp)
 struct noise_keypair *old;
 struct noise_remote *r = kp->kp_remote;
-if (kp != ck_pr_load_ptr(&r->r_next))
+if (kp != atomic_load_ptr(&r->r_next))
 return (0);
 mtx_lock(&r->r_keypair_mtx);
-if (kp != ck_pr_load_ptr(&r->r_next)) {
+if (kp != atomic_load_ptr(&r->r_next)) {
 mtx_unlock(&r->r_keypair_mtx);
 return (0);
 }
-old = ck_pr_load_ptr(&r->r_previous);
-ck_pr_store_ptr(&r->r_previous, ck_pr_load_ptr(&r->r_current));
+old = atomic_load_ptr(&r->r_previous);
+atomic_store_ptr(&r->r_previous, atomic_load_ptr(&r->r_current));
 noise_keypair_drop(old);
-ck_pr_store_ptr(&r->r_current, kp);
-ck_pr_store_ptr(&r->r_next, NULL);
+atomic_store_ptr(&r->r_current, kp);
+atomic_store_ptr(&r->r_next, NULL);
 mtx_unlock(&r->r_keypair_mtx);
 return (ECONNRESET);
@@ -793,11 +793,11 @@ noise_keypair_remote(struct noise_keypair *kp)
 int
 noise_keypair_nonce_next(struct noise_keypair *kp, uint64_t *send)
 {
-if (!ck_pr_load_bool(&kp->kp_can_send))
+if (!atomic_load_bool(&kp->kp_can_send))
 return (EINVAL);
 #ifdef __LP64__
-*send = ck_pr_faa_64(&kp->kp_nonce_send, 1);
+*send = atomic_fetchadd_64(&kp->kp_nonce_send, 1);
 #else
 rw_wlock(&kp->kp_nonce_lock);
 *send = kp->kp_nonce_send++;
@@ -805,7 +805,7 @@ noise_keypair_nonce_next(struct noise_keypair *kp, uint64_t *send)
 #endif
 if (*send < REJECT_AFTER_MESSAGES)
 return (0);
-ck_pr_store_bool(&kp->kp_can_send, false);
+atomic_store_bool(&kp->kp_can_send, false);
 return (EINVAL);
 }
@@ -836,7 +836,7 @@ noise_keypair_nonce_check(struct noise_keypair *kp, uint64_t recv)
 (i + index_current) &
 ((COUNTER_BITS_TOTAL / COUNTER_BITS) - 1)] = 0;
 #ifdef __LP64__
-ck_pr_store_64(&kp->kp_nonce_recv, recv);
+atomic_store_64(&kp->kp_nonce_recv, recv);
 #else
 kp->kp_nonce_recv = recv;
 #endif
@@ -863,12 +863,12 @@ noise_keep_key_fresh_send(struct noise_remote *r)
 uint64_t nonce;
 NET_EPOCH_ENTER(et);
-current = ck_pr_load_ptr(&r->r_current);
-keep_key_fresh = current != NULL && ck_pr_load_bool(&current->kp_can_send);
+current = atomic_load_ptr(&r->r_current);
+keep_key_fresh = current != NULL && atomic_load_bool(&current->kp_can_send);
 if (!keep_key_fresh)
 goto out;
 #ifdef __LP64__
-nonce = ck_pr_load_64(&current->kp_nonce_send);
+nonce = atomic_load_64(&current->kp_nonce_send);
 #else
 rw_rlock(&current->kp_nonce_lock);
 nonce = current->kp_nonce_send;
@@ -892,8 +892,8 @@ noise_keep_key_fresh_recv(struct noise_remote *r)
 int keep_key_fresh;
 NET_EPOCH_ENTER(et);
-current = ck_pr_load_ptr(&r->r_current);
-keep_key_fresh = current != NULL && ck_pr_load_bool(&current->kp_can_send) &&
+current = atomic_load_ptr(&r->r_current);
+keep_key_fresh = current != NULL && atomic_load_bool(&current->kp_can_send) &&
 current->kp_is_initiator && noise_timer_expired(current->kp_birthdate,
 REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT, 0);
 NET_EPOCH_EXIT(et);
@@ -921,7 +921,7 @@ noise_keypair_decrypt(struct noise_keypair *kp, uint64_t nonce, struct mbuf *m)
 int ret;
 #ifdef __LP64__
-cur_nonce = ck_pr_load_64(&kp->kp_nonce_recv);
+cur_nonce = atomic_load_64(&kp->kp_nonce_recv);
 #else
 rw_rlock(&kp->kp_nonce_lock);
 cur_nonce = kp->kp_nonce_recv;