/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>

#include "t4_mp_ring.h"

#if defined(__i386__)
#define	atomic_cmpset_acq_64	atomic_cmpset_64
#define	atomic_cmpset_rel_64	atomic_cmpset_64
#endif

/*
 * mp_ring handles multiple threads (producers) enqueueing data to a tx queue.
 * The thread that is writing the hardware descriptors is the consumer and it
 * runs with the consumer lock held.  A producer becomes the consumer if there
 * isn't one already.  The consumer runs with the flags set to BUSY and
 * consumes everything (IDLE or COALESCING) or gets STALLED.  If it is running
 * over its budget it sets the flags to TOO_BUSY.  A producer that observes a
 * TOO_BUSY consumer will become the new consumer by setting the flags to
 * TAKING_OVER.  The original consumer stops and sets the flags back to BUSY
 * for the new consumer.
 *
 * COALESCING is the same as IDLE except that there are items being held in
 * the hope that they can be coalesced with items that follow.  The driver
 * must arrange for a tx update or some other event that transmits all the
 * held items in a timely manner if nothing else is enqueued.
 */
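
/*
 * Summary of the flag transitions described above:
 *
 *	IDLE/COALESCING -> BUSY		enqueue with no consumer running
 *	BUSY -> TOO_BUSY		consumer went over its budget
 *	TOO_BUSY -> TAKING_OVER		a producer volunteers as new consumer
 *	TAKING_OVER -> BUSY		old consumer hands over and stops
 *	BUSY/TOO_BUSY -> STALLED	drain callback made no progress
 *	STALLED -> BUSY			mp_ring_check_drainage() or an enqueue
 *	BUSY -> IDLE/COALESCING		everything consumed (or held back)
 */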

union ring_state {
	struct {
		uint16_t pidx_head;
		uint16_t pidx_tail;
		uint16_t cidx;
		uint16_t flags;
	};
	uint64_t state;
};
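
/*
 * The four 16-bit fields overlay the single 64-bit 'state' word, which is why
 * a snapshot of the entire ring state can be taken, and updated, with one
 * 64-bit atomic operation (see the atomic_load_64/atomic_fcmpset_64 pairs
 * throughout this file).
 */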

enum {
	IDLE = 0,	/* tx is all caught up, nothing to do. */
	COALESCING,	/* IDLE, but tx frames are being held for coalescing */
	BUSY,		/* consumer is running already, or will be shortly. */
	TOO_BUSY,	/* consumer is running and is beyond its budget */
	TAKING_OVER,	/* new consumer taking over from a TOO_BUSY consumer */
	STALLED,	/* consumer stopped due to lack of resources. */
};

enum {
	C_FAST = 0,
	C_2,
	C_3,
	C_TAKEOVER,
};
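
/*
 * The enum above provides the indices into r->consumer[].  Each counter
 * records how a producer ended up as the consumer: C_FAST when the ring was
 * idle with no reservations in flight (fast path), C_2 when the ring was idle
 * but other producers' reservations were still pending, C_3 when the producer
 * became the consumer while publishing pidx_tail, and C_TAKEOVER when it
 * displaced a TOO_BUSY consumer.
 */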

static inline uint16_t
space_available(struct mp_ring *r, union ring_state s)
{
	uint16_t x = r->size - 1;

	if (s.cidx == s.pidx_head)
		return (x);
	else if (s.cidx > s.pidx_head)
		return (s.cidx - s.pidx_head - 1);
	else
		return (x - s.pidx_head + s.cidx);
}

static inline uint16_t
increment_idx(struct mp_ring *r, uint16_t idx, uint16_t n)
{
	int x = r->size - idx;

	MPASS(x > 0);
	return (x > n ? idx + n : n - x);
}
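
/*
 * Worked example of the arithmetic above, for a hypothetical ring of size 8:
 * increment_idx(r, 6, 4) computes x = 8 - 6 = 2; x <= n, so the index wraps
 * and the result is n - x = 2 (i.e. (6 + 4) % 8).  space_available() never
 * reports more than size - 1 free slots; one slot is always left empty so
 * that a full ring can be told apart from an empty one (cidx == pidx_head
 * means empty).
 */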

/*
 * Consumer.  Called with the consumer lock held and a guarantee that there is
 * work to do.
 */
static void
drain_ring(struct mp_ring *r, int budget)
{
	union ring_state os, ns;
	int n, pending, total;
	uint16_t cidx;
	uint16_t pidx;
	bool coalescing;

	mtx_assert(r->cons_lock, MA_OWNED);

	os.state = atomic_load_acq_64(&r->state);
	MPASS(os.flags == BUSY);

	cidx = os.cidx;
	pidx = os.pidx_tail;
	MPASS(cidx != pidx);

	pending = 0;
	total = 0;

	while (cidx != pidx) {

		/* Items from cidx to pidx are available for consumption. */
		n = r->drain(r, cidx, pidx, &coalescing);
		if (n == 0) {
			/*
			 * No progress was possible.  Hand over to the
			 * producer that is TAKING_OVER, or record a stall.
			 */
			critical_enter();
			os.state = atomic_load_64(&r->state);
			do {
				ns.state = os.state;
				ns.cidx = cidx;

				MPASS(os.flags == BUSY ||
				    os.flags == TOO_BUSY ||
				    os.flags == TAKING_OVER);

				if (os.flags == TAKING_OVER)
					ns.flags = BUSY;
				else
					ns.flags = STALLED;
			} while (atomic_fcmpset_64(&r->state, &os.state,
			    ns.state) == 0);
			critical_exit();
			if (os.flags == TAKING_OVER)
				counter_u64_add(r->abdications, 1);
			else if (ns.flags == STALLED)
				counter_u64_add(r->stalls, 1);
			break;
		}
		cidx = increment_idx(r, cidx, n);
		pending += n;
		total += n;
		counter_u64_add(r->consumed, n);

		/* Publish the new cidx and decide what to do next. */
		os.state = atomic_load_64(&r->state);
		do {
			MPASS(os.flags == BUSY || os.flags == TOO_BUSY ||
			    os.flags == TAKING_OVER);

			ns.state = os.state;
			ns.cidx = cidx;
			if (__predict_false(os.flags == TAKING_OVER)) {
				MPASS(total >= budget);
				ns.flags = BUSY;
				continue;
			}
			if (cidx == os.pidx_tail) {
				ns.flags = coalescing ? COALESCING : IDLE;
				continue;
			}
			if (total >= budget) {
				ns.flags = TOO_BUSY;
				continue;
			}
			MPASS(os.flags == BUSY);
			if (pending < 32)
				break;	/* keep draining, defer the update. */
		} while (atomic_fcmpset_acq_64(&r->state, &os.state, ns.state) == 0);

		if (__predict_false(os.flags == TAKING_OVER)) {
			MPASS(ns.flags == BUSY);
			counter_u64_add(r->abdications, 1);
			break;
		}

		if (ns.flags == IDLE || ns.flags == COALESCING) {
			MPASS(ns.pidx_tail == cidx);
			if (ns.pidx_head != ns.pidx_tail)
				counter_u64_add(r->cons_idle2, 1);
			else
				counter_u64_add(r->cons_idle, 1);
			break;
		}

		/*
		 * The acquire style atomic above guarantees visibility of items
		 * associated with any pidx change that we notice here.
		 */
		pidx = ns.pidx_tail;
		pending = 0;
	}

#ifdef INVARIANTS
	if (os.flags == TAKING_OVER)
		MPASS(ns.flags == BUSY);
	else {
		MPASS(ns.flags == IDLE || ns.flags == COALESCING ||
		    ns.flags == STALLED);
	}
#endif
}

/*
 * Flush tx frames that were previously held back for coalescing.  Called
 * with the consumer lock held, the ring marked BUSY, and nothing else queued
 * up (cidx == pidx_tail).
 */
static void
drain_txpkts(struct mp_ring *r, union ring_state os, int budget)
{
	union ring_state ns;
	uint16_t cidx = os.cidx;
	uint16_t pidx = os.pidx_tail;
	bool coalescing;

	mtx_assert(r->cons_lock, MA_OWNED);
	MPASS(os.flags == BUSY);
	MPASS(cidx == pidx);

	r->drain(r, cidx, pidx, &coalescing);
	MPASS(coalescing == false);
	critical_enter();
	os.state = atomic_load_64(&r->state);
	do {
		ns.state = os.state;
		MPASS(os.flags == BUSY);
		MPASS(os.cidx == cidx);
		if (ns.cidx == ns.pidx_tail)
			ns.flags = IDLE;
		else
			ns.flags = BUSY;
	} while (atomic_fcmpset_acq_64(&r->state, &os.state, ns.state) == 0);
	critical_exit();

	if (ns.flags == BUSY)
		drain_ring(r, budget);
}

int
mp_ring_alloc(struct mp_ring **pr, int size, void *cookie, ring_drain_t drain,
    ring_can_drain_t can_drain, struct malloc_type *mt, struct mtx *lck,
    int flags)
{
	struct mp_ring *r;
	int i;

	/* All idx are 16 bits wide, so size can be 65536 at most. */
	if (pr == NULL || size < 2 || size > 65536 || drain == NULL ||
	    can_drain == NULL)
		return (EINVAL);
	*pr = NULL;
	flags &= M_NOWAIT | M_WAITOK;
	MPASS(flags != 0);

	r = malloc(__offsetof(struct mp_ring, items[size]), mt, flags | M_ZERO);
	if (r == NULL)
		return (ENOMEM);
	r->size = size;
	r->cookie = cookie;
	r->mt = mt;
	r->drain = drain;
	r->can_drain = can_drain;
	r->cons_lock = lck;
	if ((r->dropped = counter_u64_alloc(flags)) == NULL)
		goto failed;
	for (i = 0; i < nitems(r->consumer); i++) {
		if ((r->consumer[i] = counter_u64_alloc(flags)) == NULL)
			goto failed;
	}
	if ((r->not_consumer = counter_u64_alloc(flags)) == NULL)
		goto failed;
	if ((r->abdications = counter_u64_alloc(flags)) == NULL)
		goto failed;
	if ((r->stalls = counter_u64_alloc(flags)) == NULL)
		goto failed;
	if ((r->consumed = counter_u64_alloc(flags)) == NULL)
		goto failed;
	if ((r->cons_idle = counter_u64_alloc(flags)) == NULL)
		goto failed;
	if ((r->cons_idle2 = counter_u64_alloc(flags)) == NULL)
		goto failed;
	*pr = r;
	return (0);
failed:
	mp_ring_free(r);
	return (ENOMEM);
}
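
/*
 * A minimal allocation sketch, assuming a hypothetical driver softc with a
 * 'txq_lock' mutex and 'txq_drain'/'txq_can_drain' callbacks of the
 * ring_drain_t/ring_can_drain_t types (none of these names are defined in
 * this file):
 *
 *	struct mp_ring *txr;
 *	int rc;
 *
 *	rc = mp_ring_alloc(&txr, 1024, sc, txq_drain, txq_can_drain,
 *	    M_DEVBUF, &sc->txq_lock, M_WAITOK);
 *	if (rc != 0)
 *		return (rc);
 */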

void
mp_ring_free(struct mp_ring *r)
{
	int i;

	if (r == NULL)
		return;

	if (r->dropped != NULL)
		counter_u64_free(r->dropped);
	for (i = 0; i < nitems(r->consumer); i++) {
		if (r->consumer[i] != NULL)
			counter_u64_free(r->consumer[i]);
	}
	if (r->not_consumer != NULL)
		counter_u64_free(r->not_consumer);
	if (r->abdications != NULL)
		counter_u64_free(r->abdications);
	if (r->stalls != NULL)
		counter_u64_free(r->stalls);
	if (r->consumed != NULL)
		counter_u64_free(r->consumed);
	if (r->cons_idle != NULL)
		counter_u64_free(r->cons_idle);
	if (r->cons_idle2 != NULL)
		counter_u64_free(r->cons_idle2);

	free(r, r->mt);
}

/*
 * Enqueue n items and maybe drain the ring for some time.
 *
 * Returns an errno.
 */
int
mp_ring_enqueue(struct mp_ring *r, void **items, int n, int budget)
{
	union ring_state os, ns;
	uint16_t pidx_start, pidx_stop;
	int i, nospc, cons;
	bool consumer;

	MPASS(items != NULL);
	MPASS(n > 0);

	/*
	 * Reserve room for the new items.  Our reservation, if successful, is
	 * from 'pidx_start' to 'pidx_stop'.
	 */
	nospc = 0;
	os.state = atomic_load_64(&r->state);
	for (;;) {
		for (;;) {
			if (__predict_true(space_available(r, os) >= n))
				break;

			/* Not enough room in the ring. */

			MPASS(os.flags != IDLE);
			MPASS(os.flags != COALESCING);
			if (__predict_false(++nospc > 100)) {
				counter_u64_add(r->dropped, n);
				return (ENOBUFS);
			}
			if (os.flags == STALLED)
				mp_ring_check_drainage(r, 64);
			else
				cpu_spinwait();
			os.state = atomic_load_64(&r->state);
		}

		/* There is room in the ring. */

		cons = -1;
		ns.state = os.state;
		ns.pidx_head = increment_idx(r, os.pidx_head, n);
		if (os.flags == IDLE || os.flags == COALESCING) {
			MPASS(os.pidx_tail == os.cidx);
			if (os.pidx_head == os.pidx_tail) {
				cons = C_FAST;
				ns.pidx_tail = increment_idx(r, os.pidx_tail, n);
			} else
				cons = C_2;
			ns.flags = BUSY;
		} else if (os.flags == TOO_BUSY) {
			cons = C_TAKEOVER;
			ns.flags = TAKING_OVER;
		}

		/*
		 * Stay in a critical section from a successful reservation
		 * until the items are published: producers that arrive after
		 * us spin-wait on pidx_tail, so we must not get preempted.
		 */
		critical_enter();
		if (atomic_fcmpset_64(&r->state, &os.state, ns.state))
			break;
		critical_exit();
		cpu_spinwait();
	}

	pidx_start = os.pidx_head;
	pidx_stop = ns.pidx_head;

	if (cons == C_FAST) {
		i = pidx_start;
		do {
			r->items[i] = *items++;
			if (__predict_false(++i == r->size))
				i = 0;
		} while (i != pidx_stop);
		critical_exit();
		counter_u64_add(r->consumer[C_FAST], 1);
		mtx_lock(r->cons_lock);
		drain_ring(r, budget);
		mtx_unlock(r->cons_lock);
		return (0);
	}

	/*
	 * Wait for other producers who got in ahead of us to enqueue their
	 * items, one producer at a time.  It is our turn when the ring's
	 * pidx_tail reaches the beginning of our reservation (pidx_start).
	 */
	while (ns.pidx_tail != pidx_start) {
		cpu_spinwait();
		ns.state = atomic_load_64(&r->state);
	}

	/* Now it is our turn to fill up the area we reserved earlier. */
	i = pidx_start;
	do {
		r->items[i] = *items++;
		if (__predict_false(++i == r->size))
			i = 0;
	} while (i != pidx_stop);

	/*
	 * Update the ring's pidx_tail.  The release style atomic guarantees
	 * that the items are visible to any thread that sees the updated pidx.
	 */
	os.state = atomic_load_64(&r->state);
	do {
		consumer = false;
		ns.state = os.state;
		ns.pidx_tail = pidx_stop;
		if (os.flags == IDLE || os.flags == COALESCING ||
		    (os.flags == STALLED && r->can_drain(r))) {
			MPASS(cons == -1);
			consumer = true;
			ns.flags = BUSY;
		}
	} while (atomic_fcmpset_rel_64(&r->state, &os.state, ns.state) == 0);
	critical_exit();

	if (cons == -1) {
		if (consumer)
			cons = C_3;
		else {
			counter_u64_add(r->not_consumer, 1);
			return (0);
		}
	}
	MPASS(cons > C_FAST && cons < nitems(r->consumer));
	counter_u64_add(r->consumer[cons], 1);
	mtx_lock(r->cons_lock);
	drain_ring(r, budget);
	mtx_unlock(r->cons_lock);

	return (0);
}
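
/*
 * A minimal sketch of a transmit routine built on mp_ring_enqueue(), reusing
 * the hypothetical 'txr' ring from the allocation example above (the budget
 * of 64 items per drain pass is also just an illustrative choice):
 *
 *	static int
 *	txq_transmit(void *arg, struct mbuf *m)
 *	{
 *		struct my_softc *sc = arg;
 *		void *items[1];
 *
 *		items[0] = m;
 *		return (mp_ring_enqueue(sc->txr, items, 1, 64));
 *	}
 */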

void
mp_ring_check_drainage(struct mp_ring *r, int budget)
{
	union ring_state os, ns;

	os.state = atomic_load_64(&r->state);
	if (os.flags == STALLED && r->can_drain(r)) {
		MPASS(os.cidx != os.pidx_tail);	/* implied by STALLED */
		ns.state = os.state;
		ns.flags = BUSY;
		if (atomic_cmpset_acq_64(&r->state, os.state, ns.state)) {
			mtx_lock(r->cons_lock);
			drain_ring(r, budget);
			mtx_unlock(r->cons_lock);
		}
	} else if (os.flags == COALESCING) {
		MPASS(os.cidx == os.pidx_tail);
		ns.state = os.state;
		ns.flags = BUSY;
		if (atomic_cmpset_acq_64(&r->state, os.state, ns.state)) {
			mtx_lock(r->cons_lock);
			drain_txpkts(r, ns, budget);
			mtx_unlock(r->cons_lock);
		}
	}
}
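
/*
 * A sketch of the periodic callout a driver might use to satisfy the
 * requirement (see the comment at the top of this file) that held or stalled
 * items are eventually transmitted even if nothing else is enqueued.  The
 * 'my_softc' fields and the once-per-second period are illustrative:
 *
 *	static void
 *	txq_tick(void *arg)
 *	{
 *		struct my_softc *sc = arg;
 *
 *		mp_ring_check_drainage(sc->txr, 64);
 *		callout_schedule(&sc->tick, hz);
 *	}
 */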

void
mp_ring_reset_stats(struct mp_ring *r)
{
	int i;

	counter_u64_zero(r->dropped);
	for (i = 0; i < nitems(r->consumer); i++)
		counter_u64_zero(r->consumer[i]);
	counter_u64_zero(r->not_consumer);
	counter_u64_zero(r->abdications);
	counter_u64_zero(r->stalls);
	counter_u64_zero(r->consumed);
	counter_u64_zero(r->cons_idle);
	counter_u64_zero(r->cons_idle2);
}

bool
mp_ring_is_idle(struct mp_ring *r)
{
	union ring_state s;

	s.state = atomic_load_64(&r->state);
	if (s.pidx_head == s.pidx_tail && s.pidx_tail == s.cidx &&
	    s.flags == IDLE)
		return (true);

	return (false);
}

void
mp_ring_sysctls(struct mp_ring *r, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *children)
{
	struct sysctl_oid *oid;

	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "mp_ring", CTLFLAG_RD |
	    CTLFLAG_MPSAFE, NULL, "mp_ring statistics");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "state", CTLFLAG_RD,
	    __DEVOLATILE(uint64_t *, &r->state), 0, "ring state");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "dropped", CTLFLAG_RD,
	    &r->dropped, "# of items dropped");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "consumed",
	    CTLFLAG_RD, &r->consumed, "# of items consumed");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "fast_consumer",
	    CTLFLAG_RD, &r->consumer[C_FAST],
	    "# of times producer became consumer (fast)");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "consumer2",
	    CTLFLAG_RD, &r->consumer[C_2],
	    "# of times producer became consumer (2)");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "consumer3",
	    CTLFLAG_RD, &r->consumer[C_3],
	    "# of times producer became consumer (3)");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "takeovers",
	    CTLFLAG_RD, &r->consumer[C_TAKEOVER],
	    "# of times producer took over from another consumer.");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "not_consumer",
	    CTLFLAG_RD, &r->not_consumer,
	    "# of times producer did not become consumer");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "abdications",
	    CTLFLAG_RD, &r->abdications, "# of consumer abdications");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "stalls",
	    CTLFLAG_RD, &r->stalls, "# of consumer stalls");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cons_idle",
	    CTLFLAG_RD, &r->cons_idle,
	    "# of times consumer ran fully to completion");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cons_idle2",
	    CTLFLAG_RD, &r->cons_idle2,
	    "# of times consumer idled when another enqueue was in progress");
}