hyperv/hn: Allow TX to share event taskqueues.

MFC after:	1 week
Sponsored by:	Microsoft
Differential Revision:	https://reviews.freebsd.org/D8659
This commit is contained in:
Sepherosa Ziehau 2016-11-30 07:54:28 +00:00
parent b5c7e2415e
commit 0e11868dfa
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=309318

View File

@@ -169,6 +169,8 @@ do { \
#define HN_PKTSIZE(m, align) \
roundup2((m)->m_pkthdr.len + HN_RNDIS_PKT_LEN, (align))
#define HN_RING_IDX2CPU(sc, idx) (((sc)->hn_cpu + (idx)) % mp_ncpus)
struct hn_txdesc {
#ifndef HN_USE_TXDESC_BUFRING
SLIST_ENTRY(hn_txdesc) link;
@@ -411,15 +413,19 @@ SYSCTL_INT(_hw_hn, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
#endif
#endif
/* Use shared TX taskqueue */
static int hn_share_tx_taskq = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, share_tx_taskq, CTLFLAG_RDTUN,
&hn_share_tx_taskq, 0, "Enable shared TX taskqueue");
static int hn_tx_taskq_cnt = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_taskq_cnt, CTLFLAG_RDTUN,
&hn_tx_taskq_cnt, 0, "# of TX taskqueues");
#define HN_TX_TASKQ_M_INDEP 0
#define HN_TX_TASKQ_M_GLOBAL 1
#define HN_TX_TASKQ_M_EVTTQ 2
static int hn_tx_taskq_mode = HN_TX_TASKQ_M_INDEP;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_taskq_mode, CTLFLAG_RDTUN,
&hn_tx_taskq_mode, 0, "TX taskqueue modes: "
"0 - independent, 1 - share global tx taskqs, 2 - share event taskqs");
#ifndef HN_USE_TXDESC_BUFRING
static int hn_use_txdesc_bufring = 0;
#else
@@ -902,7 +908,7 @@ hn_attach(device_t dev)
/*
* Setup taskqueue for transmission.
*/
if (hn_tx_taskque == NULL) {
if (hn_tx_taskq_mode == HN_TX_TASKQ_M_INDEP) {
int i;
sc->hn_tx_taskqs =
@@ -915,7 +921,7 @@ hn_attach(device_t dev)
taskqueue_start_threads(&sc->hn_tx_taskqs[i], 1, PI_NET,
"%s tx%d", device_get_nameunit(dev), i);
}
} else {
} else if (hn_tx_taskq_mode == HN_TX_TASKQ_M_GLOBAL) {
sc->hn_tx_taskqs = hn_tx_taskque;
}
@@ -1216,7 +1222,7 @@ hn_detach(device_t dev)
hn_destroy_rx_data(sc);
hn_destroy_tx_data(sc);
if (sc->hn_tx_taskqs != hn_tx_taskque) {
if (sc->hn_tx_taskqs != NULL && sc->hn_tx_taskqs != hn_tx_taskque) {
int i;
for (i = 0; i < hn_tx_taskq_cnt; ++i)
@@ -3312,7 +3318,12 @@ hn_tx_ring_create(struct hn_softc *sc, int id)
M_WAITOK, &txr->hn_tx_lock);
#endif
txr->hn_tx_taskq = sc->hn_tx_taskqs[id % hn_tx_taskq_cnt];
if (hn_tx_taskq_mode == HN_TX_TASKQ_M_EVTTQ) {
txr->hn_tx_taskq = VMBUS_GET_EVENT_TASKQ(
device_get_parent(dev), dev, HN_RING_IDX2CPU(sc, id));
} else {
txr->hn_tx_taskq = sc->hn_tx_taskqs[id % hn_tx_taskq_cnt];
}
#ifdef HN_IFSTART_SUPPORT
if (hn_use_if_start) {
@@ -4205,7 +4216,7 @@ hn_chan_attach(struct hn_softc *sc, struct vmbus_channel *chan)
}
/* Bind this channel to a proper CPU. */
vmbus_chan_cpu_set(chan, (sc->hn_cpu + idx) % mp_ncpus);
vmbus_chan_cpu_set(chan, HN_RING_IDX2CPU(sc, idx));
/*
* Open this channel
@@ -5361,10 +5372,23 @@ hn_tx_taskq_create(void *arg __unused)
else if (hn_tx_taskq_cnt > mp_ncpus)
hn_tx_taskq_cnt = mp_ncpus;
/*
* Fix the TX taskqueue mode.
*/
switch (hn_tx_taskq_mode) {
case HN_TX_TASKQ_M_INDEP:
case HN_TX_TASKQ_M_GLOBAL:
case HN_TX_TASKQ_M_EVTTQ:
break;
default:
hn_tx_taskq_mode = HN_TX_TASKQ_M_INDEP;
break;
}
if (vm_guest != VM_GUEST_HV)
return;
if (!hn_share_tx_taskq)
if (hn_tx_taskq_mode != HN_TX_TASKQ_M_GLOBAL)
return;
hn_tx_taskque = malloc(hn_tx_taskq_cnt * sizeof(struct taskqueue *),