hyperv/hn: Make # of rings configurable
Since the host may not be able to allocate the # of rings we request, save the # of rings actually allocated by the host in the ring_inuse counters and use those counters for run-time operation.  This paves the way for the upcoming vRSS support.

MFC after:	1 week
Sponsored by:	Microsoft OSTC
parent 925bcbd658
commit 3951eba165
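For context, the new hw.hn.ring_cnt and hw.hn.single_tx_ring knobs are CTLFLAG_RDTUN tunables, so they are set from the loader, and the per-device rx/tx_ring_cnt and rx/tx_ring_inuse sysctls added in this diff expose how many rings were created versus how many are in use. Below is a minimal sketch of the attach-time ring-count selection; the standalone helper and its name are hypothetical, while the tunables, the clamp against mp_ncpus, and the single-TX-ring conditions follow the diff that follows.

/*
 * Hypothetical helper mirroring the attach-time logic added by this commit:
 * decide how many TX/RX rings to request from the host.
 */
static void
hn_pick_ring_cnt(int *ring_cnt0, int *tx_ring_cnt0)
{
    int ring_cnt = hn_ring_cnt;     /* hw.hn.ring_cnt loader tunable */
    int tx_ring_cnt;

    /* A non-positive or out-of-range request falls back to one ring per CPU. */
    if (ring_cnt <= 0 || ring_cnt >= mp_ncpus)
        ring_cnt = mp_ncpus;

    /*
     * Use a single TX ring when hw.hn.single_tx_ring is set or when the
     * legacy ifnet.if_start path is active; if_start only needs one TX ring.
     */
    tx_ring_cnt = ring_cnt;
    if (hn_single_tx_ring || hn_use_if_start)
        tx_ring_cnt = 1;

    *ring_cnt0 = ring_cnt;
    *tx_ring_cnt0 = tx_ring_cnt;
}

The requested counts become hn_rx_ring_cnt/hn_tx_ring_cnt; whatever the host actually grants is recorded in hn_rx_ring_inuse/hn_tx_ring_inuse, and the run-time loops now iterate over the inuse counters. In this commit the inuse counters are still hard-coded to 1 (see the "TODO: vRSS" hunk) pending the vRSS work.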
@@ -1078,9 +1078,11 @@ typedef struct hn_softc {
 	netvsc_dev *net_dev;
 
 	int hn_rx_ring_cnt;
+	int hn_rx_ring_inuse;
 	struct hn_rx_ring *hn_rx_ring;
 
 	int hn_tx_ring_cnt;
+	int hn_tx_ring_inuse;
 	struct hn_tx_ring *hn_tx_ring;
 	int hn_tx_chimney_max;
 	struct taskqueue *hn_tx_taskq;
@@ -279,6 +279,14 @@ static int hn_use_if_start = 0;
 SYSCTL_INT(_hw_hn, OID_AUTO, use_if_start, CTLFLAG_RDTUN,
     &hn_use_if_start, 0, "Use if_start TX method");
 
+static int hn_ring_cnt = 1;
+SYSCTL_INT(_hw_hn, OID_AUTO, ring_cnt, CTLFLAG_RDTUN,
+    &hn_ring_cnt, 0, "# of TX/RX rings to used");
+
+static int hn_single_tx_ring = 1;
+SYSCTL_INT(_hw_hn, OID_AUTO, single_tx_ring, CTLFLAG_RDTUN,
+    &hn_single_tx_ring, 0, "Use one TX ring");
+
 /*
  * Forward declarations
  */
@@ -304,13 +312,13 @@ static int hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS);
 static int hn_check_iplen(const struct mbuf *, int);
 static int hn_create_tx_ring(struct hn_softc *, int);
 static void hn_destroy_tx_ring(struct hn_tx_ring *);
-static int hn_create_tx_data(struct hn_softc *);
+static int hn_create_tx_data(struct hn_softc *, int);
 static void hn_destroy_tx_data(struct hn_softc *);
 static void hn_start_taskfunc(void *, int);
 static void hn_start_txeof_taskfunc(void *, int);
 static void hn_stop_tx_tasks(struct hn_softc *);
 static int hn_encap(struct hn_tx_ring *, struct hn_txdesc *, struct mbuf **);
-static void hn_create_rx_data(struct hn_softc *sc);
+static void hn_create_rx_data(struct hn_softc *sc, int);
 static void hn_destroy_rx_data(struct hn_softc *sc);
 static void hn_set_tx_chimney_size(struct hn_softc *, int);
@@ -386,7 +394,7 @@ netvsc_attach(device_t dev)
 	hn_softc_t *sc;
 	int unit = device_get_unit(dev);
 	struct ifnet *ifp = NULL;
-	int error;
+	int error, ring_cnt, tx_ring_cnt;
 #if __FreeBSD_version >= 1100045
 	int tso_maxlen;
 #endif
@@ -427,11 +435,24 @@ netvsc_attach(device_t dev)
 	ifp = sc->hn_ifp = if_alloc(IFT_ETHER);
 	ifp->if_softc = sc;
 
-	error = hn_create_tx_data(sc);
+	ring_cnt = hn_ring_cnt;
+	if (ring_cnt <= 0 || ring_cnt >= mp_ncpus)
+		ring_cnt = mp_ncpus;
+
+	tx_ring_cnt = ring_cnt;
+	if (hn_single_tx_ring || hn_use_if_start) {
+		/*
+		 * - Explicitly asked to use single TX ring.
+		 * - ifnet.if_start is used; ifnet.if_start only needs
+		 *   one TX ring.
+		 */
+		tx_ring_cnt = 1;
+	}
+	error = hn_create_tx_data(sc, tx_ring_cnt);
 	if (error)
 		goto failed;
 
-	hn_create_rx_data(sc);
+	hn_create_rx_data(sc, ring_cnt);
 
 	/*
 	 * Associate the first TX/RX ring w/ the primary channel.
@@ -478,10 +499,16 @@ netvsc_attach(device_t dev)
 	    IFCAP_LRO;
 	ifp->if_hwassist = sc->hn_tx_ring[0].hn_csum_assist | CSUM_TSO;
 
-	error = hv_rf_on_device_add(device_ctx, &device_info);
+	error = hv_rf_on_device_add(device_ctx, &device_info, ring_cnt);
 	if (error)
 		goto failed;
 
+	/* TODO: vRSS */
+	sc->hn_tx_ring_inuse = 1;
+	sc->hn_rx_ring_inuse = 1;
+	device_printf(dev, "%d TX ring, %d RX ring\n",
+	    sc->hn_tx_ring_inuse, sc->hn_rx_ring_inuse);
+
 	if (device_info.link_state == 0) {
 		sc->hn_carrier = 1;
 	}
@@ -1397,7 +1424,7 @@ hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 		    HN_LRO_LENLIM_MIN(ifp)) {
 			int i;
 
-			for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
+			for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
 				sc->hn_rx_ring[i].hn_lro.lro_length_lim =
 				    HN_LRO_LENLIM_MIN(ifp);
 			}
@@ -1434,7 +1461,8 @@ hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 			NV_UNLOCK(sc);
 			break;
 		}
-		error = hv_rf_on_device_add(hn_dev, &device_info);
+		error = hv_rf_on_device_add(hn_dev, &device_info,
+		    sc->hn_rx_ring_inuse);
 		if (error) {
 			NV_LOCK(sc);
 			sc->temp_unusable = FALSE;
@@ -1587,7 +1615,7 @@ hn_stop(hn_softc_t *sc)
 
 	atomic_clear_int(&ifp->if_drv_flags,
 	    (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
-	for (i = 0; i < sc->hn_tx_ring_cnt; ++i)
+	for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
 		sc->hn_tx_ring[i].hn_oactive = 0;
 
 	if_link_state_change(ifp, LINK_STATE_DOWN);
@@ -1680,7 +1708,7 @@ hn_ifinit_locked(hn_softc_t *sc)
 	}
 
 	atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
-	for (i = 0; i < sc->hn_tx_ring_cnt; ++i)
+	for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
 		sc->hn_tx_ring[i].hn_oactive = 0;
 
 	atomic_set_int(&ifp->if_drv_flags, IFF_DRV_RUNNING);
@@ -1745,7 +1773,7 @@ hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS)
 		return EINVAL;
 
 	NV_LOCK(sc);
-	for (i = 0; i < sc->hn_rx_ring_cnt; ++i)
+	for (i = 0; i < sc->hn_rx_ring_inuse; ++i)
 		sc->hn_rx_ring[i].hn_lro.lro_length_lim = lenlim;
 	NV_UNLOCK(sc);
 	return 0;
@@ -1775,7 +1803,7 @@ hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS)
 	 */
 	--ackcnt;
 	NV_LOCK(sc);
-	for (i = 0; i < sc->hn_rx_ring_cnt; ++i)
+	for (i = 0; i < sc->hn_rx_ring_inuse; ++i)
 		sc->hn_rx_ring[i].hn_lro.lro_ackcnt_lim = ackcnt;
 	NV_UNLOCK(sc);
 	return 0;
@@ -1799,7 +1827,7 @@ hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS)
 		return error;
 
 	NV_LOCK(sc);
-	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
+	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
 		struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];
 
 		if (on)
@@ -1838,7 +1866,7 @@ hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
 	u_long stat;
 
 	stat = 0;
-	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
+	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
 		rxr = &sc->hn_rx_ring[i];
 		stat += *((u_long *)((uint8_t *)rxr + ofs));
 	}
@@ -1848,7 +1876,7 @@ hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
 		return error;
 
 	/* Zero out this stat. */
-	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
+	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
 		rxr = &sc->hn_rx_ring[i];
 		*((u_long *)((uint8_t *)rxr + ofs)) = 0;
 	}
@@ -1864,7 +1892,7 @@ hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS)
 	uint64_t stat;
 
 	stat = 0;
-	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
+	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
 		rxr = &sc->hn_rx_ring[i];
 		stat += *((uint64_t *)((uint8_t *)rxr + ofs));
 	}
@@ -1874,7 +1902,7 @@ hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS)
 		return error;
 
 	/* Zero out this stat. */
-	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
+	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
 		rxr = &sc->hn_rx_ring[i];
 		*((uint64_t *)((uint8_t *)rxr + ofs)) = 0;
 	}
@@ -1890,7 +1918,7 @@ hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
 	u_long stat;
 
 	stat = 0;
-	for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
+	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
 		txr = &sc->hn_tx_ring[i];
 		stat += *((u_long *)((uint8_t *)txr + ofs));
 	}
@@ -1900,7 +1928,7 @@ hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
 		return error;
 
 	/* Zero out this stat. */
-	for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
+	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
 		txr = &sc->hn_tx_ring[i];
 		*((u_long *)((uint8_t *)txr + ofs)) = 0;
 	}
@@ -1922,7 +1950,7 @@ hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS)
 		return error;
 
 	NV_LOCK(sc);
-	for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
+	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
 		txr = &sc->hn_tx_ring[i];
 		*((int *)((uint8_t *)txr + ofs)) = conf;
 	}
@@ -2019,7 +2047,7 @@ hn_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 }
 
 static void
-hn_create_rx_data(struct hn_softc *sc)
+hn_create_rx_data(struct hn_softc *sc, int ring_cnt)
 {
 	struct sysctl_oid_list *child;
 	struct sysctl_ctx_list *ctx;
@@ -2031,7 +2059,9 @@ hn_create_rx_data(struct hn_softc *sc)
 #endif
 	int i;
 
-	sc->hn_rx_ring_cnt = 1; /* TODO: vRSS */
+	sc->hn_rx_ring_cnt = ring_cnt;
+	sc->hn_rx_ring_inuse = sc->hn_rx_ring_cnt;
+
 	sc->hn_rx_ring = malloc(sizeof(struct hn_rx_ring) * sc->hn_rx_ring_cnt,
 	    M_NETVSC, M_WAITOK | M_ZERO);
 
@@ -2132,6 +2162,10 @@ hn_create_rx_data(struct hn_softc *sc)
 	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
 	    __offsetof(struct hn_rx_ring, hn_small_pkts),
 	    hn_rx_stat_ulong_sysctl, "LU", "# of small packets received");
+	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_cnt",
+	    CTLFLAG_RD, &sc->hn_rx_ring_cnt, 0, "# created RX rings");
+	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_inuse",
+	    CTLFLAG_RD, &sc->hn_rx_ring_inuse, 0, "# used RX rings");
 }
 
 static void
@@ -2152,6 +2186,7 @@ hn_destroy_rx_data(struct hn_softc *sc)
 	sc->hn_rx_ring = NULL;
 
 	sc->hn_rx_ring_cnt = 0;
+	sc->hn_rx_ring_inuse = 0;
 }
 
 static int
@@ -2388,18 +2423,15 @@ hn_destroy_tx_ring(struct hn_tx_ring *txr)
 }
 
 static int
-hn_create_tx_data(struct hn_softc *sc)
+hn_create_tx_data(struct hn_softc *sc, int ring_cnt)
 {
 	struct sysctl_oid_list *child;
 	struct sysctl_ctx_list *ctx;
 	int i;
 
-	if (hn_use_if_start) {
-		/* ifnet.if_start only needs one TX ring */
-		sc->hn_tx_ring_cnt = 1;
-	} else {
-		sc->hn_tx_ring_cnt = 1; /* TODO: vRSS */
-	}
+	sc->hn_tx_ring_cnt = ring_cnt;
+	sc->hn_tx_ring_inuse = sc->hn_tx_ring_cnt;
 
 	sc->hn_tx_ring = malloc(sizeof(struct hn_tx_ring) * sc->hn_tx_ring_cnt,
 	    M_NETVSC, M_WAITOK | M_ZERO);
 
@@ -2458,6 +2490,10 @@ hn_create_tx_data(struct hn_softc *sc)
 	    hn_tx_conf_int_sysctl, "I",
 	    "Always schedule transmission "
 	    "instead of doing direct transmission");
+	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_cnt",
+	    CTLFLAG_RD, &sc->hn_tx_ring_cnt, 0, "# created TX rings");
+	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_inuse",
+	    CTLFLAG_RD, &sc->hn_tx_ring_inuse, 0, "# used TX rings");
 
 	return 0;
 }
@@ -2468,7 +2504,7 @@ hn_set_tx_chimney_size(struct hn_softc *sc, int chimney_size)
 	int i;
 
 	NV_LOCK(sc);
-	for (i = 0; i < sc->hn_tx_ring_cnt; ++i)
+	for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
 		sc->hn_tx_ring[i].hn_tx_chimney_size = chimney_size;
 	NV_UNLOCK(sc);
 }
@@ -2488,6 +2524,7 @@ hn_destroy_tx_data(struct hn_softc *sc)
 	sc->hn_tx_ring = NULL;
 
 	sc->hn_tx_ring_cnt = 0;
+	sc->hn_tx_ring_inuse = 0;
 }
 
 static void
@@ -2516,7 +2553,7 @@ hn_stop_tx_tasks(struct hn_softc *sc)
 {
 	int i;
 
-	for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
+	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
 		struct hn_tx_ring *txr = &sc->hn_tx_ring[i];
 
 		taskqueue_drain(txr->hn_tx_taskq, &txr->hn_tx_task);
@@ -2592,7 +2629,7 @@ hn_transmit(struct ifnet *ifp, struct mbuf *m)
 	 * Select the TX ring based on flowid
 	 */
 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
-		idx = m->m_pkthdr.flowid % sc->hn_tx_ring_cnt;
+		idx = m->m_pkthdr.flowid % sc->hn_tx_ring_inuse;
 	txr = &sc->hn_tx_ring[idx];
 
 	error = drbr_enqueue(ifp, txr->hn_mbuf_br, m);
@@ -2624,7 +2661,7 @@ hn_xmit_qflush(struct ifnet *ifp)
 	struct hn_softc *sc = ifp->if_softc;
 	int i;
 
-	for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
+	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
 		struct hn_tx_ring *txr = &sc->hn_tx_ring[i];
 		struct mbuf *m;
 
@@ -815,7 +815,8 @@ hv_rf_close_device(rndis_device *device)
  * RNDIS filter on device add
  */
 int
-hv_rf_on_device_add(struct hv_device *device, void *additl_info)
+hv_rf_on_device_add(struct hv_device *device, void *additl_info,
+    int nchan __unused)
 {
 	int ret;
 	netvsc_dev *net_dev;
@@ -101,7 +101,7 @@ int hv_rf_on_receive(netvsc_dev *net_dev, struct hv_device *device,
     struct hv_vmbus_channel *chan, netvsc_packet *pkt);
 void hv_rf_receive_rollup(netvsc_dev *net_dev);
 void hv_rf_channel_rollup(struct hv_vmbus_channel *chan);
-int hv_rf_on_device_add(struct hv_device *device, void *additl_info);
+int hv_rf_on_device_add(struct hv_device *device, void *additl_info, int nchan);
 int hv_rf_on_device_remove(struct hv_device *device, boolean_t destroy_channel);
 int hv_rf_on_open(struct hv_device *device);
 int hv_rf_on_close(struct hv_device *device);