hyperv/hn: Move chimney sending buffer to hn_softc

Additionally, do not recreate the chimney sending buffer on each primary
channel open; it is now created in the device_attach DEVMETHOD and
destroyed in the device_detach DEVMETHOD.

MFC after:	1 week
Sponsored by:	Microsoft
Differential Revision:	https://reviews.freebsd.org/D7574
Sepherosa Ziehau 2016-08-22 07:34:39 +00:00
parent bb7e9b3046
commit fdd6031b04
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=304591
5 changed files with 147 additions and 161 deletions
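In outline, the chimney (pre-registered TX copy) buffer now lives in the per-device softc: it is DMA-allocated once when the TX rings are created during attach, its GPADL is connected to the primary channel when NVS comes up, and both are torn down on detach. The sketch below stitches together only the calls that appear in the diff that follows; the two helper names are hypothetical, and error handling, the NVS CHIM_CONN/CHIM_DISCONN message exchange, and locking are omitted, so this is an outline rather than compilable driver code.

/* Hypothetical helpers summarizing the new per-softc chimney buffer lifecycle. */
static int
hn_chim_create_sketch(struct hn_softc *sc)
{
	/* Attach path (hn_create_tx_data): one TXBUF, shared by all channels. */
	sc->hn_chim = hyperv_dmamem_alloc(bus_get_dma_tag(sc->hn_dev),
	    PAGE_SIZE, 0, NETVSC_SEND_BUFFER_SIZE, &sc->hn_chim_dma,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (sc->hn_chim == NULL)
		return (ENOMEM);

	/* NVS bringup: expose the buffer to the host via the primary channel only. */
	return (vmbus_chan_gpadl_connect(sc->hn_prichan,
	    sc->hn_chim_dma.hv_paddr, NETVSC_SEND_BUFFER_SIZE,
	    &sc->hn_chim_gpadl));
}

static void
hn_chim_destroy_sketch(struct hn_softc *sc)
{
	/* NVS teardown: revoke the GPADL and drop the allocation bitmap. */
	if (sc->hn_chim_gpadl != 0) {
		vmbus_chan_gpadl_disconnect(sc->hn_prichan, sc->hn_chim_gpadl);
		sc->hn_chim_gpadl = 0;
	}
	if (sc->hn_chim_bmap != NULL) {
		free(sc->hn_chim_bmap, M_NETVSC);
		sc->hn_chim_bmap = NULL;
	}
	/* Detach path (hn_destroy_tx_data): release the DMA memory last. */
	if (sc->hn_chim != NULL) {
		hyperv_dmamem_free(&sc->hn_chim_dma, sc->hn_chim);
		sc->hn_chim = NULL;
	}
}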


@@ -37,6 +37,7 @@
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <net/if.h>
#include <net/if_var.h>
@@ -60,10 +61,10 @@ static void hv_nv_on_channel_callback(struct vmbus_channel *chan,
void *xrxr);
static int hv_nv_init_send_buffer_with_net_vsp(struct hn_softc *sc);
static int hv_nv_init_rx_buffer_with_net_vsp(struct hn_softc *, int);
static int hv_nv_destroy_send_buffer(netvsc_dev *net_dev);
static int hv_nv_destroy_send_buffer(struct hn_softc *sc);
static int hv_nv_destroy_rx_buffer(struct hn_softc *sc);
static int hv_nv_connect_to_vsp(struct hn_softc *sc);
static void hv_nv_on_send_completion(netvsc_dev *net_dev,
static void hv_nv_on_send_completion(struct hn_softc *sc,
struct vmbus_channel *, const struct vmbus_chanpkt_hdr *pkt);
static void hv_nv_on_receive_completion(struct vmbus_channel *chan,
uint64_t tid);
@@ -71,7 +72,7 @@ static void hv_nv_on_receive(netvsc_dev *net_dev,
struct hn_rx_ring *rxr, struct vmbus_channel *chan,
const struct vmbus_chanpkt_hdr *pkt);
static void hn_nvs_sent_none(struct hn_send_ctx *sndc,
struct netvsc_dev_ *net_dev, struct vmbus_channel *chan,
struct hn_softc *, struct vmbus_channel *chan,
const void *, int);
static struct hn_send_ctx hn_send_ctx_none =
@@ -111,31 +112,30 @@ hv_nv_get_inbound_net_device(struct hn_softc *sc)
return sc->net_dev;
}
int
hv_nv_get_next_send_section(netvsc_dev *net_dev)
uint32_t
hn_chim_alloc(struct hn_softc *sc)
{
unsigned long bitsmap_words = net_dev->bitsmap_words;
unsigned long *bitsmap = net_dev->send_section_bitsmap;
unsigned long idx;
int ret = HN_NVS_CHIM_IDX_INVALID;
int i;
int i, bmap_cnt = sc->hn_chim_bmap_cnt;
u_long *bmap = sc->hn_chim_bmap;
uint32_t ret = HN_NVS_CHIM_IDX_INVALID;
for (i = 0; i < bitsmap_words; i++) {
idx = ffsl(~bitsmap[i]);
if (0 == idx)
for (i = 0; i < bmap_cnt; ++i) {
int idx;
idx = ffsl(~bmap[i]);
if (idx == 0)
continue;
idx--;
KASSERT(i * BITS_PER_LONG + idx < net_dev->send_section_count,
("invalid i %d and idx %lu", i, idx));
--idx; /* ffsl is 1-based */
KASSERT(i * LONG_BIT + idx < sc->hn_chim_cnt,
("invalid i %d and idx %d", i, idx));
if (atomic_testandset_long(&bitsmap[i], idx))
if (atomic_testandset_long(&bmap[i], idx))
continue;
ret = i * BITS_PER_LONG + idx;
ret = i * LONG_BIT + idx;
break;
}
return (ret);
}
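hn_chim_alloc() scans the bitmap one long at a time: ffsl() on the complemented word finds a clear bit, atomic_testandset_long() claims it so TX rings running concurrently cannot hand out the same section, and the word index times LONG_BIT plus the bit index yields the section number. A rough userland approximation of that technique, assuming C11 atomics and GCC/Clang __builtin_ffsl() in place of the kernel primitives (all names below are invented for illustration and only stand in for the driver's fields):

/* Userland approximation of the lock-free chimney-section bitmap allocator. */
#include <limits.h>	/* LONG_BIT */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define CHIM_IDX_INVALID	((uint32_t)-1)	/* stand-in for HN_NVS_CHIM_IDX_INVALID */

static _Atomic unsigned long chim_bmap[2];	/* toy bitmap: 2 * LONG_BIT sections */
static const int chim_bmap_cnt = 2;

static uint32_t
chim_alloc(void)
{
	for (int i = 0; i < chim_bmap_cnt; ++i) {
		unsigned long word = atomic_load(&chim_bmap[i]);
		int idx = __builtin_ffsl(~word);
		if (idx == 0)
			continue;		/* every bit in this word is taken */
		--idx;				/* ffsl() is 1-based */
		/* Claim the bit; if another thread won the race, move to the next word. */
		if (atomic_fetch_or(&chim_bmap[i], 1UL << idx) & (1UL << idx))
			continue;
		return ((uint32_t)(i * LONG_BIT + idx));
	}
	return (CHIM_IDX_INVALID);		/* caller falls back to a non-chimney send */
}

int
main(void)
{
	for (int n = 0; n < 3; ++n)
		printf("allocated section %u\n", chim_alloc());
	return (0);
}

The lock-free claim matters because the buffer is shared by every channel in the softc, so multiple transmit paths can allocate sections at the same time without a mutex.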
@@ -248,22 +248,8 @@ hv_nv_init_send_buffer_with_net_vsp(struct hn_softc *sc)
const struct hn_nvs_chim_connresp *resp;
size_t resp_len;
uint32_t status, sectsz;
netvsc_dev *net_dev;
int error;
net_dev = hv_nv_get_outbound_net_device(sc);
if (!net_dev) {
return (ENODEV);
}
net_dev->send_buf = hyperv_dmamem_alloc(bus_get_dma_tag(sc->hn_dev),
PAGE_SIZE, 0, net_dev->send_buf_size, &net_dev->txbuf_dma,
BUS_DMA_WAITOK | BUS_DMA_ZERO);
if (net_dev->send_buf == NULL) {
device_printf(sc->hn_dev, "allocate chimney txbuf failed\n");
return (ENOMEM);
}
/*
* Connect chimney sending buffer GPADL to the primary channel.
*
@@ -272,8 +258,8 @@ hv_nv_init_send_buffer_with_net_vsp(struct hn_softc *sc)
* Sub-channels just share this chimney sending buffer.
*/
error = vmbus_chan_gpadl_connect(sc->hn_prichan,
net_dev->txbuf_dma.hv_paddr, net_dev->send_buf_size,
&net_dev->send_buf_gpadl_handle);
sc->hn_chim_dma.hv_paddr, NETVSC_SEND_BUFFER_SIZE,
&sc->hn_chim_gpadl);
if (error) {
if_printf(sc->hn_ifp, "chimney sending buffer gpadl "
"connect failed: %d\n", error);
@@ -293,7 +279,7 @@ hv_nv_init_send_buffer_with_net_vsp(struct hn_softc *sc)
chim = vmbus_xact_req_data(xact);
chim->nvs_type = HN_NVS_TYPE_CHIM_CONN;
chim->nvs_gpadl = net_dev->send_buf_gpadl_handle;
chim->nvs_gpadl = sc->hn_chim_gpadl;
chim->nvs_sig = HN_NVS_CHIM_SIG;
hn_send_ctx_init_simple(&sndc, hn_nvs_sent_xact, xact);
@@ -340,23 +326,31 @@ hv_nv_init_send_buffer_with_net_vsp(struct hn_softc *sc)
return 0;
}
net_dev->send_section_size = sectsz;
net_dev->send_section_count =
net_dev->send_buf_size / net_dev->send_section_size;
net_dev->bitsmap_words = howmany(net_dev->send_section_count,
BITS_PER_LONG);
net_dev->send_section_bitsmap =
malloc(net_dev->bitsmap_words * sizeof(long), M_NETVSC,
M_WAITOK | M_ZERO);
sc->hn_chim_szmax = sectsz;
sc->hn_chim_cnt = NETVSC_SEND_BUFFER_SIZE / sc->hn_chim_szmax;
if (NETVSC_SEND_BUFFER_SIZE % sc->hn_chim_szmax != 0) {
if_printf(sc->hn_ifp, "chimney sending sections are "
"not properly aligned\n");
}
if (sc->hn_chim_cnt % LONG_BIT != 0) {
if_printf(sc->hn_ifp, "discard %d chimney sending sections\n",
sc->hn_chim_cnt % LONG_BIT);
}
sc->hn_chim_bmap_cnt = sc->hn_chim_cnt / LONG_BIT;
sc->hn_chim_bmap = malloc(sc->hn_chim_bmap_cnt * sizeof(u_long),
M_NETVSC, M_WAITOK | M_ZERO);
/* Done! */
sc->hn_flags |= HN_FLAG_CHIM_CONNECTED;
if (bootverbose) {
if_printf(sc->hn_ifp, "chimney sending buffer %u/%u\n",
net_dev->send_section_size, net_dev->send_section_count);
if_printf(sc->hn_ifp, "chimney sending buffer %d/%d\n",
sc->hn_chim_szmax, sc->hn_chim_cnt);
}
return 0;
cleanup:
hv_nv_destroy_send_buffer(net_dev);
hv_nv_destroy_send_buffer(sc);
return (error);
}
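The NVS response dictates the section size; the driver then derives how many sections fit into the fixed-size TXBUF and sizes the bitmap in whole longs, which is why a section count that is not a multiple of LONG_BIT triggers the "discard ... chimney sending sections" message above. The arithmetic, with stand-in numbers (the TXBUF size and NVS-reported section size here are only assumptions chosen to show the discard case), is just:

#include <limits.h>	/* LONG_BIT */
#include <stdio.h>

int
main(void)
{
	const unsigned int buf_size = 15U * 1024 * 1024;	/* assumed TXBUF size */
	const unsigned int sectsz = 6000;			/* assumed NVS-reported section size */
	unsigned int chim_cnt = buf_size / sectsz;		/* usable chimney sections */
	unsigned int bmap_cnt = chim_cnt / LONG_BIT;		/* bitmap words, truncated */

	printf("%u sections, %u bitmap words, %u sections discarded\n",
	    chim_cnt, bmap_cnt, chim_cnt % LONG_BIT);
	return (0);
}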
@@ -379,9 +373,8 @@ hv_nv_destroy_rx_buffer(struct hn_softc *sc)
disconn.nvs_sig = HN_NVS_RXBUF_SIG;
/* NOTE: No response. */
ret = hn_nvs_send(sc->hn_prichan,
VMBUS_CHANPKT_FLAG_NONE, &disconn, sizeof(disconn),
&hn_send_ctx_none);
ret = hn_nvs_send(sc->hn_prichan, VMBUS_CHANPKT_FLAG_NONE,
&disconn, sizeof(disconn), &hn_send_ctx_none);
if (ret != 0) {
if_printf(sc->hn_ifp,
"send rxbuf disconn failed: %d\n", ret);
@@ -410,11 +403,11 @@ hv_nv_destroy_rx_buffer(struct hn_softc *sc)
* Net VSC destroy send buffer
*/
static int
hv_nv_destroy_send_buffer(netvsc_dev *net_dev)
hv_nv_destroy_send_buffer(struct hn_softc *sc)
{
int ret = 0;
if (net_dev->send_section_size) {
if (sc->hn_flags & HN_FLAG_CHIM_CONNECTED) {
struct hn_nvs_chim_disconn disconn;
/*
@@ -425,39 +418,33 @@ hv_nv_destroy_send_buffer(netvsc_dev *net_dev)
disconn.nvs_sig = HN_NVS_CHIM_SIG;
/* NOTE: No response. */
ret = hn_nvs_send(net_dev->sc->hn_prichan,
VMBUS_CHANPKT_FLAG_NONE, &disconn, sizeof(disconn),
&hn_send_ctx_none);
ret = hn_nvs_send(sc->hn_prichan, VMBUS_CHANPKT_FLAG_NONE,
&disconn, sizeof(disconn), &hn_send_ctx_none);
if (ret != 0) {
if_printf(net_dev->sc->hn_ifp,
if_printf(sc->hn_ifp,
"send chim disconn failed: %d\n", ret);
return (ret);
}
sc->hn_flags &= ~HN_FLAG_CHIM_CONNECTED;
}
/* Tear down the gpadl on the vsp end */
if (net_dev->send_buf_gpadl_handle) {
ret = vmbus_chan_gpadl_disconnect(net_dev->sc->hn_prichan,
net_dev->send_buf_gpadl_handle);
if (sc->hn_chim_gpadl != 0) {
/*
* If we failed here, we might as well return and have a leak
* rather than continue and a bugchk
* Disconnect chimney sending buffer from primary channel.
*/
ret = vmbus_chan_gpadl_disconnect(sc->hn_prichan,
sc->hn_chim_gpadl);
if (ret != 0) {
if_printf(sc->hn_ifp,
"chim disconn failed: %d\n", ret);
return (ret);
}
net_dev->send_buf_gpadl_handle = 0;
sc->hn_chim_gpadl = 0;
}
if (net_dev->send_buf) {
/* Free up the receive buffer */
hyperv_dmamem_free(&net_dev->txbuf_dma, net_dev->send_buf);
net_dev->send_buf = NULL;
}
if (net_dev->send_section_bitsmap) {
free(net_dev->send_section_bitsmap, M_NETVSC);
if (sc->hn_chim_bmap != NULL) {
free(sc->hn_chim_bmap, M_NETVSC);
sc->hn_chim_bmap = NULL;
}
return (ret);
@@ -622,7 +609,6 @@ hv_nv_connect_to_vsp(struct hn_softc *sc)
rxbuf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
else
rxbuf_size = NETVSC_RECEIVE_BUFFER_SIZE;
net_dev->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
ret = hv_nv_init_rx_buffer_with_net_vsp(sc, rxbuf_size);
if (ret == 0)
@@ -639,7 +625,7 @@ static void
hv_nv_disconnect_from_vsp(struct hn_softc *sc)
{
hv_nv_destroy_rx_buffer(sc);
hv_nv_destroy_send_buffer(sc->net_dev);
hv_nv_destroy_send_buffer(sc);
}
void
@@ -727,7 +713,7 @@ hv_nv_on_device_remove(struct hn_softc *sc, boolean_t destroy_channel)
void
hn_nvs_sent_xact(struct hn_send_ctx *sndc,
struct netvsc_dev_ *net_dev __unused, struct vmbus_channel *chan __unused,
struct hn_softc *sc __unused, struct vmbus_channel *chan __unused,
const void *data, int dlen)
{
@@ -736,42 +722,42 @@ hn_nvs_sent_xact(struct hn_send_ctx *sndc,
static void
hn_nvs_sent_none(struct hn_send_ctx *sndc __unused,
struct netvsc_dev_ *net_dev __unused, struct vmbus_channel *chan __unused,
struct hn_softc *sc __unused, struct vmbus_channel *chan __unused,
const void *data __unused, int dlen __unused)
{
/* EMPTY */
}
void
hn_chim_free(struct netvsc_dev_ *net_dev, uint32_t chim_idx)
hn_chim_free(struct hn_softc *sc, uint32_t chim_idx)
{
u_long mask;
uint32_t idx;
idx = chim_idx / BITS_PER_LONG;
KASSERT(idx < net_dev->bitsmap_words,
idx = chim_idx / LONG_BIT;
KASSERT(idx < sc->hn_chim_bmap_cnt,
("invalid chimney index 0x%x", chim_idx));
mask = 1UL << (chim_idx % BITS_PER_LONG);
KASSERT(net_dev->send_section_bitsmap[idx] & mask,
mask = 1UL << (chim_idx % LONG_BIT);
KASSERT(sc->hn_chim_bmap[idx] & mask,
("index bitmap 0x%lx, chimney index %u, "
"bitmap idx %d, bitmask 0x%lx",
net_dev->send_section_bitsmap[idx], chim_idx, idx, mask));
sc->hn_chim_bmap[idx], chim_idx, idx, mask));
atomic_clear_long(&net_dev->send_section_bitsmap[idx], mask);
atomic_clear_long(&sc->hn_chim_bmap[idx], mask);
}
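The free path is the mirror image: the 32-bit section index is split into a bitmap word and a bit position, sanity-checked, and the bit cleared atomically so the section becomes allocatable again. A userland counterpart (again with invented names, and atomic_fetch_and() standing in for the driver's atomic_clear_long()):

/* Userland counterpart to hn_chim_free(): release a previously claimed section. */
#include <assert.h>
#include <limits.h>	/* LONG_BIT */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic unsigned long chim_bmap[2];	/* same toy bitmap as the allocator sketch */
static const int chim_bmap_cnt = 2;

static void
chim_free(uint32_t chim_idx)
{
	uint32_t word = chim_idx / LONG_BIT;
	unsigned long mask = 1UL << (chim_idx % LONG_BIT);

	assert(word < (uint32_t)chim_bmap_cnt);	/* KASSERTs in the driver */
	/* Clear the bit; it must have been set by a prior allocation. */
	unsigned long prev = atomic_fetch_and(&chim_bmap[word], ~mask);
	assert(prev & mask);
	(void)prev;
}

int
main(void)
{
	atomic_fetch_or(&chim_bmap[1], 1UL << 3);	/* pretend section LONG_BIT + 3 was allocated */
	chim_free(LONG_BIT + 3);
	return (0);
}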
/*
* Net VSC on send completion
*/
static void
hv_nv_on_send_completion(netvsc_dev *net_dev, struct vmbus_channel *chan,
hv_nv_on_send_completion(struct hn_softc *sc, struct vmbus_channel *chan,
const struct vmbus_chanpkt_hdr *pkt)
{
struct hn_send_ctx *sndc;
sndc = (struct hn_send_ctx *)(uintptr_t)pkt->cph_xactid;
sndc->hn_cb(sndc, net_dev, chan, VMBUS_CHANPKT_CONST_DATA(pkt),
sndc->hn_cb(sndc, sc, chan, VMBUS_CHANPKT_CONST_DATA(pkt),
VMBUS_CHANPKT_DATALEN(pkt));
/*
* NOTE:
@@ -930,8 +916,7 @@ hv_nv_on_channel_callback(struct vmbus_channel *chan, void *xrxr)
if (bytes_rxed > 0) {
switch (pkt->cph_type) {
case VMBUS_CHANPKT_TYPE_COMP:
hv_nv_on_send_completion(net_dev, chan,
pkt);
hv_nv_on_send_completion(sc, chan, pkt);
break;
case VMBUS_CHANPKT_TYPE_RXBUF:
hv_nv_on_receive(net_dev, rxr, chan, pkt);


@@ -217,20 +217,8 @@ typedef struct rndis_recv_scale_param_ {
*/
typedef struct netvsc_dev_ {
struct hn_softc *sc;
/* Send buffer allocated by us but manages by NetVSP */
void *send_buf;
uint32_t send_buf_size;
uint32_t send_buf_gpadl_handle;
uint32_t send_section_size;
uint32_t send_section_count;
unsigned long bitsmap_words;
unsigned long *send_section_bitsmap;
/* Holds rndis device info */
void *extension;
struct hyperv_dma txbuf_dma;
} netvsc_dev;
struct vmbus_channel;
@@ -255,12 +243,6 @@ typedef void (*pfn_on_send_rx_completion)(struct vmbus_channel *, void *);
#define TRANSPORT_TYPE_IPV6_TCP ((TYPE_IPV6 << 16) | TYPE_TCP)
#define TRANSPORT_TYPE_IPV6_UDP ((TYPE_IPV6 << 16) | TYPE_UDP)
#ifdef __LP64__
#define BITS_PER_LONG 64
#else
#define BITS_PER_LONG 32
#endif
typedef struct {
uint8_t mac_addr[6]; /* Assumption unsigned long */
uint8_t link_state;
@@ -333,7 +315,7 @@ struct hn_tx_ring {
struct vmbus_channel *hn_chan;
int hn_direct_tx_size;
int hn_tx_chimney_size;
int hn_chim_size;
bus_dma_tag_t hn_tx_data_dtag;
uint64_t hn_csum_assist;
@@ -382,8 +364,13 @@ typedef struct hn_softc {
int hn_tx_ring_inuse;
struct hn_tx_ring *hn_tx_ring;
uint8_t *hn_chim;
u_long *hn_chim_bmap;
int hn_chim_bmap_cnt;
int hn_chim_cnt;
int hn_chim_szmax;
int hn_cpu;
int hn_tx_chimney_max;
struct taskqueue *hn_tx_taskq;
struct sysctl_oid *hn_tx_sysctl_tree;
struct sysctl_oid *hn_rx_sysctl_tree;
@@ -394,9 +381,13 @@ typedef struct hn_softc {
void *hn_rxbuf;
uint32_t hn_rxbuf_gpadl;
struct hyperv_dma hn_rxbuf_dma;
uint32_t hn_chim_gpadl;
struct hyperv_dma hn_chim_dma;
} hn_softc_t;
#define HN_FLAG_RXBUF_CONNECTED 0x0001
#define HN_FLAG_CHIM_CONNECTED 0x0002
/*
* Externs
@@ -411,7 +402,6 @@ int hv_nv_on_device_remove(struct hn_softc *sc,
boolean_t destroy_channel);
int hv_nv_on_send(struct vmbus_channel *chan, uint32_t rndis_mtype,
struct hn_send_ctx *sndc, struct vmbus_gpa *gpa, int gpa_cnt);
int hv_nv_get_next_send_section(netvsc_dev *net_dev);
void hv_nv_subchan_attach(struct vmbus_channel *chan,
struct hn_rx_ring *rxr);


@@ -328,7 +328,7 @@ static int hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS);
#endif
static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_chim_size_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
@@ -344,7 +344,7 @@ static void hn_stop_tx_tasks(struct hn_softc *);
static int hn_encap(struct hn_tx_ring *, struct hn_txdesc *, struct mbuf **);
static int hn_create_rx_data(struct hn_softc *sc, int);
static void hn_destroy_rx_data(struct hn_softc *sc);
static void hn_set_tx_chimney_size(struct hn_softc *, int);
static void hn_set_chim_size(struct hn_softc *, int);
static void hn_channel_attach(struct hn_softc *, struct vmbus_channel *);
static void hn_subchan_attach(struct hn_softc *, struct vmbus_channel *);
static void hn_subchan_setup(struct hn_softc *);
@@ -606,11 +606,10 @@ netvsc_attach(device_t dev)
ifp->if_hw_tsomaxsegcount, ifp->if_hw_tsomaxsegsize);
#endif
sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max);
hn_set_chim_size(sc, sc->hn_chim_szmax);
if (hn_tx_chimney_size > 0 &&
hn_tx_chimney_size < sc->hn_tx_chimney_max)
hn_set_tx_chimney_size(sc, hn_tx_chimney_size);
hn_tx_chimney_size < sc->hn_chim_szmax)
hn_set_chim_size(sc, hn_tx_chimney_size);
SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
@@ -796,14 +795,14 @@ hn_txeof(struct hn_tx_ring *txr)
}
static void
hn_tx_done(struct hn_send_ctx *sndc, struct netvsc_dev_ *net_dev,
hn_tx_done(struct hn_send_ctx *sndc, struct hn_softc *sc,
struct vmbus_channel *chan, const void *data __unused, int dlen __unused)
{
struct hn_txdesc *txd = sndc->hn_cbarg;
struct hn_tx_ring *txr;
if (sndc->hn_chim_idx != HN_NVS_CHIM_IDX_INVALID)
hn_chim_free(net_dev, sndc->hn_chim_idx);
hn_chim_free(sc, sndc->hn_chim_idx);
txr = txd->txr;
KASSERT(txr->hn_chan == chan,
@@ -986,16 +985,12 @@ hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
/*
* Chimney send, if the packet could fit into one chimney buffer.
*/
if (tot_data_buf_len < txr->hn_tx_chimney_size) {
netvsc_dev *net_dev = txr->hn_sc->net_dev;
if (tot_data_buf_len < txr->hn_chim_size) {
txr->hn_tx_chimney_tried++;
send_buf_section_idx =
hv_nv_get_next_send_section(net_dev);
send_buf_section_idx = hn_chim_alloc(txr->hn_sc);
if (send_buf_section_idx != HN_NVS_CHIM_IDX_INVALID) {
uint8_t *dest = ((uint8_t *)net_dev->send_buf +
(send_buf_section_idx *
net_dev->send_section_size));
uint8_t *dest = txr->hn_sc->hn_chim +
(send_buf_section_idx * txr->hn_sc->hn_chim_szmax);
memcpy(dest, rndis_mesg, rndis_msg_size);
dest += rndis_msg_size;
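Since every section lives in one flat, host-visible buffer, choosing a section is plain pointer arithmetic: the allocated index times hn_chim_szmax gives the offset, the RNDIS message is copied in first, and the packet data follows it (in code not shown in this hunk). A toy model of that addressing, with sizes and names invented for the example:

/* Toy model of the chimney copy path: section index -> offset into the flat TXBUF. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	enum { SZMAX = 6144, NSECT = 8 };	/* stand-ins for hn_chim_szmax / hn_chim_cnt */
	static uint8_t chim[NSECT * SZMAX];	/* stand-in for the shared sc->hn_chim TXBUF */
	const char rndis_hdr[] = "rndis-message";
	const char payload[] = "packet-data";
	uint32_t idx = 5;			/* section index returned by the allocator */

	/* RNDIS message first, packet data immediately after, within one section. */
	uint8_t *dest = chim + (size_t)idx * SZMAX;
	memcpy(dest, rndis_hdr, sizeof(rndis_hdr));
	memcpy(dest + sizeof(rndis_hdr), payload, sizeof(payload));

	printf("section %u occupies offsets [%zu, %zu)\n",
	    (unsigned int)idx, (size_t)idx * SZMAX, (size_t)(idx + 1) * SZMAX);
	return (0);
}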
@@ -1617,10 +1612,8 @@ hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
hn_subchan_setup(sc);
}
sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
if (sc->hn_tx_ring[0].hn_tx_chimney_size >
sc->hn_tx_chimney_max)
hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max);
if (sc->hn_tx_ring[0].hn_chim_size > sc->hn_chim_szmax)
hn_set_chim_size(sc, sc->hn_chim_szmax);
hn_ifinit_locked(sc);
@@ -1984,20 +1977,20 @@ hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS)
}
static int
hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS)
hn_chim_size_sysctl(SYSCTL_HANDLER_ARGS)
{
struct hn_softc *sc = arg1;
int chimney_size, error;
int chim_size, error;
chimney_size = sc->hn_tx_ring[0].hn_tx_chimney_size;
error = sysctl_handle_int(oidp, &chimney_size, 0, req);
chim_size = sc->hn_tx_ring[0].hn_chim_size;
error = sysctl_handle_int(oidp, &chim_size, 0, req);
if (error || req->newptr == NULL)
return error;
if (chimney_size > sc->hn_tx_chimney_max || chimney_size <= 0)
if (chim_size > sc->hn_chim_szmax || chim_size <= 0)
return EINVAL;
hn_set_tx_chimney_size(sc, chimney_size);
hn_set_chim_size(sc, chim_size);
return 0;
}
@@ -2359,6 +2352,11 @@ hn_destroy_rx_data(struct hn_softc *sc)
{
int i;
if (sc->hn_rxbuf != NULL) {
hyperv_dmamem_free(&sc->hn_rxbuf_dma, sc->hn_rxbuf);
sc->hn_rxbuf = NULL;
}
if (sc->hn_rx_ring_cnt == 0)
return;
@@ -2375,11 +2373,6 @@ hn_destroy_rx_data(struct hn_softc *sc)
sc->hn_rx_ring_cnt = 0;
sc->hn_rx_ring_inuse = 0;
if (sc->hn_rxbuf != NULL) {
hyperv_dmamem_free(&sc->hn_rxbuf_dma, sc->hn_rxbuf);
sc->hn_rxbuf = NULL;
}
}
static int
@@ -2639,6 +2632,19 @@ hn_create_tx_data(struct hn_softc *sc, int ring_cnt)
struct sysctl_ctx_list *ctx;
int i;
/*
* Create TXBUF for chimney sending.
*
* NOTE: It is shared by all channels.
*/
sc->hn_chim = hyperv_dmamem_alloc(bus_get_dma_tag(sc->hn_dev),
PAGE_SIZE, 0, NETVSC_SEND_BUFFER_SIZE, &sc->hn_chim_dma,
BUS_DMA_WAITOK | BUS_DMA_ZERO);
if (sc->hn_chim == NULL) {
device_printf(sc->hn_dev, "allocate txbuf failed\n");
return (ENOMEM);
}
sc->hn_tx_ring_cnt = ring_cnt;
sc->hn_tx_ring_inuse = sc->hn_tx_ring_cnt;
@@ -2688,12 +2694,11 @@ hn_create_tx_data(struct hn_softc *sc, int ring_cnt)
CTLFLAG_RD, &sc->hn_tx_ring[0].hn_txdesc_cnt, 0,
"# of total TX descs");
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_chimney_max",
CTLFLAG_RD, &sc->hn_tx_chimney_max, 0,
CTLFLAG_RD, &sc->hn_chim_szmax, 0,
"Chimney send packet size upper boundary");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_size",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
hn_tx_chimney_size_sysctl,
"I", "Chimney send packet size limit");
hn_chim_size_sysctl, "I", "Chimney send packet size limit");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "direct_tx_size",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
__offsetof(struct hn_tx_ring, hn_direct_tx_size),
@@ -2714,13 +2719,13 @@ hn_create_tx_data(struct hn_softc *sc, int ring_cnt)
}
static void
hn_set_tx_chimney_size(struct hn_softc *sc, int chimney_size)
hn_set_chim_size(struct hn_softc *sc, int chim_size)
{
int i;
NV_LOCK(sc);
for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
sc->hn_tx_ring[i].hn_tx_chimney_size = chimney_size;
sc->hn_tx_ring[i].hn_chim_size = chim_size;
NV_UNLOCK(sc);
}
@@ -2729,6 +2734,11 @@ hn_destroy_tx_data(struct hn_softc *sc)
{
int i;
if (sc->hn_chim != NULL) {
hyperv_dmamem_free(&sc->hn_chim_dma, sc->hn_chim);
sc->hn_chim = NULL;
}
if (sc->hn_tx_ring_cnt == 0)
return;


@@ -86,10 +86,10 @@ hv_rf_send_offload_request(struct hn_softc *sc,
rndis_offload_params *offloads);
static void hn_rndis_sent_halt(struct hn_send_ctx *sndc,
struct netvsc_dev_ *net_dev, struct vmbus_channel *chan,
struct hn_softc *sc, struct vmbus_channel *chan,
const void *data, int dlen);
static void hn_rndis_sent_cb(struct hn_send_ctx *sndc,
struct netvsc_dev_ *net_dev, struct vmbus_channel *chan,
struct hn_softc *sc, struct vmbus_channel *chan,
const void *data, int dlen);
/*
@@ -240,7 +240,7 @@ static int
hv_rf_send_request(rndis_device *device, rndis_request *request,
uint32_t message_type)
{
netvsc_dev *net_dev = device->net_dev;
struct hn_softc *sc = device->net_dev->sc;
uint32_t send_buf_section_idx, tot_data_buf_len;
struct vmbus_gpa gpa[2];
int gpa_cnt, send_buf_section_size;
@@ -269,11 +269,11 @@ hv_rf_send_request(rndis_device *device, rndis_request *request,
else
cb = hn_rndis_sent_halt;
if (tot_data_buf_len < net_dev->send_section_size) {
send_buf_section_idx = hv_nv_get_next_send_section(net_dev);
if (tot_data_buf_len < sc->hn_chim_szmax) {
send_buf_section_idx = hn_chim_alloc(sc);
if (send_buf_section_idx != HN_NVS_CHIM_IDX_INVALID) {
char *dest = ((char *)net_dev->send_buf +
send_buf_section_idx * net_dev->send_section_size);
uint8_t *dest = sc->hn_chim +
(send_buf_section_idx * sc->hn_chim_szmax);
memcpy(dest, &request->request_msg, request->request_msg.msg_len);
send_buf_section_size = tot_data_buf_len;
@@ -288,8 +288,8 @@ hv_rf_send_request(rndis_device *device, rndis_request *request,
sendit:
hn_send_ctx_init(&request->send_ctx, cb, request,
send_buf_section_idx, send_buf_section_size);
return hv_nv_on_send(device->net_dev->sc->hn_prichan,
HN_NVS_RNDIS_MTYPE_CTRL, &request->send_ctx, gpa, gpa_cnt);
return hv_nv_on_send(sc->hn_prichan, HN_NVS_RNDIS_MTYPE_CTRL,
&request->send_ctx, gpa, gpa_cnt);
}
/*
@@ -1247,23 +1247,23 @@ hv_rf_on_close(struct hn_softc *sc)
}
static void
hn_rndis_sent_cb(struct hn_send_ctx *sndc, struct netvsc_dev_ *net_dev,
hn_rndis_sent_cb(struct hn_send_ctx *sndc, struct hn_softc *sc,
struct vmbus_channel *chan __unused, const void *data __unused,
int dlen __unused)
{
if (sndc->hn_chim_idx != HN_NVS_CHIM_IDX_INVALID)
hn_chim_free(net_dev, sndc->hn_chim_idx);
hn_chim_free(sc, sndc->hn_chim_idx);
}
static void
hn_rndis_sent_halt(struct hn_send_ctx *sndc, struct netvsc_dev_ *net_dev,
hn_rndis_sent_halt(struct hn_send_ctx *sndc, struct hn_softc *sc,
struct vmbus_channel *chan __unused, const void *data __unused,
int dlen __unused)
{
rndis_request *request = sndc->hn_cbarg;
if (sndc->hn_chim_idx != HN_NVS_CHIM_IDX_INVALID)
hn_chim_free(net_dev, sndc->hn_chim_idx);
hn_chim_free(sc, sndc->hn_chim_idx);
/*
* Notify hv_rf_halt_device() about halt completion.


@@ -34,13 +34,13 @@
#include <dev/hyperv/include/vmbus.h>
#include <dev/hyperv/netvsc/if_hnreg.h>
struct netvsc_dev_;
struct hn_softc;
struct vmbus_channel;
struct hn_send_ctx;
typedef void (*hn_sent_callback_t)
(struct hn_send_ctx *, struct netvsc_dev_ *,
(struct hn_send_ctx *, struct hn_softc *,
struct vmbus_channel *, const void *, int);
struct hn_send_ctx {
@@ -108,8 +108,9 @@ hn_nvs_send_sglist(struct vmbus_channel *chan, struct vmbus_gpa sg[], int sglen,
}
void hn_nvs_sent_xact(struct hn_send_ctx *sndc,
struct netvsc_dev_ *net_dev, struct vmbus_channel *chan,
struct hn_softc *sc, struct vmbus_channel *chan,
const void *data, int dlen);
void hn_chim_free(struct netvsc_dev_ *net_dev, uint32_t chim_idx);
uint32_t hn_chim_alloc(struct hn_softc *sc);
void hn_chim_free(struct hn_softc *sc, uint32_t chim_idx);
#endif /* !_IF_HNVAR_H_ */