hyperv/hn: Make transmission path channel aware

Chimney sending buffer still needs conversion, which will be done along
with the upcoming vRSS support.

MFC after:	1 week
Sponsored by:	Microsoft OSTC
Differential Revision:	https://reviews.freebsd.org/D5457
commit 5f609a0812
parent c10a0b9e69
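In brief, the change below makes each TX ring remember its VMBus channel and hands that channel straight to the netvsc send routine, instead of re-deriving it from the hv_device on every packet. A minimal sketch of the resulting plumbing, using only names that appear in the diff (it leans on the driver's existing headers and is not a standalone build):

struct hn_tx_ring {
	/* ... existing members elided ... */
	struct hv_vmbus_channel	*hn_chan;	/* channel backing this TX ring */
};

/* netvsc send entry point now takes the channel directly */
int	hv_nv_on_send(struct hv_vmbus_channel *chan, netvsc_packet *pkt);

/* caller side, cf. hn_send_pkt() below:
 *	error = hv_nv_on_send(txr->hn_chan, &txd->netvsc_pkt);
 */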
@@ -809,7 +809,7 @@ hv_nv_on_send_completion(netvsc_dev *net_dev,
  * Returns 0 on success, non-zero on failure.
  */
 int
-hv_nv_on_send(struct hv_device *device, netvsc_packet *pkt)
+hv_nv_on_send(struct hv_vmbus_channel *chan, netvsc_packet *pkt)
 {
 	nvsp_msg send_msg;
 	int ret;
@@ -829,11 +829,11 @@ hv_nv_on_send(struct hv_device *device, netvsc_packet *pkt)
 	    pkt->send_buf_section_size;
 
 	if (pkt->page_buf_count) {
-		ret = hv_vmbus_channel_send_packet_pagebuffer(device->channel,
+		ret = hv_vmbus_channel_send_packet_pagebuffer(chan,
 		    pkt->page_buffers, pkt->page_buf_count,
 		    &send_msg, sizeof(nvsp_msg), (uint64_t)(uintptr_t)pkt);
 	} else {
-		ret = hv_vmbus_channel_send_packet(device->channel,
+		ret = hv_vmbus_channel_send_packet(chan,
 		    &send_msg, sizeof(nvsp_msg), (uint64_t)(uintptr_t)pkt,
 		    HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
 		    HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -1017,6 +1017,8 @@ struct hn_rx_ring {
 #define HN_TRUST_HCSUM_TCP	0x0002
 #define HN_TRUST_HCSUM_UDP	0x0004
 
+struct hv_vmbus_channel;
+
 struct hn_tx_ring {
 #ifndef HN_USE_TXDESC_BUFRING
 	struct mtx	hn_txlist_spin;
@@ -1039,6 +1041,7 @@ struct hn_tx_ring {
 
 	struct mtx	hn_tx_lock;
 	struct hn_softc *hn_sc;
+	struct hv_vmbus_channel *hn_chan;
 
 	int		hn_direct_tx_size;
 	int		hn_tx_chimney_size;
@@ -1096,7 +1099,7 @@ netvsc_dev *hv_nv_on_device_add(struct hv_device *device,
     void *additional_info);
 int hv_nv_on_device_remove(struct hv_device *device,
     boolean_t destroy_channel);
-int hv_nv_on_send(struct hv_device *device, netvsc_packet *pkt);
+int hv_nv_on_send(struct hv_vmbus_channel *chan, netvsc_packet *pkt);
 int hv_nv_get_next_send_section(netvsc_dev *net_dev);
 
 #endif	/* __HV_NET_VSC_H__ */
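For orientation, a trimmed view of struct hn_tx_ring after the header changes above; only the members visible in these hunks are shown, everything else is elided:

struct hv_vmbus_channel;			/* forward declaration added above */

struct hn_tx_ring {
	/* ... */
	struct mtx		hn_tx_lock;
	struct hn_softc		*hn_sc;
	struct hv_vmbus_channel	*hn_chan;	/* new: this ring's VMBus channel */

	int			hn_direct_tx_size;
	int			hn_tx_chimney_size;
	/* ... */
};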
@@ -436,6 +436,7 @@ netvsc_attach(device_t dev)
 	chan = device_ctx->channel;
 	chan->hv_chan_rxr = &sc->hn_rx_ring[0];
 	chan->hv_chan_txr = &sc->hn_tx_ring[0];
+	sc->hn_tx_ring[0].hn_chan = chan;
 
 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 	ifp->if_dunit = unit;
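The attach path now records the association in both directions: the primary channel already pointed at RX/TX ring 0 (presumably so the channel callback can find its rings), and ring 0 now points back at the channel for transmission. The same four lines, annotated; only the last assignment is new in this commit:

	chan = device_ctx->channel;		/* primary channel */
	chan->hv_chan_rxr = &sc->hn_rx_ring[0];	/* channel -> RX ring (existing) */
	chan->hv_chan_txr = &sc->hn_tx_ring[0];	/* channel -> TX ring (existing) */
	sc->hn_tx_ring[0].hn_chan = chan;	/* TX ring -> channel (new) */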
@@ -854,6 +855,8 @@ hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
 
 	/*
 	 * Chimney send, if the packet could fit into one chimney buffer.
+	 *
+	 * TODO: vRSS, chimney buffer should be per-channel.
 	 */
 	if (packet->tot_data_buf_len < txr->hn_tx_chimney_size) {
 		netvsc_dev *net_dev = txr->hn_sc->net_dev;
@@ -940,8 +943,7 @@ hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
  * associated w/ the txd will _not_ be freed.
  */
 static int
-hn_send_pkt(struct ifnet *ifp, struct hv_device *device_ctx,
-    struct hn_tx_ring *txr, struct hn_txdesc *txd)
+hn_send_pkt(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd)
 {
 	int error, send_failed = 0;
 
@@ -950,7 +952,7 @@ hn_send_pkt(struct ifnet *ifp, struct hv_device *device_ctx,
 	 * Make sure that txd is not freed before ETHER_BPF_MTAP.
 	 */
 	hn_txdesc_hold(txd);
-	error = hv_nv_on_send(device_ctx, &txd->netvsc_pkt);
+	error = hv_nv_on_send(txr->hn_chan, &txd->netvsc_pkt);
 	if (!error) {
 		ETHER_BPF_MTAP(ifp, txd->m);
 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
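Taken together, the two hunks above leave the per-packet path with no hv_device reference at all. A condensed sketch of the resulting function (the send_failed retry path and txd release are elided):

static int
hn_send_pkt(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd)
{
	int error;

	/* Make sure that txd is not freed before ETHER_BPF_MTAP. */
	hn_txdesc_hold(txd);
	/* The ring already knows its channel; no vmbus_get_devctx() needed. */
	error = hv_nv_on_send(txr->hn_chan, &txd->netvsc_pkt);
	if (!error) {
		ETHER_BPF_MTAP(ifp, txd->m);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}
	/* ... failure handling elided ... */
	return (error);
}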
@@ -1010,7 +1012,6 @@ hn_start_locked(struct hn_tx_ring *txr, int len)
 {
 	struct hn_softc *sc = txr->hn_sc;
 	struct ifnet *ifp = sc->hn_ifp;
-	struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);
 
 	KASSERT(hn_use_if_start,
 	    ("hn_start_locked is called, when if_start is disabled"));
@@ -1054,7 +1055,7 @@ hn_start_locked(struct hn_tx_ring *txr, int len)
 			continue;
 		}
 
-		error = hn_send_pkt(ifp, device_ctx, txr, txd);
+		error = hn_send_pkt(ifp, txr, txd);
 		if (__predict_false(error)) {
 			/* txd is freed, but m_head is not */
 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
@@ -2497,7 +2498,6 @@ hn_xmit(struct hn_tx_ring *txr, int len)
 {
 	struct hn_softc *sc = txr->hn_sc;
 	struct ifnet *ifp = sc->hn_ifp;
-	struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);
 	struct mbuf *m_head;
 
 	mtx_assert(&txr->hn_tx_lock, MA_OWNED);
@@ -2536,7 +2536,7 @@ hn_xmit(struct hn_tx_ring *txr, int len)
 			continue;
 		}
 
-		error = hn_send_pkt(ifp, device_ctx, txr, txd);
+		error = hn_send_pkt(ifp, txr, txd);
 		if (__predict_false(error)) {
 			/* txd is freed, but m_head is not */
 			drbr_putback(ifp, txr->hn_mbuf_br, m_head);
@@ -250,7 +250,7 @@ hv_rf_send_request(rndis_device *device, rndis_request *request,
 	    NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
 	packet->send_buf_section_size = 0;
 
-	ret = hv_nv_on_send(device->net_dev->dev, packet);
+	ret = hv_nv_on_send(device->net_dev->dev->channel, packet);
 
 	return (ret);
 }
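The RNDIS control path above has no TX ring to borrow a channel from, so it resolves the channel explicitly through the hv_device it already holds; only the argument expression changes:

	/* control requests go out on the channel hanging off the hv_device */
	ret = hv_nv_on_send(device->net_dev->dev->channel, packet);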