Mechanically convert Xen netfront/netback(4) to IfAPI

Reviewed by:	zlei
Sponsored by:	Juniper Networks, Inc.
Differential Revision: https://reviews.freebsd.org/D37800
This commit is contained in:
Justin Hibbits 2022-03-01 11:52:45 -06:00
parent 7814374b7c
commit 02f3b17fa5
2 changed files with 153 additions and 150 deletions

View File

@ -147,20 +147,20 @@ static void xnb_attach_failed(struct xnb_softc *xnb,
static int xnb_shutdown(struct xnb_softc *xnb);
static int create_netdev(device_t dev);
static int xnb_detach(device_t dev);
static int xnb_ifmedia_upd(struct ifnet *ifp);
static void xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int xnb_ifmedia_upd(if_t ifp);
static void xnb_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr);
static void xnb_intr(void *arg);
static int xnb_send(netif_rx_back_ring_t *rxb, domid_t otherend,
const struct mbuf *mbufc, gnttab_copy_table gnttab);
static int xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend,
struct mbuf **mbufc, struct ifnet *ifnet,
struct mbuf **mbufc, if_t ifnet,
gnttab_copy_table gnttab);
static int xnb_ring2pkt(struct xnb_pkt *pkt,
const netif_tx_back_ring_t *tx_ring,
RING_IDX start);
static void xnb_txpkt2rsp(const struct xnb_pkt *pkt,
netif_tx_back_ring_t *ring, int error);
static struct mbuf *xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp);
static struct mbuf *xnb_pkt2mbufc(const struct xnb_pkt *pkt, if_t ifp);
static int xnb_txpkt2gnttab(const struct xnb_pkt *pkt,
struct mbuf *mbufc,
gnttab_copy_table gnttab,
@ -180,9 +180,9 @@ static int xnb_rxpkt2rsp(const struct xnb_pkt *pkt,
const gnttab_copy_table gnttab, int n_entries,
netif_rx_back_ring_t *ring);
static void xnb_stop(struct xnb_softc*);
static int xnb_ioctl(struct ifnet*, u_long, caddr_t);
static void xnb_start_locked(struct ifnet*);
static void xnb_start(struct ifnet*);
static int xnb_ioctl(if_t, u_long, caddr_t);
static void xnb_start_locked(if_t);
static void xnb_start(if_t);
static void xnb_ifinit_locked(struct xnb_softc*);
static void xnb_ifinit(void*);
#ifdef XNB_DEBUG
@ -396,7 +396,7 @@ struct xnb_softc {
struct ifmedia sc_media;
/** Media carrier info */
struct ifnet *xnb_ifp;
if_t xnb_ifp;
/** Our own private carrier state */
unsigned carrier;
@ -1180,7 +1180,7 @@ xnb_setup_sysctl(struct xnb_softc *xnb)
int
create_netdev(device_t dev)
{
struct ifnet *ifp;
if_t ifp;
struct xnb_softc *xnb;
int err = 0;
uint32_t handle;
@ -1224,18 +1224,18 @@ create_netdev(device_t dev)
if (err == 0) {
/* Set up ifnet structure */
ifp = xnb->xnb_ifp = if_alloc(IFT_ETHER);
ifp->if_softc = xnb;
if_setsoftc(ifp, xnb);
if_initname(ifp, xnb->if_name, IF_DUNIT_NONE);
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = xnb_ioctl;
ifp->if_start = xnb_start;
ifp->if_init = xnb_ifinit;
ifp->if_mtu = ETHERMTU;
ifp->if_snd.ifq_maxlen = NET_RX_RING_SIZE - 1;
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setioctlfn(ifp, xnb_ioctl);
if_setstartfn(ifp, xnb_start);
if_setinitfn(ifp, xnb_ifinit);
if_setmtu(ifp, ETHERMTU);
if_setsendqlen(ifp, NET_RX_RING_SIZE - 1);
ifp->if_hwassist = XNB_CSUM_FEATURES;
ifp->if_capabilities = IFCAP_HWCSUM;
ifp->if_capenable = IFCAP_HWCSUM;
if_sethwassist(ifp, XNB_CSUM_FEATURES);
if_setcapabilities(ifp, IFCAP_HWCSUM);
if_setcapenable(ifp, IFCAP_HWCSUM);
ether_ifattach(ifp, xnb->mac);
xnb->carrier = 0;
@ -1425,7 +1425,7 @@ static void
xnb_intr(void *arg)
{
struct xnb_softc *xnb;
struct ifnet *ifp;
if_t ifp;
netif_tx_back_ring_t *txb;
RING_IDX req_prod_local;
@ -1449,7 +1449,7 @@ xnb_intr(void *arg)
break;
/* Send the packet to the generic network stack */
(*xnb->xnb_ifp->if_input)(xnb->xnb_ifp, mbufc);
if_input(xnb->xnb_ifp, mbufc);
}
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(txb, notify);
@ -1653,7 +1653,7 @@ xnb_txpkt2rsp(const struct xnb_pkt *pkt, netif_tx_back_ring_t *ring,
* NULL on failure
*/
static struct mbuf*
xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp)
xnb_pkt2mbufc(const struct xnb_pkt *pkt, if_t ifp)
{
/**
* \todo consider using a memory pool for mbufs instead of
@ -1805,7 +1805,7 @@ xnb_update_mbufc(struct mbuf *mbufc, const gnttab_copy_table gnttab,
*/
static int
xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend, struct mbuf **mbufc,
struct ifnet *ifnet, gnttab_copy_table gnttab)
if_t ifnet, gnttab_copy_table gnttab)
{
struct xnb_pkt pkt;
/* number of tx requests consumed to build the last packet */
@ -2189,18 +2189,18 @@ xnb_add_mbuf_cksum(struct mbuf *mbufc)
static void
xnb_stop(struct xnb_softc *xnb)
{
struct ifnet *ifp;
if_t ifp;
mtx_assert(&xnb->sc_lock, MA_OWNED);
ifp = xnb->xnb_ifp;
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
if_link_state_change(ifp, LINK_STATE_DOWN);
}
static int
xnb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
xnb_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
struct xnb_softc *xnb = ifp->if_softc;
struct xnb_softc *xnb = if_getsoftc(ifp);
struct ifreq *ifr = (struct ifreq*) data;
#ifdef INET
struct ifaddr *ifa = (struct ifaddr*)data;
@ -2210,10 +2210,10 @@ xnb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
switch (cmd) {
case SIOCSIFFLAGS:
mtx_lock(&xnb->sc_lock);
if (ifp->if_flags & IFF_UP) {
if (if_getflags(ifp) & IFF_UP) {
xnb_ifinit_locked(xnb);
} else {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
xnb_stop(xnb);
}
}
@ -2227,14 +2227,14 @@ xnb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
#ifdef INET
mtx_lock(&xnb->sc_lock);
if (ifa->ifa_addr->sa_family == AF_INET) {
ifp->if_flags |= IFF_UP;
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING |
IFF_DRV_OACTIVE);
if_setflagbits(ifp, IFF_UP, 0);
if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
if_setdrvflagbits(ifp, 0,
IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
if_link_state_change(ifp,
LINK_STATE_DOWN);
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
if_link_state_change(ifp,
LINK_STATE_UP);
}
@ -2251,16 +2251,16 @@ xnb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
case SIOCSIFCAP:
mtx_lock(&xnb->sc_lock);
if (ifr->ifr_reqcap & IFCAP_TXCSUM) {
ifp->if_capenable |= IFCAP_TXCSUM;
ifp->if_hwassist |= XNB_CSUM_FEATURES;
if_setcapenablebit(ifp, IFCAP_TXCSUM, 0);
if_sethwassistbits(ifp, XNB_CSUM_FEATURES, 0);
} else {
ifp->if_capenable &= ~(IFCAP_TXCSUM);
ifp->if_hwassist &= ~(XNB_CSUM_FEATURES);
if_setcapenablebit(ifp, 0, IFCAP_TXCSUM);
if_sethwassistbits(ifp, 0, XNB_CSUM_FEATURES);
}
if ((ifr->ifr_reqcap & IFCAP_RXCSUM)) {
ifp->if_capenable |= IFCAP_RXCSUM;
if_setcapenablebit(ifp, IFCAP_RXCSUM, 0);
} else {
ifp->if_capenable &= ~(IFCAP_RXCSUM);
if_setcapenablebit(ifp, 0, IFCAP_RXCSUM);
}
/*
* TODO enable TSO4 and LRO once we no longer need
@ -2268,30 +2268,30 @@ xnb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
*/
#if 0
if (ifr->if_reqcap |= IFCAP_TSO4) {
if (IFCAP_TXCSUM & ifp->if_capenable) {
if (IFCAP_TXCSUM & if_getcapenable(ifp)) {
printf("xnb: Xen netif requires that "
"TXCSUM be enabled in order "
"to use TSO4\n");
error = EINVAL;
} else {
ifp->if_capenable |= IFCAP_TSO4;
ifp->if_hwassist |= CSUM_TSO;
if_setcapenablebit(ifp, IFCAP_TSO4, 0);
if_sethwassistbits(ifp, CSUM_TSO, 0);
}
} else {
ifp->if_capenable &= ~(IFCAP_TSO4);
ifp->if_hwassist &= ~(CSUM_TSO);
if_setcapenablebit(ifp, 0, IFCAP_TSO4);
if_sethwassistbits(ifp, 0, (CSUM_TSO));
}
if (ifr->ifreqcap |= IFCAP_LRO) {
ifp->if_capenable |= IFCAP_LRO;
if_setcapenablebit(ifp, IFCAP_LRO, 0);
} else {
ifp->if_capenable &= ~(IFCAP_LRO);
if_setcapenablebit(ifp, 0, IFCAP_LRO);
}
#endif
mtx_unlock(&xnb->sc_lock);
break;
case SIOCSIFMTU:
ifp->if_mtu = ifr->ifr_mtu;
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
if_setmtu(ifp, ifr->ifr_mtu);
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
xnb_ifinit(xnb);
break;
case SIOCADDMULTI:
@ -2309,14 +2309,14 @@ xnb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
}
static void
xnb_start_locked(struct ifnet *ifp)
xnb_start_locked(if_t ifp)
{
netif_rx_back_ring_t *rxb;
struct xnb_softc *xnb;
struct mbuf *mbufc;
RING_IDX req_prod_local;
xnb = ifp->if_softc;
xnb = if_getsoftc(ifp);
rxb = &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring;
if (!xnb->carrier)
@ -2330,7 +2330,7 @@ xnb_start_locked(struct ifnet *ifp)
for (;;) {
int error;
IF_DEQUEUE(&ifp->if_snd, mbufc);
mbufc = if_dequeue(ifp);
if (mbufc == NULL)
break;
error = xnb_send(rxb, xnb->otherend_id, mbufc,
@ -2342,7 +2342,7 @@ xnb_start_locked(struct ifnet *ifp)
* Requeue pkt and send when space is
* available.
*/
IF_PREPEND(&ifp->if_snd, mbufc);
if_sendq_prepend(ifp, mbufc);
/*
* Perhaps the frontend missed an IRQ
* and went to sleep. Notify it to wake
@ -2414,11 +2414,11 @@ xnb_send(netif_rx_back_ring_t *ring, domid_t otherend, const struct mbuf *mbufc,
}
static void
xnb_start(struct ifnet *ifp)
xnb_start(if_t ifp)
{
struct xnb_softc *xnb;
xnb = ifp->if_softc;
xnb = if_getsoftc(ifp);
mtx_lock(&xnb->rx_lock);
xnb_start_locked(ifp);
mtx_unlock(&xnb->rx_lock);
@ -2428,19 +2428,19 @@ xnb_start(struct ifnet *ifp)
static void
xnb_ifinit_locked(struct xnb_softc *xnb)
{
struct ifnet *ifp;
if_t ifp;
ifp = xnb->xnb_ifp;
mtx_assert(&xnb->sc_lock, MA_OWNED);
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
return;
xnb_stop(xnb);
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
if_link_state_change(ifp, LINK_STATE_UP);
}
@ -2459,7 +2459,7 @@ xnb_ifinit(void *xsc)
* state has changed. Since we don't have a physical carrier, we don't care
*/
static int
xnb_ifmedia_upd(struct ifnet *ifp)
xnb_ifmedia_upd(if_t ifp)
{
return (0);
}
@ -2469,7 +2469,7 @@ xnb_ifmedia_upd(struct ifnet *ifp)
* state is. Since we don't have a physical carrier, this is very simple
*/
static void
xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
xnb_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;

View File

@ -129,7 +129,7 @@ static void xn_rxq_intr(struct netfront_rxq *);
static void xn_txq_intr(struct netfront_txq *);
static void xn_intr(void *);
static int xn_assemble_tx_request(struct netfront_txq *, struct mbuf *);
static int xn_ioctl(struct ifnet *, u_long, caddr_t);
static int xn_ioctl(if_t, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
@ -139,15 +139,15 @@ static void netif_free(struct netfront_info *info);
static int netfront_detach(device_t dev);
static int xn_txq_mq_start_locked(struct netfront_txq *, struct mbuf *);
static int xn_txq_mq_start(struct ifnet *, struct mbuf *);
static int xn_txq_mq_start(if_t, struct mbuf *);
static int talk_to_backend(device_t dev, struct netfront_info *info);
static int create_netdev(device_t dev);
static void netif_disconnect_backend(struct netfront_info *info);
static int setup_device(device_t dev, struct netfront_info *info,
unsigned long);
static int xn_ifmedia_upd(struct ifnet *ifp);
static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int xn_ifmedia_upd(if_t ifp);
static void xn_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr);
static int xn_connect(struct netfront_info *);
static void xn_kick_rings(struct netfront_info *);
@ -215,7 +215,7 @@ struct netfront_txq {
};
struct netfront_info {
struct ifnet *xn_ifp;
if_t xn_ifp;
struct mtx sc_lock;
@ -637,14 +637,14 @@ talk_to_backend(device_t dev, struct netfront_info *info)
message = "writing feature-sg";
goto abort_transaction;
}
if ((info->xn_ifp->if_capenable & IFCAP_LRO) != 0) {
if ((if_getcapenable(info->xn_ifp) & IFCAP_LRO) != 0) {
err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
if (err != 0) {
message = "writing feature-gso-tcpv4";
goto abort_transaction;
}
}
if ((info->xn_ifp->if_capenable & IFCAP_RXCSUM) == 0) {
if ((if_getcapenable(info->xn_ifp) & IFCAP_RXCSUM) == 0) {
err = xs_printf(xst, node, "feature-no-csum-offload", "%d", 1);
if (err != 0) {
message = "writing feature-no-csum-offload";
@ -685,7 +685,7 @@ static void
xn_txq_start(struct netfront_txq *txq)
{
struct netfront_info *np = txq->info;
struct ifnet *ifp = np->xn_ifp;
if_t ifp = np->xn_ifp;
XN_TX_LOCK_ASSERT(txq);
if (!drbr_empty(ifp, txq->br))
@ -1007,6 +1007,12 @@ setup_device(device_t dev, struct netfront_info *info,
}
#ifdef INET
/*
 * if_foreach_addr_type() callback used by netfront_send_fake_arp(): emit a
 * gratuitous ARP for one AF_INET address so peers relearn the MAC after a
 * host migration (see the caller's comment below).
 *
 * arg is the if_t of the interface being walked (passed through by the
 * caller); 'count' is the running tally maintained by if_foreach_addr_type()
 * and is unused here.  Returning 1 counts this address as processed.
 */
static u_int
netfront_addr_cb(void *arg, struct ifaddr *a, u_int count)
{
arp_ifinit((if_t)arg, a);
return (1);
}
/**
* If this interface has an ipv4 address, send an arp for it. This
* helps to get the network going again after migrating hosts.
@ -1014,15 +1020,10 @@ setup_device(device_t dev, struct netfront_info *info,
static void
netfront_send_fake_arp(device_t dev, struct netfront_info *info)
{
struct ifnet *ifp;
struct ifaddr *ifa;
if_t ifp;
ifp = info->xn_ifp;
CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
if (ifa->ifa_addr->sa_family == AF_INET) {
arp_ifinit(ifp, ifa);
}
}
if_foreach_addr_type(ifp, AF_INET, netfront_addr_cb, ifp);
}
#endif
@ -1036,7 +1037,7 @@ netfront_backend_changed(device_t dev, XenbusState newstate)
DPRINTK("newstate=%d\n", newstate);
CURVNET_SET(sc->xn_ifp->if_vnet);
CURVNET_SET(if_getvnet(sc->xn_ifp));
switch (newstate) {
case XenbusStateInitialising:
@ -1228,7 +1229,7 @@ xn_release_rx_bufs(struct netfront_rxq *rxq)
static void
xn_rxeof(struct netfront_rxq *rxq)
{
struct ifnet *ifp;
if_t ifp;
struct netfront_info *np = rxq->info;
#if (defined(INET) || defined(INET6))
struct lro_ctrl *lro = &rxq->lro;
@ -1313,16 +1314,16 @@ xn_rxeof(struct netfront_rxq *rxq)
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
#if (defined(INET) || defined(INET6))
/* Use LRO if possible */
if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
if ((if_getcapenable(ifp) & IFCAP_LRO) == 0 ||
lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
/*
* If LRO fails, pass up to the stack
* directly.
*/
(*ifp->if_input)(ifp, m);
if_input(ifp, m);
}
#else
(*ifp->if_input)(ifp, m);
if_input(ifp, m);
#endif
}
@ -1339,7 +1340,7 @@ xn_txeof(struct netfront_txq *txq)
{
RING_IDX i, prod;
unsigned short id;
struct ifnet *ifp;
if_t ifp;
netif_tx_response_t *txr;
struct mbuf *m;
struct netfront_info *np = txq->info;
@ -1387,7 +1388,7 @@ xn_txeof(struct netfront_txq *txq)
txq->mbufs_cnt--;
mbuf_release(m);
/* Only mark the txq active if we've freed up at least one slot to try */
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
}
txq->ring.rsp_cons = prod;
@ -1589,7 +1590,7 @@ static int
xn_assemble_tx_request(struct netfront_txq *txq, struct mbuf *m_head)
{
struct netfront_info *np = txq->info;
struct ifnet *ifp = np->xn_ifp;
if_t ifp = np->xn_ifp;
int otherend_id, error, nfrags;
bus_dma_segment_t *segs = txq->segs;
struct mbuf_xennet *tag;
@ -1768,7 +1769,7 @@ xn_assemble_tx_request(struct netfront_txq *txq, struct mbuf *m_head)
static void
xn_ifinit_locked(struct netfront_info *np)
{
struct ifnet *ifp;
if_t ifp;
int i;
struct netfront_rxq *rxq;
@ -1776,7 +1777,7 @@ xn_ifinit_locked(struct netfront_info *np)
ifp = np->xn_ifp;
if (ifp->if_drv_flags & IFF_DRV_RUNNING || !netfront_carrier_ok(np))
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING || !netfront_carrier_ok(np))
return;
xn_stop(np);
@ -1791,8 +1792,8 @@ xn_ifinit_locked(struct netfront_info *np)
XN_RX_UNLOCK(rxq);
}
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
if_link_state_change(ifp, LINK_STATE_UP);
}
@ -1807,9 +1808,9 @@ xn_ifinit(void *xsc)
}
static int
xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
xn_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
struct netfront_info *sc = ifp->if_softc;
struct netfront_info *sc = if_getsoftc(ifp);
struct ifreq *ifr = (struct ifreq *) data;
device_t dev;
#ifdef INET
@ -1824,8 +1825,8 @@ xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
#ifdef INET
XN_LOCK(sc);
if (ifa->ifa_addr->sa_family == AF_INET) {
ifp->if_flags |= IFF_UP;
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
if_setflagbits(ifp, IFF_UP, 0);
if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
xn_ifinit_locked(sc);
arp_ifinit(ifp, ifa);
XN_UNLOCK(sc);
@ -1838,16 +1839,16 @@ xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
#endif
break;
case SIOCSIFMTU:
if (ifp->if_mtu == ifr->ifr_mtu)
if (if_getmtu(ifp) == ifr->ifr_mtu)
break;
ifp->if_mtu = ifr->ifr_mtu;
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
if_setmtu(ifp, ifr->ifr_mtu);
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
xn_ifinit(sc);
break;
case SIOCSIFFLAGS:
XN_LOCK(sc);
if (ifp->if_flags & IFF_UP) {
if (if_getflags(ifp) & IFF_UP) {
/*
* If only the state of the PROMISC flag changed,
* then just use the 'set promisc mode' command
@ -1858,24 +1859,24 @@ xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
*/
xn_ifinit_locked(sc);
} else {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
xn_stop(sc);
}
}
sc->xn_if_flags = ifp->if_flags;
sc->xn_if_flags = if_getflags(ifp);
XN_UNLOCK(sc);
break;
case SIOCSIFCAP:
mask = ifr->ifr_reqcap ^ ifp->if_capenable;
mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
reinit = 0;
if (mask & IFCAP_TXCSUM) {
ifp->if_capenable ^= IFCAP_TXCSUM;
ifp->if_hwassist ^= XN_CSUM_FEATURES;
if_togglecapenable(ifp, IFCAP_TXCSUM);
if_togglehwassist(ifp, XN_CSUM_FEATURES);
}
if (mask & IFCAP_TSO4) {
ifp->if_capenable ^= IFCAP_TSO4;
ifp->if_hwassist ^= CSUM_TSO;
if_togglecapenable(ifp, IFCAP_TSO4);
if_togglehwassist(ifp, CSUM_TSO);
}
if (mask & (IFCAP_RXCSUM | IFCAP_LRO)) {
@ -1883,9 +1884,9 @@ xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
reinit = 1;
if (mask & IFCAP_RXCSUM)
ifp->if_capenable ^= IFCAP_RXCSUM;
if_togglecapenable(ifp, IFCAP_RXCSUM);
if (mask & IFCAP_LRO)
ifp->if_capenable ^= IFCAP_LRO;
if_togglecapenable(ifp, IFCAP_LRO);
}
if (reinit == 0)
@ -1943,13 +1944,13 @@ xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
static void
xn_stop(struct netfront_info *sc)
{
struct ifnet *ifp;
if_t ifp;
XN_LOCK_ASSERT(sc);
ifp = sc->xn_ifp;
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
if_link_state_change(ifp, LINK_STATE_DOWN);
}
@ -2077,9 +2078,9 @@ xn_query_features(struct netfront_info *np)
"feature-gso-tcpv4", NULL, "%d", &val) != 0)
val = 0;
np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO);
if_setcapabilitiesbit(np->xn_ifp, 0, IFCAP_TSO4 | IFCAP_LRO);
if (val) {
np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO;
if_setcapabilitiesbit(np->xn_ifp, IFCAP_TSO4 | IFCAP_LRO, 0);
printf(" feature-gso-tcp4");
}
@ -2091,9 +2092,9 @@ xn_query_features(struct netfront_info *np)
"feature-no-csum-offload", NULL, "%d", &val) != 0)
val = 0;
np->xn_ifp->if_capabilities |= IFCAP_HWCSUM;
if_setcapabilitiesbit(np->xn_ifp, IFCAP_HWCSUM, 0);
if (val) {
np->xn_ifp->if_capabilities &= ~(IFCAP_HWCSUM);
if_setcapabilitiesbit(np->xn_ifp, 0, IFCAP_HWCSUM);
printf(" feature-no-csum-offload");
}
@ -2107,49 +2108,50 @@ xn_configure_features(struct netfront_info *np)
#if (defined(INET) || defined(INET6))
int i;
#endif
struct ifnet *ifp;
if_t ifp;
ifp = np->xn_ifp;
err = 0;
if ((ifp->if_capenable & ifp->if_capabilities) == ifp->if_capenable) {
if ((if_getcapenable(ifp) & if_getcapabilities(ifp)) == if_getcapenable(ifp)) {
/* Current options are available, no need to do anything. */
return (0);
}
/* Try to preserve as many options as possible. */
cap_enabled = ifp->if_capenable;
ifp->if_capenable = ifp->if_hwassist = 0;
cap_enabled = if_getcapenable(ifp);
if_setcapenable(ifp, 0);
if_sethwassist(ifp, 0);
#if (defined(INET) || defined(INET6))
if ((cap_enabled & IFCAP_LRO) != 0)
for (i = 0; i < np->num_queues; i++)
tcp_lro_free(&np->rxq[i].lro);
if (xn_enable_lro &&
(ifp->if_capabilities & cap_enabled & IFCAP_LRO) != 0) {
ifp->if_capenable |= IFCAP_LRO;
(if_getcapabilities(ifp) & cap_enabled & IFCAP_LRO) != 0) {
if_setcapenablebit(ifp, IFCAP_LRO, 0);
for (i = 0; i < np->num_queues; i++) {
err = tcp_lro_init(&np->rxq[i].lro);
if (err != 0) {
device_printf(np->xbdev,
"LRO initialization failed\n");
ifp->if_capenable &= ~IFCAP_LRO;
if_setcapenablebit(ifp, 0, IFCAP_LRO);
break;
}
np->rxq[i].lro.ifp = ifp;
}
}
if ((ifp->if_capabilities & cap_enabled & IFCAP_TSO4) != 0) {
ifp->if_capenable |= IFCAP_TSO4;
ifp->if_hwassist |= CSUM_TSO;
if ((if_getcapabilities(ifp) & cap_enabled & IFCAP_TSO4) != 0) {
if_setcapenablebit(ifp, IFCAP_TSO4, 0);
if_sethwassistbits(ifp, CSUM_TSO, 0);
}
#endif
if ((ifp->if_capabilities & cap_enabled & IFCAP_TXCSUM) != 0) {
ifp->if_capenable |= IFCAP_TXCSUM;
ifp->if_hwassist |= XN_CSUM_FEATURES;
if ((if_getcapabilities(ifp) & cap_enabled & IFCAP_TXCSUM) != 0) {
if_setcapenablebit(ifp, IFCAP_TXCSUM, 0);
if_sethwassistbits(ifp, XN_CSUM_FEATURES, 0);
}
if ((ifp->if_capabilities & cap_enabled & IFCAP_RXCSUM) != 0)
ifp->if_capenable |= IFCAP_RXCSUM;
if ((if_getcapabilities(ifp) & cap_enabled & IFCAP_RXCSUM) != 0)
if_setcapenablebit(ifp, IFCAP_RXCSUM, 0);
return (err);
}
@ -2158,7 +2160,7 @@ static int
xn_txq_mq_start_locked(struct netfront_txq *txq, struct mbuf *m)
{
struct netfront_info *np;
struct ifnet *ifp;
if_t ifp;
struct buf_ring *br;
int error, notify;
@ -2169,7 +2171,7 @@ xn_txq_mq_start_locked(struct netfront_txq *txq, struct mbuf *m)
XN_TX_LOCK_ASSERT(txq);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
!netfront_carrier_ok(np)) {
if (m != NULL)
error = drbr_enqueue(ifp, br, m);
@ -2209,13 +2211,13 @@ xn_txq_mq_start_locked(struct netfront_txq *txq, struct mbuf *m)
}
static int
xn_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
xn_txq_mq_start(if_t ifp, struct mbuf *m)
{
struct netfront_info *np;
struct netfront_txq *txq;
int i, npairs, error;
np = ifp->if_softc;
np = if_getsoftc(ifp);
npairs = np->num_queues;
if (!netfront_carrier_ok(np))
@ -2243,14 +2245,14 @@ xn_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
}
static void
xn_qflush(struct ifnet *ifp)
xn_qflush(if_t ifp)
{
struct netfront_info *np;
struct netfront_txq *txq;
struct mbuf *m;
int i;
np = ifp->if_softc;
np = if_getsoftc(ifp);
for (i = 0; i < np->num_queues; i++) {
txq = &np->txq[i];
@ -2273,7 +2275,7 @@ create_netdev(device_t dev)
{
struct netfront_info *np;
int err;
struct ifnet *ifp;
if_t ifp;
np = device_get_softc(dev);
@ -2291,25 +2293,26 @@ create_netdev(device_t dev)
/* Set up ifnet structure */
ifp = np->xn_ifp = if_alloc(IFT_ETHER);
ifp->if_softc = np;
if_initname(ifp, "xn", device_get_unit(dev));
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = xn_ioctl;
if_setsoftc(ifp, np);
if_initname(ifp, "xn", device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setioctlfn(ifp, xn_ioctl);
ifp->if_transmit = xn_txq_mq_start;
ifp->if_qflush = xn_qflush;
if_settransmitfn(ifp, xn_txq_mq_start);
if_setqflushfn(ifp, xn_qflush);
ifp->if_init = xn_ifinit;
if_setinitfn(ifp, xn_ifinit);
ifp->if_hwassist = XN_CSUM_FEATURES;
if_sethwassist(ifp, XN_CSUM_FEATURES);
/* Enable all supported features at device creation. */
ifp->if_capenable = ifp->if_capabilities =
IFCAP_HWCSUM|IFCAP_TSO4|IFCAP_LRO;
ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS;
ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
if_setcapabilities(ifp, IFCAP_HWCSUM|IFCAP_TSO4|IFCAP_LRO);
if_setcapenable(ifp, if_getcapabilities(ifp));
ether_ifattach(ifp, np->mac);
if_sethwtsomax(ifp, 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
if_sethwtsomaxsegcount(ifp, MAX_TX_REQ_FRAGS);
if_sethwtsomaxsegsize(ifp, PAGE_SIZE);
ether_ifattach(ifp, np->mac);
netfront_carrier_off(np);
err = bus_dma_tag_create(
@ -2383,14 +2386,14 @@ netif_disconnect_backend(struct netfront_info *np)
}
static int
xn_ifmedia_upd(struct ifnet *ifp)
xn_ifmedia_upd(if_t ifp)
{
return (0);
}
static void
xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
xn_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;