xen-netfront: delete all trailing white spaces

Submitted by:		Wei Liu <wei.liu2@citrix.com>
Reviewed by:		royger
Differential Revision:	https://reviews.freebsd.org/D3886
Sponsored by:		Citrix Systems R&D
Roger Pau Monné 2015-10-19 14:12:15 +00:00
parent 80c4b9e575
commit 17374b6c3b


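The change is purely mechanical: every modified line differs from the line it replaces only in the trailing blanks and tabs that were removed. The commit does not record how the cleanup was performed; purely as an illustration, a minimal C filter with the same effect (copy stdin to stdout with trailing whitespace stripped from each line) might look like this:

#include <stdio.h>
#include <string.h>

/*
 * Hypothetical helper, not part of this commit: re-emit each input line
 * with trailing spaces and tabs removed.  Assumes lines fit in the buffer.
 */
int
main(void)
{
	char line[4096];
	size_t len;

	while (fgets(line, sizeof(line), stdin) != NULL) {
		len = strlen(line);
		/* Drop the newline plus any trailing blanks and tabs. */
		while (len > 0 && (line[len - 1] == '\n' ||
		    line[len - 1] == ' ' || line[len - 1] == '\t'))
			len--;
		line[len] = '\0';
		puts(line);	/* puts() re-appends the '\n'. */
	}
	return (0);
}

In practice a one-liner such as sed 's/[[:space:]]*$//' (or an editor's whitespace cleanup) is the usual tool; the C version above only mirrors the language of the diff. In the hunks below, `-`/`+` markers have been restored on the changed line pairs; removed and added lines render identically because the only difference is invisible trailing whitespace.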
@@ -230,7 +230,7 @@ struct net_device_stats
u_long tx_fifo_errors;
u_long tx_heartbeat_errors;
u_long tx_window_errors;
/* for cslip etc */
u_long rx_compressed;
u_long tx_compressed;
@@ -256,7 +256,7 @@ struct netfront_info {
u_int copying_receiver;
u_int carrier;
u_int maxfrags;
/* Receive-ring batched refills. */
#define RX_MIN_TARGET 32
#define RX_MAX_TARGET NET_RX_RING_SIZE
@@ -265,9 +265,9 @@ struct netfront_info {
int rx_target;
grant_ref_t gref_tx_head;
-grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
+grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
grant_ref_t gref_rx_head;
-grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1];
+grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1];
device_t xbdev;
int tx_ring_ref;
@@ -299,12 +299,12 @@ struct netfront_info {
#define XN_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_lock)
#define XN_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_lock)
-#define XN_LOCK(_sc) mtx_lock(&(_sc)->sc_lock);
-#define XN_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_lock);
+#define XN_LOCK(_sc) mtx_lock(&(_sc)->sc_lock);
+#define XN_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_lock);
-#define XN_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_lock, MA_OWNED);
-#define XN_RX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rx_lock, MA_OWNED);
-#define XN_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_lock, MA_OWNED);
+#define XN_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_lock, MA_OWNED);
+#define XN_RX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rx_lock, MA_OWNED);
+#define XN_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_lock, MA_OWNED);
#define XN_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_lock); \
mtx_destroy(&(_sc)->tx_lock); \
mtx_destroy(&(_sc)->sc_lock);
@@ -389,7 +389,7 @@ xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
* a preallocated array of length ETH_ALEN (as declared in linux/if_ether.h).
* Return 0 on success, or errno on error.
*/
-static int
+static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
int error, i;
@@ -441,7 +441,7 @@ xen_net_read_mac(device_t dev, uint8_t mac[])
* inform the backend of the appropriate details for those. Switch to
* Connected state.
*/
-static int
+static int
netfront_probe(device_t dev)
{
@@ -458,7 +458,7 @@ netfront_probe(device_t dev)
static int
netfront_attach(device_t dev)
-{
+{
int err;
err = create_netdev(dev);
@@ -507,7 +507,7 @@ netfront_resume(device_t dev)
}
/* Common code used when first setting up, and when resuming. */
-static int
+static int
talk_to_backend(device_t dev, struct netfront_info *info)
{
const char *message;
@@ -525,7 +525,7 @@ talk_to_backend(device_t dev, struct netfront_info *info)
err = setup_device(dev, info);
if (err)
goto out;
again:
err = xs_transaction_start(&xst);
if (err) {
@@ -582,9 +582,9 @@ talk_to_backend(device_t dev, struct netfront_info *info)
xenbus_dev_fatal(dev, err, "completing transaction");
goto destroy_ring;
}
return 0;
abort_transaction:
xs_transaction_end(xst, 1);
xenbus_dev_fatal(dev, err, "%s", message);
@@ -594,7 +594,7 @@ talk_to_backend(device_t dev, struct netfront_info *info)
return err;
}
-static int
+static int
setup_device(device_t dev, struct netfront_info *info)
{
netif_tx_sring_t *txs;
@@ -642,7 +642,7 @@ setup_device(device_t dev, struct netfront_info *info)
}
return (0);
fail:
netif_free(info);
return (error);
@@ -658,7 +658,7 @@ netfront_send_fake_arp(device_t dev, struct netfront_info *info)
{
struct ifnet *ifp;
struct ifaddr *ifa;
ifp = info->xn_ifp;
TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
if (ifa->ifa_addr->sa_family == AF_INET) {
@@ -675,7 +675,7 @@ static void
netfront_backend_changed(device_t dev, XenbusState newstate)
{
struct netfront_info *sc = device_get_softc(dev);
DPRINTK("newstate=%d\n", newstate);
switch (newstate) {
@@ -709,14 +709,14 @@ xn_free_rx_ring(struct netfront_info *sc)
{
#if 0
int i;
for (i = 0; i < NET_RX_RING_SIZE; i++) {
if (sc->xn_cdata.rx_mbufs[i] != NULL) {
m_freem(sc->rx_mbufs[i]);
sc->rx_mbufs[i] = NULL;
}
}
sc->rx.rsp_cons = 0;
sc->xn_rx_if->req_prod = 0;
sc->xn_rx_if->event = sc->rx.rsp_cons ;
@@ -728,14 +728,14 @@ xn_free_tx_ring(struct netfront_info *sc)
{
#if 0
int i;
for (i = 0; i < NET_TX_RING_SIZE; i++) {
if (sc->tx_mbufs[i] != NULL) {
m_freem(sc->tx_mbufs[i]);
sc->xn_cdata.xn_tx_chain[i] = NULL;
}
}
return;
#endif
}
@@ -798,12 +798,12 @@ network_alloc_rx_buffers(struct netfront_info *sc)
netif_rx_request_t *req;
vm_offset_t vaddr;
u_long pfn;
req_prod = sc->rx.req_prod_pvt;
if (__predict_false(sc->carrier == 0))
return;
/*
* Allocate mbufs greedily, even though we batch updates to the
* receive ring. This creates a less bursty demand on the memory
@@ -825,11 +825,11 @@ network_alloc_rx_buffers(struct netfront_info *sc)
break;
}
m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;
/* queue the mbufs allocated */
(void )mbufq_enqueue(&sc->xn_rx_batch, m_new);
}
/*
* If we've allocated at least half of our target number of entries,
* submit them to the backend - we have enough to make the overhead
@@ -846,7 +846,7 @@ network_alloc_rx_buffers(struct netfront_info *sc)
* Double floating fill target if we risked having the backend
* run out of empty buffers for receive traffic. We define "running
* low" as having less than a fourth of our target buffers free
-* at the time we refilled the queue.
+* at the time we refilled the queue.
*/
if ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) {
sc->rx_target *= 2;
@@ -888,11 +888,11 @@ network_alloc_rx_buffers(struct netfront_info *sc)
}
req->id = id;
req->gref = ref;
sc->rx_pfn_array[i] =
vtophys(mtod(m_new,vm_offset_t)) >> PAGE_SHIFT;
-}
+}
KASSERT(i, ("no mbufs processed")); /* should have returned earlier */
KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
/*
@@ -912,7 +912,7 @@ network_alloc_rx_buffers(struct netfront_info *sc)
} else {
wmb();
}
/* Above is a suitable barrier to ensure backend will see requests. */
sc->rx.req_prod_pvt = req_prod + i;
push:
@@ -947,7 +947,7 @@ xn_rxeof(struct netfront_info *np)
mbufq_init(&rxq, INT_MAX);
ifp = np->xn_ifp;
rp = np->rx.sring->rsp_prod;
rmb(); /* Ensure we see queued responses up to 'rp'. */
@@ -974,7 +974,7 @@ xn_rxeof(struct netfront_info *np)
* XXX this isn't necessarily the case - need to add
* check
*/
m->m_pkthdr.csum_flags |=
(CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID
| CSUM_PSEUDO_HDR);
@@ -994,16 +994,16 @@ xn_rxeof(struct netfront_info *np)
balloon_update_driver_allowance(-pages_flipped);
#endif
}
mbufq_drain(&errq);
-/*
+/*
* Process all the mbufs after the remapping is complete.
* Break the mbuf chain first though.
*/
while ((m = mbufq_dequeue(&rxq)) != NULL) {
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
/*
* Do we really need to drop the rx lock?
*/
@@ -1023,7 +1023,7 @@ xn_rxeof(struct netfront_info *np)
#endif
XN_RX_LOCK(np);
}
np->rx.rsp_cons = i;
#if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
@@ -1040,18 +1040,18 @@ xn_rxeof(struct netfront_info *np)
#if 0
/* If we get a callback with very few responses, reduce fill target. */
/* NB. Note exponential increase, linear decrease. */
-if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
+if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target))
np->rx_target = np->rx_min_target;
#endif
network_alloc_rx_buffers(np);
RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
} while (work_to_do);
}
-static void
+static void
xn_txeof(struct netfront_info *np)
{
RING_IDX i, prod;
@@ -1059,18 +1059,18 @@ xn_txeof(struct netfront_info *np)
struct ifnet *ifp;
netif_tx_response_t *txr;
struct mbuf *m;
XN_TX_LOCK_ASSERT(np);
if (!netfront_carrier_ok(np))
return;
ifp = np->xn_ifp;
do {
prod = np->tx.sring->rsp_prod;
rmb(); /* Ensure we see responses up to 'rp'. */
for (i = np->tx.rsp_cons; i != prod; i++) {
txr = RING_GET_RESPONSE(&np->tx, i);
if (txr->status == NETIF_RSP_NULL)
@@ -1081,13 +1081,13 @@ xn_txeof(struct netfront_info *np)
__func__, txr->status);
}
id = txr->id;
-m = np->tx_mbufs[id];
+m = np->tx_mbufs[id];
KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
("mbuf already on the free list, but we're "
"trying to free it again!"));
M_ASSERTVALID(m);
/*
* Increment packet count if this is the last
* mbuf of the chain.
@@ -1104,7 +1104,7 @@ xn_txeof(struct netfront_info *np)
gnttab_release_grant_reference(
&np->gref_tx_head, np->grant_tx_ref[id]);
np->grant_tx_ref[id] = GRANT_REF_INVALID;
np->tx_mbufs[id] = NULL;
add_id_to_freelist(np->tx_mbufs, id);
np->xn_cdata.xn_tx_chain_cnt--;
@@ -1113,7 +1113,7 @@ xn_txeof(struct netfront_info *np)
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
np->tx.rsp_cons = prod;
/*
* Set a new event, then check for race with update of
* tx_cons. Note that it is essential to schedule a
@@ -1128,7 +1128,7 @@ xn_txeof(struct netfront_info *np)
mb();
} while (prod != np->tx.sring->rsp_prod);
if (np->tx_full &&
((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
np->tx_full = 0;
@@ -1154,8 +1154,8 @@ xn_intr(void *xsc)
if (RING_HAS_UNCONSUMED_RESPONSES(&np->tx)) {
XN_TX_LOCK(np);
xn_txeof(np);
-XN_TX_UNLOCK(np);
-}
+XN_TX_UNLOCK(np);
+}
XN_RX_LOCK(np);
xn_rxeof(np);
@@ -1193,10 +1193,10 @@ xennet_get_extras(struct netfront_info *np,
grant_ref_t ref;
if (__predict_false(*cons + 1 == rp)) {
-#if 0
+#if 0
if (net_ratelimit())
WPRINTK("Missing extra info\n");
-#endif
+#endif
err = EINVAL;
break;
}
@@ -1206,11 +1206,11 @@ xennet_get_extras(struct netfront_info *np,
if (__predict_false(!extra->type ||
extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
-#if 0
+#if 0
if (net_ratelimit())
WPRINTK("Invalid extra type: %d\n",
extra->type);
-#endif
+#endif
err = EINVAL;
} else {
memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
@@ -1254,18 +1254,18 @@ xennet_get_responses(struct netfront_info *np,
for (;;) {
u_long mfn;
-#if 0
+#if 0
DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
rx->status, rx->offset, frags);
#endif
if (__predict_false(rx->status < 0 ||
rx->offset + rx->status > PAGE_SIZE)) {
-#if 0
+#if 0
if (net_ratelimit())
WPRINTK("rx->offset: %x, size: %u\n",
rx->offset, rx->status);
-#endif
+#endif
xennet_move_rx_slot(np, m, ref);
if (m0 == m)
m0 = NULL;
@@ -1273,7 +1273,7 @@ xennet_get_responses(struct netfront_info *np,
err = EINVAL;
goto next_skip_queue;
}
/*
* This definitely indicates a bug, either in this driver or in
* the backend driver. In future this should flag the bad
@@ -1281,10 +1281,10 @@ xennet_get_responses(struct netfront_info *np,
*/
if (ref == GRANT_REF_INVALID) {
-#if 0
+#if 0
if (net_ratelimit())
WPRINTK("Bad rx response id %d.\n", rx->id);
-#endif
+#endif
printf("%s: Bad rx response id %d.\n", __func__,rx->id);
err = EINVAL;
goto next;
@@ -1317,7 +1317,7 @@ xennet_get_responses(struct netfront_info *np,
m->m_len = rx->status;
m->m_data += rx->offset;
m0->m_pkthdr.len += rx->status;
next_skip_queue:
if (!(rx->flags & NETRXF_more_data))
break;
@@ -1332,23 +1332,23 @@ xennet_get_responses(struct netfront_info *np,
}
/*
* Note that m can be NULL, if rx->status < 0 or if
-* rx->offset + rx->status > PAGE_SIZE above.
+* rx->offset + rx->status > PAGE_SIZE above.
*/
m_prev = m;
rx = RING_GET_RESPONSE(&np->rx, *cons + frags);
m = xennet_get_rx_mbuf(np, *cons + frags);
/*
* m_prev == NULL can happen if rx->status < 0 or if
-* rx->offset + * rx->status > PAGE_SIZE above.
+* rx->offset + * rx->status > PAGE_SIZE above.
*/
if (m_prev != NULL)
m_prev->m_next = m;
/*
* m0 can be NULL if rx->status < 0 or if * rx->offset +
-* rx->status > PAGE_SIZE above.
+* rx->status > PAGE_SIZE above.
*/
if (m0 == NULL)
m0 = m;
@@ -1365,7 +1365,7 @@ xennet_get_responses(struct netfront_info *np,
}
static void
-xn_tick_locked(struct netfront_info *sc)
+xn_tick_locked(struct netfront_info *sc)
{
XN_RX_LOCK_ASSERT(sc);
callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
@@ -1374,10 +1374,10 @@ xn_tick_locked(struct netfront_info *sc)
}
static void
-xn_tick(void *xsc)
+xn_tick(void *xsc)
{
struct netfront_info *sc;
sc = xsc;
XN_RX_LOCK(sc);
xn_tick_locked(sc);
@@ -1573,7 +1573,7 @@ xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head)
}
static void
-xn_start_locked(struct ifnet *ifp)
+xn_start_locked(struct ifnet *ifp)
{
struct netfront_info *sc;
struct mbuf *m_head;
@@ -1621,35 +1621,35 @@ xn_start(struct ifnet *ifp)
}
/* equivalent of network_open() in Linux */
-static void
-xn_ifinit_locked(struct netfront_info *sc)
+static void
+xn_ifinit_locked(struct netfront_info *sc)
{
struct ifnet *ifp;
XN_LOCK_ASSERT(sc);
ifp = sc->xn_ifp;
-if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+if (ifp->if_drv_flags & IFF_DRV_RUNNING)
return;
xn_stop(sc);
network_alloc_rx_buffers(sc);
sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1;
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
if_link_state_change(ifp, LINK_STATE_UP);
callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
}
-static void
+static void
xn_ifinit(void *xsc)
{
struct netfront_info *sc = xsc;
XN_LOCK(sc);
xn_ifinit_locked(sc);
XN_UNLOCK(sc);
@@ -1671,12 +1671,12 @@ xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
XN_LOCK(sc);
if (ifa->ifa_addr->sa_family == AF_INET) {
ifp->if_flags |= IFF_UP;
-if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
xn_ifinit_locked(sc);
arp_ifinit(ifp, ifa);
-XN_UNLOCK(sc);
+XN_UNLOCK(sc);
} else {
-XN_UNLOCK(sc);
+XN_UNLOCK(sc);
#endif
error = ether_ioctl(ifp, cmd, data);
#ifdef INET
@@ -1688,7 +1688,7 @@ xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
#ifdef notyet
if (ifr->ifr_mtu > XN_JUMBO_MTU)
error = EINVAL;
-else
+else
#endif
{
ifp->if_mtu = ifr->ifr_mtu;
@@ -1763,7 +1763,7 @@ xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
}
if (mask & IFCAP_LRO) {
ifp->if_capenable ^= IFCAP_LRO;
}
#endif
error = 0;
@@ -1786,24 +1786,24 @@ xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
default:
error = ether_ioctl(ifp, cmd, data);
}
return (error);
}
static void
xn_stop(struct netfront_info *sc)
-{
+{
struct ifnet *ifp;
XN_LOCK_ASSERT(sc);
ifp = sc->xn_ifp;
callout_stop(&sc->xn_stat_ch);
xn_free_rx_ring(sc);
xn_free_tx_ring(sc);
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
if_link_state_change(ifp, LINK_STATE_DOWN);
}
@@ -1836,9 +1836,9 @@ network_connect(struct netfront_info *np)
/* Recovery procedure: */
error = talk_to_backend(np->xbdev, np);
-if (error)
+if (error)
return (error);
/* Step 1: Reinitialise variables. */
xn_query_features(np);
xn_configure_features(np);
@@ -1874,7 +1874,7 @@ network_connect(struct netfront_info *np)
}
np->rx.req_prod_pvt = requeue_idx;
/* Step 3: All public and private state should now be sane. Get
* ready to start sending and receiving packets and give the driver
* domain a kick because we've probably just requeued some
@@ -1971,7 +1971,7 @@ xn_configure_features(struct netfront_info *np)
* Create a network device.
* @param dev Newbus device representing this virtual NIC.
*/
-int
+int
create_netdev(device_t dev)
{
int i;
@@ -1980,9 +1980,9 @@ create_netdev(device_t dev)
struct ifnet *ifp;
np = device_get_softc(dev);
np->xbdev = dev;
XN_LOCK_INIT(np, xennetif);
ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
@@ -1996,7 +1996,7 @@ create_netdev(device_t dev)
/* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
for (i = 0; i <= NET_TX_RING_SIZE; i++) {
np->tx_mbufs[i] = (void *) ((u_long) i+1);
-np->grant_tx_ref[i] = GRANT_REF_INVALID;
+np->grant_tx_ref[i] = GRANT_REF_INVALID;
}
np->tx_mbufs[NET_TX_RING_SIZE] = (void *)0;
@@ -2023,11 +2023,11 @@ create_netdev(device_t dev)
err = ENOMEM;
goto exit;
}
err = xen_net_read_mac(dev, np->mac);
if (err)
goto out;
/* Set up ifnet structure */
ifp = np->xn_ifp = if_alloc(IFT_ETHER);
ifp->if_softc = np;
@@ -2041,13 +2041,13 @@ create_netdev(device_t dev)
#endif
ifp->if_init = xn_ifinit;
ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1;
ifp->if_hwassist = XN_CSUM_FEATURES;
ifp->if_capabilities = IFCAP_HWCSUM;
ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS;
ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
ether_ifattach(ifp, np->mac);
callout_init(&np->xn_stat_ch, 1);
netfront_carrier_off(np);
@@ -2151,27 +2151,27 @@ xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
}
/* ** Driver registration ** */
-static device_method_t netfront_methods[] = {
-/* Device interface */
-DEVMETHOD(device_probe, netfront_probe),
-DEVMETHOD(device_attach, netfront_attach),
-DEVMETHOD(device_detach, netfront_detach),
-DEVMETHOD(device_shutdown, bus_generic_shutdown),
-DEVMETHOD(device_suspend, netfront_suspend),
-DEVMETHOD(device_resume, netfront_resume),
+static device_method_t netfront_methods[] = {
+/* Device interface */
+DEVMETHOD(device_probe, netfront_probe),
+DEVMETHOD(device_attach, netfront_attach),
+DEVMETHOD(device_detach, netfront_detach),
+DEVMETHOD(device_shutdown, bus_generic_shutdown),
+DEVMETHOD(device_suspend, netfront_suspend),
+DEVMETHOD(device_resume, netfront_resume),
/* Xenbus interface */
DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed),
DEVMETHOD_END
-};
+};
-static driver_t netfront_driver = {
-"xn",
-netfront_methods,
-sizeof(struct netfront_info),
-};
-devclass_t netfront_devclass;
+static driver_t netfront_driver = {
+"xn",
+netfront_methods,
+sizeof(struct netfront_info),
+};
+devclass_t netfront_devclass;
DRIVER_MODULE(xe, xenbusb_front, netfront_driver, netfront_devclass, NULL,
-NULL);
+NULL);