dev/xen: clean up empty lines in .c and .h files
commit 6c7cae4a73
parent 6831ac28dc
@@ -274,7 +274,7 @@ balloon_process(void *unused)
{
int need_sleep = 0;
long credit;


mtx_lock(&balloon_mutex);
for (;;) {
int sleep_time;
@@ -256,7 +256,6 @@ struct xbb_xen_reqlist {
*/
uint64_t gnt_base;


#ifdef XBB_USE_BOUNCE_BUFFERS
/**
* Pre-allocated domain local memory used to proxy remote
@@ -545,7 +544,6 @@ typedef int (*xbb_dispatch_t)(struct xbb_softc *xbb,
* Per-instance configuration data.
*/
struct xbb_softc {

/**
* Task-queue used to process I/O requests.
*/
@@ -1029,7 +1027,6 @@ xbb_get_kva(struct xbb_softc *xbb, int nr_pages)
* to satisfy the request.
*/
if (++num_clear == nr_pages) {

bit_nset(xbb->kva_free, first_clear,
first_clear + nr_pages - 1);

@@ -1093,7 +1090,6 @@ xbb_unmap_reqlist(struct xbb_xen_reqlist *reqlist)

invcount = 0;
for (i = 0; i < reqlist->nr_segments; i++) {

if (reqlist->gnt_handles[i] == GRANT_REF_INVALID)
continue;

@@ -1127,7 +1123,6 @@ xbb_get_reqlist(struct xbb_softc *xbb)
mtx_assert(&xbb->lock, MA_OWNED);

if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) {

STAILQ_REMOVE_HEAD(&xbb->reqlist_free_stailq, links);
reqlist->flags = XBB_REQLIST_NONE;
reqlist->kva = NULL;
@@ -1366,7 +1361,6 @@ xbb_push_responses(struct xbb_softc *xbb, int *run_taskqueue, int *notify)
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xbb->rings.common, *notify);

if (xbb->rings.common.rsp_prod_pvt == xbb->rings.common.req_cons) {

/*
* Tail check for pending requests. Allows frontend to avoid
* notifications if requests are already in flight (lower
@@ -1374,7 +1368,6 @@ xbb_push_responses(struct xbb_softc *xbb, int *run_taskqueue, int *notify)
*/
RING_FINAL_CHECK_FOR_REQUESTS(&xbb->rings.common, more_to_do);
} else if (RING_HAS_UNCONSUMED_REQUESTS(&xbb->rings.common)) {

more_to_do = 1;
}

@@ -1505,7 +1498,6 @@ xbb_bio_done(struct bio *bio)

if (bio->bio_error == ENXIO
&& xenbus_get_state(xbb->dev) == XenbusStateConnected) {

/*
* Backend device has disappeared. Signal the
* front-end that we (the device proxy) want to
@@ -1744,7 +1736,6 @@ xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)

for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments;
seg_idx++, map++){

if (__predict_false(map->status != 0)) {
DPRINTF("invalid buffer -- could not remap "
"it (%d)\n", map->status);
@@ -1760,7 +1751,6 @@ xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
}
if (reqlist->starting_sector_number + total_sects >
xbb->media_num_sectors) {

DPRINTF("%s of [%" PRIu64 ",%" PRIu64 "] "
"extends past end of device %s\n",
operation == BIO_READ ? "read" : "write",
@@ -1831,7 +1821,6 @@ xbb_run_queue(void *context, int pending)
int cur_operation;
struct xbb_xen_reqlist *reqlist;


xbb = (struct xbb_softc *)context;
rings = &xbb->rings;

@@ -2121,7 +2110,6 @@ xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
nseg = reqlist->nr_segments;

for (seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) {

/*
* KVA will not be contiguous, so any additional
* I/O will need to be represented in a new bio.
@@ -2173,7 +2161,6 @@ xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
bio_offset += xbb_sg->nsect << 9;

if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9) {

if ((bio->bio_length & (xbb->sector_size - 1)) != 0) {
printf("%s: Discontiguous I/O request "
"from domain %d ends on "
@@ -2224,7 +2211,7 @@ xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
fail_free_bios:
for (bio_idx = 0; bio_idx < (nbio-1); bio_idx++)
g_destroy_bio(bios[bio_idx]);


return (error);
}

@@ -2302,7 +2289,6 @@ xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
nseg = reqlist->nr_segments;

for (xiovec = NULL, seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) {

/*
* If the first sector is not 0, the KVA will
* not be contiguous and we'll need to go on
@@ -2354,7 +2340,6 @@ xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr,
xiovec = xuio.uio_iov; seg_idx < xuio.uio_iovcnt;
seg_idx++, xiovec++, p_vaddr++) {

memcpy(xiovec->iov_base, *p_vaddr, xiovec->iov_len);
}
} else {
@@ -2448,12 +2433,10 @@ xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
#ifdef XBB_USE_BOUNCE_BUFFERS
/* We only need to copy here for read operations */
if (operation == BIO_READ) {

for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr,
xiovec = file_data->saved_xiovecs;
seg_idx < saved_uio_iovcnt; seg_idx++,
xiovec++, p_vaddr++) {

/*
* Note that we have to use the copy of the
* io vector we made above. uiomove() modifies
@@ -2563,7 +2546,6 @@ xbb_open_dev(struct xbb_softc *xbb)
return (error);
}


dev = xbb->vn->v_rdev;
devsw = dev->si_devsw;
if (!devsw->d_ioctl) {
@@ -2814,11 +2796,10 @@ xbb_disconnect(struct xbb_softc *xbb)
*/
if (xbb->active_request_count != 0)
return (EAGAIN);


for (ring_idx = 0, op = ops;
ring_idx < xbb->ring_config.ring_pages;
ring_idx++, op++) {

op->host_addr = xbb->ring_config.gnt_addr
+ (ring_idx * PAGE_SIZE);
op->dev_bus_addr = xbb->ring_config.bus_addr[ring_idx];
@@ -2895,7 +2876,6 @@ xbb_connect_ring(struct xbb_softc *xbb)
for (ring_idx = 0, gnt = gnts;
ring_idx < xbb->ring_config.ring_pages;
ring_idx++, gnt++) {

gnt->host_addr = xbb->ring_config.gnt_addr
+ (ring_idx * PAGE_SIZE);
gnt->flags = GNTMAP_host_map;
@@ -3138,13 +3118,10 @@ xbb_collect_frontend_info(struct xbb_softc *xbb)
*/
xbb->abi = BLKIF_PROTOCOL_NATIVE;
} else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_32)) {

xbb->abi = BLKIF_PROTOCOL_X86_32;
} else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_64)) {

xbb->abi = BLKIF_PROTOCOL_X86_64;
} else {

xenbus_dev_fatal(xbb->dev, EINVAL,
"Unknown protocol ABI (%s) published by "
"frontend. Unable to connect.", protocol_abi);
@@ -3361,7 +3338,7 @@ xbb_connect(struct xbb_softc *xbb)
/* Specific errors are reported by xbb_connect_ring(). */
return;
}


if (xbb_publish_backend_info(xbb) != 0) {
/*
* If we can't publish our data, we cannot participate
@@ -3496,7 +3473,7 @@ xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt, ...)
static int
xbb_probe(device_t dev)
{


if (!strcmp(xenbus_get_type(dev), "vbd")) {
device_set_desc(dev, "Backend Virtual Block Device");
device_quiet(dev);
@@ -3517,7 +3494,7 @@ xbb_setup_sysctl(struct xbb_softc *xbb)
{
struct sysctl_ctx_list *sysctl_ctx = NULL;
struct sysctl_oid *sysctl_tree = NULL;


sysctl_ctx = device_get_sysctl_ctx(xbb->dev);
if (sysctl_ctx == NULL)
return;
@@ -3930,7 +3907,6 @@ static device_method_t xbb_methods[] = {

/* Xenbus interface */
DEVMETHOD(xenbus_otherend_changed, xbb_frontend_changed),

{ 0, 0 }
};

@@ -427,7 +427,6 @@ xbd_startio(struct xbd_softc *sc)
return;

while (!RING_FULL(&sc->xbd_ring)) {

if (sc->xbd_qfrozen_cnt != 0)
break;

@@ -768,7 +767,6 @@ xbd_alloc_ring(struct xbd_softc *sc)
for (i = 0, sring_page_addr = (uintptr_t)sring;
i < sc->xbd_ring_pages;
i++, sring_page_addr += PAGE_SIZE) {

error = xenbus_grant_ring(sc->xbd_dev,
(vtophys(sring_page_addr) >> PAGE_SHIFT),
&sc->xbd_ring_ref[i]);
@@ -899,7 +897,7 @@ xbd_setup_sysctl(struct xbd_softc *xbd)
struct sysctl_ctx_list *sysctl_ctx = NULL;
struct sysctl_oid *sysctl_tree = NULL;
struct sysctl_oid_list *children;


sysctl_ctx = device_get_sysctl_ctx(xbd->xbd_dev);
if (sysctl_ctx == NULL)
return;
@@ -1050,7 +1048,7 @@ static void
xbd_free(struct xbd_softc *sc)
{
int i;


/* Prevent new requests being issued until we fix things up. */
mtx_lock(&sc->xbd_io_lock);
sc->xbd_state = XBD_STATE_DISCONNECTED;
@@ -1059,7 +1057,6 @@ xbd_free(struct xbd_softc *sc)
/* Free resources associated with old device channel. */
xbd_free_ring(sc);
if (sc->xbd_shadow) {

for (i = 0; i < sc->xbd_max_requests; i++) {
struct xbd_command *cm;

@@ -1638,7 +1635,7 @@ static device_method_t xbd_methods[] = {
DEVMETHOD(device_shutdown, bus_generic_shutdown),
DEVMETHOD(device_suspend, xbd_suspend),
DEVMETHOD(device_resume, xbd_resume),


/* Xenbus interface */
DEVMETHOD(xenbus_otherend_changed, xbd_backend_changed),

@@ -1651,5 +1648,5 @@ static driver_t xbd_driver = {
sizeof(struct xbd_softc),
};
devclass_t xbd_devclass;


DRIVER_MODULE(xbd, xenbusb_front, xbd_driver, xbd_devclass, 0, 0);
@@ -356,7 +356,7 @@ xctrl_on_watch_event(struct xs_watch *watch, const char **vec, unsigned int len)
char *result;
int error;
int result_len;


error = xs_read(XST_NIL, "control", "shutdown",
&result_len, (void **)&result);
if (error != 0 || result_len == 0)
@@ -370,7 +370,6 @@ xctrl_on_watch_event(struct xs_watch *watch, const char **vec, unsigned int len)
reason = xctrl_shutdown_reasons;
last_reason = reason + nitems(xctrl_shutdown_reasons);
while (reason < last_reason) {

if (!strcmp(result, reason->name)) {
reason->handler();
break;
@@ -469,11 +468,11 @@ static device_method_t xctrl_methods[] = {
DEVMETHOD(device_probe, xctrl_probe),
DEVMETHOD(device_attach, xctrl_attach),
DEVMETHOD(device_detach, xctrl_detach),


DEVMETHOD_END
};

DEFINE_CLASS_0(xctrl, xctrl_driver, xctrl_methods, sizeof(struct xctrl_softc));
devclass_t xctrl_devclass;


DRIVER_MODULE(xctrl, xenstore, xctrl_driver, xctrl_devclass, NULL, NULL);
@@ -462,7 +462,7 @@ gntdev_dealloc_gref(struct ioctl_gntdev_dealloc_gref *arg)
}
mtx_unlock(&cleanup_data.to_kill_grefs_mtx);
mtx_unlock(&priv_user->user_data_lock);


taskqueue_enqueue(taskqueue_thread, &cleanup_task);
put_file_offset(priv_user, arg->count, arg->index);

@@ -573,10 +573,10 @@ notify_unmap_cleanup(struct gntdev_gmap *gmap)
int error, count;
vm_page_t m;
struct gnttab_unmap_grant_ref *unmap_ops;


unmap_ops = malloc(sizeof(struct gnttab_unmap_grant_ref) * gmap->count,
M_GNTDEV, M_WAITOK);


/* Enumerate freeable maps. */
count = 0;
for (i = 0; i < gmap->count; i++) {
@@ -588,7 +588,7 @@ notify_unmap_cleanup(struct gntdev_gmap *gmap)
count++;
}
}


/* Perform notification. */
if (count > 0 && gmap->notify) {
vm_page_t page;
@@ -598,7 +598,7 @@ notify_unmap_cleanup(struct gntdev_gmap *gmap)
page = PHYS_TO_VM_PAGE(gmap->map->phys_base_addr + page_offset);
notify(gmap->notify, page);
}


/* Free the pages. */
VM_OBJECT_WLOCK(gmap->map->mem);
retry:
@@ -611,16 +611,16 @@ retry:
cdev_pager_free_page(gmap->map->mem, m);
}
VM_OBJECT_WUNLOCK(gmap->map->mem);


/* Perform unmap hypercall. */
error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
unmap_ops, count);


for (i = 0; i < gmap->count; i++) {
gmap->grant_map_ops[i].handle = -1;
gmap->grant_map_ops[i].host_addr = 0;
}


if (gmap->map) {
error = xenmem_free(gntdev_dev, gmap->map->pseudo_phys_res_id,
gmap->map->pseudo_phys_res);
@@ -630,9 +630,9 @@ retry:
free(gmap->map, M_GNTDEV);
gmap->map = NULL;
}


free(unmap_ops, M_GNTDEV);


return (error);
}

@@ -721,13 +721,13 @@ gntdev_unmap_grant_ref(struct ioctl_gntdev_unmap_grant_ref *arg)
STAILQ_INSERT_TAIL(&cleanup_data.to_kill_gmaps, gmap, gmap_next.list);
mtx_unlock(&cleanup_data.to_kill_gmaps_mtx);
mtx_unlock(&priv_user->user_data_lock);


if (gmap->map)
vm_object_deallocate(gmap->map->mem);

taskqueue_enqueue(taskqueue_thread, &cleanup_task);
put_file_offset(priv_user, arg->count, arg->index);


return (0);
}

@@ -224,7 +224,6 @@ gnttab_end_foreign_access_references(u_int count, grant_ref_t *refs)
tail = *refs;
last_ref = refs + count;
while (refs != last_ref) {

if (gnttab_end_foreign_access_ref(*refs)) {
gnttab_entry(*refs) = head;
head = *refs;
@@ -417,7 +416,6 @@ gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
mtx_unlock(&gnttab_list_lock);
}


static int
grow_gnttab_list(unsigned int more_frames)
{
@@ -194,7 +194,6 @@ static void xnb_add_mbuf_cksum(struct mbuf *mbufc);
#endif
/*------------------------------ Data Structures -----------------------------*/


/**
* Representation of a xennet packet. Simplified version of a packet as
* stored in the Xen tx ring. Applicable to both RX and TX packets
@@ -306,7 +305,6 @@ xnb_dump_txreq(RING_IDX idx, const struct netif_tx_request *txreq)
}
}


/**
* \brief Configuration data for a shared memory request ring
* used to communicate with the front-end client of this
@@ -1414,7 +1412,6 @@ xnb_frontend_changed(device_t dev, XenbusState frontend_state)
}
}


/*---------------------------- Request Processing ----------------------------*/
/**
* Interrupt handler bound to the shared ring's event channel.
@@ -1467,7 +1464,6 @@ xnb_intr(void *arg)
xnb_start(ifp);
}


/**
* Build a struct xnb_pkt based on netif_tx_request's from a netif tx ring.
* Will read exactly 0 or 1 packets from the ring; never a partial packet.
@@ -1591,7 +1587,6 @@ xnb_ring2pkt(struct xnb_pkt *pkt, const netif_tx_back_ring_t *tx_ring,
return idx - start;
}


/**
* Respond to all the requests that constituted pkt. Builds the responses and
* writes them to the ring, but doesn't push them to the shared ring.
@@ -2448,7 +2443,6 @@ xnb_ifinit_locked(struct xnb_softc *xnb)
if_link_state_change(ifp, LINK_STATE_UP);
}


static void
xnb_ifinit(void *xsc)
{
@@ -2480,7 +2474,6 @@ xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}


/*---------------------------- NewBus Registration ---------------------------*/
static device_method_t xnb_methods[] = {
/* Device interface */
@@ -2493,7 +2486,6 @@ static device_method_t xnb_methods[] = {

/* Xenbus interface */
DEVMETHOD(xenbus_otherend_changed, xnb_frontend_changed),

{ 0, 0 }
};

@@ -2506,7 +2498,6 @@ devclass_t xnb_devclass;

DRIVER_MODULE(xnb, xenbusb_back, xnb_driver, xnb_devclass, 0, 0);


/*-------------------------- Unit Tests -------------------------------------*/
#ifdef XNB_DEBUG
#include "netback_unit_tests.c"
@@ -46,7 +46,6 @@ __FBSDID("$FreeBSD$");
* standalone; they must be #include'd from the driver's .c file.
*/


/** Helper macro used to snprintf to a buffer and update the buffer pointer */
#define SNCATF(buffer, buflen, ...) do { \
size_t new_chars = snprintf(buffer, buflen, __VA_ARGS__); \
@@ -75,7 +74,6 @@ __FBSDID("$FreeBSD$");
} \
})


/**
* The signature used by all testcases. If the test writes anything
* to buffer, then it will be considered a failure
@@ -644,7 +642,6 @@ xnb_ring2pkt_wraps(char *buffer, size_t buflen)
XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
}


/**
* xnb_txpkt2rsp should do nothing for an empty packet
*/
@@ -959,7 +956,6 @@ xnb_txpkt2rsp_wraps(char *buffer, size_t buflen)
XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}


/**
* Helper function used to setup pkt2mbufc tests
* \param size size in bytes of the single request to push to the ring
@@ -1280,7 +1276,6 @@ xnb_txpkt2gnttab_2cluster(char *buffer, size_t buflen)
m_freem(pMbuf);
}


/**
* xnb_update_mbufc on a short packet that only has one gnttab entry
*/
@@ -2032,7 +2027,7 @@ xnb_rxpkt2rsp_copyerror(char *buffer, size_t buflen)
struct netif_rx_request *req;
struct netif_rx_response *rsp;
struct mbuf *mbuf;


mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
mbuf->m_flags |= M_PKTHDR;
mbuf->m_pkthdr.len = size;
@@ -1432,7 +1432,6 @@ xn_get_responses(struct netfront_rxq *rxq,
#endif
if (__predict_false(rx->status < 0 ||
rx->offset + rx->status > PAGE_SIZE)) {

xn_move_rx_slot(rxq, m, ref);
if (m0 == m)
m0 = NULL;
@@ -185,7 +185,6 @@ put_pdev(struct pcifront_device *pdev)
free(pdev, M_DEVBUF);
}


/* Write to the xenbus info needed by backend */
static int
pcifront_publish_info(struct pcifront_device *pdev)
@@ -434,7 +433,6 @@ pcifront_init(void *unused)

SYSINIT(pciif, SI_SUB_PSEUDO, SI_ORDER_ANY, pcifront_init, NULL)


/* Newbus xpcife device driver probe */
static int
xpcife_probe(device_t dev)
@@ -536,7 +534,6 @@ static driver_t xpcife_driver = {

DRIVER_MODULE(xpcife, nexus, xpcife_driver, xpcife_devclass, 0, 0);


/* Newbus xen pcib device driver probe */
static int
xpcib_probe(device_t dev)
@@ -549,7 +546,7 @@ xpcib_probe(device_t dev)
sc->domain = pdev->xdev->otherend_id;
sc->bus = device_get_unit(dev);
sc->pdev = pdev;


return 0;
}

@@ -603,7 +600,7 @@ xpcib_read_config(device_t dev, int bus, int slot, int func,
int err;

err = do_pci_op(sc->pdev, &op);


DPRINTF("read config (b=%d, s=%d, f=%d, reg=%d, len=%d, val=%x, err=%d)\n",
bus, slot, func, reg, bytes, op.value, err);

@@ -227,7 +227,6 @@ static device_method_t xenpci_methods[] = {
DEVMETHOD(device_attach, xenpci_attach),
DEVMETHOD(device_detach, xenpci_detach),
DEVMETHOD(device_resume, xenpci_resume),

{ 0, 0 }
};

@@ -28,7 +28,6 @@
* IN THE SOFTWARE.
*/


#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

@@ -971,7 +970,6 @@ xenwatch_thread(void *unused)
struct xs_stored_msg *msg;

for (;;) {

mtx_lock(&xs.watch_events_lock);
while (TAILQ_EMPTY(&xs.watch_events))
mtx_sleep(&xs.watch_events,
@@ -1223,7 +1221,7 @@ static device_method_t xenstore_methods[] = {
DEVMETHOD(device_shutdown, bus_generic_shutdown),
DEVMETHOD(device_suspend, xs_suspend),
DEVMETHOD(device_resume, xs_resume),


/* Bus interface */
DEVMETHOD(bus_add_child, bus_generic_add_child),
DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource),
@@ -1236,7 +1234,7 @@ static device_method_t xenstore_methods[] = {

DEFINE_CLASS_0(xenstore, xenstore_driver, xenstore_methods, 0);
static devclass_t xenstore_devclass;


DRIVER_MODULE(xenstore, xenpv, xenstore_driver, xenstore_devclass, 0, 0);

/*------------------------------- Sysctl Data --------------------------------*/
@@ -1656,4 +1654,3 @@ xs_unlock(void)
sx_xunlock(&xs.request_mutex);
return;
}

@@ -29,7 +29,6 @@
* IN THE SOFTWARE.
*/


#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

@@ -129,7 +128,6 @@ xs_dev_return_error(struct xs_dev_data *u, int error, int req_id, int tx_id)
msg.tx_id = tx_id;
payload = NULL;


payload = xs_dev_error_to_string(error);
if (payload == NULL)
payload = xs_dev_error_to_string(EINVAL);
@@ -53,7 +53,6 @@ static int xsd_dev_read(struct cdev *dev, struct uio *uio, int ioflag);
static int xsd_dev_mmap(struct cdev *dev, vm_ooffset_t offset,
vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr);


static struct cdevsw xsd_dev_cdevsw = {
.d_version = D_VERSION,
.d_read = xsd_dev_read,