/*
 * XenBSD block device driver
 *
 * Copyright (c) 2010-2013 Spectra Logic Corporation
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/sysctl.h>

#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <machine/intr_machdep.h>
#include <machine/vmparam.h>
#include <sys/bus_dma.h>
#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/gnttab.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/io/protocols.h>
#include <xen/xenbus/xenbusvar.h>
#include <machine/_inttypes.h>

#include <geom/geom_disk.h>

#include <dev/xen/blkfront/block.h>

#include "xenbus_if.h"

/*--------------------------- Forward Declarations ---------------------------*/
static void xbd_closing(device_t);
static void xbd_startio(struct xbd_softc *sc);

/*---------------------------------- Macros ----------------------------------*/
#if 0
#define DPRINTK(fmt, args...) printf("[XEN] %s:%d: " fmt ".\n", __func__, __LINE__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

#define XBD_SECTOR_SHFT		9
/*---------------------------- Global Static Data ----------------------------*/
static MALLOC_DEFINE(M_XENBLOCKFRONT, "xbd", "Xen Block Front driver data");

static int xbd_enable_indirect = 1;
SYSCTL_NODE(_hw, OID_AUTO, xbd, CTLFLAG_RD, 0, "xbd driver parameters");
SYSCTL_INT(_hw_xbd, OID_AUTO, xbd_enable_indirect, CTLFLAG_RDTUN,
    &xbd_enable_indirect, 0, "Enable xbd indirect segments");

/*---------------------------- Command Processing ----------------------------*/
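/*
 * Raise the queue freeze count, recording an optional reason in xbd_flags,
 * so that further request dispatch is held off.  If the given reason flag
 * (other than XBDF_NONE) is already set, the queue is already frozen for
 * that reason and the count is left untouched.
 */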
static void
xbd_freeze(struct xbd_softc *sc, xbd_flag_t xbd_flag)
{
	if (xbd_flag != XBDF_NONE && (sc->xbd_flags & xbd_flag) != 0)
		return;

	sc->xbd_flags |= xbd_flag;
	sc->xbd_qfrozen_cnt++;
}

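/*
 * Drop one queue freeze reference and clear the given reason flag.  A thaw
 * for a reason that is not currently recorded is ignored; thawing a queue
 * that is not frozen at all is a driver bug and panics.
 */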
static void
xbd_thaw(struct xbd_softc *sc, xbd_flag_t xbd_flag)
{
	if (xbd_flag != XBDF_NONE && (sc->xbd_flags & xbd_flag) == 0)
		return;

	if (sc->xbd_qfrozen_cnt == 0)
		panic("%s: Thaw with flag 0x%x while not frozen.",
		    __func__, xbd_flag);

	sc->xbd_flags &= ~xbd_flag;
	sc->xbd_qfrozen_cnt--;
}

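/*
 * Freeze the queue on behalf of a single command.  The command is tagged
 * XBDCF_FROZEN (plus any caller-supplied flag) so that the matching
 * xbd_cm_thaw() releases exactly one freeze reference.
 */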
static void
xbd_cm_freeze(struct xbd_softc *sc, struct xbd_command *cm, xbdc_flag_t cm_flag)
{
	if ((cm->cm_flags & XBDCF_FROZEN) != 0)
		return;

	cm->cm_flags |= XBDCF_FROZEN|cm_flag;
	xbd_freeze(sc, XBDF_NONE);
}

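/*
 * Release the queue freeze reference taken by xbd_cm_freeze() for this
 * command, if one is held.
 */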
static void
xbd_cm_thaw(struct xbd_softc *sc, struct xbd_command *cm)
{
	if ((cm->cm_flags & XBDCF_FROZEN) == 0)
		return;

	cm->cm_flags &= ~XBDCF_FROZEN;
	xbd_thaw(sc, XBDF_NONE);
}

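/*
 * Push all pending requests onto the shared ring and notify the backend
 * via its event channel if the ring state indicates that it is required.
 */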
static inline void
xbd_flush_requests(struct xbd_softc *sc)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->xbd_ring, notify);

	if (notify)
		xen_intr_signal(sc->xen_intr_handle);
}

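/*
 * Return a command to the free pool and thaw the queue if it had been
 * frozen while waiting for a free command to become available.
 */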
static void
xbd_free_command(struct xbd_command *cm)
{

	KASSERT((cm->cm_flags & XBDCF_Q_MASK) == XBD_Q_NONE,
	    ("Freeing command that is still on queue %d.",
	    cm->cm_flags & XBDCF_Q_MASK));

	cm->cm_flags = XBDCF_INITIALIZER;
	cm->cm_bp = NULL;
	cm->cm_complete = NULL;
	xbd_enqueue_cm(cm, XBD_Q_FREE);
	xbd_thaw(cm->cm_sc, XBDF_CM_SHORTAGE);
}

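/*
 * Translate a bus_dma segment list into blkif request segments, claiming a
 * grant reference for each segment and granting the backend access to the
 * underlying page.
 */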
static void
xbd_mksegarray(bus_dma_segment_t *segs, int nsegs,
    grant_ref_t * gref_head, int otherend_id, int readonly,
    grant_ref_t * sg_ref, struct blkif_request_segment *sg)
{
	struct blkif_request_segment *last_block_sg = sg + nsegs;
	vm_paddr_t buffer_ma;
	uint64_t fsect, lsect;
	int ref;

	while (sg < last_block_sg) {
		KASSERT(segs->ds_addr % (1 << XBD_SECTOR_SHFT) == 0,
		    ("XEN disk driver I/O must be sector aligned"));
		KASSERT(segs->ds_len % (1 << XBD_SECTOR_SHFT) == 0,
		    ("XEN disk driver I/Os must be a multiple of "
		    "the sector length"));
		buffer_ma = segs->ds_addr;
		fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT;
		lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1;

		KASSERT(lsect <= 7, ("XEN disk driver data cannot "
		    "cross a page boundary"));

		/* install a grant reference. */
		ref = gnttab_claim_grant_reference(gref_head);

		/*
		 * GNTTAB_LIST_END == 0xffffffff, but it is private
		 * to gnttab.c.
		 */
		KASSERT(ref != ~0, ("grant_reference failed"));

		gnttab_grant_foreign_access_ref(
		    ref,
		    otherend_id,
		    buffer_ma >> PAGE_SHIFT,
		    readonly);

		*sg_ref = ref;
		*sg = (struct blkif_request_segment) {
			.gref = ref,
			.first_sect = fsect,
			.last_sect = lsect
		};
		sg++;
		sg_ref++;
		segs++;
	}
}

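/*
 * bus_dma callback: with the I/O buffer now mapped, build either a regular
 * or an indirect blkif ring request for this command and move the command
 * to the busy queue.  If the mapping was completed asynchronously, the new
 * request is also pushed to the backend here, since xbd_startio() can no
 * longer do it on our behalf.
 */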
static void
xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct xbd_softc *sc;
	struct xbd_command *cm;
	int op;

	cm = arg;
	sc = cm->cm_sc;

	if (error) {
		cm->cm_bp->bio_error = EIO;
		biodone(cm->cm_bp);
		xbd_free_command(cm);
		return;
	}

	KASSERT(nsegs <= sc->xbd_max_request_segments,
	    ("Too many segments in a blkfront I/O"));

	if (nsegs <= BLKIF_MAX_SEGMENTS_PER_REQUEST) {
		blkif_request_t *ring_req;

		/* Fill out a blkif_request_t structure. */
		ring_req = (blkif_request_t *)
		    RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
		sc->xbd_ring.req_prod_pvt++;
		ring_req->id = cm->cm_id;
		ring_req->operation = cm->cm_operation;
		ring_req->sector_number = cm->cm_sector_number;
		ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk;
		ring_req->nr_segments = nsegs;
		cm->cm_nseg = nsegs;
		xbd_mksegarray(segs, nsegs, &cm->cm_gref_head,
		    xenbus_get_otherend_id(sc->xbd_dev),
		    cm->cm_operation == BLKIF_OP_WRITE,
		    cm->cm_sg_refs, ring_req->seg);
	} else {
		blkif_request_indirect_t *ring_req;

		/* Fill out a blkif_request_indirect_t structure. */
		ring_req = (blkif_request_indirect_t *)
		    RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
		sc->xbd_ring.req_prod_pvt++;
		ring_req->id = cm->cm_id;
		ring_req->operation = BLKIF_OP_INDIRECT;
		ring_req->indirect_op = cm->cm_operation;
		ring_req->sector_number = cm->cm_sector_number;
		ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk;
		ring_req->nr_segments = nsegs;
		cm->cm_nseg = nsegs;
		xbd_mksegarray(segs, nsegs, &cm->cm_gref_head,
		    xenbus_get_otherend_id(sc->xbd_dev),
		    cm->cm_operation == BLKIF_OP_WRITE,
		    cm->cm_sg_refs, cm->cm_indirectionpages);
		memcpy(ring_req->indirect_grefs, &cm->cm_indirectionrefs,
		    sizeof(grant_ref_t) * sc->xbd_max_request_indirectpages);
	}

	if (cm->cm_operation == BLKIF_OP_READ)
		op = BUS_DMASYNC_PREREAD;
	else if (cm->cm_operation == BLKIF_OP_WRITE)
		op = BUS_DMASYNC_PREWRITE;
	else
		op = 0;
	bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);

	gnttab_free_grant_references(cm->cm_gref_head);

	xbd_enqueue_cm(cm, XBD_Q_BUSY);

	/*
	 * If bus dma had to asynchronously call us back to dispatch
	 * this command, we are no longer executing in the context of
	 * xbd_startio(). Thus we cannot rely on xbd_startio()'s call to
	 * xbd_flush_requests() to publish this command to the backend
	 * along with any other commands that it could batch.
	 */
	if ((cm->cm_flags & XBDCF_ASYNC_MAPPING) != 0)
		xbd_flush_requests(sc);

	return;
}

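/*
 * Map the command's data (either a struct bio or a plain buffer) for DMA.
 * The ring request itself is built in xbd_queue_cb() once the mapping is
 * available, which may happen asynchronously; in that case the queue is
 * frozen to preserve request ordering.
 */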
static int
xbd_queue_request(struct xbd_softc *sc, struct xbd_command *cm)
{
	int error;

	if (cm->cm_bp != NULL)
		error = bus_dmamap_load_bio(sc->xbd_io_dmat, cm->cm_map,
		    cm->cm_bp, xbd_queue_cb, cm, 0);
	else
		error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map,
		    cm->cm_data, cm->cm_datalen, xbd_queue_cb, cm, 0);
	if (error == EINPROGRESS) {
		/*
		 * Maintain queuing order by freezing the queue. The next
		 * command may not require as many resources as the command
		 * we just attempted to map, so we can't rely on bus dma
		 * blocking for it too.
		 */
		xbd_cm_freeze(sc, cm, XBDCF_ASYNC_MAPPING);
		return (0);
	}

	return (error);
}

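/*
 * Grant table callback, registered when grant references ran short: thaw
 * the queue and restart I/O now that references should be available again.
 */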
static void
xbd_restart_queue_callback(void *arg)
{
	struct xbd_softc *sc = arg;

	mtx_lock(&sc->xbd_io_lock);

	xbd_thaw(sc, XBDF_GNT_SHORTAGE);

	xbd_startio(sc);

	mtx_unlock(&sc->xbd_io_lock);
}

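/*
 * Turn the next queued bio into a blkif command: pull a command from the
 * free pool, reserve the grant references it needs, and map the bio command
 * onto the corresponding blkif operation.  Returns NULL when no work or no
 * resources are available, or when the request must wait for in-flight I/O;
 * in the latter cases the queue is frozen until it can make progress.
 */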
static struct xbd_command *
xbd_bio_command(struct xbd_softc *sc)
{
	struct xbd_command *cm;
	struct bio *bp;

if (__predict_false(sc->xbd_state != XBD_STATE_CONNECTED))
|
2013-05-31 22:21:37 +00:00
|
|
|
return (NULL);
|
|
|
|
|
|
|
|
bp = xbd_dequeue_bio(sc);
|
|
|
|
if (bp == NULL)
|
|
|
|
return (NULL);
|
|
|
|
|
2013-06-14 17:00:58 +00:00
|
|
|
if ((cm = xbd_dequeue_cm(sc, XBD_Q_FREE)) == NULL) {
|
2013-06-15 04:51:31 +00:00
|
|
|
xbd_freeze(sc, XBDF_CM_SHORTAGE);
|
2013-05-31 22:21:37 +00:00
|
|
|
xbd_requeue_bio(sc, bp);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
|
|
|
|
&cm->cm_gref_head) != 0) {
|
|
|
|
gnttab_request_free_callback(&sc->xbd_callback,
|
|
|
|
xbd_restart_queue_callback, sc,
|
|
|
|
sc->xbd_max_request_segments);
|
2013-06-15 04:51:31 +00:00
|
|
|
xbd_freeze(sc, XBDF_GNT_SHORTAGE);
|
2013-05-31 22:21:37 +00:00
|
|
|
xbd_requeue_bio(sc, bp);
|
2013-06-14 17:00:58 +00:00
|
|
|
xbd_enqueue_cm(cm, XBD_Q_FREE);
|
2013-05-31 22:21:37 +00:00
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
cm->cm_bp = bp;
|
|
|
|
cm->cm_sector_number = (blkif_sector_t)bp->bio_pblkno;
|
|
|
|
|
2013-06-26 20:39:07 +00:00
|
|
|
switch (bp->bio_cmd) {
|
|
|
|
case BIO_READ:
|
|
|
|
cm->cm_operation = BLKIF_OP_READ;
|
|
|
|
break;
|
|
|
|
case BIO_WRITE:
|
|
|
|
cm->cm_operation = BLKIF_OP_WRITE;
|
|
|
|
if ((bp->bio_flags & BIO_ORDERED) != 0) {
|
|
|
|
if ((sc->xbd_flags & XBDF_BARRIER) != 0) {
|
|
|
|
cm->cm_operation = BLKIF_OP_WRITE_BARRIER;
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Single step this command.
|
|
|
|
*/
|
|
|
|
cm->cm_flags |= XBDCF_Q_FREEZE;
|
|
|
|
if (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
|
|
|
|
/*
|
|
|
|
* Wait for in-flight requests to
|
|
|
|
* finish.
|
|
|
|
*/
|
|
|
|
xbd_freeze(sc, XBDF_WAIT_IDLE);
|
|
|
|
xbd_requeue_cm(cm, XBD_Q_READY);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
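	/*
	 * Cache flush: prefer a native flush operation and fall back to
	 * a write barrier when that is all the backend advertised.
	 */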
case BIO_FLUSH:
|
|
|
|
if ((sc->xbd_flags & XBDF_FLUSH) != 0)
|
|
|
|
cm->cm_operation = BLKIF_OP_FLUSH_DISKCACHE;
|
|
|
|
else if ((sc->xbd_flags & XBDF_BARRIER) != 0)
|
|
|
|
cm->cm_operation = BLKIF_OP_WRITE_BARRIER;
|
|
|
|
else
|
|
|
|
panic("flush request, but no flush support available");
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
panic("unknown bio command %d", bp->bio_cmd);
|
|
|
|
}
|
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
return (cm);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Dequeue buffers and place them in the shared communication ring.
|
|
|
|
* Return when no more requests can be accepted or all buffers have
|
|
|
|
* been queued.
|
|
|
|
*
|
|
|
|
* Signal XEN once the ring has been filled out.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
xbd_startio(struct xbd_softc *sc)
|
|
|
|
{
|
|
|
|
struct xbd_command *cm;
|
|
|
|
int error, queued = 0;
|
|
|
|
|
|
|
|
mtx_assert(&sc->xbd_io_lock, MA_OWNED);
|
|
|
|
|
2013-06-14 17:00:58 +00:00
|
|
|
if (sc->xbd_state != XBD_STATE_CONNECTED)
|
2013-05-31 22:21:37 +00:00
|
|
|
return;
|
|
|
|
|
2015-06-12 07:50:34 +00:00
|
|
|
while (!RING_FULL(&sc->xbd_ring)) {
|
|
|
|
|
2013-06-15 04:51:31 +00:00
|
|
|
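		/*
		 * Honor any queue freeze put in place by a resource
		 * shortage or an ordered write in progress.
		 */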
if (sc->xbd_qfrozen_cnt != 0)
|
2013-05-31 22:21:37 +00:00
|
|
|
break;
|
|
|
|
|
2013-06-14 17:00:58 +00:00
|
|
|
cm = xbd_dequeue_cm(sc, XBD_Q_READY);
|
2013-05-31 22:21:37 +00:00
|
|
|
|
|
|
|
if (cm == NULL)
|
|
|
|
cm = xbd_bio_command(sc);
|
|
|
|
|
|
|
|
if (cm == NULL)
|
|
|
|
break;
|
|
|
|
|
2013-06-26 20:39:07 +00:00
|
|
|
if ((cm->cm_flags & XBDCF_Q_FREEZE) != 0) {
|
|
|
|
/*
|
|
|
|
* Single step command. Future work is
|
|
|
|
* held off until this command completes.
|
|
|
|
*/
|
|
|
|
xbd_cm_freeze(sc, cm, XBDCF_Q_FREEZE);
|
|
|
|
}
|
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
if ((error = xbd_queue_request(sc, cm)) != 0) {
|
|
|
|
printf("xbd_queue_request returned %d\n", error);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
queued++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (queued != 0)
|
2013-05-31 22:33:28 +00:00
|
|
|
xbd_flush_requests(sc);
|
2013-05-31 22:21:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
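/*
 * Finish a bio-backed command: propagate any error status to the bio
 * and complete it with biodone().
 */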
static void
|
|
|
|
xbd_bio_complete(struct xbd_softc *sc, struct xbd_command *cm)
|
|
|
|
{
|
|
|
|
struct bio *bp;
|
|
|
|
|
|
|
|
bp = cm->cm_bp;
|
|
|
|
|
2013-08-29 19:52:18 +00:00
|
|
|
if (__predict_false(cm->cm_status != BLKIF_RSP_OKAY)) {
|
2013-05-31 22:21:37 +00:00
|
|
|
		disk_err(bp, "disk error", -1, 0);
|
|
|
|
printf(" status: %x\n", cm->cm_status);
|
|
|
|
bp->bio_flags |= BIO_ERROR;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bp->bio_flags & BIO_ERROR)
|
|
|
|
bp->bio_error = EIO;
|
2009-11-30 04:32:34 +00:00
|
|
|
else
|
|
|
|
bp->bio_resid = 0;
|
|
|
|
|
2013-05-31 21:05:07 +00:00
|
|
|
xbd_free_command(cm);
|
2008-08-12 20:01:57 +00:00
|
|
|
biodone(bp);
|
|
|
|
}
|
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
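/*
 * Interrupt handler: drain completed responses from the shared ring,
 * sync and unload their DMA maps, complete the associated commands,
 * and restart any I/O that was waiting for resources.
 */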
static void
|
|
|
|
xbd_int(void *xsc)
|
|
|
|
{
|
|
|
|
struct xbd_softc *sc = xsc;
|
|
|
|
struct xbd_command *cm;
|
|
|
|
blkif_response_t *bret;
|
|
|
|
RING_IDX i, rp;
|
|
|
|
int op;
|
|
|
|
|
|
|
|
mtx_lock(&sc->xbd_io_lock);
|
|
|
|
|
2013-08-29 19:52:18 +00:00
|
|
|
if (__predict_false(sc->xbd_state == XBD_STATE_DISCONNECTED)) {
|
2013-05-31 22:21:37 +00:00
|
|
|
mtx_unlock(&sc->xbd_io_lock);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
again:
|
|
|
|
rp = sc->xbd_ring.sring->rsp_prod;
|
|
|
|
rmb(); /* Ensure we see queued responses up to 'rp'. */
|
|
|
|
|
|
|
|
for (i = sc->xbd_ring.rsp_cons; i != rp;) {
|
|
|
|
bret = RING_GET_RESPONSE(&sc->xbd_ring, i);
|
|
|
|
cm = &sc->xbd_shadow[bret->id];
|
|
|
|
|
2013-06-14 17:00:58 +00:00
|
|
|
xbd_remove_cm(cm, XBD_Q_BUSY);
|
2015-06-12 07:50:34 +00:00
|
|
|
gnttab_end_foreign_access_references(cm->cm_nseg,
|
|
|
|
cm->cm_sg_refs);
|
|
|
|
i++;
|
2013-05-31 22:21:37 +00:00
|
|
|
|
|
|
|
if (cm->cm_operation == BLKIF_OP_READ)
|
|
|
|
op = BUS_DMASYNC_POSTREAD;
|
2013-06-26 20:39:07 +00:00
|
|
|
else if (cm->cm_operation == BLKIF_OP_WRITE ||
|
|
|
|
cm->cm_operation == BLKIF_OP_WRITE_BARRIER)
|
2013-05-31 22:21:37 +00:00
|
|
|
op = BUS_DMASYNC_POSTWRITE;
|
|
|
|
else
|
|
|
|
op = 0;
|
|
|
|
bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);
|
|
|
|
bus_dmamap_unload(sc->xbd_io_dmat, cm->cm_map);
|
|
|
|
|
|
|
|
/*
|
2013-06-15 04:51:31 +00:00
|
|
|
* Release any hold this command has on future command
|
|
|
|
* dispatch.
|
2013-05-31 22:21:37 +00:00
|
|
|
*/
|
2013-06-26 20:39:07 +00:00
|
|
|
xbd_cm_thaw(sc, cm);
|
2013-05-31 22:21:37 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Directly call the i/o complete routine to save an
|
|
|
|
		 * indirection in the common case.
|
|
|
|
*/
|
|
|
|
cm->cm_status = bret->status;
|
|
|
|
if (cm->cm_bp)
|
|
|
|
xbd_bio_complete(sc, cm);
|
|
|
|
else if (cm->cm_complete != NULL)
|
|
|
|
cm->cm_complete(cm);
|
|
|
|
else
|
|
|
|
xbd_free_command(cm);
|
|
|
|
}
|
|
|
|
|
|
|
|
sc->xbd_ring.rsp_cons = i;
|
|
|
|
|
|
|
|
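	/*
	 * If requests are still outstanding, re-arm response notifications
	 * and re-check the ring so a response posted during this pass is
	 * not missed; otherwise just advance the response event index.
	 */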
if (i != sc->xbd_ring.req_prod_pvt) {
|
|
|
|
int more_to_do;
|
|
|
|
RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, more_to_do);
|
|
|
|
if (more_to_do)
|
|
|
|
goto again;
|
|
|
|
} else {
|
|
|
|
sc->xbd_ring.sring->rsp_event = i + 1;
|
|
|
|
}
|
|
|
|
|
2013-06-26 20:39:07 +00:00
|
|
|
if (xbd_queue_length(sc, XBD_Q_BUSY) == 0)
|
|
|
|
xbd_thaw(sc, XBDF_WAIT_IDLE);
|
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
xbd_startio(sc);
|
|
|
|
|
2013-08-29 19:52:18 +00:00
|
|
|
if (__predict_false(sc->xbd_state == XBD_STATE_SUSPENDED))
|
2013-06-14 17:00:58 +00:00
|
|
|
wakeup(&sc->xbd_cm_q[XBD_Q_BUSY]);
|
2013-05-31 22:21:37 +00:00
|
|
|
|
|
|
|
mtx_unlock(&sc->xbd_io_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*------------------------------- Dump Support -------------------------------*/
|
|
|
|
/**
|
|
|
|
* Quiesce the disk writes for a dump file before allowing the next buffer.
|
|
|
|
*/
|
2009-08-30 20:45:24 +00:00
|
|
|
static void
|
2013-05-31 21:05:07 +00:00
|
|
|
xbd_quiesce(struct xbd_softc *sc)
|
2009-08-30 20:45:24 +00:00
|
|
|
{
|
2013-05-31 21:05:07 +00:00
|
|
|
int mtd;
|
2009-08-30 20:45:24 +00:00
|
|
|
|
|
|
|
	/* While there are outstanding requests. */
|
2013-06-26 20:39:07 +00:00
|
|
|
while (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
|
2013-05-31 21:05:07 +00:00
|
|
|
RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, mtd);
|
2009-08-30 20:45:24 +00:00
|
|
|
if (mtd) {
|
2016-05-03 03:41:25 +00:00
|
|
|
/* Received request completions, update queue. */
|
2013-05-31 21:05:07 +00:00
|
|
|
xbd_int(sc);
|
2009-08-30 20:45:24 +00:00
|
|
|
}
|
2013-06-26 20:39:07 +00:00
|
|
|
if (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
|
2009-11-30 04:32:34 +00:00
|
|
|
/*
|
|
|
|
* Still pending requests, wait for the disk i/o
|
|
|
|
* to complete.
|
|
|
|
*/
|
2009-11-24 07:17:51 +00:00
|
|
|
HYPERVISOR_yield();
|
2009-08-30 20:45:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-11-30 04:32:34 +00:00
|
|
|
/* Completion routine used by the kernel dump path for a paravirtualized disk device */
|
|
|
|
static void
|
2013-05-31 21:05:07 +00:00
|
|
|
xbd_dump_complete(struct xbd_command *cm)
|
2009-11-30 04:32:34 +00:00
|
|
|
{
|
|
|
|
|
2013-06-14 17:00:58 +00:00
|
|
|
xbd_enqueue_cm(cm, XBD_Q_COMPLETE);
|
2009-11-30 04:32:34 +00:00
|
|
|
}
|
2009-08-30 20:45:24 +00:00
|
|
|
|
|
|
|
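/*
 * Dump entry point: split the supplied memory range into device-sized
 * requests, issue them as writes, and poll for their completion.
 */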
static int
|
2013-05-31 21:05:07 +00:00
|
|
|
xbd_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
|
|
|
|
size_t length)
|
2009-08-30 20:45:24 +00:00
|
|
|
{
|
2013-05-31 21:05:07 +00:00
|
|
|
struct disk *dp = arg;
|
|
|
|
struct xbd_softc *sc = dp->d_drv1;
|
|
|
|
struct xbd_command *cm;
|
|
|
|
size_t chunk;
|
|
|
|
int sbp;
|
|
|
|
int rc = 0;
|
2009-11-30 04:32:34 +00:00
|
|
|
|
|
|
|
if (length <= 0)
|
|
|
|
return (rc);
|
|
|
|
|
2013-05-31 21:05:07 +00:00
|
|
|
xbd_quiesce(sc); /* All quiet on the western front. */
|
2009-11-30 04:32:34 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If this lock is held, then this module is failing, and a
|
|
|
|
* successful kernel dump is highly unlikely anyway.
|
|
|
|
*/
|
2013-05-31 21:05:07 +00:00
|
|
|
mtx_lock(&sc->xbd_io_lock);
|
2009-11-30 04:32:34 +00:00
|
|
|
|
|
|
|
/* Split the 64KB block as needed */
|
|
|
|
	for (sbp = 0; length > 0; sbp++) {
|
2013-06-14 17:00:58 +00:00
|
|
|
cm = xbd_dequeue_cm(sc, XBD_Q_FREE);
|
2009-11-30 04:32:34 +00:00
|
|
|
if (cm == NULL) {
|
2013-05-31 21:05:07 +00:00
|
|
|
mtx_unlock(&sc->xbd_io_lock);
|
|
|
|
device_printf(sc->xbd_dev, "dump: no more commands?\n");
|
2009-11-30 04:32:34 +00:00
|
|
|
return (EBUSY);
|
2009-08-30 20:45:24 +00:00
|
|
|
}
|
2009-11-30 04:32:34 +00:00
|
|
|
|
2013-05-31 21:05:07 +00:00
|
|
|
if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
|
|
|
|
&cm->cm_gref_head) != 0) {
|
|
|
|
xbd_free_command(cm);
|
|
|
|
mtx_unlock(&sc->xbd_io_lock);
|
|
|
|
device_printf(sc->xbd_dev, "no more grant allocs?\n");
|
2009-11-30 04:32:34 +00:00
|
|
|
return (EBUSY);
|
2009-08-30 20:45:24 +00:00
|
|
|
}
|
2009-11-30 04:32:34 +00:00
|
|
|
|
2013-05-31 21:05:07 +00:00
|
|
|
chunk = length > sc->xbd_max_request_size ?
|
|
|
|
sc->xbd_max_request_size : length;
|
|
|
|
cm->cm_data = virtual;
|
|
|
|
cm->cm_datalen = chunk;
|
|
|
|
cm->cm_operation = BLKIF_OP_WRITE;
|
|
|
|
cm->cm_sector_number = offset / dp->d_sectorsize;
|
|
|
|
cm->cm_complete = xbd_dump_complete;
|
2009-11-30 04:32:34 +00:00
|
|
|
|
2013-06-14 17:00:58 +00:00
|
|
|
xbd_enqueue_cm(cm, XBD_Q_READY);
|
2009-11-30 04:32:34 +00:00
|
|
|
|
|
|
|
length -= chunk;
|
|
|
|
offset += chunk;
|
|
|
|
virtual = (char *) virtual + chunk;
|
2009-08-30 20:45:24 +00:00
|
|
|
}
|
2009-11-30 04:32:34 +00:00
|
|
|
|
|
|
|
/* Tell DOM0 to do the I/O */
|
2013-05-31 21:05:07 +00:00
|
|
|
xbd_startio(sc);
|
|
|
|
mtx_unlock(&sc->xbd_io_lock);
|
2009-11-30 04:32:34 +00:00
|
|
|
|
|
|
|
/* Poll for the completion. */
|
2013-05-31 21:05:07 +00:00
|
|
|
	xbd_quiesce(sc);	/* All quiet on the eastern front. */
|
2009-11-30 04:32:34 +00:00
|
|
|
|
|
|
|
/* If there were any errors, bail out... */
|
2013-06-14 17:00:58 +00:00
|
|
|
while ((cm = xbd_dequeue_cm(sc, XBD_Q_COMPLETE)) != NULL) {
|
2013-05-31 21:05:07 +00:00
|
|
|
if (cm->cm_status != BLKIF_RSP_OKAY) {
|
|
|
|
device_printf(sc->xbd_dev,
|
2009-11-30 04:32:34 +00:00
|
|
|
"Dump I/O failed at sector %jd\n",
|
2013-05-31 21:05:07 +00:00
|
|
|
cm->cm_sector_number);
|
2009-11-30 04:32:34 +00:00
|
|
|
rc = EIO;
|
|
|
|
}
|
2013-05-31 21:05:07 +00:00
|
|
|
xbd_free_command(cm);
|
2009-11-30 04:32:34 +00:00
|
|
|
}
|
|
|
|
|
2009-08-30 20:45:24 +00:00
|
|
|
return (rc);
|
|
|
|
}
|
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
/*----------------------------- Disk Entrypoints -----------------------------*/
|
2008-12-04 07:59:05 +00:00
|
|
|
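/*
 * Disk open entry point: mark the device open and account for the new user.
 */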
static int
|
2013-05-31 22:21:37 +00:00
|
|
|
xbd_open(struct disk *dp)
|
2008-12-04 07:59:05 +00:00
|
|
|
{
|
2013-05-31 22:21:37 +00:00
|
|
|
struct xbd_softc *sc = dp->d_drv1;
|
2008-12-04 07:59:05 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
if (sc == NULL) {
|
2016-02-14 13:42:16 +00:00
|
|
|
printf("xbd%d: not found", dp->d_unit);
|
2013-05-31 22:21:37 +00:00
|
|
|
return (ENXIO);
|
2008-12-04 07:59:05 +00:00
|
|
|
}
|
2008-08-12 20:01:57 +00:00
|
|
|
|
2013-06-14 17:00:58 +00:00
|
|
|
sc->xbd_flags |= XBDF_OPEN;
|
2013-05-31 22:21:37 +00:00
|
|
|
sc->xbd_users++;
|
|
|
|
return (0);
|
2008-12-04 07:59:05 +00:00
|
|
|
}
|
2008-08-12 20:01:57 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
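/*
 * Disk close entry point: drop a user reference and, on last close, honor
 * any close request the backend made while the device was still in use.
 */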
static int
|
|
|
|
xbd_close(struct disk *dp)
|
2012-02-15 06:45:49 +00:00
|
|
|
{
|
2013-05-31 22:21:37 +00:00
|
|
|
struct xbd_softc *sc = dp->d_drv1;
|
2012-02-15 06:45:49 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
if (sc == NULL)
|
|
|
|
return (ENXIO);
|
2013-06-14 17:00:58 +00:00
|
|
|
sc->xbd_flags &= ~XBDF_OPEN;
|
2013-05-31 22:21:37 +00:00
|
|
|
if (--(sc->xbd_users) == 0) {
|
|
|
|
/*
|
|
|
|
* Check whether we have been instructed to close. We will
|
|
|
|
* have ignored this request initially, as the device was
|
|
|
|
* still mounted.
|
|
|
|
*/
|
|
|
|
if (xenbus_get_otherend_state(sc->xbd_dev) ==
|
|
|
|
XenbusStateClosing)
|
|
|
|
xbd_closing(sc->xbd_dev);
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
2012-02-15 06:45:49 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
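/*
 * Disk ioctl entry point: no driver-specific ioctls are supported.
 */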
static int
|
|
|
|
xbd_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td)
|
|
|
|
{
|
|
|
|
struct xbd_softc *sc = dp->d_drv1;
|
2012-02-15 06:45:49 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
if (sc == NULL)
|
|
|
|
return (ENXIO);
|
2012-02-15 06:45:49 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
return (ENOTTY);
|
2012-02-15 06:45:49 +00:00
|
|
|
}
|
|
|
|
|
2008-12-04 07:59:05 +00:00
|
|
|
/*
|
2013-05-31 22:21:37 +00:00
|
|
|
 * Read/write routine for a buffer. Finds the proper unit, places it on
|
|
|
|
 * the sortq and kicks the controller.
|
2008-12-04 07:59:05 +00:00
|
|
|
*/
|
2013-05-31 22:21:37 +00:00
|
|
|
static void
|
|
|
|
xbd_strategy(struct bio *bp)
|
2008-08-12 20:01:57 +00:00
|
|
|
{
|
2013-05-31 22:21:37 +00:00
|
|
|
struct xbd_softc *sc = bp->bio_disk->d_drv1;
|
2008-08-12 20:01:57 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
/* bogus disk? */
|
|
|
|
if (sc == NULL) {
|
|
|
|
bp->bio_error = EINVAL;
|
|
|
|
bp->bio_flags |= BIO_ERROR;
|
|
|
|
bp->bio_resid = bp->bio_bcount;
|
|
|
|
biodone(bp);
|
|
|
|
return;
|
2008-08-12 20:01:57 +00:00
|
|
|
}
|
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
/*
|
|
|
|
* Place it in the queue of disk activities for this disk
|
|
|
|
*/
|
|
|
|
mtx_lock(&sc->xbd_io_lock);
|
2012-02-15 06:45:49 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
xbd_enqueue_bio(sc, bp);
|
|
|
|
xbd_startio(sc);
|
2009-03-11 15:30:12 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
mtx_unlock(&sc->xbd_io_lock);
|
|
|
|
return;
|
2009-03-11 15:30:12 +00:00
|
|
|
}
|
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
/*------------------------------ Ring Management -----------------------------*/
|
|
|
|
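/*
 * Allocate the shared request ring, grant the backend access to it,
 * publish the grant references via the XenStore, and bind the event
 * channel used to signal completions.
 */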
static int
|
2013-05-31 22:33:28 +00:00
|
|
|
xbd_alloc_ring(struct xbd_softc *sc)
|
2009-03-11 15:30:12 +00:00
|
|
|
{
|
2013-05-31 22:21:37 +00:00
|
|
|
blkif_sring_t *sring;
|
|
|
|
uintptr_t sring_page_addr;
|
|
|
|
int error;
|
|
|
|
int i;
|
2009-03-11 15:30:12 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
sring = malloc(sc->xbd_ring_pages * PAGE_SIZE, M_XENBLOCKFRONT,
|
|
|
|
M_NOWAIT|M_ZERO);
|
|
|
|
if (sring == NULL) {
|
|
|
|
xenbus_dev_fatal(sc->xbd_dev, ENOMEM, "allocating shared ring");
|
|
|
|
return (ENOMEM);
|
|
|
|
}
|
|
|
|
SHARED_RING_INIT(sring);
|
|
|
|
FRONT_RING_INIT(&sc->xbd_ring, sring, sc->xbd_ring_pages * PAGE_SIZE);
|
2011-09-21 00:02:44 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
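	/* Grant the backend access to each page of the shared ring. */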
for (i = 0, sring_page_addr = (uintptr_t)sring;
|
|
|
|
i < sc->xbd_ring_pages;
|
|
|
|
i++, sring_page_addr += PAGE_SIZE) {
|
|
|
|
|
|
|
|
error = xenbus_grant_ring(sc->xbd_dev,
|
2015-08-06 17:07:21 +00:00
|
|
|
(vtophys(sring_page_addr) >> PAGE_SHIFT),
|
2013-05-31 22:21:37 +00:00
|
|
|
&sc->xbd_ring_ref[i]);
|
|
|
|
if (error) {
|
|
|
|
xenbus_dev_fatal(sc->xbd_dev, error,
|
|
|
|
"granting ring_ref(%d)", i);
|
|
|
|
return (error);
|
2011-09-21 00:02:44 +00:00
|
|
|
}
|
|
|
|
}
|
2013-05-31 22:21:37 +00:00
|
|
|
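	/*
	 * Publish the grant references in the XenStore: a single ring-ref
	 * node for a one-page ring, or one ring-ref%u node per page for
	 * multi-page rings.
	 */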
if (sc->xbd_ring_pages == 1) {
|
|
|
|
error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev),
|
|
|
|
"ring-ref", "%u", sc->xbd_ring_ref[0]);
|
|
|
|
if (error) {
|
|
|
|
xenbus_dev_fatal(sc->xbd_dev, error,
|
|
|
|
"writing %s/ring-ref",
|
|
|
|
xenbus_get_node(sc->xbd_dev));
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
for (i = 0; i < sc->xbd_ring_pages; i++) {
|
|
|
|
			char ring_ref_name[] = "ring_refXX";
|
2008-08-12 20:01:57 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
snprintf(ring_ref_name, sizeof(ring_ref_name),
|
|
|
|
"ring-ref%u", i);
|
|
|
|
error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev),
|
|
|
|
ring_ref_name, "%u", sc->xbd_ring_ref[i]);
|
|
|
|
if (error) {
|
|
|
|
xenbus_dev_fatal(sc->xbd_dev, error,
|
|
|
|
"writing %s/%s",
|
|
|
|
xenbus_get_node(sc->xbd_dev),
|
|
|
|
ring_ref_name);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2011-09-21 00:02:44 +00:00
|
|
|
|
2013-08-29 19:52:18 +00:00
|
|
|
error = xen_intr_alloc_and_bind_local_port(sc->xbd_dev,
|
|
|
|
xenbus_get_otherend_id(sc->xbd_dev), NULL, xbd_int, sc,
|
|
|
|
INTR_TYPE_BIO | INTR_MPSAFE, &sc->xen_intr_handle);
|
2013-05-31 22:21:37 +00:00
|
|
|
if (error) {
|
|
|
|
xenbus_dev_fatal(sc->xbd_dev, error,
|
Implement vector callback for PVHVM and unify event channel implementations
Re-structure Xen HVM support so that:
- Xen is detected and hypercalls can be performed very
early in system startup.
- Xen interrupt services are implemented using FreeBSD's native
interrupt delivery infrastructure.
- the Xen interrupt service implementation is shared between PV
and HVM guests.
- Xen interrupt handlers can optionally use a filter handler
in order to avoid the overhead of dispatch to an interrupt
thread.
- interrupt load can be distributed among all available CPUs.
- the overhead of accessing the emulated local and I/O apics
on HVM is removed for event channel port events.
- a similar optimization can eventually, and fairly easily,
be used to optimize MSI.
Early Xen detection, HVM refactoring, PVHVM interrupt infrastructure,
and misc Xen cleanups:
Sponsored by: Spectra Logic Corporation
Unification of PV & HVM interrupt infrastructure, bug fixes,
and misc Xen cleanups:
Submitted by: Roger Pau Monné
Sponsored by: Citrix Systems R&D
sys/x86/x86/local_apic.c:
sys/amd64/include/apicvar.h:
sys/i386/include/apicvar.h:
sys/amd64/amd64/apic_vector.S:
sys/i386/i386/apic_vector.s:
sys/amd64/amd64/machdep.c:
sys/i386/i386/machdep.c:
sys/i386/xen/exception.s:
sys/x86/include/segments.h:
Reserve IDT vector 0x93 for the Xen event channel upcall
interrupt handler. On Hypervisors that support the direct
vector callback feature, we can request that this vector be
called directly by an injected HVM interrupt event, instead
of a simulated PCI interrupt on the Xen platform PCI device.
This avoids all of the overhead of dealing with the emulated
I/O APIC and local APIC. It also means that the Hypervisor
can inject these events on any CPU, allowing upcalls for
different ports to be handled in parallel.
sys/amd64/amd64/mp_machdep.c:
sys/i386/i386/mp_machdep.c:
Map Xen per-vcpu area during AP startup.
sys/amd64/include/intr_machdep.h:
sys/i386/include/intr_machdep.h:
Increase the FreeBSD IRQ vector table to include space
for event channel interrupt sources.
sys/amd64/include/pcpu.h:
sys/i386/include/pcpu.h:
Remove Xen HVM per-cpu variable data. These fields are now
allocated via the dynamic per-cpu scheme. See xen_intr.c
for details.
sys/amd64/include/xen/hypercall.h:
sys/dev/xen/blkback/blkback.c:
sys/i386/include/xen/xenvar.h:
sys/i386/xen/clock.c:
sys/i386/xen/xen_machdep.c:
sys/xen/gnttab.c:
Prefer FreeBSD primatives to Linux ones in Xen support code.
sys/amd64/include/xen/xen-os.h:
sys/i386/include/xen/xen-os.h:
sys/xen/xen-os.h:
sys/dev/xen/balloon/balloon.c:
sys/dev/xen/blkback/blkback.c:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/console/xencons_ring.c:
sys/dev/xen/control/control.c:
sys/dev/xen/netback/netback.c:
sys/dev/xen/netfront/netfront.c:
sys/dev/xen/xenpci/xenpci.c:
sys/i386/i386/machdep.c:
sys/i386/include/pmap.h:
sys/i386/include/xen/xenfunc.h:
sys/i386/isa/npx.c:
sys/i386/xen/clock.c:
sys/i386/xen/mp_machdep.c:
sys/i386/xen/mptable.c:
sys/i386/xen/xen_clock_util.c:
sys/i386/xen/xen_machdep.c:
sys/i386/xen/xen_rtc.c:
sys/xen/evtchn/evtchn_dev.c:
sys/xen/features.c:
sys/xen/gnttab.c:
sys/xen/gnttab.h:
sys/xen/hvm.h:
sys/xen/xenbus/xenbus.c:
sys/xen/xenbus/xenbus_if.m:
sys/xen/xenbus/xenbusb_front.c:
sys/xen/xenbus/xenbusvar.h:
sys/xen/xenstore/xenstore.c:
sys/xen/xenstore/xenstore_dev.c:
sys/xen/xenstore/xenstorevar.h:
Pull common Xen OS support functions/settings into xen/xen-os.h.
sys/amd64/include/xen/xen-os.h:
sys/i386/include/xen/xen-os.h:
sys/xen/xen-os.h:
Remove constants, macros, and functions unused in FreeBSD's Xen
support.
sys/xen/xen-os.h:
sys/i386/xen/xen_machdep.c:
sys/x86/xen/hvm.c:
Introduce new functions xen_domain(), xen_pv_domain(), and
xen_hvm_domain(). These are used in favor of #ifdefs so that
FreeBSD can dynamically detect and adapt to the presence of
a hypervisor. The goal is to have an HVM optimized GENERIC,
but more is necessary before this is possible.
sys/amd64/amd64/machdep.c:
sys/dev/xen/xenpci/xenpcivar.h:
sys/dev/xen/xenpci/xenpci.c:
sys/x86/xen/hvm.c:
sys/sys/kernel.h:
Refactor magic ioport, Hypercall table and Hypervisor shared
information page setup, and move it to a dedicated HVM support
module.
HVM mode initialization is now triggered during the
SI_SUB_HYPERVISOR phase of system startup. This currently
occurs just after the kernel VM is fully setup which is
just enough infrastructure to allow the hypercall table
and shared info page to be properly mapped.
sys/xen/hvm.h:
sys/x86/xen/hvm.c:
Add definitions and a method for configuring Hypervisor event
delievery via a direct vector callback.
sys/amd64/include/xen/xen-os.h:
sys/x86/xen/hvm.c:
sys/conf/files:
sys/conf/files.amd64:
sys/conf/files.i386:
Adjust kernel build to reflect the refactoring of early
Xen startup code and Xen interrupt services.
sys/dev/xen/blkback/blkback.c:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
sys/dev/xen/control/control.c:
sys/dev/xen/evtchn/evtchn_dev.c:
sys/dev/xen/netback/netback.c:
sys/dev/xen/netfront/netfront.c:
sys/xen/xenstore/xenstore.c:
sys/xen/evtchn/evtchn_dev.c:
sys/dev/xen/console/console.c:
sys/dev/xen/console/xencons_ring.c
Adjust drivers to use new xen_intr_*() API.
sys/dev/xen/blkback/blkback.c:
Since blkback defers all event handling to a taskqueue,
convert this task queue to a "fast" taskqueue, and schedule
it via an interrupt filter. This avoids an unnecessary
ithread context switch.
sys/xen/xenstore/xenstore.c:
The xenstore driver is MPSAFE. Indicate as much when
registering its interrupt handler.
sys/xen/xenbus/xenbus.c:
sys/xen/xenbus/xenbusvar.h:
Remove unused event channel APIs.
sys/xen/evtchn.h:
Remove all kernel Xen interrupt service API definitions
from this file. It is now only used for structure and
ioctl definitions related to the event channel userland
device driver.
Update the definitions in this file to match those from
NetBSD. Implementing this interface will be necessary for
Dom0 support.
sys/xen/evtchn/evtchnvar.h:
Add a header file for implemenation internal APIs related
to managing event channels event delivery. This is used
to allow, for example, the event channel userland device
driver to access low-level routines that typical kernel
consumers of event channel services should never access.
sys/xen/interface/event_channel.h:
sys/xen/xen_intr.h:
Standardize on the evtchn_port_t type for referring to
an event channel port id. In order to prevent low-level
event channel APIs from leaking to kernel consumers who
should not have access to this data, the type is defined
twice: Once in the Xen provided event_channel.h, and again
in xen/xen_intr.h. The double declaration is protected by
__XEN_EVTCHN_PORT_DEFINED__ to ensure it is never declared
twice within a given compilation unit.
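Illustrative shape of that guard (the underlying 32-bit type is an assumption here; the guard macro name is the one given above):
#ifndef __XEN_EVTCHN_PORT_DEFINED__
typedef uint32_t evtchn_port_t;        /* event channel port id */
#define __XEN_EVTCHN_PORT_DEFINED__ 1
#endif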
sys/xen/xen_intr.h:
sys/xen/evtchn/evtchn.c:
sys/x86/xen/xen_intr.c:
sys/dev/xen/xenpci/evtchn.c:
sys/dev/xen/xenpci/xenpcivar.h:
New implementation of Xen interrupt services. This is
similar in many respects to the i386 PV implementation with
the exception that events bound to event channel ports
(i.e. not IPI, virtual IRQ, or physical IRQ) are further
optimized to avoid mask/unmask operations that aren't
necessary for these edge triggered events.
Stubs exist for supporting physical IRQ binding, but will
need additional work before this implementation can be
fully shared between PV and HVM.
sys/amd64/amd64/mp_machdep.c:
sys/i386/i386/mp_machdep.c:
sys/i386/xen/mp_machdep.c
sys/x86/xen/hvm.c:
Add support for placing vcpu_info into an arbitrary memory
page instead of using HYPERVISOR_shared_info->vcpu_info.
This allows the creation of domains with more than 32 vcpus.
sys/i386/i386/machdep.c:
sys/i386/xen/clock.c:
sys/i386/xen/xen_machdep.c:
sys/i386/xen/exception.s:
Add support for the new event channel implementation.
2013-08-29 19:52:18 +00:00
            "xen_intr_alloc_and_bind_local_port failed");
2013-05-31 22:21:37 +00:00
        return (error);
    }

    return (0);
2008-08-12 20:01:57 +00:00
}

2013-06-01 04:02:51 +00:00
static void
xbd_free_ring(struct xbd_softc *sc)
{
    int i;

    if (sc->xbd_ring.sring == NULL)
        return;

    for (i = 0; i < sc->xbd_ring_pages; i++) {
        if (sc->xbd_ring_ref[i] != GRANT_REF_INVALID) {
            gnttab_end_foreign_access_ref(sc->xbd_ring_ref[i]);
            sc->xbd_ring_ref[i] = GRANT_REF_INVALID;
        }
    }
    free(sc->xbd_ring.sring, M_XENBLOCKFRONT);
    sc->xbd_ring.sring = NULL;
}
2013-05-31 22:21:37 +00:00
/*-------------------------- Initialization/Teardown -------------------------*/
2013-06-26 20:39:07 +00:00
static int
xbd_feature_string(struct xbd_softc *sc, char *features, size_t len)
{
    struct sbuf sb;
    int feature_cnt;

    sbuf_new(&sb, features, len, SBUF_FIXEDLEN);

    feature_cnt = 0;
    if ((sc->xbd_flags & XBDF_FLUSH) != 0) {
        sbuf_printf(&sb, "flush");
        feature_cnt++;
    }

    if ((sc->xbd_flags & XBDF_BARRIER) != 0) {
        if (feature_cnt != 0)
            sbuf_printf(&sb, ", ");
        sbuf_printf(&sb, "write_barrier");
        feature_cnt++;
    }

2016-04-03 11:18:20 +00:00
    if ((sc->xbd_flags & XBDF_DISCARD) != 0) {
        if (feature_cnt != 0)
            sbuf_printf(&sb, ", ");
        sbuf_printf(&sb, "discard");
        feature_cnt++;
    }

    if ((sc->xbd_flags & XBDF_PERSISTENT) != 0) {
        if (feature_cnt != 0)
            sbuf_printf(&sb, ", ");
        sbuf_printf(&sb, "persistent_grants");
        feature_cnt++;
    }

2013-06-26 20:39:07 +00:00
    (void) sbuf_finish(&sb);
    return (sbuf_len(&sb));
}

static int
xbd_sysctl_features(SYSCTL_HANDLER_ARGS)
{
    char features[80];
    struct xbd_softc *sc = arg1;
    int error;
    int len;

    error = sysctl_wire_old_buffer(req, 0);
    if (error != 0)
        return (error);

    len = xbd_feature_string(sc, features, sizeof(features));

    /* len is -1 on error, which will make the SYSCTL_OUT a no-op. */
    return (SYSCTL_OUT(req, features, len + 1/*NUL*/));
}

2013-05-31 22:21:37 +00:00
static void
xbd_setup_sysctl(struct xbd_softc *xbd)
2008-08-12 20:01:57 +00:00
{
2013-05-31 22:21:37 +00:00
    struct sysctl_ctx_list *sysctl_ctx = NULL;
    struct sysctl_oid *sysctl_tree = NULL;
2013-06-26 20:39:07 +00:00
    struct sysctl_oid_list *children;
2013-05-31 22:21:37 +00:00

    sysctl_ctx = device_get_sysctl_ctx(xbd->xbd_dev);
    if (sysctl_ctx == NULL)
        return;
2008-08-12 20:01:57 +00:00

2013-05-31 22:21:37 +00:00
    sysctl_tree = device_get_sysctl_tree(xbd->xbd_dev);
    if (sysctl_tree == NULL)
        return;
2008-08-12 20:01:57 +00:00

2013-06-26 20:39:07 +00:00
    children = SYSCTL_CHILDREN(sysctl_tree);
    SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
2013-05-31 22:21:37 +00:00
        "max_requests", CTLFLAG_RD, &xbd->xbd_max_requests, -1,
        "maximum outstanding requests (negotiated)");

2013-06-26 20:39:07 +00:00
    SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
2013-05-31 22:21:37 +00:00
        "max_request_segments", CTLFLAG_RD,
        &xbd->xbd_max_request_segments, 0,
        "maximum number of pages per requests (negotiated)");

2013-06-26 20:39:07 +00:00
    SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
2013-05-31 22:21:37 +00:00
        "max_request_size", CTLFLAG_RD, &xbd->xbd_max_request_size, 0,
        "maximum size in bytes of a request (negotiated)");

2013-06-26 20:39:07 +00:00
    SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
2013-05-31 22:21:37 +00:00
        "ring_pages", CTLFLAG_RD, &xbd->xbd_ring_pages, 0,
        "communication channel pages (negotiated)");
2013-06-26 20:39:07 +00:00

    SYSCTL_ADD_PROC(sysctl_ctx, children, OID_AUTO,
        "features", CTLTYPE_STRING|CTLFLAG_RD, xbd, 0,
        xbd_sysctl_features, "A", "protocol features (negotiated)");
2008-08-12 20:01:57 +00:00
}

2013-05-31 22:21:37 +00:00
/*
 * Translate Linux major/minor to an appropriate name and unit
 * number. For HVM guests, this allows us to use the same drive names
 * with blkfront as the emulated drives, easing transition slightly.
 */
Improve the Xen para-virtualized device infrastructure of FreeBSD:
o Add support for backend devices (e.g. blkback)
o Implement extensions to the Xen para-virtualized block API to allow
for larger and more outstanding I/Os.
o Import a completely rewritten block back driver with support for fronting
I/O to both raw devices and files.
o General cleanup and documentation of the XenBus and XenStore support code.
o Robustness and performance updates for the block front driver.
o Fixes to the netfront driver.
Sponsored by: Spectra Logic Corporation
sys/xen/xenbus/init.txt:
Deleted: This file explains the Linux method for XenBus device
enumeration and thus does not apply to FreeBSD's NewBus approach.
sys/xen/xenbus/xenbus_probe_backend.c:
Deleted: Linux version of backend XenBus service routines. It
was never ported to FreeBSD. See xenbusb.c, xenbusb_if.m,
xenbusb_front.c xenbusb_back.c for details of FreeBSD's XenBus
support.
sys/xen/xenbus/xenbusvar.h:
sys/xen/xenbus/xenbus_xs.c:
sys/xen/xenbus/xenbus_comms.c:
sys/xen/xenbus/xenbus_comms.h:
sys/xen/xenstore/xenstorevar.h:
sys/xen/xenstore/xenstore.c:
Split XenStore into its own tree. XenBus is a software layer built
on top of XenStore. The old arrangement and the naming of some
structures and functions blurred these lines making it difficult to
discern what services are provided by which layer and at what times
these services are available (e.g. during system startup and shutdown).
sys/xen/xenbus/xenbus_client.c:
sys/xen/xenbus/xenbus.c:
sys/xen/xenbus/xenbus_probe.c:
sys/xen/xenbus/xenbusb.c:
sys/xen/xenbus/xenbusb.h:
Split up XenBus code into methods available for use by client
drivers (xenbus.c) and code used by the XenBus "bus code" to
enumerate, attach, detach, and service bus drivers.
sys/xen/reboot.c:
sys/dev/xen/control/control.c:
Add a XenBus front driver for handling shutdown, reboot, suspend, and
resume events published in the XenStore. Move all PV suspend/reboot
support from reboot.c into this driver.
sys/xen/blkif.h:
New file from Xen vendor with macros and structures used by
a block back driver to service requests from a VM running a
different ABI (e.g. amd64 back with i386 front).
sys/conf/files:
Adjust kernel build spec for new XenBus/XenStore layout and added
Xen functionality.
sys/dev/xen/balloon/balloon.c:
sys/dev/xen/netfront/netfront.c:
sys/dev/xen/blkfront/blkfront.c:
sys/xen/xenbus/...
sys/xen/xenstore/...
o Rename XenStore APIs and structures from xenbus_* to xs_*.
o Adjust to use of M_XENBUS and M_XENSTORE malloc types for allocation
of objects returned by these APIs.
o Adjust for changes in the bus interface for Xen drivers.
sys/xen/xenbus/...
sys/xen/xenstore/...
Add Doxygen comments for these interfaces and the code that
implements them.
sys/dev/xen/blkback/blkback.c:
o Rewrite the Block Back driver to attach properly via newbus,
operate correctly in both PV and HVM mode regardless of domain
(e.g. can be in a DOM other than 0), and to deal with the latest
metadata available in XenStore for block devices.
o Allow users to specify a file as a backend to blkback, in addition
to character devices. Use the namei lookup of the backend path
to automatically configure, based on file type, the appropriate
backend method.
The current implementation is limited to a single outstanding I/O
at a time to file backed storage.
sys/dev/xen/blkback/blkback.c:
sys/xen/interface/io/blkif.h:
sys/xen/blkif.h:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
Extend the Xen blkif API: Negotiable request size and number of
requests.
This change extends the information recorded in the XenStore
allowing block front/back devices to negotiate for optimal I/O
parameters. This has been achieved without sacrificing backward
compatibility with drivers that are unaware of these protocol
enhancements. The extensions center around the connection protocol
which now includes these additions:
o The back-end device publishes its maximum supported values for:
request I/O size, the number of page segments that can be
associated with a request, the maximum number of requests that
can be concurrently active, and the maximum number of pages that
can be in the shared request ring. These values are published
before the back-end enters the XenbusStateInitWait state.
o The front-end waits for the back-end to enter either the InitWait
or Initialize state. At this point, the front-end limits its
own capabilities to the lesser of the values it finds published
by the back-end, its own maximums, or, should any back-end data
be missing in the store, the values supported by the original
protocol. It then initializes its internal data structures,
including allocation of the shared ring, publishes its maximum
capabilities to the XenStore, and transitions to the Initialized
state.
o The back-end waits for the front-end to enter the Initialized
state. At this point, the back-end limits its own capabilities
to the lesser of the values it finds published by the front-end,
its own maximums, or, should any front-end data be missing in
the store, the values supported by the original protocol. It
then initializes its internal data structures, attaches to the
shared ring, and transitions to the Connected state.
o The front-end waits for the back-end to enter the Connected
state, transitions itself to the Connected state, and can
commence I/O.
Although an updated front-end driver must be aware of the back-end's
InitWait state, the back-end has been coded such that it can
tolerate a front-end that skips this step and transitions directly
to the Initialized state without waiting for the back-end.
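A condensed, hypothetical front-end sketch of that negotiation, using the XenStore helpers named elsewhere in this log (the XenStore node names, fallback limits, and the function itself are illustrative assumptions, not the driver's actual code):
static int
example_negotiate(device_t dev)
{
    const char *otherend = xenbus_get_otherend_path(dev);
    unsigned int max_requests, max_segments;

    /*
     * Read the back-end's advertised maxima; fall back to the original
     * protocol's limits (values assumed here) if the nodes are missing.
     */
    if (xs_gather(XST_NIL, otherend,
        "max-requests", "%u", &max_requests,
        "max-request-segments", "%u", &max_segments, NULL) != 0) {
        max_requests = 32;
        max_segments = 11;
    }

    /* Clamp to our own maximums. */
    max_requests = MIN(max_requests, 256);
    max_segments = MIN(max_segments, (MAXPHYS / PAGE_SIZE) + 1);

    /* Publish what we settled on and advance to the Initialized state. */
    xs_printf(XST_NIL, xenbus_get_node(dev),
        "max-requests", "%u", max_requests);
    xs_printf(XST_NIL, xenbus_get_node(dev),
        "max-request-segments", "%u", max_segments);
    xenbus_set_state(dev, XenbusStateInitialised);
    return (0);
}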
sys/xen/interface/io/blkif.h:
o Increase BLKIF_MAX_SEGMENTS_PER_REQUEST to 255. This is
the maximum number possible without changing the blkif
request header structure (nr_segs is a uint8_t).
o Add two new constants:
BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK, and
BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK. These respectively
indicate the number of segments that can fit in the first
ring-buffer entry of a request, and for each subsequent
(sg element only) ring-buffer entry associated with the
"header" ring-buffer entry of the request.
o Add the blkif_request_segment_t typedef for segment
elements.
o Add the BLKRING_GET_SG_REQUEST() macro which wraps the
RING_GET_REQUEST() macro and returns a properly cast
pointer to an array of blkif_request_segment_ts.
o Add the BLKIF_SEGS_TO_BLOCKS() macro which calculates the
number of ring entries that will be consumed by a blkif
request with the given number of segments.
sys/xen/blkif.h:
o Update for changes in interface/io/blkif.h macros.
o Update the BLKIF_MAX_RING_REQUESTS() macro to take the
ring size as an argument to allow this calculation on
multi-page rings.
o Add a companion macro to BLKIF_MAX_RING_REQUESTS(),
BLKIF_RING_PAGES(). This macro determines the number of
ring pages required in order to support a ring with the
supplied number of request blocks.
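Illustrative arithmetic behind those macros (this is not the vendor code; the helper below and the example numbers are assumptions, e.g. a 128 KiB MAXPHYS gives (MAXPHYS / PAGE_SIZE) + 1 = 33 segments with 4 KiB pages):
/*
 * Ring entries ("blocks") consumed by one request carrying nsegs segments,
 * when the header entry holds segs_per_header segments and each additional
 * entry holds segs_per_block more.
 */
static inline u_int
example_segs_to_blocks(u_int nsegs, u_int segs_per_header, u_int segs_per_block)
{
    if (nsegs <= segs_per_header)
        return (1);
    return (1 + howmany(nsegs - segs_per_header, segs_per_block));
}
BLKIF_RING_PAGES() then rounds the footprint of the negotiated number of request blocks, plus the ring bookkeeping header, up to whole pages.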
sys/dev/xen/blkback/blkback.c:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
o Negotiate with the other-end with the following limits:
Request Size: MAXPHYS
Max Segments: (MAXPHYS/PAGE_SIZE) + 1
Max Requests: 256
Max Ring Pages: Sufficient to support Max Requests with
Max Segments.
o Dynamically allocate request pools and segments-per-request.
o Update ring allocation/attachment code to support a
multi-page shared ring.
o Update routines that access the shared ring to handle
multi-block requests.
sys/dev/xen/blkfront/blkfront.c:
o Track blkfront allocations in a blkfront driver specific
malloc pool.
o Strip out XenStore transaction retry logic in the
connection code. Transactions only need to be used when
the update to multiple XenStore nodes must be atomic.
That is not the case here.
o Fully disable blkif_resume() until it can be fixed
properly (it didn't work before this change).
o Destroy bus-dma objects during device instance tear-down.
o Properly handle backend devices with power-of-2 sector
sizes larger than 512 bytes.
sys/dev/xen/blkback/blkback.c:
Advertise support for and implement the BLKIF_OP_WRITE_BARRIER
and BLKIF_OP_FLUSH_DISKCACHE blkif opcodes using BIO_FLUSH and
the BIO_ORDERED attribute of bios.
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
Fix various bugs in blkfront.
o gnttab_alloc_grant_references() returns 0 for success and
non-zero for failure. The check for < 0 is a leftover
Linuxism.
o When we negotiate with blkback and have to reduce some of our
capabilities, print out the original and reduced capability before
changing the local capability. So the user now gets the correct
information.
o Fix blkif_restart_queue_callback() formatting. Make sure we hold
the mutex in that function before calling xb_startio().
o Fix a couple of KASSERT()s.
o Fix a check in the xb_remove_* macro to be a little more specific.
sys/xen/gnttab.h:
sys/xen/gnttab.c:
Define GNTTAB_LIST_END publicly as GRANT_REF_INVALID.
sys/dev/xen/netfront/netfront.c:
Use GRANT_REF_INVALID instead of driver private definitions of the
same constant.
sys/xen/gnttab.h:
sys/xen/gnttab.c:
Add the gnttab_end_foreign_access_references() API.
This API allows a client to batch the release of an array of grant
references, instead of coding a private for loop. The implementation
takes advantage of this batching to reduce lock overhead to one
acquisition and release per-batch instead of per-freed grant reference.
While here, reduce the duration the gnttab_list_lock is held during
gnttab_free_grant_references() operations. The search to find the
tail of the incoming free list does not rely on global state and so
can be performed without holding the lock.
sys/dev/xen/xenpci/evtchn.c:
sys/dev/xen/evtchn/evtchn.c:
sys/xen/xen_intr.h:
o Implement the bind_interdomain_evtchn_to_irqhandler API for HVM mode.
This allows an HVM domain to serve back end devices to other domains.
This API is already implemented for PV mode.
o Synchronize the API between HVM and PV.
sys/dev/xen/xenpci/xenpci.c:
o Scan the full region of CPUID space in which the Xen VMM interface
may be implemented. On systems using SuSE as a Dom0 where the
Viridian API is also exported, the VMM interface is above the region
we used to search.
o Pass through bus_alloc_resource() calls so that XenBus drivers
attaching on an HVM system can allocate unused physical address
space from the nexus. The block back driver makes use of this
facility.
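A hedged sketch of that wider scan (the 0x40000000 leaf base and the "XenVMMXenVMM" signature are standard Xen conventions; the upper bound, step, and function name are illustrative assumptions):
#include <sys/param.h>
#include <sys/systm.h>          /* memcmp() */
#include <machine/cpufunc.h>    /* do_cpuid() */

static uint32_t
example_find_xen_cpuid_base(void)
{
    uint32_t base, regs[4];

    for (base = 0x40000000; base < 0x40010000; base += 0x100) {
        do_cpuid(base, regs);
        /* The hypervisor signature is returned in ebx/ecx/edx. */
        if (memcmp(&regs[1], "XenVMMXenVMM", 12) == 0)
            return (base);
    }
    return (0);
}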
sys/i386/xen/xen_machdep.c:
Use the correct type for accessing the statically mapped xenstore
metadata.
sys/xen/interface/hvm/params.h:
sys/xen/xenstore/xenstore.c:
Move hvm_get_parameter() to the correct global header file instead
of as a private method to the XenStore.
sys/xen/interface/io/protocols.h:
Sync with vendor.
sys/xen/interface/io/ring.h:
Add macro for calculating the number of ring pages needed for an N
deep ring.
To avoid duplication within the macros, create and use the new
__RING_HEADER_SIZE() macro. This macro calculates the size of the
ring book keeping struct (producer/consumer indexes, etc.) that
resides at the head of the ring.
Add the __RING_PAGES() macro which calculates the number of shared
ring pages required to support a ring with the given number of
requests.
These APIs are used to support the multi-page ring version of the
Xen block API.
sys/xen/interface/io/xenbus.h:
Add Comments.
sys/xen/xenbus/...
o Refactor the FreeBSD XenBus support code to allow for both front and
backend device attachments.
o Make use of new config_intr_hook capabilities to allow front and back
devices to be probed/attached in parallel.
o Fix bugs in probe/attach state machine that could cause the system to
hang when confronted with a failure either in the local domain or in
a remote domain to which one of our driver instances is attaching.
o Publish all required state to the XenStore on device detach and
failure. The majority of the missing functionality was for serving
as a back end since the typical "hot-plug" scripts in Dom0 don't
handle the case of cleaning up for a "service domain" that is not
itself.
o Add dynamic sysctl nodes exposing the generic ivars of
XenBus devices.
o Add doxygen style comments to the majority of the code.
o Cleanup types, formatting, etc.
sys/xen/xenbus/xenbusb.c:
Common code used by both front and back XenBus busses.
sys/xen/xenbus/xenbusb_if.m:
Method definitions for a XenBus bus.
sys/xen/xenbus/xenbusb_front.c:
sys/xen/xenbus/xenbusb_back.c:
XenBus bus specialization for front and back devices.
MFC after: 1 month
2010-10-19 20:53:30 +00:00
static void
2013-05-31 22:21:37 +00:00
xbd_vdevice_to_unit(uint32_t vdevice, int *unit, const char **name)
2008-08-12 20:01:57 +00:00
{
2013-05-31 22:21:37 +00:00
    static struct vdev_info {
        int major;
        int shift;
        int base;
        const char *name;
    } info[] = {
        {3, 6, 0, "ada"},       /* ide0 */
        {22, 6, 2, "ada"},      /* ide1 */
        {33, 6, 4, "ada"},      /* ide2 */
        {34, 6, 6, "ada"},      /* ide3 */
        {56, 6, 8, "ada"},      /* ide4 */
        {57, 6, 10, "ada"},     /* ide5 */
        {88, 6, 12, "ada"},     /* ide6 */
        {89, 6, 14, "ada"},     /* ide7 */
        {90, 6, 16, "ada"},     /* ide8 */
        {91, 6, 18, "ada"},     /* ide9 */

        {8, 4, 0, "da"},        /* scsi disk0 */
        {65, 4, 16, "da"},      /* scsi disk1 */
        {66, 4, 32, "da"},      /* scsi disk2 */
        {67, 4, 48, "da"},      /* scsi disk3 */
        {68, 4, 64, "da"},      /* scsi disk4 */
        {69, 4, 80, "da"},      /* scsi disk5 */
        {70, 4, 96, "da"},      /* scsi disk6 */
        {71, 4, 112, "da"},     /* scsi disk7 */
        {128, 4, 128, "da"},    /* scsi disk8 */
        {129, 4, 144, "da"},    /* scsi disk9 */
        {130, 4, 160, "da"},    /* scsi disk10 */
        {131, 4, 176, "da"},    /* scsi disk11 */
        {132, 4, 192, "da"},    /* scsi disk12 */
        {133, 4, 208, "da"},    /* scsi disk13 */
        {134, 4, 224, "da"},    /* scsi disk14 */
        {135, 4, 240, "da"},    /* scsi disk15 */

        {202, 4, 0, "xbd"},     /* xbd */

        {0, 0, 0, NULL},
    };
    int major = vdevice >> 8;
    int minor = vdevice & 0xff;
2010-10-19 20:53:30 +00:00
    int i;
2008-08-12 20:01:57 +00:00

2013-05-31 22:21:37 +00:00
    if (vdevice & (1 << 28)) {
        *unit = (vdevice & ((1 << 28) - 1)) >> 8;
        *name = "xbd";
2011-09-21 00:02:44 +00:00
        return;
    }
2008-08-12 20:01:57 +00:00

2013-05-31 22:21:37 +00:00
    for (i = 0; info[i].major; i++) {
        if (info[i].major == major) {
            *unit = info[i].base + (minor >> info[i].shift);
            *name = info[i].name;
            return;
        }
    }
2010-10-19 20:53:30 +00:00

2013-05-31 22:21:37 +00:00
    *unit = minor >> 4;
    *name = "xbd";
}

int
xbd_instance_create(struct xbd_softc *sc, blkif_sector_t sectors,
2016-04-03 11:18:20 +00:00
    int vdevice, uint16_t vdisk_info, unsigned long sector_size,
    unsigned long phys_sector_size)
2013-05-31 22:21:37 +00:00
{
2013-06-26 20:39:07 +00:00
    char features[80];
2013-05-31 22:21:37 +00:00
    int unit, error = 0;
    const char *name;

    xbd_vdevice_to_unit(vdevice, &unit, &name);

    sc->xbd_unit = unit;

2013-06-26 20:39:07 +00:00
    if (strcmp(name, "xbd") != 0)
2013-05-31 22:21:37 +00:00
        device_printf(sc->xbd_dev, "attaching as %s%d\n", name, unit);

2013-06-26 20:39:07 +00:00
    if (xbd_feature_string(sc, features, sizeof(features)) > 0) {
        device_printf(sc->xbd_dev, "features: %s\n",
            features);
    }

2013-05-31 22:21:37 +00:00
    sc->xbd_disk = disk_alloc();
    sc->xbd_disk->d_unit = sc->xbd_unit;
    sc->xbd_disk->d_open = xbd_open;
    sc->xbd_disk->d_close = xbd_close;
    sc->xbd_disk->d_ioctl = xbd_ioctl;
    sc->xbd_disk->d_strategy = xbd_strategy;
    sc->xbd_disk->d_dump = xbd_dump;
    sc->xbd_disk->d_name = name;
    sc->xbd_disk->d_drv1 = sc;
    sc->xbd_disk->d_sectorsize = sector_size;
2016-04-03 11:18:20 +00:00
    sc->xbd_disk->d_stripesize = phys_sector_size;
    sc->xbd_disk->d_stripeoffset = 0;
2013-05-31 22:21:37 +00:00

    sc->xbd_disk->d_mediasize = sectors * sector_size;
    sc->xbd_disk->d_maxsize = sc->xbd_max_request_size;
2015-11-09 12:22:44 +00:00
    sc->xbd_disk->d_flags = DISKFLAG_UNMAPPED_BIO;
2013-06-26 20:39:07 +00:00
    if ((sc->xbd_flags & (XBDF_FLUSH|XBDF_BARRIER)) != 0) {
        sc->xbd_disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
        device_printf(sc->xbd_dev,
            "synchronize cache commands enabled.\n");
    }
2013-05-31 22:21:37 +00:00
    disk_create(sc->xbd_disk, DISK_VERSION);

    return error;
}

static void
xbd_free(struct xbd_softc *sc)
{
    int i;

    /* Prevent new requests being issued until we fix things up. */
    mtx_lock(&sc->xbd_io_lock);
2013-06-14 17:00:58 +00:00
    sc->xbd_state = XBD_STATE_DISCONNECTED;
2013-05-31 22:21:37 +00:00
    mtx_unlock(&sc->xbd_io_lock);

    /* Free resources associated with old device channel. */
2013-06-01 04:02:51 +00:00
    xbd_free_ring(sc);
2013-05-31 22:21:37 +00:00
    if (sc->xbd_shadow) {

        for (i = 0; i < sc->xbd_max_requests; i++) {
            struct xbd_command *cm;

            cm = &sc->xbd_shadow[i];
            if (cm->cm_sg_refs != NULL) {
                free(cm->cm_sg_refs, M_XENBLOCKFRONT);
                cm->cm_sg_refs = NULL;
            }

2015-07-30 03:50:01 +00:00
            if (cm->cm_indirectionpages != NULL) {
                gnttab_end_foreign_access_references(
                    sc->xbd_max_request_indirectpages,
                    &cm->cm_indirectionrefs[0]);
                contigfree(cm->cm_indirectionpages, PAGE_SIZE *
                    sc->xbd_max_request_indirectpages,
                    M_XENBLOCKFRONT);
                cm->cm_indirectionpages = NULL;
            }

2013-05-31 22:21:37 +00:00
            bus_dmamap_destroy(sc->xbd_io_dmat, cm->cm_map);
        }
        free(sc->xbd_shadow, M_XENBLOCKFRONT);
        sc->xbd_shadow = NULL;

        bus_dma_tag_destroy(sc->xbd_io_dmat);

2013-06-14 17:00:58 +00:00
        xbd_initq_cm(sc, XBD_Q_FREE);
        xbd_initq_cm(sc, XBD_Q_READY);
        xbd_initq_cm(sc, XBD_Q_COMPLETE);
2013-05-31 22:21:37 +00:00
    }

Implement vector callback for PVHVM and unify event channel implementations
Re-structure Xen HVM support so that:
- Xen is detected and hypercalls can be performed very
early in system startup.
- Xen interrupt services are implemented using FreeBSD's native
interrupt delivery infrastructure.
- the Xen interrupt service implementation is shared between PV
and HVM guests.
- Xen interrupt handlers can optionally use a filter handler
in order to avoid the overhead of dispatch to an interrupt
thread.
- interrupt load can be distributed among all available CPUs.
- the overhead of accessing the emulated local and I/O apics
on HVM is removed for event channel port events.
- a similar optimization can eventually, and fairly easily,
be used to optimize MSI.
Early Xen detection, HVM refactoring, PVHVM interrupt infrastructure,
and misc Xen cleanups:
Sponsored by: Spectra Logic Corporation
Unification of PV & HVM interrupt infrastructure, bug fixes,
and misc Xen cleanups:
Submitted by: Roger Pau Monné
Sponsored by: Citrix Systems R&D
sys/x86/x86/local_apic.c:
sys/amd64/include/apicvar.h:
sys/i386/include/apicvar.h:
sys/amd64/amd64/apic_vector.S:
sys/i386/i386/apic_vector.s:
sys/amd64/amd64/machdep.c:
sys/i386/i386/machdep.c:
sys/i386/xen/exception.s:
sys/x86/include/segments.h:
Reserve IDT vector 0x93 for the Xen event channel upcall
interrupt handler. On Hypervisors that support the direct
vector callback feature, we can request that this vector be
called directly by an injected HVM interrupt event, instead
of a simulated PCI interrupt on the Xen platform PCI device.
This avoids all of the overhead of dealing with the emulated
I/O APIC and local APIC. It also means that the Hypervisor
can inject these events on any CPU, allowing upcalls for
different ports to be handled in parallel.
sys/amd64/amd64/mp_machdep.c:
sys/i386/i386/mp_machdep.c:
Map Xen per-vcpu area during AP startup.
sys/amd64/include/intr_machdep.h:
sys/i386/include/intr_machdep.h:
Increase the FreeBSD IRQ vector table to include space
for event channel interrupt sources.
sys/amd64/include/pcpu.h:
sys/i386/include/pcpu.h:
Remove Xen HVM per-cpu variable data. These fields are now
allocated via the dynamic per-cpu scheme. See xen_intr.c
for details.
sys/amd64/include/xen/hypercall.h:
sys/dev/xen/blkback/blkback.c:
sys/i386/include/xen/xenvar.h:
sys/i386/xen/clock.c:
sys/i386/xen/xen_machdep.c:
sys/xen/gnttab.c:
Prefer FreeBSD primatives to Linux ones in Xen support code.
sys/amd64/include/xen/xen-os.h:
sys/i386/include/xen/xen-os.h:
sys/xen/xen-os.h:
sys/dev/xen/balloon/balloon.c:
sys/dev/xen/blkback/blkback.c:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/console/xencons_ring.c:
sys/dev/xen/control/control.c:
sys/dev/xen/netback/netback.c:
sys/dev/xen/netfront/netfront.c:
sys/dev/xen/xenpci/xenpci.c:
sys/i386/i386/machdep.c:
sys/i386/include/pmap.h:
sys/i386/include/xen/xenfunc.h:
sys/i386/isa/npx.c:
sys/i386/xen/clock.c:
sys/i386/xen/mp_machdep.c:
sys/i386/xen/mptable.c:
sys/i386/xen/xen_clock_util.c:
sys/i386/xen/xen_machdep.c:
sys/i386/xen/xen_rtc.c:
sys/xen/evtchn/evtchn_dev.c:
sys/xen/features.c:
sys/xen/gnttab.c:
sys/xen/gnttab.h:
sys/xen/hvm.h:
sys/xen/xenbus/xenbus.c:
sys/xen/xenbus/xenbus_if.m:
sys/xen/xenbus/xenbusb_front.c:
sys/xen/xenbus/xenbusvar.h:
sys/xen/xenstore/xenstore.c:
sys/xen/xenstore/xenstore_dev.c:
sys/xen/xenstore/xenstorevar.h:
Pull common Xen OS support functions/settings into xen/xen-os.h.
sys/amd64/include/xen/xen-os.h:
sys/i386/include/xen/xen-os.h:
sys/xen/xen-os.h:
Remove constants, macros, and functions unused in FreeBSD's Xen
support.
sys/xen/xen-os.h:
sys/i386/xen/xen_machdep.c:
sys/x86/xen/hvm.c:
Introduce new functions xen_domain(), xen_pv_domain(), and
xen_hvm_domain(). These are used in favor of #ifdefs so that
FreeBSD can dynamically detect and adapt to the presence of
a hypervisor. The goal is to have an HVM optimized GENERIC,
but more is necessary before this is possible.
sys/amd64/amd64/machdep.c:
sys/dev/xen/xenpci/xenpcivar.h:
sys/dev/xen/xenpci/xenpci.c:
sys/x86/xen/hvm.c:
sys/sys/kernel.h:
Refactor magic ioport, Hypercall table and Hypervisor shared
information page setup, and move it to a dedicated HVM support
module.
HVM mode initialization is now triggered during the
SI_SUB_HYPERVISOR phase of system startup. This currently
occurs just after the kernel VM is fully setup which is
just enough infrastructure to allow the hypercall table
and shared info page to be properly mapped.
sys/xen/hvm.h:
sys/x86/xen/hvm.c:
Add definitions and a method for configuring Hypervisor event
delievery via a direct vector callback.
sys/amd64/include/xen/xen-os.h:
sys/x86/xen/hvm.c:
sys/conf/files:
sys/conf/files.amd64:
sys/conf/files.i386:
Adjust kernel build to reflect the refactoring of early
Xen startup code and Xen interrupt services.
sys/dev/xen/blkback/blkback.c:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
sys/dev/xen/control/control.c:
sys/dev/xen/evtchn/evtchn_dev.c:
sys/dev/xen/netback/netback.c:
sys/dev/xen/netfront/netfront.c:
sys/xen/xenstore/xenstore.c:
sys/xen/evtchn/evtchn_dev.c:
sys/dev/xen/console/console.c:
sys/dev/xen/console/xencons_ring.c
Adjust drivers to use new xen_intr_*() API.
sys/dev/xen/blkback/blkback.c:
Since blkback defers all event handling to a taskqueue,
convert this task queue to a "fast" taskqueue, and schedule
it via an interrupt filter. This avoids an unnecessary
ithread context switch.
sys/xen/xenstore/xenstore.c:
The xenstore driver is MPSAFE. Indicate as much when
registering its interrupt handler.
sys/xen/xenbus/xenbus.c:
sys/xen/xenbus/xenbusvar.h:
Remove unused event channel APIs.
sys/xen/evtchn.h:
Remove all kernel Xen interrupt service API definitions
from this file. It is now only used for structure and
ioctl definitions related to the event channel userland
device driver.
Update the definitions in this file to match those from
NetBSD. Implementing this interface will be necessary for
Dom0 support.
sys/xen/evtchn/evtchnvar.h:
Add a header file for implemenation internal APIs related
to managing event channels event delivery. This is used
to allow, for example, the event channel userland device
driver to access low-level routines that typical kernel
consumers of event channel services should never access.
sys/xen/interface/event_channel.h:
sys/xen/xen_intr.h:
Standardize on the evtchn_port_t type for referring to
an event channel port id. In order to prevent low-level
event channel APIs from leaking to kernel consumers who
should not have access to this data, the type is defined
twice: Once in the Xen provided event_channel.h, and again
in xen/xen_intr.h. The double declaration is protected by
__XEN_EVTCHN_PORT_DEFINED__ to ensure it is never declared
twice within a given compilation unit.
sys/xen/xen_intr.h:
sys/xen/evtchn/evtchn.c:
sys/x86/xen/xen_intr.c:
sys/dev/xen/xenpci/evtchn.c:
sys/dev/xen/xenpci/xenpcivar.h:
New implementation of Xen interrupt services. This is
similar in many respects to the i386 PV implementation with
the exception that events for bound to event channel ports
(i.e. not IPI, virtual IRQ, or physical IRQ) are further
optimized to avoid mask/unmask operations that aren't
necessary for these edge triggered events.
Stubs exist for supporting physical IRQ binding, but will
need additional work before this implementation can be
fully shared between PV and HVM.
sys/amd64/amd64/mp_machdep.c:
sys/i386/i386/mp_machdep.c:
sys/i386/xen/mp_machdep.c
sys/x86/xen/hvm.c:
Add support for placing vcpu_info into an arbitrary memory
page instead of using HYPERVISOR_shared_info->vcpu_info.
This allows the creation of domains with more than 32 vcpus.
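The Xen mechanism behind this is the VCPUOP_register_vcpu_info hypercall
(structure and constant from xen/interface/vcpu.h). A minimal sketch,
assuming an HVM guest, so a guest frame number can be used directly, and
using the illustrative names xen_vcpu_info and xen_setup_vcpu_info:

static struct vcpu_info xen_vcpu_info[MAXCPU] __aligned(64);

static void
xen_setup_vcpu_info(int cpu)
{
        struct vcpu_register_vcpu_info info;
        vm_paddr_t pa;

        bzero(&info, sizeof(info));
        pa = vtophys(&xen_vcpu_info[cpu]);
        info.mfn = pa >> PAGE_SHIFT;
        info.offset = pa & PAGE_MASK;

        if (HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info) != 0)
                panic("vcpu_info registration failed for CPU %d", cpu);
}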
sys/i386/i386/machdep.c:
sys/i386/xen/clock.c:
sys/i386/xen/xen_machdep.c:
sys/i386/xen/exception.s:
Add support for the new event channel implementation.
	xen_intr_unbind(&sc->xen_intr_handle);
}

/*--------------------------- State Change Handlers --------------------------*/
static void
xbd_initialize(struct xbd_softc *sc)
{
	const char *otherend_path;
	const char *node_path;
	uint32_t max_ring_page_order;
	int error;

	if (xenbus_get_state(sc->xbd_dev) != XenbusStateInitialising) {
		/* Initialization has already been performed. */
		return;
	}

	/*
	 * Protocol defaults valid even if negotiation for a
	 * setting fails.
	 */
	max_ring_page_order = 0;
	sc->xbd_ring_pages = 1;
Improve the Xen para-virtualized device infrastructure of FreeBSD:
o Add support for backend devices (e.g. blkback)
o Implement extensions to the Xen para-virtualized block API to allow
for larger and more outstanding I/Os.
o Import a completely rewritten block back driver with support for fronting
I/O to both raw devices and files.
o General cleanup and documentation of the XenBus and XenStore support code.
o Robustness and performance updates for the block front driver.
o Fixes to the netfront driver.
Sponsored by: Spectra Logic Corporation
sys/xen/xenbus/init.txt:
Deleted: This file explains the Linux method for XenBus device
enumeration and thus does not apply to FreeBSD's NewBus approach.
sys/xen/xenbus/xenbus_probe_backend.c:
Deleted: Linux version of backend XenBus service routines. It
was never ported to FreeBSD. See xenbusb.c, xenbusb_if.m,
xenbusb_front.c xenbusb_back.c for details of FreeBSD's XenBus
support.
sys/xen/xenbus/xenbusvar.h:
sys/xen/xenbus/xenbus_xs.c:
sys/xen/xenbus/xenbus_comms.c:
sys/xen/xenbus/xenbus_comms.h:
sys/xen/xenstore/xenstorevar.h:
sys/xen/xenstore/xenstore.c:
Split XenStore into its own tree. XenBus is a software layer built
on top of XenStore. The old arrangement and the naming of some
structures and functions blurred these lines making it difficult to
discern what services are provided by which layer and at what times
these services are available (e.g. during system startup and shutdown).
sys/xen/xenbus/xenbus_client.c:
sys/xen/xenbus/xenbus.c:
sys/xen/xenbus/xenbus_probe.c:
sys/xen/xenbus/xenbusb.c:
sys/xen/xenbus/xenbusb.h:
Split up XenBus code into methods available for use by client
drivers (xenbus.c) and code used by the XenBus "bus code" to
enumerate, attach, detach, and service bus drivers.
sys/xen/reboot.c:
sys/dev/xen/control/control.c:
Add a XenBus front driver for handling shutdown, reboot, suspend, and
resume events published in the XenStore. Move all PV suspend/reboot
support from reboot.c into this driver.
sys/xen/blkif.h:
New file from Xen vendor with macros and structures used by
a block back driver to service requests from a VM running a
different ABI (e.g. amd64 back with i386 front).
sys/conf/files:
Adjust kernel build spec for new XenBus/XenStore layout and added
Xen functionality.
sys/dev/xen/balloon/balloon.c:
sys/dev/xen/netfront/netfront.c:
sys/dev/xen/blkfront/blkfront.c:
sys/xen/xenbus/...
sys/xen/xenstore/...
o Rename XenStore APIs and structures from xenbus_* to xs_*.
o Adjust to use of M_XENBUS and M_XENSTORE malloc types for allocation
of objects returned by these APIs.
o Adjust for changes in the bus interface for Xen drivers.
sys/xen/xenbus/...
sys/xen/xenstore/...
Add Doxygen comments for these interfaces and the code that
implements them.
sys/dev/xen/blkback/blkback.c:
o Rewrite the Block Back driver to attach properly via newbus,
operate correctly in both PV and HVM mode regardless of domain
(e.g. can be in a DOM other than 0), and to deal with the latest
metadata available in XenStore for block devices.
o Allow users to specify a file as a backend to blkback, in addition
to character devices. Use the namei lookup of the backend path
to automatically configure, based on file type, the appropriate
backend method.
The current implementation is limited to a single outstanding I/O
at a time to file backed storage.
sys/dev/xen/blkback/blkback.c:
sys/xen/interface/io/blkif.h:
sys/xen/blkif.h:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
Extend the Xen blkif API: Negotiable request size and number of
requests.
This change extends the information recorded in the XenStore
allowing block front/back devices to negotiate for optimal I/O
parameters. This has been achieved without sacrificing backward
compatibility with drivers that are unaware of these protocol
enhancements. The extensions center around the connection protocol
which now includes these additions:
o The back-end device publishes its maximum supported values for
request I/O size, the number of page segments that can be
associated with a request, the maximum number of requests that
can be concurrently active, and the maximum number of pages that
can be in the shared request ring. These values are published
before the back-end enters the XenbusStateInitWait state.
o The front-end waits for the back-end to enter either the InitWait
or Initialize state. At this point, the front end limits its
own capabilities to the lesser of the values it finds published
by the backend, its own maximums, or, should any back-end data
be missing in the store, the values supported by the original
protocol. It then initializes its internal data structures
including allocation of the shared ring, publishes its maximum
capabilities to the XenStore and transitions to the Initialized
state.
o The back-end waits for the front-end to enter the Initialized
state. At this point, the back end limits its own capabilities
to the lesser of the values it finds published by the frontend,
its own maximums, or, should any front-end data be missing in
the store, the values supported by the original protocol. It
then initializes its internal data structures, attaches to the
shared ring and transitions to the Connected state.
o The front-end waits for the back-end to enter the Connected
state, transitions itself to the Connected state, and can
commence I/O.
Although an updated front-end driver must be aware of the back-end's
InitWait state, the back-end has been coded such that it can
tolerate a front-end that skips this step and transitions directly
to the Initialized state without waiting for the back-end.
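In code terms, the front end's clamping step looks roughly like the
sketch below. The XenStore node name "max-requests", the softc field
xbd_max_requests, and XBD_MAX_REQUESTS are illustrative assumptions; the
xs_scanf() calling convention mirrors the one used later in this file.

#define XBD_MAX_REQUESTS        256     /* front end's own ceiling (assumed) */

static void
xbd_negotiate_max_requests(struct xbd_softc *sc)
{
        const char *otherend = xenbus_get_otherend_path(sc->xbd_dev);
        uint32_t backend_max = 0;

        /*
         * A missing node leaves backend_max at 0, in which case we fall
         * back to the original protocol's single-page ring limit.
         */
        (void)xs_scanf(XST_NIL, otherend, "max-requests", NULL,
            "%" PRIu32, &backend_max);

        if (backend_max == 0)
                sc->xbd_max_requests = BLKIF_MAX_RING_REQUESTS(PAGE_SIZE);
        else
                sc->xbd_max_requests = min(backend_max, XBD_MAX_REQUESTS);
}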
sys/xen/interface/io/blkif.h:
o Increase BLKIF_MAX_SEGMENTS_PER_REQUEST to 255. This is
the maximum number possible without changing the blkif
request header structure (nr_segs is a uint8_t).
o Add two new constants:
BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK, and
BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK. These respectively
indicate the number of segments that can fit in the first
ring-buffer entry of a request, and for each subsequent
(sg element only) ring-buffer entry associated with the
"header" ring-buffer entry of the request.
o Add the blkif_request_segment_t typedef for segment
elements.
o Add the BLKRING_GET_SG_REQUEST() macro which wraps the
RING_GET_REQUEST() macro and returns a properly cast
pointer to an array of blkif_request_segment_ts.
o Add the BLKIF_SEGS_TO_BLOCKS() macro which calculates the
number of ring entries that will be consumed by a blkif
request with the given number of segments.
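One plausible formulation of the block-count arithmetic behind
BLKIF_SEGS_TO_BLOCKS() (a sketch, not the header verbatim): the first
ring slot carries the request header plus up to
BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK segments, and each additional slot
carries up to BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK more; howmany() is
the usual sys/param.h rounding helper.

#define BLKIF_SEGS_TO_BLOCKS(nsegs)					\
	(1 /* header block */ +						\
	 ((nsegs) <= BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK ? 0 :		\
	  howmany((nsegs) - BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK,	\
	      BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK)))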
sys/xen/blkif.h:
o Update for changes in interface/io/blkif.h macros.
o Update the BLKIF_MAX_RING_REQUESTS() macro to take the
ring size as an argument to allow this calculation on
multi-page rings.
o Add a companion macro to BLKIF_MAX_RING_REQUESTS(),
BLKIF_RING_PAGES(). This macro determines the number of
ring pages required in order to support a ring with the
supplied number of request blocks.
sys/dev/xen/blkback/blkback.c:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
o Negotiate with the other end with the following limits:
Request Size: MAXPHYS
Max Segments: (MAXPHYS/PAGE_SIZE) + 1
Max Requests: 256
Max Ring Pages: Sufficient to support Max Requests with
Max Segments. (A worked example follows this list.)
o Dynamically allocate request pools and segments per request.
o Update ring allocation/attachment code to support a
multi-page shared ring.
o Update routines that access the shared ring to handle
multi-block requests.
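As a worked example of the limits listed above (assuming the historical
FreeBSD defaults of MAXPHYS = 128 KiB and PAGE_SIZE = 4 KiB):
Max Segments = 128 KiB / 4 KiB + 1 = 33, the extra segment covering a
transfer that does not start on a page boundary. 33 segments is well
below the new BLKIF_MAX_SEGMENTS_PER_REQUEST ceiling of 255. With Max
Requests of 256, such requests cannot fit in a single-page shared ring,
which is what the Max Ring Pages entry and the multi-page ring support
provide for.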
sys/dev/xen/blkfront/blkfront.c:
o Track blkfront allocations in a blkfront driver specific
malloc pool.
o Strip out XenStore transaction retry logic in the
connection code. Transactions only need to be used when
the update to multiple XenStore nodes must be atomic.
That is not the case here.
o Fully disable blkif_resume() until it can be fixed
properly (it didn't work before this change).
o Destroy bus-dma objects during device instance tear-down.
o Properly handle backend devices with power-of-2 sector
sizes larger than 512b.
sys/dev/xen/blkback/blkback.c:
Advertise support for and implement the BLKIF_OP_WRITE_BARRIER
and BLKIF_OP_FLUSH_DISKCACHE blkif opcodes using BIO_FLUSH and
the BIO_ORDERED attribute of bios.
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
Fix various bugs in blkfront.
o gnttab_alloc_grant_references() returns 0 for success and
non-zero for failure. The check for < 0 is a leftover
Linuxism.
o When we negotiate with blkback and have to reduce some of our
capabilities, print out the original and reduced capability before
changing the local capability. So the user now gets the correct
information.
o Fix blkif_restart_queue_callback() formatting. Make sure we hold
the mutex in that function before calling xb_startio().
o Fix a couple of KASSERT()s.
o Fix a check in the xb_remove_* macro to be a little more specific.
sys/xen/gnttab.h:
sys/xen/gnttab.c:
Define GNTTAB_LIST_END publicly as GRANT_REF_INVALID.
sys/dev/xen/netfront/netfront.c:
Use GRANT_REF_INVALID instead of driver private definitions of the
same constant.
sys/xen/gnttab.h:
sys/xen/gnttab.c:
Add the gnttab_end_foreign_access_references() API.
This API allows a client to batch the release of an array of grant
references, instead of coding a private for loop. The implementation
takes advantage of this batching to reduce lock overhead to one
acquisition and release per-batch instead of per-freed grant reference.
While here, reduce the duration the gnttab_list_lock is held during
gnttab_free_grant_references() operations. The search to find the
tail of the incoming free list does not rely on global state and so
can be performed without holding the lock.
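A usage sketch of the batched API (the argument order, a count followed
by the reference array, is an assumption here; xbd_release_grants is an
illustrative name):

static void
xbd_release_grants(grant_ref_t *refs, u_int nrefs)
{
	/*
	 * One batched call replaces a per-reference loop, so the
	 * grant-table lock is taken and dropped once per batch instead
	 * of once per freed reference.
	 */
	gnttab_end_foreign_access_references(nrefs, refs);
}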
sys/dev/xen/xenpci/evtchn.c:
sys/dev/xen/evtchn/evtchn.c:
sys/xen/xen_intr.h:
o Implement the bind_interdomain_evtchn_to_irqhandler API for HVM mode.
This allows an HVM domain to serve back end devices to other domains.
This API is already implemented for PV mode.
o Synchronize the API between HVM and PV.
sys/dev/xen/xenpci/xenpci.c:
o Scan the full region of CPUID space in which the Xen VMM interface
may be implemented. On systems using SuSE as a Dom0 where the
Viridian API is also exported, the VMM interface is above the region
we used to search. (A sketch of this scan follows this list.)
o Pass through bus_alloc_resource() calls so that XenBus drivers
attaching on an HVM system can allocate unused physical address
space from the nexus. The block back driver makes use of this
facility.
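A sketch of the wider CPUID scan referred to above; xen_cpuid_base is an
illustrative name, do_cpuid() is the standard x86 helper from
<machine/cpufunc.h>, and "XenVMMXenVMM" is Xen's CPUID vendor signature.
The 0x40000000-0x4000ffff region is probed in 0x100-leaf steps.

static uint32_t
xen_cpuid_base(void)
{
	uint32_t base, regs[4];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		do_cpuid(base, regs);
		/* ebx/ecx/edx (regs[1..3]) spell the vendor signature. */
		if (memcmp("XenVMMXenVMM", &regs[1], 12) == 0 &&
		    regs[0] - base >= 2)
			return (base);
	}
	return (0);
}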
sys/i386/xen/xen_machdep.c:
Use the correct type for accessing the statically mapped xenstore
metadata.
sys/xen/interface/hvm/params.h:
sys/xen/xenstore/xenstore.c:
Move hvm_get_parameter() to the correct global header file instead
of as a private method to the XenStore.
sys/xen/interface/io/protocols.h:
Sync with vendor.
sys/xen/interface/io/ring.h:
Add macro for calculating the number of ring pages needed for an N
deep ring.
To avoid duplication within the macros, create and use the new
__RING_HEADER_SIZE() macro. This macro calculates the size of the
ring book keeping struct (producer/consumer indexes, etc.) that
resides at the head of the ring.
Add the __RING_PAGES() macro which calculates the number of shared
ring pages required to support a ring with the given number of
requests.
These APIs are used to support the multi-page ring version of the
Xen block API.
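Plausible formulations of these helpers (not the vendor header
verbatim), assuming the shared-ring layout generated by
DEFINE_RING_TYPES(), where the bookkeeping fields precede the 'ring'
array member:

#define __RING_HEADER_SIZE(_s)						\
	((intptr_t)(_s)->ring - (intptr_t)(_s))

#define __RING_PAGES(_s, _entries)					\
	howmany(__RING_HEADER_SIZE(_s) +				\
	    (_entries) * sizeof((_s)->ring[0]), PAGE_SIZE)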
sys/xen/interface/io/xenbus.h:
Add comments.
sys/xen/xenbus/...
o Refactor the FreeBSD XenBus support code to allow for both front and
backend device attachments.
o Make use of new config_intrhook capabilities to allow front and back
devices to be probed/attached in parallel.
o Fix bugs in probe/attach state machine that could cause the system to
hang when confronted with a failure either in the local domain or in
a remote domain to which one of our driver instances is attaching.
o Publish all required state to the XenStore on device detach and
failure. The majority of the missing functionality was for serving
as a back end since the typical "hot-plug" scripts in Dom0 don't
handle the case of cleaning up for a "service domain" that is not
itself.
o Add dynamic sysctl nodes exposing the generic ivars of
XenBus devices.
o Add doxygen style comments to the majority of the code.
o Cleanup types, formatting, etc.
sys/xen/xenbus/xenbusb.c:
Common code used by both front and back XenBus busses.
sys/xen/xenbus/xenbusb_if.m:
Method definitions for a XenBus bus.
sys/xen/xenbus/xenbusb_front.c:
sys/xen/xenbus/xenbusb_back.c:
XenBus bus specialization for front and back devices.
MFC after: 1 month

	/*
	 * Protocol negotiation.
	 *
	 * \note xs_gather() returns on the first encountered error, so
	 *       we must use independent calls in order to guarantee
	 *       we don't miss information in a sparsely populated back-end
	 *       tree.
	 *
	 * \note xs_scanf() does not update variables for unmatched
	 *       fields.
	 */
	otherend_path = xenbus_get_otherend_path(sc->xbd_dev);
	node_path = xenbus_get_node(sc->xbd_dev);

	/* Support both backend schemes for relaying ring page limits. */
	(void)xs_scanf(XST_NIL, otherend_path,
	    "max-ring-page-order", NULL, "%" PRIu32,
	    &max_ring_page_order);
	sc->xbd_ring_pages = 1 << max_ring_page_order;
	(void)xs_scanf(XST_NIL, otherend_path,
	    "max-ring-pages", NULL, "%" PRIu32,
	    &sc->xbd_ring_pages);
	if (sc->xbd_ring_pages < 1)
		sc->xbd_ring_pages = 1;
Improve the Xen para-virtualized device infrastructure of FreeBSD:
o Add support for backend devices (e.g. blkback)
o Implement extensions to the Xen para-virtualized block API to allow
for larger and more outstanding I/Os.
o Import a completely rewritten block back driver with support for fronting
I/O to both raw devices and files.
o General cleanup and documentation of the XenBus and XenStore support code.
o Robustness and performance updates for the block front driver.
o Fixes to the netfront driver.
Sponsored by: Spectra Logic Corporation
sys/xen/xenbus/init.txt:
Deleted: This file explains the Linux method for XenBus device
enumeration and thus does not apply to FreeBSD's NewBus approach.
sys/xen/xenbus/xenbus_probe_backend.c:
Deleted: Linux version of backend XenBus service routines. It
was never ported to FreeBSD. See xenbusb.c, xenbusb_if.m,
xenbusb_front.c xenbusb_back.c for details of FreeBSD's XenBus
support.
sys/xen/xenbus/xenbusvar.h:
sys/xen/xenbus/xenbus_xs.c:
sys/xen/xenbus/xenbus_comms.c:
sys/xen/xenbus/xenbus_comms.h:
sys/xen/xenstore/xenstorevar.h:
sys/xen/xenstore/xenstore.c:
Split XenStore into its own tree. XenBus is a software layer built
on top of XenStore. The old arrangement and the naming of some
structures and functions blurred these lines making it difficult to
discern what services are provided by which layer and at what times
these services are available (e.g. during system startup and shutdown).
sys/xen/xenbus/xenbus_client.c:
sys/xen/xenbus/xenbus.c:
sys/xen/xenbus/xenbus_probe.c:
sys/xen/xenbus/xenbusb.c:
sys/xen/xenbus/xenbusb.h:
Split up XenBus code into methods available for use by client
drivers (xenbus.c) and code used by the XenBus "bus code" to
enumerate, attach, detach, and service bus drivers.
sys/xen/reboot.c:
sys/dev/xen/control/control.c:
Add a XenBus front driver for handling shutdown, reboot, suspend, and
resume events published in the XenStore. Move all PV suspend/reboot
support from reboot.c into this driver.
sys/xen/blkif.h:
New file from Xen vendor with macros and structures used by
a block back driver to service requests from a VM running a
different ABI (e.g. amd64 back with i386 front).
sys/conf/files:
Adjust kernel build spec for new XenBus/XenStore layout and added
Xen functionality.
sys/dev/xen/balloon/balloon.c:
sys/dev/xen/netfront/netfront.c:
sys/dev/xen/blkfront/blkfront.c:
sys/xen/xenbus/...
sys/xen/xenstore/...
o Rename XenStore APIs and structures from xenbus_* to xs_*.
o Adjust to use of M_XENBUS and M_XENSTORE malloc types for allocation
of objects returned by these APIs.
o Adjust for changes in the bus interface for Xen drivers.
sys/xen/xenbus/...
sys/xen/xenstore/...
Add Doxygen comments for these interfaces and the code that
implements them.
sys/dev/xen/blkback/blkback.c:
o Rewrite the Block Back driver to attach properly via newbus,
operate correctly in both PV and HVM mode regardless of domain
(e.g. can be in a DOM other than 0), and to deal with the latest
metadata available in XenStore for block devices.
o Allow users to specify a file as a backend to blkback, in addition
to character devices. Use the namei lookup of the backend path
to automatically configure, based on file type, the appropriate
backend method.
The current implementation is limited to a single outstanding I/O
at a time to file backed storage.
sys/dev/xen/blkback/blkback.c:
sys/xen/interface/io/blkif.h:
sys/xen/blkif.h:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
Extend the Xen blkif API: Negotiable request size and number of
requests.
This change extends the information recorded in the XenStore
allowing block front/back devices to negotiate for optimal I/O
parameters. This has been achieved without sacrificing backward
compatibility with drivers that are unaware of these protocol
enhancements. The extensions center around the connection protocol
which now includes these additions:
o The back-end device publishes its maximum supported values for,
request I/O size, the number of page segments that can be
associated with a request, the maximum number of requests that
can be concurrently active, and the maximum number of pages that
can be in the shared request ring. These values are published
before the back-end enters the XenbusStateInitWait state.
o The front-end waits for the back-end to enter either the InitWait
or Initialize state. At this point, the front end limits its
own capabilities to the lesser of the values it finds published
by the backend, its own maximums, or, should any back-end data
be missing in the store, the values supported by the original
protocol. It then initializes its internal data structures,
including allocation of the shared ring, publishes its maximum
capabilities to the XenStore, and transitions to the Initialized
state.
o The back-end waits for the front-end to enter the Initialized
state. At this point, the back end limits its own capabilities
to the lesser of the values it finds published by the frontend,
its own maximums, or, should any front-end data be missing in
the store, the values supported by the original protocol. It
then initializes its internal data structures, attaches to the
shared ring, and transitions to the Connected state.
o The front-end waits for the back-end to enter the Connected
state, transitions itself to the Connected state, and can
commence I/O.
Although an updated front-end driver must be aware of the back-end's
InitWait state, the back-end has been coded such that it can
tolerate a front-end that skips this step and transitions directly
to the Initialized state without waiting for the back-end.
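As a minimal sketch of the clamping rule described above (illustrative
only; the helper name and its parameters are invented for this example
and are not part of the driver):

	#include <sys/types.h>

	static uint32_t
	negotiate_limit(uint32_t my_max, uint32_t other_end_value,
	    uint32_t legacy_default)
	{
		/* A value of 0 stands in for a key missing from the store. */
		if (other_end_value == 0)
			return (legacy_default);
		return (other_end_value < my_max ? other_end_value : my_max);
	}

For example, the ring-page count would be negotiated as
negotiate_limit(front-end maximum, published value, 1), since the
original protocol always used a single shared ring page.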
sys/xen/interface/io/blkif.h:
o Increase BLKIF_MAX_SEGMENTS_PER_REQUEST to 255. This is
the maximum number possible without changing the blkif
request header structure (nr_segs is a uint8_t).
o Add two new constants:
BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK, and
BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK. These respectively
indicate the number of segments that can fit in the first
ring-buffer entry of a request, and for each subsequent
(sg element only) ring-buffer entry associated with the
"header" ring-buffer entry of the request.
o Add the blkif_request_segment_t typedef for segment
elements.
o Add the BLKRING_GET_SG_REQUEST() macro which wraps the
RING_GET_REQUEST() macro and returns a properly cast
pointer to an array of blkif_request_segment_ts.
o Add the BLKIF_SEGS_TO_BLOCKS() macro which calculates the
number of ring entries that will be consumed by a blkif
request with the given number of segments.
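A hedged sketch of the calculation BLKIF_SEGS_TO_BLOCKS() is described
as performing (the macro in the headers above is authoritative; the
function form, name, and use of howmany() from sys/param.h below are
illustrative): the "header" ring entry carries the request and its
first BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK segments, and any remaining
segments spill into segment-only entries.

	#include <sys/param.h>

	static inline u_int
	blkif_segs_to_blocks_sketch(u_int nsegs)
	{
		u_int extra;

		if (nsegs <= BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK)
			return (1);
		extra = nsegs - BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
		/* One header entry plus however many segment-only
		 * entries are needed for the remainder. */
		return (1 + howmany(extra,
		    BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK));
	}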
sys/xen/blkif.h:
o Update for changes in interface/io/blkif.h macros.
o Update the BLKIF_MAX_RING_REQUESTS() macro to take the
ring size as an argument to allow this calculation on
multi-page rings.
o Add a companion macro to BLKIF_MAX_RING_REQUESTS(),
BLKIF_RING_PAGES(). This macro determines the number of
ring pages required in order to support a ring with the
supplied number of request blocks.
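Illustrative usage only, to make the relationship between the two
macros concrete (the argument conventions follow their descriptions
above: a ring size in bytes for one, a request-block count for the
other):

	/* How many request blocks fit in a four-page shared ring? */
	u_int max_reqs = BLKIF_MAX_RING_REQUESTS(4 * PAGE_SIZE);

	/* How many shared ring pages are needed for 256 request blocks? */
	u_int ring_pages = BLKIF_RING_PAGES(256);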
sys/dev/xen/blkback/blkback.c:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
o Negotiate with the other end using the following limits
(see the worked example after this list):
Request Size: MAXPHYS
Max Segments: (MAXPHYS/PAGE_SIZE) + 1
Max Requests: 256
Max Ring Pages: Sufficient to support Max Requests with
Max Segments.
o Dynamically allocate request pools and segments-per-request.
o Update ring allocation/attachment code to support a
multi-page shared ring.
o Update routines that access the shared ring to handle
multi-block requests.
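For concreteness (both constants are kernel-configuration dependent,
so the numbers are only an example): with MAXPHYS of 128 KiB and a
PAGE_SIZE of 4 KiB, Max Segments = (131072 / 4096) + 1 = 33; the
extra segment lets a maximum-sized transfer whose buffer is not page
aligned touch one additional page.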
sys/dev/xen/blkfront/blkfront.c:
o Track blkfront allocations in a blkfront driver specific
malloc pool.
o Strip out XenStore transaction retry logic in the
connection code. Transactions are only needed when an update
to multiple XenStore nodes must be atomic, which is not the
case here (see the sketch after this list).
o Fully disable blkif_resume() until it can be fixed
properly (it didn't work before this change).
o Destroy bus-dma objects during device instance tear-down.
o Properly handle backend devices with power-of-2 sector
sizes larger than 512 bytes.
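For contrast, a hedged sketch of the case where a transaction is
warranted, i.e. when several XenStore nodes must change atomically.
The path, node names, and values are placeholders, and
sys/xen/xenstore/xenstorevar.h is authoritative for the prototypes:

	struct xs_transaction xst;
	int error;

	do {
		error = xs_transaction_start(&xst);
		if (error != 0)
			break;
		error = xs_printf(xst, path, "node-a", "%u", value_a);
		if (error == 0)
			error = xs_printf(xst, path, "node-b", "%u", value_b);
		if (error != 0) {
			/* Abort, discarding both writes. */
			(void)xs_transaction_end(xst, 1);
			break;
		}
		/* Commit; EAGAIN indicates a conflicting update, so retry. */
		error = xs_transaction_end(xst, 0);
	} while (error == EAGAIN);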
sys/dev/xen/blkback/blkback.c:
Advertise support for and implement the BLKIF_OP_WRITE_BARRIER
and BLKIF_OP_FLUSH_DISKCACHE blkif opcodes using BIO_FLUSH and
the BIO_ORDERED attribute of bios.
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
Fix various bugs in blkfront.
o gnttab_alloc_grant_references() returns 0 for success and
non-zero for failure. The check for < 0 is a leftover
Linuxism.
o When we negotiate with blkback and have to reduce some of our
capabilities, print out the original and reduced capability before
changing the local capability, so the user sees the correct
information.
o Fix blkif_restart_queue_callback() formatting. Make sure we hold
the mutex in that function before calling xb_startio().
o Fix a couple of KASSERT()s.
o Fix a check in the xb_remove_* macro to be a little more specific.
sys/xen/gnttab.h:
sys/xen/gnttab.c:
Define GNTTAB_LIST_END publicly as GRANT_REF_INVALID.
sys/dev/xen/netfront/netfront.c:
Use GRANT_REF_INVALID instead of driver private definitions of the
same constant.
sys/xen/gnttab.h:
sys/xen/gnttab.c:
Add the gnttab_end_foreign_access_references() API.
This API allows a client to batch the release of an array of grant
references, instead of coding a private for loop. The implementation
takes advantage of this batching to reduce lock overhead to one
acquisition and release per-batch instead of per-freed grant reference.
While here, reduce the duration the gnttab_list_lock is held during
gnttab_free_grant_references() operations. The search to find the
tail of the incoming free list does not rely on global state and so
can be performed without holding the lock.
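A hedged usage sketch of the batched release (the assumed prototype is
a count plus an array of grant_ref_t; sys/xen/gnttab.h is
authoritative, and the batch size and array contents below are
placeholders):

	#define MY_NREFS 16			/* illustrative batch size */
	grant_ref_t my_refs[MY_NREFS];		/* previously granted refs */

	/* One call, and one gnttab_list_lock acquisition and release,
	 * covers the entire batch. */
	gnttab_end_foreign_access_references(MY_NREFS, my_refs);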
sys/dev/xen/xenpci/evtchn.c:
sys/dev/xen/evtchn/evtchn.c:
sys/xen/xen_intr.h:
o Implement the bind_interdomain_evtchn_to_irqhandler API for HVM mode.
This allows an HVM domain to serve back end devices to other domains.
This API is already implemented for PV mode.
o Synchronize the API between HVM and PV.
sys/dev/xen/xenpci/xenpci.c:
o Scan the full region of CPUID space in which the Xen VMM interface
may be implemented. On systems using SuSE as a Dom0 where the
Viridian API is also exported, the VMM interface is above the region
we used to search.
o Pass through bus_alloc_resource() calls so that XenBus drivers
attaching on an HVM system can allocate unused physical address
space from the nexus. The block back driver makes use of this
facility.
sys/i386/xen/xen_machdep.c:
Use the correct type for accessing the statically mapped xenstore
metadata.
sys/xen/interface/hvm/params.h:
sys/xen/xenstore/xenstore.c:
Move hvm_get_parameter() to the correct global header file instead
of as a private method to the XenStore.
sys/xen/interface/io/protocols.h:
Sync with vendor.
sys/xen/interface/io/ring.h:
Add macro for calculating the number of ring pages needed for an N
deep ring.
To avoid duplication within the macros, create and use the new
__RING_HEADER_SIZE() macro. This macro calculates the size of the
ring bookkeeping struct (producer/consumer indexes, etc.) that
resides at the head of the ring.
Add the __RING_PAGES() macro which calculates the number of shared
ring pages required to support a ring with the given number of
requests.
These APIs are used to support the multi-page ring version of the
Xen block API.
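As a rough sketch of the arithmetic these macros encapsulate (the
definitions in the header above are authoritative, and "entry" here
stands for the ring's request/response union): a shared ring begins
with the bookkeeping header whose size __RING_HEADER_SIZE() reports,
followed by the circular array of fixed-size entries, so an N-deep
ring needs on the order of

	howmany(__RING_HEADER_SIZE() + N * sizeof(entry), PAGE_SIZE)

pages, which is the quantity __RING_PAGES() computes.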
sys/xen/interface/io/xenbus.h:
Add Comments.
sys/xen/xenbus/...
o Refactor the FreeBSD XenBus support code to allow for both front and
backend device attachments.
o Make use of new config_intr_hook capabilities to allow front and back
devices to be probed/attached in parallel.
o Fix bugs in probe/attach state machine that could cause the system to
hang when confronted with a failure either in the local domain or in
a remote domain to which one of our driver instances is attaching.
o Publish all required state to the XenStore on device detach and
failure. The majority of the missing functionality was for serving
as a back end since the typical "hot-plug" scripts in Dom0 don't
handle the case of cleaning up for a "service domain" that is not
itself.
o Add dynamic sysctl nodes exposing the generic ivars of
XenBus devices.
o Add Doxygen-style comments to the majority of the code.
o Cleanup types, formatting, etc.
sys/xen/xenbus/xenbusb.c:
Common code used by both front and back XenBus busses.
sys/xen/xenbus/xenbusb_if.m:
Method definitions for a XenBus bus.
sys/xen/xenbus/xenbusb_front.c:
sys/xen/xenbus/xenbusb_back.c:
XenBus bus specialization for front and back devices.
MFC after: 1 month
if (sc->xbd_ring_pages > XBD_MAX_RING_PAGES) {
	device_printf(sc->xbd_dev,
	    "Back-end specified ring-pages of %u "
	    "limited to front-end limit of %u.\n",
	    sc->xbd_ring_pages, XBD_MAX_RING_PAGES);
	sc->xbd_ring_pages = XBD_MAX_RING_PAGES;
}

if (powerof2(sc->xbd_ring_pages) == 0) {
	uint32_t new_page_limit;

	new_page_limit = 0x01 << (fls(sc->xbd_ring_pages) - 1);
	device_printf(sc->xbd_dev,
	    "Back-end specified ring-pages of %u "
	    "is not a power of 2. Limited to %u.\n",
	    sc->xbd_ring_pages, new_page_limit);
	sc->xbd_ring_pages = new_page_limit;
}

sc->xbd_max_requests =
    BLKIF_MAX_RING_REQUESTS(sc->xbd_ring_pages * PAGE_SIZE);
if (sc->xbd_max_requests > XBD_MAX_REQUESTS) {
	device_printf(sc->xbd_dev,
	    "Back-end specified max_requests of %u "
	    "limited to front-end limit of %zu.\n",
	    sc->xbd_max_requests, XBD_MAX_REQUESTS);
	sc->xbd_max_requests = XBD_MAX_REQUESTS;
}
if (xbd_alloc_ring(sc) != 0)
	return;

/* Support both backend schemes for relaying ring page limits. */
if (sc->xbd_ring_pages > 1) {
	error = xs_printf(XST_NIL, node_path,
	    "num-ring-pages", "%u",
	    sc->xbd_ring_pages);
	if (error) {
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "writing %s/num-ring-pages",
		    node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path,
	    "ring-page-order", "%u",
	    fls(sc->xbd_ring_pages) - 1);
	if (error) {
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "writing %s/ring-page-order",
		    node_path);
		return;
	}
}

error = xs_printf(XST_NIL, node_path, "event-channel",
    "%u", xen_intr_port(sc->xen_intr_handle));
Improve the Xen para-virtualized device infrastructure of FreeBSD:
o Add support for backend devices (e.g. blkback)
o Implement extensions to the Xen para-virtualized block API to allow
for larger and more outstanding I/Os.
o Import a completely rewritten block back driver with support for fronting
I/O to both raw devices and files.
o General cleanup and documentation of the XenBus and XenStore support code.
o Robustness and performance updates for the block front driver.
o Fixes to the netfront driver.
Sponsored by: Spectra Logic Corporation
sys/xen/xenbus/init.txt:
Deleted: This file explains the Linux method for XenBus device
enumeration and thus does not apply to FreeBSD's NewBus approach.
sys/xen/xenbus/xenbus_probe_backend.c:
Deleted: Linux version of backend XenBus service routines. It
was never ported to FreeBSD. See xenbusb.c, xenbusb_if.m,
xenbusb_front.c xenbusb_back.c for details of FreeBSD's XenBus
support.
sys/xen/xenbus/xenbusvar.h:
sys/xen/xenbus/xenbus_xs.c:
sys/xen/xenbus/xenbus_comms.c:
sys/xen/xenbus/xenbus_comms.h:
sys/xen/xenstore/xenstorevar.h:
sys/xen/xenstore/xenstore.c:
Split XenStore into its own tree. XenBus is a software layer built
on top of XenStore. The old arrangement and the naming of some
structures and functions blurred these lines making it difficult to
discern what services are provided by which layer and at what times
these services are available (e.g. during system startup and shutdown).
sys/xen/xenbus/xenbus_client.c:
sys/xen/xenbus/xenbus.c:
sys/xen/xenbus/xenbus_probe.c:
sys/xen/xenbus/xenbusb.c:
sys/xen/xenbus/xenbusb.h:
Split up XenBus code into methods available for use by client
drivers (xenbus.c) and code used by the XenBus "bus code" to
enumerate, attach, detach, and service bus drivers.
sys/xen/reboot.c:
sys/dev/xen/control/control.c:
Add a XenBus front driver for handling shutdown, reboot, suspend, and
resume events published in the XenStore. Move all PV suspend/reboot
support from reboot.c into this driver.
sys/xen/blkif.h:
New file from Xen vendor with macros and structures used by
a block back driver to service requests from a VM running a
different ABI (e.g. amd64 back with i386 front).
sys/conf/files:
Adjust kernel build spec for new XenBus/XenStore layout and added
Xen functionality.
sys/dev/xen/balloon/balloon.c:
sys/dev/xen/netfront/netfront.c:
sys/dev/xen/blkfront/blkfront.c:
sys/xen/xenbus/...
sys/xen/xenstore/...
o Rename XenStore APIs and structures from xenbus_* to xs_*.
o Adjust to use of M_XENBUS and M_XENSTORE malloc types for allocation
of objects returned by these APIs.
o Adjust for changes in the bus interface for Xen drivers.
sys/xen/xenbus/...
sys/xen/xenstore/...
Add Doxygen comments for these interfaces and the code that
implements them.
sys/dev/xen/blkback/blkback.c:
o Rewrite the Block Back driver to attach properly via newbus,
operate correctly in both PV and HVM mode regardless of domain
(e.g. can be in a DOM other than 0), and to deal with the latest
metadata available in XenStore for block devices.
o Allow users to specify a file as a backend to blkback, in addition
to character devices. Use the namei lookup of the backend path
to automatically configure, based on file type, the appropriate
backend method.
The current implementation is limited to a single outstanding I/O
at a time to file backed storage.
sys/dev/xen/blkback/blkback.c:
sys/xen/interface/io/blkif.h:
sys/xen/blkif.h:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
Extend the Xen blkif API: Negotiable request size and number of
requests.
This change extends the information recorded in the XenStore
allowing block front/back devices to negotiate for optimal I/O
parameters. This has been achieved without sacrificing backward
compatibility with drivers that are unaware of these protocol
enhancements. The extensions center around the connection protocol
which now includes these additions:
o The back-end device publishes its maximum supported values for:
request I/O size, the number of page segments that can be
associated with a request, the maximum number of requests that
can be concurrently active, and the maximum number of pages that
can be in the shared request ring. These values are published
before the back-end enters the XenbusStateInitWait state.
o The front-end waits for the back-end to enter either the InitWait
or Initialized state. At this point, the front end limits its
own capabilities to the lesser of the values it finds published
by the backend, its own maximums, or, should any back-end data
be missing in the store, the values supported by the original
protocol. It then initializes its internal data structures
including allocation of the shared ring, publishes its maximum
capabilities to the XenStore and transitions to the Initialized
state.
o The back-end waits for the front-end to enter the Initialized
state. At this point, the back end limits its own capabilities
to the lesser of the values it finds published by the frontend,
its own maximums, or, should any front-end data be missing in
the store, the values supported by the original protocol. It
then initializes its internal data structures, attaches to the
shared ring and transitions to the Connected state.
o The front-end waits for the back-end to enter the Connected
state, transitions itself to the Connected state, and can
commence I/O.
Although an updated front-end driver must be aware of the back-end's
InitWait state, the back-end has been coded such that it can
tolerate a front-end that skips this step and transitions directly
to the Initialized state without waiting for the back-end.
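A hedged sketch of the front-end half of this negotiation. read_backend_limit() and publish_frontend_limits() are hypothetical helpers standing in for XenStore reads and writes, and the key names and FRONTEND_* ceilings are illustrative rather than the exact store nodes; only xenbus_set_state() and XenbusStateInitialised are taken from the surrounding text.

	uint32_t max_requests, max_segments, ring_pages;

	/* Clamp to the lesser of our own ceiling and what the back-end
	 * published; a missing node falls back to the original protocol. */
	max_requests = min(FRONTEND_MAX_REQUESTS,
	    read_backend_limit(sc, "max-requests", ORIG_PROTO_REQUESTS));
	max_segments = min(FRONTEND_MAX_SEGMENTS,
	    read_backend_limit(sc, "max-request-segments", ORIG_PROTO_SEGMENTS));
	ring_pages   = min(FRONTEND_MAX_RING_PAGES,
	    read_backend_limit(sc, "max-ring-pages", 1));

	/* Publish the (possibly reduced) values and advance. */
	publish_frontend_limits(sc, max_requests, max_segments, ring_pages);
	xenbus_set_state(sc->xbd_dev, XenbusStateInitialised);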
sys/xen/interface/io/blkif.h:
o Increase BLKIF_MAX_SEGMENTS_PER_REQUEST to 255. This is
the maximum number possible without changing the blkif
request header structure (nr_segs is a uint8_t).
o Add two new constants:
BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK, and
BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK. These respectively
indicate the number of segments that can fit in the first
ring-buffer entry of a request, and for each subsequent
(sg element only) ring-buffer entry associated with the
"header" ring-buffer entry of the request.
o Add the blkif_request_segment_t typedef for segment
elements.
o Add the BLKRING_GET_SG_REQUEST() macro which wraps the
RING_GET_REQUEST() macro and returns a properly cast
pointer to an array of blkif_request_segment_ts.
o Add the BLKIF_SEGS_TO_BLOCKS() macro which calculates the
number of ring entries that will be consumed by a blkif
request with the given number of segments.
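A hedged reconstruction of the arithmetic behind a macro of this kind: the header ring entry carries up to BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK segments and every additional segment-only entry carries up to BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK, so the entry count is one plus a ceiling division of the overflow. howmany() is the ceiling-division helper from sys/param.h, and the macro name below is deliberately not the real one.

#define	SEGS_TO_BLOCKS_SKETCH(nsegs)					\
	(1 + (((nsegs) <= BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK) ? 0 :	\
	    howmany((nsegs) - BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK,	\
		BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK)))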
sys/xen/blkif.h:
o Update for changes in interface/io/blkif.h macros.
o Update the BLKIF_MAX_RING_REQUESTS() macro to take the
ring size as an argument to allow this calculation on
multi-page rings.
o Add a companion macro to BLKIF_MAX_RING_REQUESTS(),
BLKIF_RING_PAGES(). This macro determines the number of
ring pages required in order to support a ring with the
supplied number of request blocks.
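A hedged sketch of the page math such a companion macro performs. The header and entry sizes are passed in explicitly here because the real macro's exact parameters are not reproduced; howmany() is again the sys/param.h ceiling division.

#define	RING_PAGES_SKETCH(hdrsize, entsize, nreqs)			\
	howmany((hdrsize) + (nreqs) * (entsize), PAGE_SIZE)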
sys/dev/xen/blkback/blkback.c:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
o Negotiate with the other end using the following limits:
Request Size: MAXPHYS
Max Segments: (MAXPHYS/PAGE_SIZE) + 1
Max Requests: 256
Max Ring Pages: Sufficient to support Max Requests with
Max Segments.
o Dynamically allocate request pools and segments-per-request.
o Update ring allocation/attachment code to support a
multi-page shared ring.
o Update routines that access the shared ring to handle
multi-block requests.
sys/dev/xen/blkfront/blkfront.c:
o Track blkfront allocations in a blkfront driver specific
malloc pool.
o Strip out XenStore transaction retry logic in the
connection code. Transactions only need to be used when
the update to multiple XenStore nodes must be atomic.
That is not the case here.
o Fully disable blkif_resume() until it can be fixed
properly (it didn't work before this change).
o Destroy bus-dma objects during device instance tear-down.
o Properly handle backend devices with power-of-2 sector
sizes larger than 512 bytes.
sys/dev/xen/blkback/blkback.c:
Advertise support for and implement the BLKIF_OP_WRITE_BARRIER
and BLKIF_OP_FLUSH_DISKCACHE blkif opcodes using BIO_FLUSH and
the BIO_ORDERED attribute of bios.
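A hedged sketch of the opcode-to-bio mapping this entry describes, on the back-end's dispatch path; ring_req and bp stand in for the driver's actual request and struct bio bookkeeping and are not the real variable names.

	switch (ring_req->operation) {
	case BLKIF_OP_FLUSH_DISKCACHE:
		/* Post-write cache flush maps onto BIO_FLUSH. */
		bp->bio_cmd = BIO_FLUSH;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		/* A barrier write becomes an ordered write. */
		bp->bio_cmd = BIO_WRITE;
		bp->bio_flags |= BIO_ORDERED;
		break;
	default:
		bp->bio_cmd = (ring_req->operation == BLKIF_OP_READ) ?
		    BIO_READ : BIO_WRITE;
		break;
	}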
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
Fix various bugs in blkfront.
o gnttab_alloc_grant_references() returns 0 for success and
non-zero for failure. The check for < 0 is a leftover
Linuxism.
o When we negotiate with blkback and have to reduce some of our
capabilities, print out the original and reduced capability before
changing the local capability, so the user sees the correct
information.
o Fix blkif_restart_queue_callback() formatting. Make sure we hold
the mutex in that function before calling xb_startio().
o Fix a couple of KASSERT()s.
o Fix a check in the xb_remove_* macro to be a little more specific.
sys/xen/gnttab.h:
sys/xen/gnttab.c:
Define GNTTAB_LIST_END publicly as GRANT_REF_INVALID.
sys/dev/xen/netfront/netfront.c:
Use GRANT_REF_INVALID instead of driver private definitions of the
same constant.
sys/xen/gnttab.h:
sys/xen/gnttab.c:
Add the gnttab_end_foreign_access_references() API.
This API allows a client to batch the release of an array of grant
references, instead of coding a private for loop. The implementation
takes advantage of this batching to reduce lock overhead to one
acquisition and release per-batch instead of per-freed grant reference.
While here, reduce the duration the gnttab_list_lock is held during
gnttab_free_grant_references() operations. The search to find the
tail of the incoming free list does not rely on global state and so
can be performed without holding the lock.
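A hedged illustration of the lock-amortization pattern described here, assuming gnttab_list_lock is an ordinary mutex; put_free_entry_locked() is a hypothetical helper, not a function from gnttab.c.

	/* One lock round trip for the whole batch rather than per item. */
	mtx_lock(&gnttab_list_lock);
	for (i = 0; i < nrefs; i++)
		put_free_entry_locked(refs[i]);
	mtx_unlock(&gnttab_list_lock);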
sys/dev/xen/xenpci/evtchn.c:
sys/dev/xen/evtchn/evtchn.c:
sys/xen/xen_intr.h:
o Implement the bind_interdomain_evtchn_to_irqhandler API for HVM mode.
This allows an HVM domain to serve back end devices to other domains.
This API is already implemented for PV mode.
o Synchronize the API between HVM and PV.
sys/dev/xen/xenpci/xenpci.c:
o Scan the full region of CPUID space in which the Xen VMM interface
may be implemented. On systems using SuSE as a Dom0 where the
Viridian API is also exported, the VMM interface is above the region
we used to search.
o Pass through bus_alloc_resource() calls so that XenBus drivers
attaching on an HVM system can allocate unused physical address
space from the nexus. The block back driver makes use of this
facility.
sys/i386/xen/xen_machdep.c:
Use the correct type for accessing the statically mapped xenstore
metadata.
sys/xen/interface/hvm/params.h:
sys/xen/xenstore/xenstore.c:
Move hvm_get_parameter() to the correct global header file instead
of as a private method to the XenStore.
sys/xen/interface/io/protocols.h:
Sync with vendor.
sys/xen/interface/io/ring.h:
Add macro for calculating the number of ring pages needed for an N
deep ring.
To avoid duplication within the macros, create and use the new
__RING_HEADER_SIZE() macro. This macro calculates the size of the
ring bookkeeping struct (producer/consumer indexes, etc.) that
resides at the head of the ring.
Add the __RING_PAGES() macro which calculates the number of shared
ring pages required to support a ring with the given number of
requests.
These APIs are used to support the multi-page ring version of the
Xen block API.
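A hedged sketch of how a header-size macro of this kind is commonly written, assuming the shared-ring structure ends with a flexible 'ring' array of entries; that layout is an assumption about ring.h, not a quote from it.

/* Everything before the first ring slot is producer/consumer
 * bookkeeping; everything after it is request/response space. */
#define	RING_HEADER_SIZE_SKETCH(sring_type)				\
	offsetof(sring_type, ring[0])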
sys/xen/interface/io/xenbus.h:
Add comments.
sys/xen/xenbus/...
o Refactor the FreeBSD XenBus support code to allow for both front and
backend device attachments.
o Make use of new config_intr_hook capabilities to allow front and back
devices to be probed/attached in parallel.
o Fix bugs in probe/attach state machine that could cause the system to
hang when confronted with a failure either in the local domain or in
a remote domain to which one of our driver instances is attaching.
o Publish all required state to the XenStore on device detach and
failure. The majority of the missing functionality was for serving
as a back end since the typical "hot-plug" scripts in Dom0 don't
handle the case of cleaning up for a "service domain" that is not
itself.
o Add dynamic sysctl nodes exposing the generic ivars of
XenBus devices.
o Add doxygen style comments to the majority of the code.
o Cleanup types, formatting, etc.
sys/xen/xenbus/xenbusb.c:
Common code used by both front and back XenBus busses.
sys/xen/xenbus/xenbusb_if.m:
Method definitions for a XenBus bus.
sys/xen/xenbus/xenbusb_front.c:
sys/xen/xenbus/xenbusb_back.c:
XenBus bus specialization for front and back devices.
MFC after: 1 month
2010-10-19 20:53:30 +00:00
	if (error) {
2013-05-31 21:05:07 +00:00
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "writing %s/event-channel",
		    node_path);
2010-10-19 20:53:30 +00:00
		return;
	}

2013-05-31 21:05:07 +00:00
	error = xs_printf(XST_NIL, node_path, "protocol",
	    "%s", XEN_IO_PROTO_ABI_NATIVE);
2010-10-19 20:53:30 +00:00
	if (error) {
2013-05-31 21:05:07 +00:00
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "writing %s/protocol",
		    node_path);
2010-10-19 20:53:30 +00:00
		return;
	}

2013-05-31 21:05:07 +00:00
	xenbus_set_state(sc->xbd_dev, XenbusStateInitialised);
2008-08-12 20:01:57 +00:00
}

/*
2013-05-31 21:05:07 +00:00
 * Invoked when the backend is finally 'ready' (and has published
 * the details about the physical device - #sectors, size, etc).
 */
2008-08-12 20:01:57 +00:00
static void
2013-05-31 21:05:07 +00:00
xbd_connect(struct xbd_softc *sc)
2008-08-12 20:01:57 +00:00
{
2013-05-31 21:05:07 +00:00
	device_t dev = sc->xbd_dev;
2016-04-03 11:18:20 +00:00
	unsigned long sectors, sector_size, phys_sector_size;
2008-08-12 20:01:57 +00:00
	unsigned int binfo;
2013-06-26 20:39:07 +00:00
	int err, feature_barrier, feature_flush;
2015-07-30 03:50:01 +00:00
	int i, j;
2008-08-12 20:01:57 +00:00

2013-06-14 17:00:58 +00:00
	if (sc->xbd_state == XBD_STATE_CONNECTED ||
	    sc->xbd_state == XBD_STATE_SUSPENDED)
2008-08-12 20:01:57 +00:00
		return;

2008-12-04 07:59:05 +00:00
	DPRINTK("blkfront.c:connect:%s.\n", xenbus_get_otherend_path(dev));
2008-08-12 20:01:57 +00:00
Improve the Xen para-virtualized device infrastructure of FreeBSD:
o Add support for backend devices (e.g. blkback)
o Implement extensions to the Xen para-virtualized block API to allow
for larger and more outstanding I/Os.
o Import a completely rewritten block back driver with support for fronting
I/O to both raw devices and files.
o General cleanup and documentation of the XenBus and XenStore support code.
o Robustness and performance updates for the block front driver.
o Fixes to the netfront driver.
Sponsored by: Spectra Logic Corporation
sys/xen/xenbus/init.txt:
Deleted: This file explains the Linux method for XenBus device
enumeration and thus does not apply to FreeBSD's NewBus approach.
sys/xen/xenbus/xenbus_probe_backend.c:
Deleted: Linux version of backend XenBus service routines. It
was never ported to FreeBSD. See xenbusb.c, xenbusb_if.m,
xenbusb_front.c xenbusb_back.c for details of FreeBSD's XenBus
support.
sys/xen/xenbus/xenbusvar.h:
sys/xen/xenbus/xenbus_xs.c:
sys/xen/xenbus/xenbus_comms.c:
sys/xen/xenbus/xenbus_comms.h:
sys/xen/xenstore/xenstorevar.h:
sys/xen/xenstore/xenstore.c:
Split XenStore into its own tree. XenBus is a software layer built
on top of XenStore. The old arrangement and the naming of some
structures and functions blurred these lines making it difficult to
discern what services are provided by which layer and at what times
these services are available (e.g. during system startup and shutdown).
sys/xen/xenbus/xenbus_client.c:
sys/xen/xenbus/xenbus.c:
sys/xen/xenbus/xenbus_probe.c:
sys/xen/xenbus/xenbusb.c:
sys/xen/xenbus/xenbusb.h:
Split up XenBus code into methods available for use by client
drivers (xenbus.c) and code used by the XenBus "bus code" to
enumerate, attach, detach, and service bus drivers.
sys/xen/reboot.c:
sys/dev/xen/control/control.c:
Add a XenBus front driver for handling shutdown, reboot, suspend, and
resume events published in the XenStore. Move all PV suspend/reboot
support from reboot.c into this driver.
sys/xen/blkif.h:
New file from Xen vendor with macros and structures used by
a block back driver to service requests from a VM running a
different ABI (e.g. amd64 back with i386 front).
sys/conf/files:
Adjust kernel build spec for new XenBus/XenStore layout and added
Xen functionality.
sys/dev/xen/balloon/balloon.c:
sys/dev/xen/netfront/netfront.c:
sys/dev/xen/blkfront/blkfront.c:
sys/xen/xenbus/...
sys/xen/xenstore/...
o Rename XenStore APIs and structures from xenbus_* to xs_*.
o Adjust to use of M_XENBUS and M_XENSTORE malloc types for allocation
of objects returned by these APIs.
o Adjust for changes in the bus interface for Xen drivers.
sys/xen/xenbus/...
sys/xen/xenstore/...
Add Doxygen comments for these interfaces and the code that
implements them.
sys/dev/xen/blkback/blkback.c:
o Rewrite the Block Back driver to attach properly via newbus,
operate correctly in both PV and HVM mode regardless of domain
(e.g. can be in a DOM other than 0), and to deal with the latest
metadata available in XenStore for block devices.
o Allow users to specify a file as a backend to blkback, in addition
to character devices. Use the namei lookup of the backend path
to automatically configure, based on file type, the appropriate
backend method.
The current implementation is limited to a single outstanding I/O
at a time to file backed storage.
sys/dev/xen/blkback/blkback.c:
sys/xen/interface/io/blkif.h:
sys/xen/blkif.h:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
Extend the Xen blkif API: Negotiable request size and number of
requests.
This change extends the information recorded in the XenStore
allowing block front/back devices to negotiate for optimal I/O
parameters. This has been achieved without sacrificing backward
compatibility with drivers that are unaware of these protocol
enhancements. The extensions center around the connection protocol
which now includes these additions:
o The back-end device publishes its maximum supported values for,
request I/O size, the number of page segments that can be
associated with a request, the maximum number of requests that
can be concurrently active, and the maximum number of pages that
can be in the shared request ring. These values are published
before the back-end enters the XenbusStateInitWait state.
o The front-end waits for the back-end to enter either the InitWait
or Initialize state. At this point, the front end limits it's
own capabilities to the lesser of the values it finds published
by the backend, it's own maximums, or, should any back-end data
be missing in the store, the values supported by the original
protocol. It then initializes it's internal data structures
including allocation of the shared ring, publishes its maximum
capabilities to the XenStore and transitions to the Initialized
state.
o The back-end waits for the front-end to enter the Initalized
state. At this point, the back end limits it's own capabilities
to the lesser of the values it finds published by the frontend,
it's own maximums, or, should any front-end data be missing in
the store, the values supported by the original protocol. It
then initializes it's internal data structures, attaches to the
shared ring and transitions to the Connected state.
o The front-end waits for the back-end to enter the Connnected
state, transitions itself to the connected state, and can
commence I/O.
Although an updated front-end driver must be aware of the back-end's
InitWait state, the back-end has been coded such that it can
tolerate a front-end that skips this step and transitions directly
to the Initialized state without waiting for the back-end.
sys/xen/interface/io/blkif.h:
o Increase BLKIF_MAX_SEGMENTS_PER_REQUEST to 255. This is
the maximum number possible without changing the blkif
request header structure (nr_segs is a uint8_t).
o Add two new constants:
BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK, and
BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK. These respectively
indicate the number of segments that can fit in the first
ring-buffer entry of a request, and for each subsequent
(sg element only) ring-buffer entry associated with the
"header" ring-buffer entry of the request.
o Add the blkif_request_segment_t typedef for segment
elements.
o Add the BLKRING_GET_SG_REQUEST() macro which wraps the
RING_GET_REQUEST() macro and returns a properly cast
pointer to an array of blkif_request_segment_ts.
o Add the BLKIF_SEGS_TO_BLOCKS() macro which calculates the
number of ring entries that will be consumed by a blkif
request with the given number of segments.
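As a sketch of the intended arithmetic (the constants below are placeholders,
not the header's actual values), a request needing more segments than fit in
the header entry spills the remainder into follow-on segment-only entries:
    /* Placeholder values; the real constants are defined in io/blkif.h. */
    #define EXAMPLE_SEGS_PER_HEADER_BLOCK   11
    #define EXAMPLE_SEGS_PER_SEGMENT_BLOCK  14
    /* Ring entries consumed by a blkif request carrying nsegs segments. */
    #define EXAMPLE_SEGS_TO_BLOCKS(nsegs)                           \
        (howmany((nsegs) - EXAMPLE_SEGS_PER_HEADER_BLOCK,           \
            EXAMPLE_SEGS_PER_SEGMENT_BLOCK) + 1)
howmany() is the round-up division helper from <sys/param.h>; for a segment
count at or below the header entry's capacity the result is a single ring entry.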
sys/xen/blkif.h:
o Update for changes in interface/io/blkif.h macros.
o Update the BLKIF_MAX_RING_REQUESTS() macro to take the
ring size as an argument to allow this calculation on
multi-page rings.
o Add a companion macro to BLKIF_MAX_RING_REQUESTS(),
BLKIF_RING_PAGES(). This macro determines the number of
ring pages required in order to support a ring with the
supplied number of request blocks.
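Assuming, purely for illustration, that one ring page holds 32 request blocks
(the capacity of the classic single-page blkif ring), the two macros are
inverses of each other:
    #define EXAMPLE_REQS_PER_RING_PAGE  32  /* assumed per-page capacity */
    #define EXAMPLE_MAX_RING_REQUESTS(ring_size)                    \
        (((ring_size) / PAGE_SIZE) * EXAMPLE_REQS_PER_RING_PAGE)
    #define EXAMPLE_BLKIF_RING_PAGES(nreqs)                         \
        howmany((nreqs), EXAMPLE_REQS_PER_RING_PAGE)
Under that assumption, the 256 concurrent requests negotiated below fit in an
8-page shared ring.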
sys/dev/xen/blkback/blkback.c:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
o Negotiate with the other-end with the following limits:
Request Size: MAXPHYS
Max Segments: (MAXPHYS/PAGE_SIZE) + 1
Max Requests: 256
Max Ring Pages: Sufficient to support Max Requests with
Max Segments.
o Dynamically allocate request pools and segments-per-request.
o Update ring allocation/attachment code to support a
multi-page shared ring.
o Update routines that access the shared ring to handle
multi-block requests.
sys/dev/xen/blkfront/blkfront.c:
o Track blkfront allocations in a blkfront driver specific
malloc pool.
o Strip out XenStore transaction retry logic in the
connection code. Transactions only need to be used when
the update to multiple XenStore nodes must be atomic.
That is not the case here.
o Fully disable blkif_resume() until it can be fixed
properly (it didn't work before this change).
o Destroy bus-dma objects during device instance tear-down.
o Properly handle backend devices with power-of-2 sector
sizes larger than 512B.
sys/dev/xen/blkback/blkback.c:
Advertise support for and implement the BLKIF_OP_WRITE_BARRIER
and BLKIF_OP_FLUSH_DISKCACHE blkif opcodes using BIO_FLUSH and
the BIO_ORDERED attribute of bios.
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
Fix various bugs in blkfront.
o gnttab_alloc_grant_references() returns 0 for success and
non-zero for failure. The check for < 0 is a leftover
Linuxism.
o When we negotiate with blkback and have to reduce some of our
capabilities, print out the original and reduced capability before
changing the local capability, so that the user sees the correct
information.
o Fix blkif_restart_queue_callback() formatting. Make sure we hold
the mutex in that function before calling xb_startio().
o Fix a couple of KASSERT()s.
o Fix a check in the xb_remove_* macro to be a little more specific.
sys/xen/gnttab.h:
sys/xen/gnttab.c:
Define GNTTAB_LIST_END publicly as GRANT_REF_INVALID.
sys/dev/xen/netfront/netfront.c:
Use GRANT_REF_INVALID instead of driver private definitions of the
same constant.
sys/xen/gnttab.h:
sys/xen/gnttab.c:
Add the gnttab_end_foreign_access_references() API.
This API allows a client to batch the release of an array of grant
references, instead of coding a private for loop. The implementation
takes advantage of this batching to reduce lock overhead to one
acquisition and release per-batch instead of per-freed grant reference.
While here, reduce the duration the gnttab_list_lock is held during
gnttab_free_grant_references() operations. The search to find the
tail of the incoming free list does not rely on global state and so
can be performed without holding the lock.
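A caller that previously looped over the per-reference release routine can hand
the whole array over in one call. The sketch below assumes the batched routine
takes a count followed by the reference array, and borrows the xbd_command
naming used elsewhere in this change (cm_nseg here stands for the command's
segment count and is an assumed field name):
    /*
     * One batched call replaces a per-reference loop; the grant table
     * lock is taken once for the whole array.  (Argument order assumed:
     * count, then the array of grant references.)
     */
    gnttab_end_foreign_access_references(cm->cm_nseg, cm->cm_sg_refs);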
sys/dev/xen/xenpci/evtchn.c:
sys/dev/xen/evtchn/evtchn.c:
sys/xen/xen_intr.h:
o Implement the bind_interdomain_evtchn_to_irqhandler API for HVM mode.
This allows an HVM domain to serve back end devices to other domains.
This API is already implemented for PV mode.
o Synchronize the API between HVM and PV.
sys/dev/xen/xenpci/xenpci.c:
o Scan the full region of CPUID space in which the Xen VMM interface
may be implemented. On systems using SuSE as a Dom0 where the
Viridian API is also exported, the VMM interface is above the region
we used to search.
o Pass through bus_alloc_resource() calls so that XenBus drivers
attaching on an HVM system can allocate unused physical address
space from the nexus. The block back driver makes use of this
facility.
sys/i386/xen/xen_machdep.c:
Use the correct type for accessing the statically mapped xenstore
metadata.
sys/xen/interface/hvm/params.h:
sys/xen/xenstore/xenstore.c:
Move hvm_get_parameter() to the correct global header file instead
of as a private method to the XenStore.
sys/xen/interface/io/protocols.h:
Sync with vendor.
sys/xen/interface/io/ring.h:
Add macro for calculating the number of ring pages needed for an N
deep ring.
To avoid duplication within the macros, create and use the new
__RING_HEADER_SIZE() macro. This macro calculates the size of the
ring bookkeeping struct (producer/consumer indexes, etc.) that
resides at the head of the ring.
Add the __RING_PAGES() macro which calculates the number of shared
ring pages required to support a ring with the given number of
requests.
These APIs are used to support the multi-page ring version of the
Xen block API.
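In spirit (a sketch, not the header's literal definitions), the bookkeeping
header is everything in the shared-ring structure that precedes the
request/response array, and the page count follows from that:
    /* Size of the producer/consumer bookkeeping at the head of ring _s. */
    #define SKETCH_RING_HEADER_SIZE(_s)                             \
        ((intptr_t)(_s)->ring - (intptr_t)(_s))
    /* Shared pages needed for a ring holding _size entries. */
    #define SKETCH_RING_PAGES(_s, _size)                            \
        howmany(SKETCH_RING_HEADER_SIZE(_s) +                       \
            (_size) * sizeof((_s)->ring[0]), PAGE_SIZE)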
sys/xen/interface/io/xenbus.h:
Add Comments.
sys/xen/xenbus/...
o Refactor the FreeBSD XenBus support code to allow for both front and
backend device attachments.
o Make use of new config_intr_hook capabilities to allow front and back
devices to be probed/attached in parallel.
o Fix bugs in probe/attach state machine that could cause the system to
hang when confronted with a failure either in the local domain or in
a remote domain to which one of our driver instances is attaching.
o Publish all required state to the XenStore on device detach and
failure. The majority of the missing functionality was for serving
as a back end since the typical "hot-plug" scripts in Dom0 don't
handle the case of cleaning up for a "service domain" that is not
Dom0 itself.
o Add dynamic sysctl nodes exposing the generic ivars of
XenBus devices.
o Add doxygen style comments to the majority of the code.
o Cleanup types, formatting, etc.
sys/xen/xenbus/xenbusb.c:
Common code used by both front and back XenBus busses.
sys/xen/xenbus/xenbusb_if.m:
Method definitions for a XenBus bus.
sys/xen/xenbus/xenbusb_front.c:
sys/xen/xenbus/xenbusb_back.c:
XenBus bus specialization for front and back devices.
MFC after: 1 month
2010-10-19 20:53:30 +00:00
|
|
|
err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
|
2013-05-31 21:05:07 +00:00
|
|
|
"sectors", "%lu", §ors,
|
|
|
|
"info", "%u", &binfo,
|
|
|
|
"sector-size", "%lu", §or_size,
|
|
|
|
NULL);
|
2008-08-12 20:01:57 +00:00
|
|
|
if (err) {
|
2008-12-04 07:59:05 +00:00
|
|
|
xenbus_dev_fatal(dev, err,
|
|
|
|
"reading backend fields at %s",
|
|
|
|
xenbus_get_otherend_path(dev));
|
2008-08-12 20:01:57 +00:00
|
|
|
return;
|
|
|
|
}
|
2016-04-03 11:18:20 +00:00
|
|
|
err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
|
|
|
|
"physical-sector-size", "%lu", &phys_sector_size,
|
|
|
|
NULL);
|
|
|
|
if (err || phys_sector_size <= sector_size)
|
|
|
|
phys_sector_size = 0;
|
2010-10-19 20:53:30 +00:00
|
|
|
err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
|
2013-05-31 21:05:07 +00:00
|
|
|
"feature-barrier", "%lu", &feature_barrier,
|
|
|
|
NULL);
|
2013-06-26 20:39:07 +00:00
|
|
|
if (err == 0 && feature_barrier != 0)
|
2013-06-14 17:00:58 +00:00
|
|
|
sc->xbd_flags |= XBDF_BARRIER;
|
2008-08-12 20:01:57 +00:00
|
|
|
|
2013-06-26 20:39:07 +00:00
|
|
|
err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
|
|
|
|
"feature-flush-cache", "%lu", &feature_flush,
|
|
|
|
NULL);
|
|
|
|
if (err == 0 && feature_flush != 0)
|
|
|
|
sc->xbd_flags |= XBDF_FLUSH;
|
|
|
|
|
2015-07-30 03:50:01 +00:00
|
|
|
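/* Negotiate indirect-segment support and clamp it to what this driver can use. */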
err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
|
|
|
|
"feature-max-indirect-segments", "%" PRIu32,
|
|
|
|
&sc->xbd_max_request_segments, NULL);
|
|
|
|
if ((err != 0) || (xbd_enable_indirect == 0))
|
|
|
|
sc->xbd_max_request_segments = 0;
|
|
|
|
if (sc->xbd_max_request_segments > XBD_MAX_INDIRECT_SEGMENTS)
|
|
|
|
sc->xbd_max_request_segments = XBD_MAX_INDIRECT_SEGMENTS;
|
|
|
|
if (sc->xbd_max_request_segments > XBD_SIZE_TO_SEGS(MAXPHYS))
|
|
|
|
sc->xbd_max_request_segments = XBD_SIZE_TO_SEGS(MAXPHYS);
|
|
|
|
sc->xbd_max_request_indirectpages =
|
|
|
|
XBD_INDIRECT_SEGS_TO_PAGES(sc->xbd_max_request_segments);
|
|
|
|
if (sc->xbd_max_request_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
|
|
|
|
sc->xbd_max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
|
|
|
|
sc->xbd_max_request_size =
|
|
|
|
XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
|
|
|
|
|
2015-06-21 05:36:58 +00:00
|
|
|
/* Allocate datastructures based on negotiated values. */
|
|
|
|
err = bus_dma_tag_create(
|
|
|
|
bus_get_dma_tag(sc->xbd_dev), /* parent */
|
|
|
|
512, PAGE_SIZE, /* algnmnt, boundary */
|
|
|
|
BUS_SPACE_MAXADDR, /* lowaddr */
|
|
|
|
BUS_SPACE_MAXADDR, /* highaddr */
|
|
|
|
NULL, NULL, /* filter, filterarg */
|
|
|
|
sc->xbd_max_request_size,
|
|
|
|
sc->xbd_max_request_segments,
|
|
|
|
PAGE_SIZE, /* maxsegsize */
|
|
|
|
BUS_DMA_ALLOCNOW, /* flags */
|
|
|
|
busdma_lock_mutex, /* lockfunc */
|
|
|
|
&sc->xbd_io_lock, /* lockarg */
|
|
|
|
&sc->xbd_io_dmat);
|
|
|
|
if (err != 0) {
|
|
|
|
xenbus_dev_fatal(sc->xbd_dev, err,
|
|
|
|
"Cannot allocate parent DMA tag\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Per-transaction data allocation. */
|
|
|
|
sc->xbd_shadow = malloc(sizeof(*sc->xbd_shadow) * sc->xbd_max_requests,
|
|
|
|
M_XENBLOCKFRONT, M_NOWAIT|M_ZERO);
|
|
|
|
if (sc->xbd_shadow == NULL) {
|
|
|
|
bus_dma_tag_destroy(sc->xbd_io_dmat);
|
|
|
|
xenbus_dev_fatal(sc->xbd_dev, ENOMEM,
|
|
|
|
"Cannot allocate request structures\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
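/* Initialize each request slot (S/G grant refs, DMA map, optional indirect pages) and hand it to xbd_free_command(). */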
for (i = 0; i < sc->xbd_max_requests; i++) {
|
|
|
|
struct xbd_command *cm;
|
2015-07-30 03:50:01 +00:00
|
|
|
void * indirectpages;
|
2015-06-21 05:36:58 +00:00
|
|
|
|
|
|
|
cm = &sc->xbd_shadow[i];
|
|
|
|
cm->cm_sg_refs = malloc(
|
|
|
|
sizeof(grant_ref_t) * sc->xbd_max_request_segments,
|
|
|
|
M_XENBLOCKFRONT, M_NOWAIT);
|
|
|
|
if (cm->cm_sg_refs == NULL)
|
|
|
|
break;
|
|
|
|
cm->cm_id = i;
|
|
|
|
cm->cm_flags = XBDCF_INITIALIZER;
|
|
|
|
cm->cm_sc = sc;
|
|
|
|
if (bus_dmamap_create(sc->xbd_io_dmat, 0, &cm->cm_map) != 0)
|
|
|
|
break;
|
2015-07-30 03:50:01 +00:00
|
|
|
if (sc->xbd_max_request_indirectpages > 0) {
|
|
|
|
indirectpages = contigmalloc(
|
|
|
|
PAGE_SIZE * sc->xbd_max_request_indirectpages,
|
|
|
|
M_XENBLOCKFRONT, M_ZERO, 0, ~0, PAGE_SIZE, 0);
|
|
|
|
} else {
|
|
|
|
indirectpages = NULL;
|
|
|
|
}
|
|
|
|
for (j = 0; j < sc->xbd_max_request_indirectpages; j++) {
|
|
|
|
if (gnttab_grant_foreign_access(
|
|
|
|
xenbus_get_otherend_id(sc->xbd_dev),
|
2015-08-06 17:07:21 +00:00
|
|
|
(vtophys(indirectpages) >> PAGE_SHIFT) + j,
|
2015-07-30 03:50:01 +00:00
|
|
|
1 /* grant read-only access */,
|
|
|
|
&cm->cm_indirectionrefs[j]))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (j < sc->xbd_max_request_indirectpages)
|
|
|
|
break;
|
|
|
|
cm->cm_indirectionpages = indirectpages;
|
2015-06-21 05:36:58 +00:00
|
|
|
xbd_free_command(cm);
|
|
|
|
}
|
|
|
|
|
2013-05-31 21:05:07 +00:00
|
|
|
if (sc->xbd_disk == NULL) {
|
2011-09-21 00:02:44 +00:00
|
|
|
device_printf(dev, "%juMB <%s> at %s",
|
|
|
|
(uintmax_t) sectors / (1048576 / sector_size),
|
|
|
|
device_get_desc(dev),
|
|
|
|
xenbus_get_node(dev));
|
|
|
|
bus_print_child_footer(device_get_parent(dev), dev);
|
2008-08-12 20:01:57 +00:00
|
|
|
|
2013-05-31 21:05:07 +00:00
|
|
|
xbd_instance_create(sc, sectors, sc->xbd_vdevice, binfo,
|
2016-04-03 11:18:20 +00:00
|
|
|
sector_size, phys_sector_size);
|
2011-09-21 00:02:44 +00:00
|
|
|
}
|
2008-12-04 07:59:05 +00:00
|
|
|
|
|
|
|
(void)xenbus_set_state(dev, XenbusStateConnected);
|
2008-08-12 20:01:57 +00:00
|
|
|
|
|
|
|
/* Kick pending requests. */
|
2013-05-31 21:05:07 +00:00
|
|
|
mtx_lock(&sc->xbd_io_lock);
|
2013-06-14 17:00:58 +00:00
|
|
|
sc->xbd_state = XBD_STATE_CONNECTED;
|
2013-05-31 21:05:07 +00:00
|
|
|
xbd_startio(sc);
|
2013-06-14 17:00:58 +00:00
|
|
|
sc->xbd_flags |= XBDF_READY;
|
2013-05-31 21:05:07 +00:00
|
|
|
mtx_unlock(&sc->xbd_io_lock);
|
2008-08-12 20:01:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Handle the change of state of the backend to Closing. We must delete our
|
|
|
|
* device-layer structures now, to ensure that writes are flushed through to
|
2009-12-28 18:59:13 +00:00
|
|
|
* the backend. Once this is done, we can switch to Closed in
|
2008-08-12 20:01:57 +00:00
|
|
|
* acknowledgement.
|
|
|
|
*/
|
2008-12-04 07:59:05 +00:00
|
|
|
static void
|
2013-05-31 22:21:37 +00:00
|
|
|
xbd_closing(device_t dev)
|
|
|
|
{
|
|
|
|
struct xbd_softc *sc = device_get_softc(dev);
|
2009-11-30 04:32:34 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
xenbus_set_state(dev, XenbusStateClosing);
|
2011-09-21 00:02:44 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
DPRINTK("xbd_closing: %s removed\n", xenbus_get_node(dev));
|
2009-11-30 04:32:34 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
if (sc->xbd_disk != NULL) {
|
|
|
|
disk_destroy(sc->xbd_disk);
|
|
|
|
sc->xbd_disk = NULL;
|
|
|
|
}
|
2008-08-12 20:01:57 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
xenbus_set_state(dev, XenbusStateClosed);
|
|
|
|
}
|
2008-08-12 20:01:57 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
/*---------------------------- NewBus Entrypoints ----------------------------*/
|
|
|
|
static int
|
|
|
|
xbd_probe(device_t dev)
|
|
|
|
{
|
2013-10-13 02:34:20 +00:00
|
|
|
if (strcmp(xenbus_get_type(dev), "vbd") != 0)
|
|
|
|
return (ENXIO);
|
2008-08-12 20:01:57 +00:00
|
|
|
|
2015-08-21 15:53:08 +00:00
|
|
|
if (xen_hvm_domain() && xen_disable_pv_disks != 0)
|
|
|
|
return (ENXIO);
|
|
|
|
|
2013-10-13 02:34:20 +00:00
|
|
|
if (xen_hvm_domain()) {
|
|
|
|
int error;
|
|
|
|
char *type;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* When running in an HVM domain, IDE disk emulation is
|
|
|
|
* disabled early in boot so that native drivers will
|
|
|
|
* not see emulated hardware. However, CDROM device
|
|
|
|
* emulation cannot be disabled.
|
|
|
|
*
|
|
|
|
* Through use of FreeBSD's vm_guest and xen_hvm_domain()
|
|
|
|
* APIs, we could modify the native CDROM driver to fail its
|
|
|
|
* probe when running under Xen. Unfortunately, the PV
|
|
|
|
* CDROM support in XenServer (up through at least version
|
|
|
|
* 6.2) isn't functional, so we instead rely on the emulated
|
|
|
|
* CDROM instance, and fail to attach the PV one here in
|
|
|
|
* the blkfront driver.
|
|
|
|
*/
|
|
|
|
error = xs_read(XST_NIL, xenbus_get_node(dev),
|
|
|
|
"device-type", NULL, (void **) &type);
|
|
|
|
if (error)
|
|
|
|
return (ENXIO);
|
|
|
|
|
|
|
|
if (strncmp(type, "cdrom", 5) == 0) {
|
|
|
|
free(type, M_XENSTORE);
|
|
|
|
return (ENXIO);
|
|
|
|
}
|
|
|
|
free(type, M_XENSTORE);
|
2008-08-12 20:01:57 +00:00
|
|
|
}
|
|
|
|
|
2013-10-13 02:34:20 +00:00
|
|
|
device_set_desc(dev, "Virtual Block Device");
|
|
|
|
device_quiet(dev);
|
|
|
|
return (0);
|
2008-08-12 20:01:57 +00:00
|
|
|
}
|
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
/*
|
|
|
|
* Setup supplies the backend dir and virtual device.  We place an event
|
|
|
|
* channel and shared frame entries, and watch the backend to learn when
|
|
|
|
* it is ready.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
xbd_attach(device_t dev)
|
2008-08-12 20:01:57 +00:00
|
|
|
{
|
2013-05-31 22:21:37 +00:00
|
|
|
struct xbd_softc *sc;
|
|
|
|
const char *name;
|
|
|
|
uint32_t vdevice;
|
|
|
|
int error;
|
|
|
|
int i;
|
|
|
|
int unit;
|
2008-08-12 20:01:57 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
/* FIXME: Use dynamic device id if this is not set. */
|
|
|
|
error = xs_scanf(XST_NIL, xenbus_get_node(dev),
|
|
|
|
"virtual-device", NULL, "%" PRIu32, &vdevice);
|
2013-08-30 01:46:56 +00:00
|
|
|
if (error)
|
|
|
|
error = xs_scanf(XST_NIL, xenbus_get_node(dev),
|
|
|
|
"virtual-device-ext", NULL, "%" PRIu32, &vdevice);
|
2013-05-31 22:21:37 +00:00
|
|
|
if (error) {
|
|
|
|
xenbus_dev_fatal(dev, error, "reading virtual-device");
|
|
|
|
device_printf(dev, "Couldn't determine virtual device.\n");
|
|
|
|
return (error);
|
2008-08-12 20:01:57 +00:00
|
|
|
}
|
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
xbd_vdevice_to_unit(vdevice, &unit, &name);
|
|
|
|
if (!strcmp(name, "xbd"))
|
|
|
|
device_set_unit(dev, unit);
|
2008-08-12 20:01:57 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
sc = device_get_softc(dev);
|
|
|
|
mtx_init(&sc->xbd_io_lock, "blkfront i/o lock", NULL, MTX_DEF);
|
2013-06-14 17:00:58 +00:00
|
|
|
xbd_initqs(sc);
|
2013-05-31 22:21:37 +00:00
|
|
|
for (i = 0; i < XBD_MAX_RING_PAGES; i++)
|
2013-06-01 04:02:51 +00:00
|
|
|
sc->xbd_ring_ref[i] = GRANT_REF_INVALID;
|
2008-08-12 20:01:57 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
sc->xbd_dev = dev;
|
|
|
|
sc->xbd_vdevice = vdevice;
|
2013-06-14 17:00:58 +00:00
|
|
|
sc->xbd_state = XBD_STATE_DISCONNECTED;
|
2009-11-30 04:32:34 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
xbd_setup_sysctl(sc);
|
2009-11-30 04:32:34 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
/* Wait for backend device to publish its protocol capabilities. */
|
|
|
|
xenbus_set_state(dev, XenbusStateInitialising);
|
2008-08-12 20:01:57 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
return (0);
|
|
|
|
}
|
2008-08-12 20:01:57 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
static int
|
|
|
|
xbd_detach(device_t dev)
|
|
|
|
{
|
|
|
|
struct xbd_softc *sc = device_get_softc(dev);
|
2008-08-12 20:01:57 +00:00
|
|
|
|
2013-06-14 17:00:58 +00:00
|
|
|
DPRINTK("%s: %s removed\n", __func__, xenbus_get_node(dev));
|
2008-08-12 20:01:57 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
xbd_free(sc);
|
|
|
|
mtx_destroy(&sc->xbd_io_lock);
|
2011-09-21 00:02:44 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
return 0;
|
2008-08-12 20:01:57 +00:00
|
|
|
}
|
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
static int
|
|
|
|
xbd_suspend(device_t dev)
|
2008-08-12 20:01:57 +00:00
|
|
|
{
|
2013-05-31 22:21:37 +00:00
|
|
|
struct xbd_softc *sc = device_get_softc(dev);
|
|
|
|
int retval;
|
|
|
|
int saved_state;
|
|
|
|
|
2010-10-19 20:53:30 +00:00
|
|
|
/* Prevent new requests being issued until we fix things up. */
|
2013-05-31 21:05:07 +00:00
|
|
|
mtx_lock(&sc->xbd_io_lock);
|
2013-06-14 17:00:58 +00:00
|
|
|
saved_state = sc->xbd_state;
|
|
|
|
sc->xbd_state = XBD_STATE_SUSPENDED;
|
2013-05-31 21:05:07 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
/* Wait for outstanding I/O to drain. */
|
|
|
|
retval = 0;
|
2013-06-26 20:39:07 +00:00
|
|
|
while (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
|
2013-06-14 17:00:58 +00:00
|
|
|
if (msleep(&sc->xbd_cm_q[XBD_Q_BUSY], &sc->xbd_io_lock,
|
2013-05-31 22:21:37 +00:00
|
|
|
PRIBIO, "blkf_susp", 30 * hz) == EWOULDBLOCK) {
|
|
|
|
retval = EBUSY;
|
|
|
|
break;
|
2010-10-19 20:53:30 +00:00
|
|
|
}
|
2008-08-12 20:01:57 +00:00
|
|
|
}
|
2013-05-31 22:21:37 +00:00
|
|
|
mtx_unlock(&sc->xbd_io_lock);
|
2008-08-12 20:01:57 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
if (retval != 0)
|
2013-06-14 17:00:58 +00:00
|
|
|
sc->xbd_state = saved_state;
|
2010-10-19 20:53:30 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
return (retval);
|
|
|
|
}
|
2010-10-19 20:53:30 +00:00
|
|
|
|
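/**
 * Handle a device resume event by tearing down driver resources and
 * re-initializing, which restarts the connection handshake with the
 * backend.
 */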
2013-05-31 22:21:37 +00:00
|
|
|
static int
|
|
|
|
xbd_resume(device_t dev)
|
|
|
|
{
|
|
|
|
struct xbd_softc *sc = device_get_softc(dev);
|
2010-10-19 20:53:30 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
DPRINTK("xbd_resume: %s\n", xenbus_get_node(dev));
|
2011-09-21 00:02:44 +00:00
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
xbd_free(sc);
|
|
|
|
xbd_initialize(sc);
|
|
|
|
return (0);
|
2008-08-12 20:01:57 +00:00
|
|
|
}
|
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
/**
|
|
|
|
* Callback received when the backend's state changes.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
xbd_backend_changed(device_t dev, XenbusState backend_state)
|
2008-08-12 20:01:57 +00:00
|
|
|
{
|
2013-05-31 22:21:37 +00:00
|
|
|
struct xbd_softc *sc = device_get_softc(dev);
|
|
|
|
|
|
|
|
DPRINTK("backend_state=%d\n", backend_state);
|
|
|
|
|
|
|
|
switch (backend_state) {
|
|
|
|
case XenbusStateUnknown:
|
|
|
|
case XenbusStateInitialising:
|
|
|
|
case XenbusStateReconfigured:
|
|
|
|
case XenbusStateReconfiguring:
|
|
|
|
case XenbusStateClosed:
|
|
|
|
break;
|
|
|
|
|
|
|
|
case XenbusStateInitWait:
|
|
|
|
case XenbusStateInitialised:
|
|
|
|
xbd_initialize(sc);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case XenbusStateConnected:
|
|
|
|
xbd_initialize(sc);
|
|
|
|
xbd_connect(sc);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case XenbusStateClosing:
|
|
|
|
if (sc->xbd_users > 0)
|
|
|
|
xenbus_dev_error(dev, -EBUSY,
|
|
|
|
"Device in use; refusing to close");
|
|
|
|
else
|
|
|
|
xbd_closing(dev);
|
|
|
|
break;
|
|
|
|
}
|
2008-08-12 20:01:57 +00:00
|
|
|
}
|
|
|
|
|
2013-05-31 22:21:37 +00:00
|
|
|
/*---------------------------- NewBus Registration ---------------------------*/
|
2013-05-31 21:05:07 +00:00
|
|
|
static device_method_t xbd_methods[] = {
|
2008-12-04 07:59:05 +00:00
|
|
|
/* Device interface */
|
2013-05-31 21:05:07 +00:00
|
|
|
DEVMETHOD(device_probe, xbd_probe),
|
|
|
|
DEVMETHOD(device_attach, xbd_attach),
|
|
|
|
DEVMETHOD(device_detach, xbd_detach),
|
2008-12-04 07:59:05 +00:00
|
|
|
DEVMETHOD(device_shutdown, bus_generic_shutdown),
|
2013-05-31 21:05:07 +00:00
|
|
|
DEVMETHOD(device_suspend, xbd_suspend),
|
|
|
|
DEVMETHOD(device_resume, xbd_resume),
|
2008-12-04 07:59:05 +00:00
|
|
|
|
|
|
|
/* Xenbus interface */
|
2013-05-31 21:05:07 +00:00
|
|
|
DEVMETHOD(xenbus_otherend_changed, xbd_backend_changed),
|
2008-12-04 07:59:05 +00:00
|
|
|
|
|
|
|
{ 0, 0 }
|
|
|
|
};
|
|
|
|
|
2013-05-31 21:05:07 +00:00
|
|
|
static driver_t xbd_driver = {
|
2008-12-04 07:59:05 +00:00
|
|
|
"xbd",
|
2013-05-31 21:05:07 +00:00
|
|
|
xbd_methods,
|
|
|
|
sizeof(struct xbd_softc),
|
2008-12-04 07:59:05 +00:00
|
|
|
};
|
2013-05-31 21:05:07 +00:00
|
|
|
devclass_t xbd_devclass;
|
2008-12-04 07:59:05 +00:00
|
|
|
|
2013-05-31 21:05:07 +00:00
|
|
|
DRIVER_MODULE(xbd, xenbusb_front, xbd_driver, xbd_devclass, 0, 0);
|
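/*
 * DRIVER_MODULE() above registers the xbd driver and its devclass with
 * the xenbusb_front bus, so newbus probes and attaches an xbd instance
 * for each virtual block device the XenBus front-end bus enumerates.
 */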