xen: Code cleanup and small bug fixes

xen/hypervisor.h:
 - Remove unused helpers: MULTI_update_va_mapping, is_initial_xendomain,
   is_running_on_xen
 - Remove unused define CONFIG_X86_PAE
 - Remove unused variable xen_start_info: note that it's used in pcifront,
   which is not built at all
 - Remove forward declaration of HYPERVISOR_crash

xen/xen-os.h:
 - Remove unused define CONFIG_X86_PAE
 - Drop unused helpers: test_and_clear_bit, clear_bit,
   force_evtchn_callback
 - Implement a generic version (based on ofed/include/linux/bitops.h) of
   set_bit and test_bit and prefix them with xen_ to avoid any use by code
   other than Xen. Note that it would be worthwhile to investigate a
   generic implementation in FreeBSD.
 - Replace barrier() by __compiler_membar()
 - Replace cpu_relax() by cpu_spinwait(): it's exactly the same as rep;nop
   = pause

xen/xen_intr.h:
 - Move the prototype of xen_intr_handle_upcall into it: it is used by all
   the platforms

x86/xen/xen_intr.c:
 - Use BITSET* for the enabledbits: avoids the need for custom helpers
 - test_bit/set_bit has been renamed to xen_test_bit/xen_set_bit
 - Don't export the variable xen_intr_pcpu

dev/xen/blkback/blkback.c:
 - Fix the string format when XBB_DEBUG is enabled: host_addr is typed
   uint64_t

dev/xen/balloon/balloon.c:
 - Remove set but not used variable
 - Use the correct type for frame_list: xen_pfn_t represents the frame
   number on any architecture

dev/xen/control/control.c:
 - Return BUS_PROBE_NOWILDCARD in xctrl_probe: Returning 0 in a probe
   callback means the driver can handle this device. If by any chance the
   control driver is the first driver, every new device whose driver is
   unset would be claimed by it.

dev/xen/grant-table/grant_table.c:
 - Remove unused cmpxchg
 - Drop unused include opt_pmap.h: Doesn't exist on ARM64 and it doesn't
   contain anything required for the code on x86

dev/xen/netfront/netfront.c:
 - Use the correct type for rx_pfn_array: xen_pfn_t represents the frame
   number on any architecture

dev/xen/netback/netback.c:
 - Use the correct type for gmfn: xen_pfn_t represents the frame number on
   any architecture

dev/xen/xenstore/xenstore.c:
 - Return BUS_PROBE_NOWILDCARD in xs_probe: Returning 0 in a probe callback
   means the driver can handle this device. If by any chance xenstore is the
   first driver, every new device whose driver is unset will use xenstore.

Note that with these changes, x86/include/xen/xen-os.h no longer contains
any arch-specific code. However, an upcoming series will add some helpers
that differ between x86 and ARM64, so I've kept the headers for now.

Submitted by:		Julien Grall <julien.grall@citrix.com>
Reviewed by:		royger
Differential Revision:	https://reviews.freebsd.org/D3921
Sponsored by:		Citrix Systems R&D
This commit is contained in:
Roger Pau Monné 2015-10-21 10:44:07 +00:00
parent 6a306bff7f
commit 2f9ec994bc
15 changed files with 69 additions and 188 deletions

View File

@ -57,7 +57,7 @@ static MALLOC_DEFINE(M_BALLOON, "Balloon", "Xen Balloon Driver");
struct mtx balloon_mutex;
/* We increase/decrease in batches which fit in a page */
static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
struct balloon_stats {
/* We aim for 'current allocation' == 'target allocation'. */
@ -149,7 +149,7 @@ minimum_target(void)
static int
increase_reservation(unsigned long nr_pages)
{
unsigned long pfn, i;
unsigned long i;
vm_page_t page;
long rc;
struct xen_memory_reservation reservation = {
@ -195,7 +195,6 @@ increase_reservation(unsigned long nr_pages)
TAILQ_REMOVE(&ballooned_pages, page, plinks.q);
bs.balloon_low--;
pfn = (VM_PAGE_TO_PHYS(page) >> PAGE_SHIFT);
KASSERT(xen_feature(XENFEAT_auto_translated_physmap),
("auto translated physmap but mapping is valid"));
@ -211,7 +210,7 @@ increase_reservation(unsigned long nr_pages)
static int
decrease_reservation(unsigned long nr_pages)
{
unsigned long pfn, i;
unsigned long i;
vm_page_t page;
int need_sleep = 0;
int ret;
@ -246,8 +245,7 @@ decrease_reservation(unsigned long nr_pages)
pmap_zero_page(page);
}
pfn = (VM_PAGE_TO_PHYS(page) >> PAGE_SHIFT);
frame_list[i] = pfn;
frame_list[i] = (VM_PAGE_TO_PHYS(page) >> PAGE_SHIFT);
TAILQ_INSERT_HEAD(&ballooned_pages, page, plinks.q);
bs.balloon_low++;

View File

@ -1741,7 +1741,7 @@ xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
if (__predict_false(map->status != 0)) {
DPRINTF("invalid buffer -- could not remap "
"it (%d)\n", map->status);
DPRINTF("Mapping(%d): Host Addr 0x%lx, flags "
DPRINTF("Mapping(%d): Host Addr 0x%"PRIx64", flags "
"0x%x ref 0x%x, dom %d\n", seg_idx,
map->host_addr, map->flags, map->ref,
map->dom);

View File

@ -365,7 +365,7 @@ xctrl_probe(device_t dev)
{
device_set_desc(dev, "Xen Control Device");
return (0);
return (BUS_PROBE_NOWILDCARD);
}
/**

View File

@ -13,8 +13,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_pmap.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
@ -27,6 +25,7 @@ __FBSDID("$FreeBSD$");
#include <sys/limits.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <machine/cpu.h>
#include <xen/xen-os.h>
#include <xen/hypervisor.h>
@ -40,8 +39,6 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#define cmpxchg(a, b, c) atomic_cmpset_int((volatile u_int *)(a),(b),(c))
/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t))
@ -291,13 +288,13 @@ gnttab_end_foreign_transfer_ref(grant_ref_t ref)
while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
if ( synch_cmpxchg(&shared[ref].flags, flags, 0) == flags )
return (0);
cpu_relax();
cpu_spinwait();
}
/* If a transfer is in progress then wait until it is completed. */
while (!(flags & GTF_transfer_completed)) {
flags = shared[ref].flags;
cpu_relax();
cpu_spinwait();
}
/* Read the frame number /after/ reading completion status. */

View File

@ -524,13 +524,15 @@ xnb_dump_gnttab_copy(const struct gnttab_copy *entry)
if (entry->flags & GNTCOPY_dest_gref)
printf("gnttab dest ref=\t%u\n", entry->dest.u.ref);
else
printf("gnttab dest gmfn=\t%lu\n", entry->dest.u.gmfn);
printf("gnttab dest gmfn=\t%"PRI_xen_pfn"\n",
entry->dest.u.gmfn);
printf("gnttab dest offset=\t%hu\n", entry->dest.offset);
printf("gnttab dest domid=\t%hu\n", entry->dest.domid);
if (entry->flags & GNTCOPY_source_gref)
printf("gnttab source ref=\t%u\n", entry->source.u.ref);
else
printf("gnttab source gmfn=\t%lu\n", entry->source.u.gmfn);
printf("gnttab source gmfn=\t%"PRI_xen_pfn"\n",
entry->source.u.gmfn);
printf("gnttab source offset=\t%hu\n", entry->source.offset);
printf("gnttab source domid=\t%hu\n", entry->source.domid);
printf("gnttab len=\t%hu\n", entry->len);

View File

@ -232,7 +232,7 @@ struct netfront_info {
int xn_if_flags;
struct callout xn_stat_ch;
u_long rx_pfn_array[NET_RX_RING_SIZE];
xen_pfn_t rx_pfn_array[NET_RX_RING_SIZE];
struct ifmedia sc_media;
bool xn_resume;

View File

@ -43,14 +43,13 @@ __FBSDID("$FreeBSD$");
#include <xen/features.h>
#include <xen/hypervisor.h>
#include <xen/hvm.h>
#include <xen/xen_intr.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/xen/xenpci/xenpcivar.h>
extern void xen_intr_handle_upcall(struct trapframe *trap_frame);
/*
* This is used to find our platform device instance.
*/

View File

@ -1124,7 +1124,7 @@ xs_probe(device_t dev)
* Unconditionally return success.
*/
device_set_desc(dev, "XenStore");
return (0);
return (BUS_PROBE_NOWILDCARD);
}
static void

View File

@ -455,7 +455,6 @@ void lapic_handle_cmc(void);
void lapic_handle_error(void);
void lapic_handle_intr(int vector, struct trapframe *frame);
void lapic_handle_timer(struct trapframe *frame);
void xen_intr_handle_upcall(struct trapframe *frame);
void hv_vector_handler(struct trapframe *frame);
extern int x2apic_mode;

View File

@ -30,103 +30,9 @@
#ifndef _MACHINE_X86_XEN_XEN_OS_H_
#define _MACHINE_X86_XEN_XEN_OS_H_
#ifdef PAE
#define CONFIG_X86_PAE
#endif
/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
__asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()
/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")
#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)
/**
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static __inline int test_and_clear_bit(int nr, volatile void * addr)
{
int oldbit;
__asm__ __volatile__( LOCK_PREFIX
"btrl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"=m" (ADDR)
:"Ir" (nr) : "memory");
return oldbit;
}
static __inline int constant_test_bit(int nr, const volatile void * addr)
{
return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}
static __inline int variable_test_bit(int nr, volatile void * addr)
{
int oldbit;
__asm__ __volatile__(
"btl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit)
:"m" (ADDR),"Ir" (nr));
return oldbit;
}
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
constant_test_bit((nr),(addr)) : \
variable_test_bit((nr),(addr)))
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* This function is atomic and may not be reordered. See __set_bit()
* if you do not require the atomic guarantees.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static __inline__ void set_bit(int nr, volatile void * addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btsl %1,%0"
:"=m" (ADDR)
:"Ir" (nr));
}
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
static __inline__ void clear_bit(int nr, volatile void * addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btrl %1,%0"
:"=m" (ADDR)
:"Ir" (nr));
}
#endif /* !__ASSEMBLY__ */
#endif /* _MACHINE_X86_XEN_XEN_OS_H_ */

View File

@ -71,6 +71,9 @@ __FBSDID("$FreeBSD$");
static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");
#define ENABLED_SETSIZE (sizeof(u_long) * 8)
BITSET_DEFINE(enabledbits, ENABLED_SETSIZE)
/**
* Per-cpu event channel processing state.
*/
@ -95,14 +98,14 @@ struct xen_intr_pcpu_data {
* A bitmap of ports that can be serviced from this CPU.
* A set bit means interrupt handling is enabled.
*/
u_long evtchn_enabled[sizeof(u_long) * 8];
struct enabledbits evtchn_enabled;
};
/*
* Start the scan at port 0 by initializing the last scanned
* location as the highest numbered event channel port.
*/
DPCPU_DEFINE(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
static DPCPU_DEFINE(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
.last_processed_l1i = LONG_BIT - 1,
.last_processed_l2i = LONG_BIT - 1
};
@ -212,7 +215,7 @@ evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
struct xen_intr_pcpu_data *pcpu;
pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
clear_bit(port, pcpu->evtchn_enabled);
BIT_CLR_ATOMIC(ENABLED_SETSIZE, port, &pcpu->evtchn_enabled);
}
/**
@ -234,7 +237,7 @@ evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
struct xen_intr_pcpu_data *pcpu;
pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
set_bit(port, pcpu->evtchn_enabled);
BIT_SET_ATOMIC(ENABLED_SETSIZE, port, &pcpu->evtchn_enabled);
}
/**
@ -498,7 +501,7 @@ xen_intr_active_ports(struct xen_intr_pcpu_data *pcpu, shared_info_t *sh,
{
return (sh->evtchn_pending[idx]
& ~sh->evtchn_mask[idx]
& pcpu->evtchn_enabled[idx]);
& pcpu->evtchn_enabled.__bits[idx]);
}
/**
@ -634,8 +637,10 @@ xen_intr_init(void *dummy __unused)
*/
CPU_FOREACH(i) {
pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
sizeof(pcpu->evtchn_enabled));
if (i == 0)
BIT_FILL(ENABLED_SETSIZE, &pcpu->evtchn_enabled);
else
BIT_ZERO(ENABLED_SETSIZE, &pcpu->evtchn_enabled);
xen_intr_intrcnt_add(i);
}
@ -748,8 +753,11 @@ xen_intr_resume(struct pic *unused, bool suspend_cancelled)
struct xen_intr_pcpu_data *pcpu;
pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
memset(pcpu->evtchn_enabled,
i == 0 ? ~0 : 0, sizeof(pcpu->evtchn_enabled));
if (i == 0)
BIT_FILL(ENABLED_SETSIZE, &pcpu->evtchn_enabled);
else
BIT_ZERO(ENABLED_SETSIZE, &pcpu->evtchn_enabled);
}
/* Mask all event channels. */
@ -1033,7 +1041,7 @@ xen_intr_pirq_eoi_source(struct intsrc *base_isrc)
isrc = (struct xenisrc *)base_isrc;
if (test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map)) {
if (xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map)) {
struct physdev_eoi eoi = { .irq = isrc->xi_pirq };
error = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
@ -1070,7 +1078,7 @@ xen_intr_pirq_enable_intr(struct intsrc *base_isrc)
* Since the dynamic PIRQ EOI map is not available
* mark the PIRQ as needing EOI unconditionally.
*/
set_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map);
xen_set_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map);
}
}
@ -1591,20 +1599,21 @@ xen_intr_dump_port(struct xenisrc *isrc)
db_printf("\tPirq: %d ActiveHi: %d EdgeTrigger: %d "
"NeedsEOI: %d\n",
isrc->xi_pirq, isrc->xi_activehi, isrc->xi_edgetrigger,
!!test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map));
!!xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map));
}
if (isrc->xi_type == EVTCHN_TYPE_VIRQ)
db_printf("\tVirq: %d\n", isrc->xi_virq);
db_printf("\tMasked: %d Pending: %d\n",
!!test_bit(isrc->xi_port, &s->evtchn_mask[0]),
!!test_bit(isrc->xi_port, &s->evtchn_pending[0]));
!!xen_test_bit(isrc->xi_port, &s->evtchn_mask[0]),
!!xen_test_bit(isrc->xi_port, &s->evtchn_pending[0]));
db_printf("\tPer-CPU Masks: ");
CPU_FOREACH(i) {
pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
db_printf("cpu#%d: %d ", i,
!!test_bit(isrc->xi_port, pcpu->evtchn_enabled));
BIT_ISSET(ENABLED_SETSIZE, isrc->xi_port,
&pcpu->evtchn_enabled));
}
db_printf("\n");
}

View File

@ -120,7 +120,7 @@ static void inline blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_reque
dst->handle = src->handle;
dst->id = src->id;
dst->sector_number = src->sector_number;
barrier();
__compiler_membar();
if (n > dst->nr_segments)
n = dst->nr_segments;
for (i = 0; i < n; i++)
@ -135,7 +135,7 @@ static void inline blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_reque
dst->handle = src->handle;
dst->id = src->id;
dst->sector_number = src->sector_number;
barrier();
__compiler_membar();
if (n > dst->nr_segments)
n = dst->nr_segments;
for (i = 0; i < n; i++)

View File

@ -11,22 +11,6 @@
#ifndef __XEN_HYPERVISOR_H__
#define __XEN_HYPERVISOR_H__
#ifdef XENHVM
#define is_running_on_xen() (HYPERVISOR_shared_info != NULL)
#else
#define is_running_on_xen() 1
#endif
#ifdef PAE
#ifndef CONFIG_X86_PAE
#define CONFIG_X86_PAE
#endif
#endif
#include <sys/cdefs.h>
#include <sys/systm.h>
#include <xen/interface/xen.h>
@ -38,22 +22,6 @@
#include <xen/interface/memory.h>
#include <machine/xen/hypercall.h>
#if defined(__amd64__)
#define MULTI_UVMFLAGS_INDEX 2
#define MULTI_UVMDOMID_INDEX 3
#else
#define MULTI_UVMFLAGS_INDEX 3
#define MULTI_UVMDOMID_INDEX 4
#endif
#ifdef CONFIG_XEN_PRIVILEGED_GUEST
#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
#else
#define is_initial_xendomain() 0
#endif
extern start_info_t *xen_start_info;
extern uint64_t get_system_time(int ticks);
static inline int
@ -62,8 +30,6 @@ HYPERVISOR_console_write(const char *str, int count)
return HYPERVISOR_console_io(CONSOLEIO_write, count, str);
}
static inline void HYPERVISOR_crash(void) __dead2;
static inline int
HYPERVISOR_yield(void)
{
@ -132,23 +98,4 @@ HYPERVISOR_poll(
return (rc);
}
static inline void
MULTI_update_va_mapping(
multicall_entry_t *mcl, unsigned long va,
uint64_t new_val, unsigned long flags)
{
mcl->op = __HYPERVISOR_update_va_mapping;
mcl->args[0] = va;
#if defined(__amd64__)
mcl->args[1] = new_val;
#elif defined(PAE)
mcl->args[1] = (uint32_t)(new_val & 0xffffffff) ;
mcl->args[2] = (uint32_t)(new_val >> 32);
#else
mcl->args[1] = new_val;
mcl->args[2] = 0;
#endif
mcl->args[MULTI_UVMFLAGS_INDEX] = flags;
}
#endif /* __XEN_HYPERVISOR_H__ */

View File

@ -47,9 +47,6 @@
/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__
/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);
extern shared_info_t *HYPERVISOR_shared_info;
extern start_info_t *HYPERVISOR_start_info;
@ -92,6 +89,31 @@ xen_initial_domain(void)
(HYPERVISOR_start_info->flags & SIF_INITDOMAIN) != 0);
}
/*
* Based on ofed/include/linux/bitops.h
*
* Those helpers are prefixed by xen_ because xen-os.h is widely included
* and we don't want the other drivers using them.
*
*/
#define NBPL (NBBY * sizeof(long))
static inline bool
xen_test_bit(int bit, volatile long *addr)
{
unsigned long mask = 1UL << (bit % NBPL);
return !!(atomic_load_acq_long(&addr[bit / NBPL]) & mask);
}
static inline void
xen_set_bit(int bit, volatile long *addr)
{
atomic_set_long(&addr[bit / NBPL], 1UL << (bit % NBPL));
}
#undef NPBL
/*
* Functions to allocate/free unused memory in order
* to map memory from other domains.

View File

@ -41,6 +41,8 @@ typedef void * xen_intr_handle_t;
/** If non-zero, the hypervisor has been configured to use a direct vector */
extern int xen_vector_callback_enabled;
void xen_intr_handle_upcall(struct trapframe *trap_frame);
/**
* Associate an already allocated local event channel port an interrupt
* handler.