xen: switch to using core atomics for synchronization
Now that the atomic macros are always genuinely atomic on x86, they can
be used for synchronization with Xen.  A single-core VM isn't too
unusual, but actual single-core hardware is uncommon.

Replace an open-coding of evtchn_clear_port() with the inline.

Substantially inspired by work done by Julien Grall <julien@xen.org>,
2014-01-13 17:40:58.

Reviewed by:	royger
MFC after:	1 week
commit 9f3be3a6ec
parent 49ca3167b7
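As a rough standalone sketch of the pattern this commit adopts: the port
number selects a word and a bit within the shared bitmap, and real atomic
read-modify-write operations replace the hand-rolled "lock btsl/btrl"
assembly being deleted. This uses C11 atomics and hypothetical
XWORD/XBIT/XMASK names standing in for the kernel's atomic(9) calls and the
new EVTCHN_PTR/EVTCHN_BIT/EVTCHN_MASK macros; it is not the kernel code.

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-ins for EVTCHN_PTR()/EVTCHN_BIT()/EVTCHN_MASK(). */
#define XWORD(port)	((port) / (sizeof(unsigned long) * CHAR_BIT))
#define XBIT(port)	((port) % (sizeof(unsigned long) * CHAR_BIT))
#define XMASK(port)	(1UL << XBIT(port))

static _Atomic unsigned long pending[4];	/* stands in for evtchn_pending[] */

/* Counterpart of atomic_clear_long(EVTCHN_PTR(pending, port), EVTCHN_MASK(port)). */
static void
clear_port(unsigned int port)
{
	atomic_fetch_and(&pending[XWORD(port)], ~XMASK(port));
}

/* Counterpart of atomic_testandset_long(): set the bit, report its old value. */
static int
test_and_set(unsigned int port)
{
	return ((atomic_fetch_or(&pending[XWORD(port)], XMASK(port)) &
	    XMASK(port)) != 0);
}

int
main(void)
{
	printf("first set:   %d\n", test_and_set(42));	/* prints 0 */
	printf("second set:  %d\n", test_and_set(42));	/* prints 1 */
	clear_port(42);
	printf("after clear: %d\n", test_and_set(42));	/* prints 0 */
	return (0);
}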
@@ -1,129 +0,0 @@
-#ifndef __XEN_SYNCH_BITOPS_H__
-#define __XEN_SYNCH_BITOPS_H__
-
-/*
- * Copyright 1992, Linus Torvalds.
- * Heavily modified to provide guaranteed strong synchronisation
- * when communicating with Xen or other guest OSes running on other CPUs.
- */
-
-
-#define ADDR (*(volatile long *) addr)
-
-static __inline__ void synch_set_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btsl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_clear_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btrl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_change_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btcl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "lock btsl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "lock btrl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-
-    __asm__ __volatile__ (
-        "lock btcl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-struct __synch_xchg_dummy { unsigned long a[100]; };
-#define __synch_xg(x) ((volatile struct __synch_xchg_dummy *)(x))
-
-#define synch_cmpxchg(ptr, old, new) \
-((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
-                                     (unsigned long)(old), \
-                                     (unsigned long)(new), \
-                                     sizeof(*(ptr))))
-
-static inline unsigned long __synch_cmpxchg(volatile void *ptr,
-                                            unsigned long old,
-                                            unsigned long new, int size)
-{
-    unsigned long prev;
-    switch (size) {
-    case 1:
-        __asm__ __volatile__("lock; cmpxchgb %b1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-    case 2:
-        __asm__ __volatile__("lock; cmpxchgw %w1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-    case 4:
-        __asm__ __volatile__("lock; cmpxchgl %k1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-    case 8:
-        __asm__ __volatile__("lock; cmpxchgq %1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-    }
-    return old;
-}
-
-static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
-{
-    return ((1UL << (nr & 31)) &
-            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "btl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
-    return oldbit;
-}
-
-#define synch_test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- synch_const_test_bit((nr),(addr)) : \
- synch_var_test_bit((nr),(addr)))
-
-#endif /* __XEN_SYNCH_BITOPS_H__ */
@@ -58,8 +58,6 @@ __FBSDID("$FreeBSD$");
 #include <sys/filio.h>
 #include <sys/vnode.h>
 
-#include <machine/xen/synch_bitops.h>
-
 #include <xen/xen-os.h>
 #include <xen/evtchn.h>
 #include <xen/xen_intr.h>
@@ -28,9 +28,6 @@ __FBSDID("$FreeBSD$");
 #include <machine/cpu.h>
 
 #include <xen/xen-os.h>
 #include <xen/hypervisor.h>
-#include <machine/xen/synch_bitops.h>
-
-#include <xen/hypervisor.h>
 #include <xen/gnttab.h>
 
@@ -182,18 +179,15 @@ gnttab_query_foreign_access(grant_ref_t ref)
 int
 gnttab_end_foreign_access_ref(grant_ref_t ref)
 {
-	uint16_t flags, nflags;
+	uint16_t flags;
 
-	nflags = shared[ref].flags;
-	do {
-		if ( (flags = nflags) & (GTF_reading|GTF_writing) ) {
-			printf("%s: WARNING: g.e. still in use!\n", __func__);
-			return (0);
-		}
-	} while ((nflags = synch_cmpxchg(&shared[ref].flags, flags, 0)) !=
-	    flags);
+	while (!((flags = atomic_load_16(&shared[ref].flags)) &
+	    (GTF_reading|GTF_writing)))
+		if (atomic_cmpset_16(&shared[ref].flags, flags, 0))
+			return (1);
 
-	return (1);
+	printf("%s: WARNING: g.e. still in use!\n", __func__);
+	return (0);
 }
 
 void
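The rewritten gnttab_end_foreign_access_ref() above drops the
synch_cmpxchg() do/while in favor of a load-then-cmpset retry loop. A
standalone C11 sketch of the same shape follows; the names are
hypothetical and the flag values are illustrative, the real ones live in
Xen's grant_table.h.

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative flag bits, not authoritative. */
#define GTF_reading	(1u << 3)
#define GTF_writing	(1u << 4)

static _Atomic uint16_t grant_flags;

static int
end_foreign_access(void)
{
	uint16_t flags;

	/* Retry only when the entry looks idle but the CAS loses a race. */
	while (!((flags = atomic_load(&grant_flags)) &
	    (GTF_reading | GTF_writing)))
		if (atomic_compare_exchange_weak(&grant_flags, &flags, 0))
			return (1);	/* reclaimed */

	return (0);	/* grant entry still in use */
}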
@@ -284,17 +278,21 @@ gnttab_end_foreign_transfer_ref(grant_ref_t ref)
 	/*
 	 * If a transfer is not even yet started, try to reclaim the grant
 	 * reference and return failure (== 0).
+	 *
+	 * NOTE: This is a loop since the atomic cmpset can fail multiple
+	 * times.  In normal operation it will be rare to execute more than
+	 * twice.  Attempting an attack would consume a great deal of
+	 * attacker resources and be unlikely to prolong the loop very much.
 	 */
-	while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
-		if ( synch_cmpxchg(&shared[ref].flags, flags, 0) == flags )
+	while (!((flags = atomic_load_16(&shared[ref].flags)) &
+	    GTF_transfer_committed))
+		if (atomic_cmpset_16(&shared[ref].flags, flags, 0))
 			return (0);
-		cpu_spinwait();
-	}
 
 	/* If a transfer is in progress then wait until it is completed. */
 	while (!(flags & GTF_transfer_completed)) {
-		flags = shared[ref].flags;
 		cpu_spinwait();
+		flags = atomic_load_16(&shared[ref].flags);
 	}
 
 	/* Read the frame number /after/ reading completion status. */
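The hunk above keeps a two-phase structure: reclaim with a cmpset while the
transfer is uncommitted, then spin on an atomic load until the peer marks it
completed. A minimal standalone sketch, under the same assumptions as the
previous example (hypothetical names, illustrative flag values):

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative bits; the real values live in Xen's grant_table.h. */
#define GTF_transfer_committed	(1u << 2)
#define GTF_transfer_completed	(1u << 3)

static _Atomic uint16_t xfer_flags;

static int
end_foreign_transfer(void)
{
	uint16_t flags;

	/* Phase 1: transfer never started - try to reclaim the entry. */
	while (!((flags = atomic_load(&xfer_flags)) & GTF_transfer_committed))
		if (atomic_compare_exchange_weak(&xfer_flags, &flags, 0))
			return (0);

	/* Phase 2: committed - spin until completion is signalled. */
	while (!(flags & GTF_transfer_completed))
		flags = atomic_load(&xfer_flags);	/* cpu_spinwait() analog omitted */

	return (1);
}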
@@ -1,138 +0,0 @@
-#ifndef __XEN_SYNCH_BITOPS_H__
-#define __XEN_SYNCH_BITOPS_H__
-
-/*
- * Copyright 1992, Linus Torvalds.
- * Heavily modified to provide guaranteed strong synchronisation
- * when communicating with Xen or other guest OSes running on other CPUs.
- */
-
-#define ADDR (*(volatile long *) addr)
-
-static __inline__ void synch_set_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btsl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_clear_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btrl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_change_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btcl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "lock btsl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "lock btrl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-
-    __asm__ __volatile__ (
-        "lock btcl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-struct __synch_xchg_dummy { unsigned long a[100]; };
-#define __synch_xg(x) ((volatile struct __synch_xchg_dummy *)(x))
-
-#define synch_cmpxchg(ptr, old, new) \
-((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
-                                     (unsigned long)(old), \
-                                     (unsigned long)(new), \
-                                     sizeof(*(ptr))))
-
-static inline unsigned long __synch_cmpxchg(volatile void *ptr,
-                                            unsigned long old,
-                                            unsigned long new, int size)
-{
-    unsigned long prev;
-    switch (size) {
-    case 1:
-        __asm__ __volatile__("lock; cmpxchgb %b1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-    case 2:
-        __asm__ __volatile__("lock; cmpxchgw %w1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-#ifdef CONFIG_X86_64
-    case 4:
-        __asm__ __volatile__("lock; cmpxchgl %k1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-    case 8:
-        __asm__ __volatile__("lock; cmpxchgq %1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-#else
-    case 4:
-        __asm__ __volatile__("lock; cmpxchgl %1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__synch_xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-#endif
-    }
-    return old;
-}
-
-static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
-{
-    return ((1UL << (nr & 31)) &
-            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "btl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
-    return oldbit;
-}
-
-#define synch_test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- synch_const_test_bit((nr),(addr)) : \
- synch_var_test_bit((nr),(addr)))
-
-#endif /* __XEN_SYNCH_BITOPS_H__ */
@@ -57,8 +57,6 @@ __FBSDID("$FreeBSD$");
 #include <machine/smp.h>
 #include <machine/stdarg.h>
 
-#include <machine/xen/synch_bitops.h>
-
 #include <xen/xen-os.h>
 #include <xen/hvm.h>
 #include <xen/hypervisor.h>
@@ -576,7 +574,7 @@ xen_intr_handle_upcall(struct trapframe *trap_frame)
 
 		/* process port */
 		port = (l1i * LONG_BIT) + l2i;
-		synch_clear_bit(port, &s->evtchn_pending[0]);
+		evtchn_clear_port(port);
 
 		isrc = xen_intr_port_to_isrc[port];
 		if (__predict_false(isrc == NULL))
@@ -6,6 +6,7 @@
  *
  * Copyright (c) 2004, K A Fraser
  * Copyright (c) 2012, Spectra Logic Corporation
+ * Copyright © 2022, Elliott Mitchell
  *
  * This file may be distributed separately from the Linux kernel, or
  * incorporated into other software packages, subject to the following license:
@@ -48,6 +49,12 @@ enum evtchn_type {
 /** Submit a port notification for delivery to a userland evtchn consumer */
 void evtchn_device_upcall(evtchn_port_t port);
 
+/* Macros for accessing event channel values */
+#define	EVTCHN_PTR(type, port) \
+	(HYPERVISOR_shared_info->evtchn_##type + ((port) / __LONG_BIT))
+#define	EVTCHN_BIT(port)	((port) & (__LONG_BIT - 1))
+#define	EVTCHN_MASK(port)	(1UL << EVTCHN_BIT(port))
+
 /**
  * Disable signal delivery for an event channel port, returning its
  * previous mask state.
@@ -59,8 +66,9 @@ void evtchn_device_upcall(evtchn_port_t port);
 static inline int
 evtchn_test_and_set_mask(evtchn_port_t port)
 {
-	shared_info_t *s = HYPERVISOR_shared_info;
-	return synch_test_and_set_bit(port, s->evtchn_mask);
+
+	return (atomic_testandset_long(EVTCHN_PTR(mask, port),
+	    EVTCHN_BIT(port)));
 }
 
 /**
@@ -71,8 +79,8 @@ evtchn_test_and_set_mask(evtchn_port_t port)
 static inline void
 evtchn_clear_port(evtchn_port_t port)
 {
-	shared_info_t *s = HYPERVISOR_shared_info;
-	synch_clear_bit(port, &s->evtchn_pending[0]);
+
+	atomic_clear_long(EVTCHN_PTR(pending, port), EVTCHN_MASK(port));
 }
 
 /**
@@ -83,9 +91,8 @@ evtchn_clear_port(evtchn_port_t port)
 static inline void
 evtchn_mask_port(evtchn_port_t port)
 {
-	shared_info_t *s = HYPERVISOR_shared_info;
 
-	synch_set_bit(port, &s->evtchn_mask[0]);
+	atomic_set_long(EVTCHN_PTR(mask, port), EVTCHN_MASK(port));
 }
 
 /**