From 7baccf64d35b11e5cb15df42db815d2d3f041075 Mon Sep 17 00:00:00 2001
From: Bruce Evans
Date: Mon, 1 Jul 1996 20:16:10 +0000
Subject: [PATCH] Fixed lots of warnings about unportable casts of pointers to
 volatile variables: don't depend on the compiler generating atomic code to
 set the variables - use inline asm to specify the atomic instruction(s)
 explicitly.

---
 sys/amd64/include/cpufunc.h | 13 ++++++++++---
 sys/i386/include/cpufunc.h  | 13 ++++++++++---
 sys/i386/include/spl.h      | 27 +++++++++++++++------------
 3 files changed, 35 insertions(+), 18 deletions(-)

diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
index 204a228efabb..9dce13643eab 100644
--- a/sys/amd64/include/cpufunc.h
+++ b/sys/amd64/include/cpufunc.h
@@ -30,7 +30,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: cpufunc.h,v 1.50 1996/06/14 11:01:01 asami Exp $
+ * $Id: cpufunc.h,v 1.51 1996/07/01 18:12:23 bde Exp $
  */
 
 /*
@@ -43,8 +43,6 @@
 #include 
 #include 
 
-#include 	/* XXX belongs elsewhere */
-
 #ifdef __GNUC__
 
 static __inline void
@@ -357,6 +355,12 @@ rdtsc(void)
 	return (rv);
 }
 
+static __inline void
+setbits(volatile unsigned *addr, u_int bits)
+{
+	__asm __volatile("orl %1,%0" : "=m" (*addr) : "ir" (bits));
+}
+
 static __inline void
 write_eflags(u_long ef)
 {
@@ -393,6 +397,7 @@ quad_t rdmsr __P((u_int msr));
 quad_t rdpmc __P((u_int pmc));
 quad_t rdtsc __P((void));
 u_long read_eflags __P((void));
+void setbits __P((volatile unsigned *addr, u_int bits));
 void write_eflags __P((u_long ef));
 void wrmsr __P((u_int msr, quad_t newval));
 
@@ -404,4 +409,6 @@ void ltr __P((u_short sel));
 u_int rcr0 __P((void));
 u_long rcr3 __P((void));
 
+#include 	/* XXX belongs elsewhere */
+
 #endif /* !_MACHINE_CPUFUNC_H_ */
diff --git a/sys/i386/include/cpufunc.h b/sys/i386/include/cpufunc.h
index 204a228efabb..9dce13643eab 100644
--- a/sys/i386/include/cpufunc.h
+++ b/sys/i386/include/cpufunc.h
@@ -30,7 +30,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: cpufunc.h,v 1.50 1996/06/14 11:01:01 asami Exp $
+ * $Id: cpufunc.h,v 1.51 1996/07/01 18:12:23 bde Exp $
  */
 
 /*
@@ -43,8 +43,6 @@
 #include 
 #include 
 
-#include 	/* XXX belongs elsewhere */
-
 #ifdef __GNUC__
 
 static __inline void
@@ -357,6 +355,12 @@ rdtsc(void)
 	return (rv);
 }
 
+static __inline void
+setbits(volatile unsigned *addr, u_int bits)
+{
+	__asm __volatile("orl %1,%0" : "=m" (*addr) : "ir" (bits));
+}
+
 static __inline void
 write_eflags(u_long ef)
 {
@@ -393,6 +397,7 @@ quad_t rdmsr __P((u_int msr));
 quad_t rdpmc __P((u_int pmc));
 quad_t rdtsc __P((void));
 u_long read_eflags __P((void));
+void setbits __P((volatile unsigned *addr, u_int bits));
 void write_eflags __P((u_long ef));
 void wrmsr __P((u_int msr, quad_t newval));
 
@@ -404,4 +409,6 @@ void ltr __P((u_short sel));
 u_int rcr0 __P((void));
 u_long rcr3 __P((void));
 
+#include 	/* XXX belongs elsewhere */
+
 #endif /* !_MACHINE_CPUFUNC_H_ */
diff --git a/sys/i386/include/spl.h b/sys/i386/include/spl.h
index 2c14df512a4a..59f2104e59b1 100644
--- a/sys/i386/include/spl.h
+++ b/sys/i386/include/spl.h
@@ -30,7 +30,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: spl.h,v 1.13 1996/02/07 21:52:57 wollman Exp $
+ * $Id: spl.h,v 1.14 1996/05/18 03:36:42 dyson Exp $
  */
 
 #ifndef _MACHINE_IPL_H_
@@ -76,6 +76,11 @@
 
 #ifndef LOCORE
 
+/*
+ * cpl is preserved by interrupt handlers so it is effectively nonvolatile.
+ * ipending and idelayed are changed by interrupt handlers so they are
+ * volatile.
+ */
 extern unsigned bio_imask;	/* group of interrupts masked with splbio() */
 extern unsigned cpl;		/* current priority level mask */
 extern volatile unsigned idelayed;	/* interrupts to become pending */
@@ -85,19 +90,17 @@
 extern unsigned stat_imask;	/* interrupts masked with splstatclock() */
 extern unsigned tty_imask;	/* group of interrupts masked with spltty() */
 
 /*
- * ipending has to be volatile so that it is read every time it is accessed
- * in splx() and spl0(), but we don't want it to be read nonatomically when
- * it is changed.  Pretending that ipending is a plain int happens to give
- * suitable atomic code for "ipending |= constant;".
+ * The volatile bitmap variables must be set atomically.  This normally
+ * involves using a machine-dependent bit-set or `or' instruction.
  */
-#define setdelayed()	(*(unsigned *)&ipending |= loadandclear(&idelayed))
-#define setsoftast()	(*(unsigned *)&ipending |= SWI_AST_PENDING)
-#define setsoftclock()	(*(unsigned *)&ipending |= SWI_CLOCK_PENDING)
-#define setsoftnet()	(*(unsigned *)&ipending |= SWI_NET_PENDING)
-#define setsofttty()	(*(unsigned *)&ipending |= SWI_TTY_PENDING)
+#define setdelayed()	setbits(&ipending, loadandclear(&idelayed))
+#define setsoftast()	setbits(&ipending, SWI_AST_PENDING)
+#define setsoftclock()	setbits(&ipending, SWI_CLOCK_PENDING)
+#define setsoftnet()	setbits(&ipending, SWI_NET_PENDING)
+#define setsofttty()	setbits(&ipending, SWI_TTY_PENDING)
 
-#define schedsofttty()	(*(unsigned *)&idelayed |= SWI_TTY_PENDING)
-#define schedsoftnet()	(*(unsigned *)&idelayed |= SWI_NET_PENDING)
+#define schedsofttty()	setbits(&idelayed, SWI_TTY_PENDING)
+#define schedsoftnet()	setbits(&idelayed, SWI_NET_PENDING)
 
 #define softclockpending()	(ipending & SWI_CLOCK_PENDING)
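
Reviewer note, not part of the commit: the old macros cast away `volatile' (e.g. "*(unsigned *)&ipending |= SWI_AST_PENDING") and relied on the compiler happening to emit a single read-modify-write instruction, while the new setbits() inline names that instruction explicitly (an "orl" applied directly to the memory operand).  The following stand-alone user-level sketch shows the same technique; the variable name and bit values are invented for illustration, and it assumes GCC-style inline asm on i386/amd64 - it is not the kernel code itself.

/* sketch.c - illustration only; build with gcc or clang on x86/amd64. */
#include <stdio.h>

static volatile unsigned ipending;	/* stand-in for the kernel's pending-interrupt bitmap */

static __inline void
setbits(volatile unsigned *addr, unsigned bits)
{
	/* One "orl" does the read-modify-write in a single instruction. */
	__asm __volatile("orl %1,%0" : "=m" (*addr) : "ir" (bits));
}

int
main(void)
{
	setbits(&ipending, 0x01);	/* invented bit values, not the real SWI_* masks */
	setbits(&ipending, 0x08);
	printf("ipending = %#x\n", ipending);
	return (0);
}

A single or-to-memory is atomic with respect to interrupts on the same CPU, which is what the spl code needs; it is not atomic across CPUs without a lock prefix.  Newer GCC manuals spell a read-modify-write memory operand "+m" rather than "=m"; the 1996 form works here because memory operands are not copied through registers.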