Push down the implementation of PCPU_LAZY_INC() into the machine-dependent
header file.  Reimplement PCPU_LAZY_INC() on amd64 and i386, making it
atomic with respect to interrupts.

Reviewed by:	bde, jhb
parent fa80feee0e
commit c640357f04
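For context, a minimal caller sketch (not part of this diff; the pc_cnt member and the trap-handler call site are assumptions for illustration):

	/*
	 * Hypothetical call site: bump a per-CPU statistics counter.  The old
	 * MI definition expanded to ++*PCPU_PTR(var), a load/add/store
	 * sequence that an interrupt on the same CPU could split; with this
	 * change, amd64 and i386 expand it to a single inc instruction on
	 * %gs:- or %fs:-relative memory.
	 */
	PCPU_LAZY_INC(cnt.v_trap);	/* assumes a struct vmmeter pc_cnt member */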
@ -56,6 +56,7 @@
extern struct pcpu *pcpup;

#define	PCPU_GET(member)	(pcpup->pc_ ## member)
#define	PCPU_LAZY_INC(member)	(++pcpup->pc_ ## member)
#define	PCPU_PTR(member)	(&pcpup->pc_ ## member)
#define	PCPU_SET(member, val)	(pcpup->pc_ ## member = (val))
@ -108,6 +109,34 @@ extern struct pcpu *pcpup;
	__res;							\
})

/*
 * Increments the value of the per-cpu counter name.  The implementation
 * must be atomic with respect to interrupts.
 */
#define	__PCPU_LAZY_INC(name) do {					\
	CTASSERT(sizeof(__pcpu_type(name)) == 1 ||			\
	    sizeof(__pcpu_type(name)) == 2 ||				\
	    sizeof(__pcpu_type(name)) == 4 ||				\
	    sizeof(__pcpu_type(name)) == 8);				\
	if (sizeof(__pcpu_type(name)) == 1) {				\
		__asm __volatile("incb %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	} else if (sizeof(__pcpu_type(name)) == 2) {			\
		__asm __volatile("incw %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	} else if (sizeof(__pcpu_type(name)) == 4) {			\
		__asm __volatile("incl %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	} else if (sizeof(__pcpu_type(name)) == 8) {			\
		__asm __volatile("incq %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	}								\
} while (0)

/*
 * Sets the value of the per-cpu variable name to value val.
 */
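A note on the amd64 hunk above (sketch, not from the diff): sizeof(__pcpu_type(name)) is a compile-time constant, so the if/else chain folds away and each PCPU_LAZY_INC() use compiles to exactly one read-modify-write inc instruction against %gs-relative memory. An interrupt can only arrive before or after that instruction, never between its implicit load, add, and store; there is no lock prefix, which is fine because the counter is per-CPU. A standalone illustration of the same single-instruction technique, with invented identifiers and x86-64 only:

	#include <stdint.h>

	static uint64_t counter;

	static inline void
	counter_inc_single_insn(void)
	{
		/* One read-modify-write instruction; an interrupt or signal
		 * handler on this CPU can never observe a half-done update.
		 * Plain "counter++" leaves the compiler free to emit a
		 * separate load, add, and store. */
		__asm__ __volatile__("incq %0" : "+m" (counter));
	}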
@ -130,6 +159,7 @@ extern struct pcpu *pcpup;
}

#define	PCPU_GET(member)	__PCPU_GET(pc_ ## member)
#define	PCPU_LAZY_INC(member)	__PCPU_LAZY_INC(pc_ ## member)
#define	PCPU_PTR(member)	__PCPU_PTR(pc_ ## member)
#define	PCPU_SET(member, val)	__PCPU_SET(pc_ ## member, val)
@ -52,6 +52,12 @@ extern struct pcpu *pcpup;
extern struct pcpu __pcpu;

#define	PCPU_GET(member)	(__pcpu.pc_ ## member)

/*
 * XXX The implementation of this operation should be made atomic
 * with respect to preemption.
 */
#define	PCPU_LAZY_INC(member)	(++__pcpu.pc_ ## member)
#define	PCPU_PTR(member)	(&__pcpu.pc_ ## member)
#define	PCPU_SET(member,value)	(__pcpu.pc_ ## member = (value))
@ -62,6 +62,7 @@
extern struct pcpu *pcpup;

#define	PCPU_GET(member)	(pcpup->pc_ ## member)
#define	PCPU_LAZY_INC(member)	(++pcpup->pc_ ## member)
#define	PCPU_PTR(member)	(&pcpup->pc_ ## member)
#define	PCPU_SET(member, val)	(pcpup->pc_ ## member = (val))
@ -114,6 +115,29 @@ extern struct pcpu *pcpup;
	__res;							\
})

/*
 * Increments the value of the per-cpu counter name.  The implementation
 * must be atomic with respect to interrupts.
 */
#define	__PCPU_LAZY_INC(name) do {					\
	CTASSERT(sizeof(__pcpu_type(name)) == 1 ||			\
	    sizeof(__pcpu_type(name)) == 2 ||				\
	    sizeof(__pcpu_type(name)) == 4);				\
	if (sizeof(__pcpu_type(name)) == 1) {				\
		__asm __volatile("incb %%fs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	} else if (sizeof(__pcpu_type(name)) == 2) {			\
		__asm __volatile("incw %%fs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	} else if (sizeof(__pcpu_type(name)) == 4) {			\
		__asm __volatile("incl %%fs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	}								\
} while (0)

/*
 * Sets the value of the per-cpu variable name to value val.
 */
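The i386 hunk differs from the amd64 one in two ways: per-CPU data is reached through %fs rather than %gs, and the 8-byte case is omitted because i386 has no single-instruction 64-bit memory increment. A hedged sketch of what a 64-bit per-CPU counter would need there instead (the member name is invented; intr_disable()/intr_restore() are the existing interfaces for masking interrupts):

	static void
	pcpu_counter64_inc(void)
	{
		register_t saved;

		/* With interrupts masked, the multi-instruction 64-bit
		 * increment cannot be split by an interrupt handler on
		 * this CPU. */
		saved = intr_disable();
		++*PCPU_PTR(some_64bit_counter);	/* hypothetical member */
		intr_restore(saved);
	}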
@ -136,6 +160,7 @@ extern struct pcpu *pcpup;
}

#define	PCPU_GET(member)	__PCPU_GET(pc_ ## member)
#define	PCPU_LAZY_INC(member)	__PCPU_LAZY_INC(pc_ ## member)
#define	PCPU_PTR(member)	__PCPU_PTR(pc_ ## member)
#define	PCPU_SET(member, val)	__PCPU_SET(pc_ ## member, val)
@ -48,6 +48,12 @@ struct pcpu;
register struct pcpu *pcpup __asm__("r13");

#define	PCPU_GET(member)	(pcpup->pc_ ## member)

/*
 * XXX The implementation of this operation should be made atomic
 * with respect to preemption.
 */
#define	PCPU_LAZY_INC(member)	(++pcpup->pc_ ## member)
#define	PCPU_PTR(member)	(&pcpup->pc_ ## member)
#define	PCPU_SET(member,value)	(pcpup->pc_ ## member = (value))
@ -57,6 +57,12 @@ struct pmap;
#define	PCPUP	((struct pcpu *) powerpc_get_pcpup())

#define	PCPU_GET(member)	(PCPUP->pc_ ## member)

/*
 * XXX The implementation of this operation should be made atomic
 * with respect to preemption.
 */
#define	PCPU_LAZY_INC(member)	(++PCPUP->pc_ ## member)
#define	PCPU_PTR(member)	(&PCPUP->pc_ ## member)
#define	PCPU_SET(member,value)	(PCPUP->pc_ ## member = (value))
@ -66,6 +66,12 @@ register struct pcb *curpcb __asm__(__XSTRING(PCB_REG));
register struct pcpu *pcpup __asm__(__XSTRING(PCPU_REG));

#define	PCPU_GET(member)	(pcpup->pc_ ## member)

/*
 * XXX The implementation of this operation should be made atomic
 * with respect to preemption.
 */
#define	PCPU_LAZY_INC(member)	(++pcpup->pc_ ## member)
#define	PCPU_PTR(member)	(&pcpup->pc_ ## member)
#define	PCPU_SET(member,value)	(pcpup->pc_ ## member = (value))
@ -87,6 +87,12 @@ struct pcpu;
register struct pcpu *pcpup __asm__(__XSTRING(PCPU_REG));

#define	PCPU_GET(member)	(pcpup->pc_ ## member)

/*
 * XXX The implementation of this operation should be made atomic
 * with respect to preemption.
 */
#define	PCPU_LAZY_INC(member)	(++pcpup->pc_ ## member)
#define	PCPU_PTR(member)	(&pcpup->pc_ ## member)
#define	PCPU_SET(member,value)	(pcpup->pc_ ## member = (value))
@ -87,21 +87,6 @@ extern struct cpuhead cpuhead;
#define	curthread	PCPU_GET(curthread)
#endif

/*
 * MI PCPU support functions
 *
 * PCPU_LAZY_INC() -	Lazily increment a per-cpu stats counter, without
 *			guarenteeing atomicity or even necessarily consistency.
 *
 *			XXX we need to create MD primitives to support
 *			this to guarentee at least some level of consistency,
 *			i.e., to prevent us from totally corrupting the
 *			counters due to preemption in a multi-instruction
 *			increment sequence for architectures that do not
 *			support single-instruction memory increments.
 */
#define	PCPU_LAZY_INC(var)	(++*PCPU_PTR(var))

/*
 * Machine dependent callouts.  cpu_pcpu_init() is responsible for
 * initializing machine dependent fields of struct pcpu, and
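The hunk above removes the MI fallback, under which PCPU_LAZY_INC(var) was simply (++*PCPU_PTR(var)). A standalone sketch of the failure mode the deleted comment warns about (names invented; not from the diff):

	/*
	 * What ++*PCPU_PTR(var) can decompose into on an architecture without
	 * a single-instruction memory increment.  If an interrupt handler
	 * bumps the same counter between the load and the store, its update
	 * is overwritten when the interrupted code writes back its stale
	 * value.  The MD implementations above close this window with a
	 * single inc instruction (amd64/i386) or keep the XXX comment where
	 * they do not.
	 */
	static unsigned counter;	/* stands in for a per-CPU stats field */

	static void
	increment_split(void)
	{
		unsigned tmp;

		tmp = counter;		/* load  */
		tmp = tmp + 1;		/* add   */
		counter = tmp;		/* store */
	}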