Rework the PCPU_* (MD) interface:
- Rename PCPU_LAZY_INC into PCPU_INC.
- Add the PCPU_ADD interface, which just does an add on the pcpu member given a specific value.
Note that for most architectures PCPU_INC and PCPU_ADD are not safe. This is a point that needs some discussions/work in the next days.
Reviewed by: alc, bde
Approved by: jeff (mentor)
This commit is contained in:
parent
041b706b2f
commit
6759608248
@ -250,7 +250,7 @@ intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
|
||||
* processed too.
|
||||
*/
|
||||
(*isrc->is_count)++;
|
||||
PCPU_LAZY_INC(cnt.v_intr);
|
||||
PCPU_INC(cnt.v_intr);
|
||||
|
||||
ie = isrc->is_event;
|
||||
|
||||
@ -321,7 +321,7 @@ intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
|
||||
* processed too.
|
||||
*/
|
||||
(*isrc->is_count)++;
|
||||
PCPU_LAZY_INC(cnt.v_intr);
|
||||
PCPU_INC(cnt.v_intr);
|
||||
|
||||
ie = isrc->is_event;
|
||||
|
||||
|
@ -163,7 +163,7 @@ trap(struct trapframe *frame)
|
||||
register_t addr = 0;
|
||||
ksiginfo_t ksi;
|
||||
|
||||
PCPU_LAZY_INC(cnt.v_trap);
|
||||
PCPU_INC(cnt.v_trap);
|
||||
type = frame->tf_trapno;
|
||||
|
||||
#ifdef SMP
|
||||
@ -737,10 +737,10 @@ syscall(struct trapframe *frame)
|
||||
ksiginfo_t ksi;
|
||||
|
||||
/*
|
||||
* note: PCPU_LAZY_INC() can only be used if we can afford
|
||||
* note: PCPU_INC() can only be used if we can afford
|
||||
* occassional inaccuracy in the count.
|
||||
*/
|
||||
PCPU_LAZY_INC(cnt.v_syscall);
|
||||
PCPU_INC(cnt.v_syscall);
|
||||
|
||||
#ifdef DIAGNOSTIC
|
||||
if (ISPL(frame->tf_cs) != SEL_UPL) {
|
||||
|
@ -105,10 +105,10 @@ ia32_syscall(struct trapframe *frame)
|
||||
ksiginfo_t ksi;
|
||||
|
||||
/*
|
||||
* note: PCPU_LAZY_INC() can only be used if we can afford
|
||||
* note: PCPU_INC() can only be used if we can afford
|
||||
* occassional inaccuracy in the count.
|
||||
*/
|
||||
PCPU_LAZY_INC(cnt.v_syscall);
|
||||
PCPU_INC(cnt.v_syscall);
|
||||
|
||||
td->td_pticks = 0;
|
||||
td->td_frame = frame;
|
||||
|
@ -56,7 +56,8 @@
|
||||
extern struct pcpu *pcpup;
|
||||
|
||||
#define PCPU_GET(member) (pcpup->pc_ ## member)
|
||||
#define PCPU_LAZY_INC(member) (++pcpup->pc_ ## member)
|
||||
#define PCPU_ADD(member, val) (pcpup->pc_ ## member += (val))
|
||||
#define PCPU_INC(member) PCPU_ADD(member, 1)
|
||||
#define PCPU_PTR(member) (&pcpup->pc_ ## member)
|
||||
#define PCPU_SET(member, val) (pcpup->pc_ ## member = (val))
|
||||
|
||||
@ -109,11 +110,32 @@ extern struct pcpu *pcpup;
|
||||
__res; \
|
||||
})
|
||||
|
||||
/*
|
||||
* Adds the value to the per-cpu counter name. The implementation
|
||||
* must be atomic with respect to interrupts.
|
||||
*/
|
||||
#define __PCPU_ADD(name, val) do { \
|
||||
__pcpu_type(name) __val; \
|
||||
struct __s { \
|
||||
u_char __b[MIN(sizeof(__pcpu_type(name)), 8)]; \
|
||||
} __s; \
|
||||
\
|
||||
__val = (val); \
|
||||
if (sizeof(__val) == 1 || sizeof(__val) == 2 || \
|
||||
sizeof(__val) == 4 || sizeof(__val) == 8) { \
|
||||
__s = *(struct __s *)(void *)&__val; \
|
||||
__asm __volatile("add %1,%%gs:%0" \
|
||||
: "=m" (*(struct __s *)(__pcpu_offset(name))) \
|
||||
: "r" (__s)); \
|
||||
} else \
|
||||
*__PCPU_PTR(name) += __val; \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* Increments the value of the per-cpu counter name. The implementation
|
||||
* must be atomic with respect to interrupts.
|
||||
*/
|
||||
#define __PCPU_LAZY_INC(name) do { \
|
||||
#define __PCPU_INC(name) do { \
|
||||
CTASSERT(sizeof(__pcpu_type(name)) == 1 || \
|
||||
sizeof(__pcpu_type(name)) == 2 || \
|
||||
sizeof(__pcpu_type(name)) == 4 || \
|
||||
@ -159,7 +181,8 @@ extern struct pcpu *pcpup;
|
||||
}
|
||||
|
||||
#define PCPU_GET(member) __PCPU_GET(pc_ ## member)
|
||||
#define PCPU_LAZY_INC(member) __PCPU_LAZY_INC(pc_ ## member)
|
||||
#define PCPU_ADD(member, val) __PCPU_ADD(pc_ ## member, val)
|
||||
#define PCPU_INC(member) __PCPU_INC(pc_ ## member)
|
||||
#define PCPU_PTR(member) __PCPU_PTR(pc_ ## member)
|
||||
#define PCPU_SET(member, val) __PCPU_SET(pc_ ## member, val)
|
||||
|
||||
|
@ -106,7 +106,7 @@ arm_handler_execute(struct trapframe *frame, int irqnb)
|
||||
struct thread *td = curthread;
|
||||
int i, thread, ret;
|
||||
|
||||
PCPU_LAZY_INC(cnt.v_intr);
|
||||
PCPU_INC(cnt.v_intr);
|
||||
td->td_intr_nesting_level++;
|
||||
while ((i = arm_get_next_irq()) != -1) {
|
||||
arm_mask_irq(i);
|
||||
|
@ -253,7 +253,7 @@ data_abort_handler(trapframe_t *tf)
|
||||
td = curthread;
|
||||
p = td->td_proc;
|
||||
|
||||
PCPU_LAZY_INC(cnt.v_trap);
|
||||
PCPU_INC(cnt.v_trap);
|
||||
/* Data abort came from user mode? */
|
||||
user = TRAP_USERMODE(tf);
|
||||
|
||||
@ -725,7 +725,7 @@ prefetch_abort_handler(trapframe_t *tf)
|
||||
|
||||
td = curthread;
|
||||
p = td->td_proc;
|
||||
PCPU_LAZY_INC(cnt.v_trap);
|
||||
PCPU_INC(cnt.v_trap);
|
||||
|
||||
if (TRAP_USERMODE(tf)) {
|
||||
td->td_frame = tf;
|
||||
@ -880,7 +880,7 @@ syscall(struct thread *td, trapframe_t *frame, u_int32_t insn)
|
||||
register_t *ap, *args, copyargs[MAXARGS];
|
||||
struct sysent *callp;
|
||||
|
||||
PCPU_LAZY_INC(cnt.v_syscall);
|
||||
PCPU_INC(cnt.v_syscall);
|
||||
td->td_pticks = 0;
|
||||
if (td->td_ucred != td->td_proc->p_ucred)
|
||||
cred_update_thread(td);
|
||||
|
@ -191,7 +191,7 @@ undefinedinstruction(trapframe_t *frame)
|
||||
enable_interrupts(I32_bit|F32_bit);
|
||||
|
||||
frame->tf_pc -= INSN_SIZE;
|
||||
PCPU_LAZY_INC(cnt.v_trap);
|
||||
PCPU_INC(cnt.v_trap);
|
||||
|
||||
fault_pc = frame->tf_pc;
|
||||
|
||||
|
@ -57,7 +57,8 @@ extern struct pcpu __pcpu;
|
||||
* XXX The implementation of this operation should be made atomic
|
||||
* with respect to preemption.
|
||||
*/
|
||||
#define PCPU_LAZY_INC(member) (++__pcpu.pc_ ## member)
|
||||
#define PCPU_ADD(member, value) (__pcpu.pc_ ## member += (value))
|
||||
#define PCPU_INC(member) PCPU_ADD(member, 1)
|
||||
#define PCPU_PTR(member) (&__pcpu.pc_ ## member)
|
||||
#define PCPU_SET(member,value) (__pcpu.pc_ ## member = (value))
|
||||
|
||||
|
@ -241,7 +241,7 @@ intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
|
||||
* processed too.
|
||||
*/
|
||||
(*isrc->is_count)++;
|
||||
PCPU_LAZY_INC(cnt.v_intr);
|
||||
PCPU_INC(cnt.v_intr);
|
||||
|
||||
ie = isrc->is_event;
|
||||
|
||||
@ -312,7 +312,7 @@ intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
|
||||
* processed too.
|
||||
*/
|
||||
(*isrc->is_count)++;
|
||||
PCPU_LAZY_INC(cnt.v_intr);
|
||||
PCPU_INC(cnt.v_intr);
|
||||
|
||||
ie = isrc->is_event;
|
||||
|
||||
|
@ -181,7 +181,7 @@ trap(struct trapframe *frame)
|
||||
static int lastalert = 0;
|
||||
#endif
|
||||
|
||||
PCPU_LAZY_INC(cnt.v_trap);
|
||||
PCPU_INC(cnt.v_trap);
|
||||
type = frame->tf_trapno;
|
||||
|
||||
#ifdef SMP
|
||||
@ -922,10 +922,10 @@ syscall(struct trapframe *frame)
|
||||
ksiginfo_t ksi;
|
||||
|
||||
/*
|
||||
* note: PCPU_LAZY_INC() can only be used if we can afford
|
||||
* note: PCPU_INC() can only be used if we can afford
|
||||
* occassional inaccuracy in the count.
|
||||
*/
|
||||
PCPU_LAZY_INC(cnt.v_syscall);
|
||||
PCPU_INC(cnt.v_syscall);
|
||||
|
||||
#ifdef DIAGNOSTIC
|
||||
if (ISPL(frame->tf_cs) != SEL_UPL) {
|
||||
|
@ -62,7 +62,8 @@
|
||||
extern struct pcpu *pcpup;
|
||||
|
||||
#define PCPU_GET(member) (pcpup->pc_ ## member)
|
||||
#define PCPU_LAZY_INC(member) (++pcpup->pc_ ## member)
|
||||
#define PCPU_ADD(member, val) (pcpup->pc_ ## member += (val))
|
||||
#define PCPU_INC(member) PCPU_ADD(member, 1)
|
||||
#define PCPU_PTR(member) (&pcpup->pc_ ## member)
|
||||
#define PCPU_SET(member, val) (pcpup->pc_ ## member = (val))
|
||||
|
||||
@ -115,11 +116,32 @@ extern struct pcpu *pcpup;
|
||||
__res; \
|
||||
})
|
||||
|
||||
/*
|
||||
* Adds the value to the per-cpu counter name. The implementation
|
||||
* must be atomic with respect to interrupts.
|
||||
*/
|
||||
#define __PCPU_ADD(name, val) do { \
|
||||
__pcpu_type(name) __val; \
|
||||
struct __s { \
|
||||
u_char __b[MIN(sizeof(__pcpu_type(name)), 4)]; \
|
||||
} __s; \
|
||||
\
|
||||
__val = (val); \
|
||||
if (sizeof(__val) == 1 || sizeof(__val) == 2 || \
|
||||
sizeof(__val) == 4) { \
|
||||
__s = *(struct __s *)(void *)&__val; \
|
||||
__asm __volatile("add %1,%%fs:%0" \
|
||||
: "=m" (*(struct __s *)(__pcpu_offset(name))) \
|
||||
: "r" (__s)); \
|
||||
} else \
|
||||
*__PCPU_PTR(name) += __val; \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* Increments the value of the per-cpu counter name. The implementation
|
||||
* must be atomic with respect to interrupts.
|
||||
*/
|
||||
#define __PCPU_LAZY_INC(name) do { \
|
||||
#define __PCPU_INC(name) do { \
|
||||
CTASSERT(sizeof(__pcpu_type(name)) == 1 || \
|
||||
sizeof(__pcpu_type(name)) == 2 || \
|
||||
sizeof(__pcpu_type(name)) == 4); \
|
||||
@ -160,7 +182,8 @@ extern struct pcpu *pcpup;
|
||||
}
|
||||
|
||||
#define PCPU_GET(member) __PCPU_GET(pc_ ## member)
|
||||
#define PCPU_LAZY_INC(member) __PCPU_LAZY_INC(pc_ ## member)
|
||||
#define PCPU_ADD(member, val) __PCPU_ADD(pc_ ## member, val)
|
||||
#define PCPU_INC(member) __PCPU_INC(pc_ ## member)
|
||||
#define PCPU_PTR(member) __PCPU_PTR(pc_ ## member)
|
||||
#define PCPU_SET(member, val) __PCPU_SET(pc_ ## member, val)
|
||||
|
||||
|
@ -64,7 +64,7 @@ ia32_syscall(struct trapframe *tf)
|
||||
int error, i, narg;
|
||||
ksiginfo_t ksi;
|
||||
|
||||
PCPU_LAZY_INC(cnt.v_syscall);
|
||||
PCPU_INC(cnt.v_syscall);
|
||||
|
||||
td = curthread;
|
||||
params = (caddr_t)(tf->tf_special.sp & ((1L<<32)-1)) +
|
||||
@ -220,7 +220,7 @@ ia32_trap(int vector, struct trapframe *tf)
|
||||
KASSERT(TRAPF_USERMODE(tf), ("%s: In kernel mode???", __func__));
|
||||
|
||||
ia64_set_fpsr(IA64_FPSR_DEFAULT);
|
||||
PCPU_LAZY_INC(cnt.v_trap);
|
||||
PCPU_INC(cnt.v_trap);
|
||||
|
||||
td = curthread;
|
||||
td->td_frame = tf;
|
||||
|
@ -154,7 +154,7 @@ interrupt(u_int64_t vector, struct trapframe *tf)
|
||||
if (vector == CLOCK_VECTOR) {/* clock interrupt */
|
||||
/* CTR0(KTR_INTR, "clock interrupt"); */
|
||||
|
||||
PCPU_LAZY_INC(cnt.v_intr);
|
||||
PCPU_INC(cnt.v_intr);
|
||||
#ifdef EVCNT_COUNTERS
|
||||
clock_intr_evcnt.ev_count++;
|
||||
#else
|
||||
|
@ -363,7 +363,7 @@ trap(int vector, struct trapframe *tf)
|
||||
|
||||
user = TRAPF_USERMODE(tf) ? 1 : 0;
|
||||
|
||||
PCPU_LAZY_INC(cnt.v_trap);
|
||||
PCPU_INC(cnt.v_trap);
|
||||
|
||||
td = curthread;
|
||||
p = td->td_proc;
|
||||
@ -978,7 +978,7 @@ syscall(struct trapframe *tf)
|
||||
code = tf->tf_scratch.gr15;
|
||||
args = &tf->tf_scratch.gr16;
|
||||
|
||||
PCPU_LAZY_INC(cnt.v_syscall);
|
||||
PCPU_INC(cnt.v_syscall);
|
||||
|
||||
td = curthread;
|
||||
td->td_frame = tf;
|
||||
|
@ -53,7 +53,8 @@ register struct pcpu *pcpup __asm__("r13");
|
||||
* XXX The implementation of this operation should be made atomic
|
||||
* with respect to preemption.
|
||||
*/
|
||||
#define PCPU_LAZY_INC(member) (++pcpup->pc_ ## member)
|
||||
#define PCPU_ADD(member, value) (pcpup->pc_ ## member += (value))
|
||||
#define PCPU_INC(member) PCPU_ADD(member, 1)
|
||||
#define PCPU_PTR(member) (&pcpup->pc_ ## member)
|
||||
#define PCPU_SET(member,value) (pcpup->pc_ ## member = (value))
|
||||
|
||||
|
@ -931,7 +931,7 @@ swi_sched(void *cookie, int flags)
|
||||
atomic_store_rel_int(&ih->ih_need, 1);
|
||||
|
||||
if (!(flags & SWI_DELAY)) {
|
||||
PCPU_LAZY_INC(cnt.v_soft);
|
||||
PCPU_INC(cnt.v_soft);
|
||||
#ifdef INTR_FILTER
|
||||
error = intr_event_schedule_thread(ie, ie->ie_thread);
|
||||
#else
|
||||
|
@ -149,7 +149,7 @@ trap(struct trapframe *frame)
|
||||
u_int ucode;
|
||||
ksiginfo_t ksi;
|
||||
|
||||
PCPU_LAZY_INC(cnt.v_trap);
|
||||
PCPU_INC(cnt.v_trap);
|
||||
|
||||
td = PCPU_GET(curthread);
|
||||
p = td->td_proc;
|
||||
@ -349,7 +349,7 @@ syscall(struct trapframe *frame)
|
||||
td = PCPU_GET(curthread);
|
||||
p = td->td_proc;
|
||||
|
||||
PCPU_LAZY_INC(cnt.v_syscall);
|
||||
PCPU_INC(cnt.v_syscall);
|
||||
|
||||
#ifdef KSE
|
||||
if (p->p_flag & P_SA)
|
||||
|
@ -62,7 +62,8 @@ struct pmap;
|
||||
* XXX The implementation of this operation should be made atomic
|
||||
* with respect to preemption.
|
||||
*/
|
||||
#define PCPU_LAZY_INC(member) (++PCPUP->pc_ ## member)
|
||||
#define PCPU_ADD(member, value) (PCPUP->pc_ ## member += (value))
|
||||
#define PCPU_INC(member) PCPU_ADD(member, 1)
|
||||
#define PCPU_PTR(member) (&PCPUP->pc_ ## member)
|
||||
#define PCPU_SET(member,value) (PCPUP->pc_ ## member = (value))
|
||||
|
||||
|
@ -149,7 +149,7 @@ trap(struct trapframe *frame)
|
||||
u_int ucode;
|
||||
ksiginfo_t ksi;
|
||||
|
||||
PCPU_LAZY_INC(cnt.v_trap);
|
||||
PCPU_INC(cnt.v_trap);
|
||||
|
||||
td = PCPU_GET(curthread);
|
||||
p = td->td_proc;
|
||||
@ -349,7 +349,7 @@ syscall(struct trapframe *frame)
|
||||
td = PCPU_GET(curthread);
|
||||
p = td->td_proc;
|
||||
|
||||
PCPU_LAZY_INC(cnt.v_syscall);
|
||||
PCPU_INC(cnt.v_syscall);
|
||||
|
||||
#ifdef KSE
|
||||
if (p->p_flag & P_SA)
|
||||
|
@ -71,7 +71,8 @@ register struct pcpu *pcpup __asm__(__XSTRING(PCPU_REG));
|
||||
* XXX The implementation of this operation should be made atomic
|
||||
* with respect to preemption.
|
||||
*/
|
||||
#define PCPU_LAZY_INC(member) (++pcpup->pc_ ## member)
|
||||
#define PCPU_ADD(member, value) (pcpup->pc_ ## member += (value))
|
||||
#define PCPU_INC(member) PCPU_ADD(member, 1)
|
||||
#define PCPU_PTR(member) (&pcpup->pc_ ## member)
|
||||
#define PCPU_SET(member,value) (pcpup->pc_ ## member = (value))
|
||||
|
||||
|
@ -243,7 +243,7 @@ trap(struct trapframe *tf)
|
||||
trap_msg[tf->tf_type & ~T_KERNEL],
|
||||
(TRAPF_USERMODE(tf) ? "user" : "kernel"), rdpr(pil));
|
||||
|
||||
PCPU_LAZY_INC(cnt.v_trap);
|
||||
PCPU_INC(cnt.v_trap);
|
||||
|
||||
if ((tf->tf_tstate & TSTATE_PRIV) == 0) {
|
||||
KASSERT(td != NULL, ("trap: curthread NULL"));
|
||||
@ -518,7 +518,7 @@ syscall(struct trapframe *tf)
|
||||
|
||||
p = td->td_proc;
|
||||
|
||||
PCPU_LAZY_INC(cnt.v_syscall);
|
||||
PCPU_INC(cnt.v_syscall);
|
||||
|
||||
narg = 0;
|
||||
error = 0;
|
||||
|
@ -92,7 +92,8 @@ register struct pcpu *pcpup __asm__(__XSTRING(PCPU_REG));
|
||||
* XXX The implementation of this operation should be made atomic
|
||||
* with respect to preemption.
|
||||
*/
|
||||
#define PCPU_LAZY_INC(member) (++pcpup->pc_ ## member)
|
||||
#define PCPU_ADD(member, value) (pcpup->pc_ ## member += (value))
|
||||
#define PCPU_INC(member) PCPU_ADD(member, 1)
|
||||
#define PCPU_PTR(member) (&pcpup->pc_ ## member)
|
||||
#define PCPU_SET(member,value) (pcpup->pc_ ## member = (value))
|
||||
|
||||
|
@ -268,7 +268,7 @@ trap(struct trapframe *tf, int64_t type, uint64_t data)
|
||||
trap_msg[trapno],
|
||||
(TRAPF_USERMODE(tf) ? "user" : "kernel"), rdpr(pil));
|
||||
|
||||
PCPU_LAZY_INC(cnt.v_trap);
|
||||
PCPU_INC(cnt.v_trap);
|
||||
|
||||
trapno = (type & TRAP_MASK);
|
||||
ctx = (type >> TRAP_CTX_SHIFT);
|
||||
@ -575,7 +575,7 @@ syscall(struct trapframe *tf)
|
||||
|
||||
p = td->td_proc;
|
||||
|
||||
PCPU_LAZY_INC(cnt.v_syscall);
|
||||
PCPU_INC(cnt.v_syscall);
|
||||
|
||||
narg = 0;
|
||||
error = 0;
|
||||
|
@ -219,7 +219,7 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
|
||||
|
||||
hardfault = 0;
|
||||
growstack = TRUE;
|
||||
PCPU_LAZY_INC(cnt.v_vm_faults);
|
||||
PCPU_INC(cnt.v_vm_faults);
|
||||
|
||||
RetryFault:;
|
||||
|
||||
@ -394,7 +394,7 @@ RetryFault:;
|
||||
}
|
||||
vm_object_pip_wakeup(fs.object);
|
||||
VM_OBJECT_UNLOCK(fs.object);
|
||||
PCPU_LAZY_INC(cnt.v_intrans);
|
||||
PCPU_INC(cnt.v_intrans);
|
||||
vm_object_deallocate(fs.first_object);
|
||||
goto RetryFault;
|
||||
}
|
||||
@ -668,9 +668,9 @@ RetryFault:;
|
||||
if ((fs.m->flags & PG_ZERO) == 0) {
|
||||
pmap_zero_page(fs.m);
|
||||
} else {
|
||||
PCPU_LAZY_INC(cnt.v_ozfod);
|
||||
PCPU_INC(cnt.v_ozfod);
|
||||
}
|
||||
PCPU_LAZY_INC(cnt.v_zfod);
|
||||
PCPU_INC(cnt.v_zfod);
|
||||
fs.m->valid = VM_PAGE_BITS_ALL;
|
||||
break; /* break to PAGE HAS BEEN FOUND */
|
||||
} else {
|
||||
@ -752,7 +752,7 @@ RetryFault:;
|
||||
vm_page_busy(fs.m);
|
||||
fs.first_m = fs.m;
|
||||
fs.m = NULL;
|
||||
PCPU_LAZY_INC(cnt.v_cow_optim);
|
||||
PCPU_INC(cnt.v_cow_optim);
|
||||
} else {
|
||||
/*
|
||||
* Oh, well, lets copy it.
|
||||
@ -780,7 +780,7 @@ RetryFault:;
|
||||
fs.m = fs.first_m;
|
||||
if (!is_first_object_locked)
|
||||
VM_OBJECT_LOCK(fs.object);
|
||||
PCPU_LAZY_INC(cnt.v_cow_faults);
|
||||
PCPU_INC(cnt.v_cow_faults);
|
||||
} else {
|
||||
prot &= ~VM_PROT_WRITE;
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user