Pass a thread argument into cpu_critical_{enter,exit}() rather than
dereference curthread. It is called only from critical_{enter,exit}(), which already dereferences curthread. This doesn't seem to affect SMP performance in my benchmarks, but improves MySQL transaction throughput by about 1% on UP on my Xeon. Head nodding: jhb, bmilekic
This commit is contained in:
parent
f66145c6bd
commit
1a8cfbc450
@ -55,11 +55,9 @@ void cpu_critical_fork_exit(void);
|
||||
* of td_critnest, prior to it being incremented to 1.
|
||||
*/
|
||||
static __inline void
|
||||
cpu_critical_enter(void)
|
||||
cpu_critical_enter(struct thread *td)
|
||||
{
|
||||
struct thread *td;
|
||||
|
||||
td = curthread;
|
||||
td->td_md.md_savecrit = intr_disable();
|
||||
}
|
||||
|
||||
@ -71,18 +69,16 @@ cpu_critical_enter(void)
|
||||
* exiting the last critical section.
|
||||
*/
|
||||
static __inline void
|
||||
cpu_critical_exit(void)
|
||||
cpu_critical_exit(struct thread *td)
|
||||
{
|
||||
struct thread *td;
|
||||
|
||||
td = curthread;
|
||||
intr_restore(td->td_md.md_savecrit);
|
||||
}
|
||||
|
||||
#else /* !__GNUC__ */
|
||||
|
||||
void cpu_critical_enter(void);
|
||||
void cpu_critical_exit(void);
|
||||
void cpu_critical_enter(struct thread *td);
|
||||
void cpu_critical_exit(struct thread *td);
|
||||
|
||||
#endif /* __GNUC__ */
|
||||
|
||||
|
@ -55,9 +55,10 @@ void cpu_critical_fork_exit(void);
|
||||
* of td_critnest, prior to it being incremented to 1.
|
||||
*/
|
||||
static __inline void
|
||||
cpu_critical_enter(void)
|
||||
cpu_critical_enter(struct thread *td)
|
||||
{
|
||||
curthread->td_md.md_savecrit = intr_disable();
|
||||
|
||||
td->td_md.md_savecrit = intr_disable();
|
||||
}
|
||||
|
||||
/*
|
||||
@ -68,15 +69,15 @@ cpu_critical_enter(void)
|
||||
* exiting the last critical section.
|
||||
*/
|
||||
static __inline void
|
||||
cpu_critical_exit(void)
|
||||
cpu_critical_exit(struct thread *td)
|
||||
{
|
||||
intr_restore(curthread->td_md.md_savecrit);
|
||||
intr_restore(td->td_md.md_savecrit);
|
||||
}
|
||||
|
||||
#else /* !__GNUC__ */
|
||||
|
||||
void cpu_critical_enter(void);
|
||||
void cpu_critical_exit(void);
|
||||
void cpu_critical_enter(struct thread *td);
|
||||
void cpu_critical_exit(struct thread *td);
|
||||
|
||||
#endif /* __GNUC__ */
|
||||
|
||||
|
@ -40,15 +40,15 @@
|
||||
#define MACHINE_CRITICAL_H
|
||||
void cpu_critical_fork_exit(void);
|
||||
static __inline void
|
||||
cpu_critical_enter(void)
|
||||
cpu_critical_enter(struct thread *td)
|
||||
{
|
||||
curthread->td_md.md_savecrit = disable_interrupts(I32_bit | F32_bit);
|
||||
td->td_md.md_savecrit = disable_interrupts(I32_bit | F32_bit);
|
||||
}
|
||||
|
||||
static __inline void
|
||||
cpu_critical_exit(void)
|
||||
cpu_critical_exit(struct thread *td)
|
||||
{
|
||||
restore_interrupts(curthread->td_md.md_savecrit);
|
||||
restore_interrupts(td->td_md.md_savecrit);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -59,9 +59,9 @@ void cpu_critical_fork_exit(void);
|
||||
* is non-zero will be deferred.
|
||||
*/
|
||||
static __inline void
|
||||
cpu_critical_enter(void)
|
||||
cpu_critical_enter(struct thread *td)
|
||||
{
|
||||
curthread->td_md.md_savecrit = intr_disable();
|
||||
td->td_md.md_savecrit = intr_disable();
|
||||
}
|
||||
|
||||
/*
|
||||
@ -76,15 +76,15 @@ cpu_critical_enter(void)
|
||||
* code for us, so we do not have to do anything fancy.
|
||||
*/
|
||||
static __inline void
|
||||
cpu_critical_exit(void)
|
||||
cpu_critical_exit(struct thread *td)
|
||||
{
|
||||
intr_restore(curthread->td_md.md_savecrit);
|
||||
intr_restore(td->td_md.md_savecrit);
|
||||
}
|
||||
|
||||
#else /* !(__GNUC__ || __INTEL_COMPILER) */
|
||||
|
||||
void cpu_critical_enter(void);
|
||||
void cpu_critical_exit(void);
|
||||
void cpu_critical_enter(struct thread *td);
|
||||
void cpu_critical_exit(struct thread *td);
|
||||
|
||||
#endif /* __GNUC__ || __INTEL_COMPILER */
|
||||
|
||||
|
@ -55,11 +55,9 @@ void cpu_critical_fork_exit(void);
|
||||
* of td_critnest, prior to it being incremented to 1.
|
||||
*/
|
||||
static __inline void
|
||||
cpu_critical_enter(void)
|
||||
cpu_critical_enter(struct thread *td)
|
||||
{
|
||||
struct thread *td;
|
||||
|
||||
td = curthread;
|
||||
td->td_md.md_savecrit = intr_disable();
|
||||
}
|
||||
|
||||
@ -71,19 +69,17 @@ cpu_critical_enter(void)
|
||||
* exiting the last critical section.
|
||||
*/
|
||||
static __inline void
|
||||
cpu_critical_exit(void)
|
||||
cpu_critical_exit(struct thread *td)
|
||||
{
|
||||
struct thread *td;
|
||||
|
||||
td = curthread;
|
||||
intr_restore(td->td_md.md_savecrit);
|
||||
}
|
||||
|
||||
|
||||
#else /* !__GNUC__ */
|
||||
|
||||
void cpu_critical_enter(void);
|
||||
void cpu_critical_exit(void);
|
||||
void cpu_critical_enter(struct thread *td);
|
||||
void cpu_critical_exit(struct thread *td);
|
||||
|
||||
#endif /* __GNUC__ */
|
||||
|
||||
|
@ -437,7 +437,7 @@ critical_enter(void)
|
||||
|
||||
td = curthread;
|
||||
if (td->td_critnest == 0)
|
||||
cpu_critical_enter();
|
||||
cpu_critical_enter(td);
|
||||
td->td_critnest++;
|
||||
}
|
||||
|
||||
@ -459,7 +459,7 @@ critical_exit(void)
|
||||
}
|
||||
#endif
|
||||
td->td_critnest = 0;
|
||||
cpu_critical_exit();
|
||||
cpu_critical_exit(td);
|
||||
} else {
|
||||
td->td_critnest--;
|
||||
}
|
||||
|
@ -56,10 +56,9 @@ void cpu_critical_fork_exit(void);
|
||||
*/
|
||||
|
||||
static __inline void
|
||||
cpu_critical_enter(void)
|
||||
cpu_critical_enter(struct thread *td)
|
||||
{
|
||||
u_int msr;
|
||||
struct thread *td = curthread;
|
||||
|
||||
msr = mfmsr();
|
||||
td->td_md.md_savecrit = msr;
|
||||
@ -75,9 +74,8 @@ cpu_critical_enter(void)
|
||||
* exiting the last critical section.
|
||||
*/
|
||||
static __inline void
|
||||
cpu_critical_exit(void)
|
||||
cpu_critical_exit(struct thread *td)
|
||||
{
|
||||
struct thread *td = curthread;
|
||||
|
||||
mtmsr(td->td_md.md_savecrit);
|
||||
}
|
||||
@ -85,8 +83,8 @@ cpu_critical_exit(void)
|
||||
|
||||
#else /* !__GNUC__ */
|
||||
|
||||
void cpu_critical_enter(void);
|
||||
void cpu_critical_exit(void);
|
||||
void cpu_critical_enter(struct thread *td);
|
||||
void cpu_critical_exit(struct thread *td);
|
||||
|
||||
#endif /* __GNUC__ */
|
||||
|
||||
|
@ -55,12 +55,10 @@ void cpu_critical_fork_exit(void);
|
||||
* of td_critnest, prior to it being incremented to 1.
|
||||
*/
|
||||
static __inline void
|
||||
cpu_critical_enter(void)
|
||||
cpu_critical_enter(struct thread *td)
|
||||
{
|
||||
struct thread *td;
|
||||
critical_t pil;
|
||||
|
||||
td = curthread;
|
||||
pil = rdpr(pil);
|
||||
wrpr(pil, 0, 14);
|
||||
td->td_md.md_savecrit = pil;
|
||||
@ -75,18 +73,16 @@ cpu_critical_enter(void)
|
||||
* exiting the last critical section.
|
||||
*/
|
||||
static __inline void
|
||||
cpu_critical_exit(void)
|
||||
cpu_critical_exit(struct thread *td)
|
||||
{
|
||||
struct thread *td;
|
||||
|
||||
td = curthread;
|
||||
wrpr(pil, td->td_md.md_savecrit, 0);
|
||||
}
|
||||
|
||||
#else /* !__GNUC__ */
|
||||
|
||||
void cpu_critical_enter(void);
|
||||
void cpu_critical_exit(void);
|
||||
void cpu_critical_enter(struct thread *td);
|
||||
void cpu_critical_exit(struct thread *td);
|
||||
|
||||
#endif /* __GNUC__ */
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user