Get kernel profiling on SMP systems closer to working by replacing the
mcount spin mutex with a very simple non-recursive spinlock implemented using atomic operations.
parent 797c3dba25
commit 25142c5ea1
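The replacement is the classic test-and-set spinlock: acquire by atomically swinging a word from 0 to 1 with acquire semantics, spinning until that succeeds, and release with a plain release store. In the kernel this is done with atomic_cmpset_acq_int() and atomic_store_rel_int() on a plain int (see the MCOUNT_ENTER/MCOUNT_EXIT hunk below), with local interrupts disabled around the critical section, presumably so the non-recursive lock cannot be re-taken by an interrupt on the same CPU. A minimal userland sketch of the same pattern, written with C11 atomics and illustrative names (not the kernel's API):

/*
 * Minimal sketch of the locking pattern behind the new MCOUNT_ENTER/
 * MCOUNT_EXIT macros, written with C11 atomics so it can be compiled and
 * run in userland.  The kernel itself uses atomic_cmpset_acq_int() and
 * atomic_store_rel_int() on "int mcount_lock"; the names here are
 * illustrative only.
 */
#include <stdatomic.h>

static atomic_int profile_lock;		/* 0 = free, 1 = held */

static void
profile_lock_enter(void)
{
	int expected;

	/* Spin until the lock word goes from 0 to 1 (acquire semantics). */
	do {
		expected = 0;
	} while (!atomic_compare_exchange_weak_explicit(&profile_lock,
	    &expected, 1, memory_order_acquire, memory_order_relaxed));
}

static void
profile_lock_exit(void)
{
	/* Unlocking never fails: a release store hands the lock back. */
	atomic_store_explicit(&profile_lock, 0, memory_order_release);
}

Because the lock is deliberately non-recursive, a CPU that already holds it and tries to take it again would spin forever; keeping interrupts off for the duration of the critical section avoids that self-deadlock.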
@@ -325,7 +325,7 @@ static void release_aps(void *dummy);
 struct mtx imen_mtx;
 
 /* lock region used by kernel profiling */
-struct mtx mcount_mtx;
+int mcount_lock;
 
 #ifdef USE_COMLOCK
 /* locks com (tty) data/hardware accesses: a FASTINTR() */
@@ -335,12 +335,6 @@ struct mtx com_mtx;
 static void
 init_locks(void)
 {
-	/*
-	 * XXX The mcount mutex probably needs to be statically initialized,
-	 * since it will be used even in the function calls that get us to this
-	 * point.
-	 */
-	mtx_init(&mcount_mtx, "mcount", MTX_DEF);
 
 #ifdef USE_COMLOCK
 	mtx_init(&com_mtx, "com", MTX_SPIN);
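The init_locks() change above also drops the mtx_init() call for the profiling lock. That appears to address the exact worry raised in the now-deleted XXX comment: mcount fires on the very function calls that lead up to init_locks(), so its lock must be usable with no runtime setup. A bare int is zero, i.e. unlocked, from the moment the kernel image is loaded. A sketch of the contrast, with the declarations taken from the hunk above:

/* Before: a mutex that cannot be taken until mtx_init() has run. */
struct mtx mcount_mtx;		/* needed mtx_init(&mcount_mtx, "mcount", MTX_DEF) */

/* After: a bare word, already a valid unlocked spinlock with no setup. */
int mcount_lock;		/* 0 = free, 1 = held; zeroed by the loader */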
@@ -64,10 +64,11 @@
 #else
 #define	MCOUNT_DECL(s)	u_long s;
 #ifdef SMP
-#define	MCOUNT_ENTER(s)	{ s = read_eflags(); \
-			  __asm __volatile("cli" : : : "memory"); \
-			  mtx_lock(&mcount_mtx); }
-#define	MCOUNT_EXIT(s)	{ mtx_unlock(&mcount_mtx); write_eflags(s); }
+#define	MCOUNT_ENTER(s)	{ s = read_eflags(); disable_intr(); \
+			  while (!atomic_cmpset_acq_int(&mcount_lock, 0, 1)) \
+				/* nothing */ ; }
+#define	MCOUNT_EXIT(s)	{ atomic_store_rel_int(&mcount_lock, 0); \
+			  write_eflags(s); }
 #else
 #define	MCOUNT_ENTER(s)	{ s = read_eflags(); disable_intr(); }
 #define	MCOUNT_EXIT(s)	(write_eflags(s))