Remove the nanosleep calls from the spin loops in the locking code.
They provided little benefit (if any) and they caused some problems in OpenOffice, at least in post-KSE -current and perhaps in other environments too. The nanosleep calls prevented the profiling timer from advancing during the spin loops, thereby preventing the thread scheduler from ever pre-empting the spinning thread. Alexander Kabaev diagnosed this problem, Martin Blapp helped with testing, and Matt Dillon provided some helpful suggestions.

This is a short-term fix for a larger problem. The use of spinlocking isn't guaranteed to work in all cases. For example, if the spinning thread has higher priority than all other threads, it may never be pre-empted, and the thread holding the lock may never progress far enough to release it. On the other hand, spinlocking is the only locking that can work with an arbitrary unknown threads package.

I have some ideas for a much better fix in the longer term. It would eliminate all locking inside the dynamic linker by making it safe for symbol lookups and lazy binding to proceed in parallel with a call to dlopen or dlclose. The only mutual exclusion needed would then be to prevent multiple simultaneous calls to dlopen and/or dlclose. That mutual exclusion could be put into the native pthreads library. Applications using foreign threads packages would have to make their own arrangements to ensure that they did not have multiple threads in dlopen and/or dlclose -- a reasonable requirement in my opinion.

MFC after: 3 days
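For illustration, a minimal sketch of the shape of the change. C11 atomics stand in for the per-architecture primitives (the real code uses assembly sequences in "rtld_start.S" or an inline "cmpxchg"), and the WAFLAG/RC_INCR values are assumptions, not taken from the source:

#include <stdatomic.h>

#define WAFLAG	0x1	/* Writer holds the lock (assumed value). */
#define RC_INCR	0x2	/* Increment for the reader count (assumed value). */

typedef struct Struct_Lock {
	atomic_int lock;
} Lock;

static void
rlock_acquire(Lock *l)
{
	atomic_fetch_add_explicit(&l->lock, RC_INCR, memory_order_acquire);
	while (atomic_load_explicit(&l->lock, memory_order_acquire) & WAFLAG)
		;	/* Spin.  Without a nanosleep() call here, the
			   profiling timer keeps advancing, so the thread
			   scheduler can still pre-empt the spinning thread. */
}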
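And a sketch of the mutual exclusion envisioned for the longer-term fix: if symbol lookups and lazy binding were safe to run in parallel with dlopen() and dlclose(), the only lock needed would be one serializing those two calls. The wrapper names and the mutex below are hypothetical, not an existing API:

#include <dlfcn.h>
#include <pthread.h>

static pthread_mutex_t rtld_load_mutex = PTHREAD_MUTEX_INITIALIZER;

void *
dlopen_serialized(const char *path, int mode)
{
	void *handle;

	pthread_mutex_lock(&rtld_load_mutex);	/* One dlopen()/dlclose() at a time. */
	handle = dlopen(path, mode);
	pthread_mutex_unlock(&rtld_load_mutex);
	return (handle);
}

int
dlclose_serialized(void *handle)
{
	int error;

	pthread_mutex_lock(&rtld_load_mutex);
	error = dlclose(handle);
	pthread_mutex_unlock(&rtld_load_mutex);
	return (error);
}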
@@ -39,11 +39,6 @@
  * contain a count of readers desiring the lock. The algorithm requires
  * atomic "compare_and_store" and "add" operations, which we implement
  * using assembly language sequences in "rtld_start.S".
- *
- * These are spinlocks. When spinning we call nanosleep() for 1
- * microsecond each time around the loop. This will most likely yield
- * the CPU to other threads (including, we hope, the lockholder) allowing
- * them to make some progress.
  */
 
 #include <signal.h>
@@ -70,7 +65,6 @@ typedef struct Struct_Lock {
 	void *base;
 } Lock;
 
-static const struct timespec usec = { 0, 1000 };	/* 1 usec. */
 static sigset_t fullsigmask, oldsigmask;
 
 static void *
@@ -118,7 +112,7 @@ rlock_acquire(void *lock)
 
 	atomic_add_int(&l->lock, RC_INCR);
 	while (l->lock & WAFLAG)
-		nanosleep(&usec, NULL);
+		;	/* Spin */
 }
 
 static void
@@ -132,7 +126,6 @@ wlock_acquire(void *lock)
 		if (cmp0_and_store_int(&l->lock, WAFLAG) == 0)
 			break;
 		sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
-		nanosleep(&usec, NULL);
 	}
 	oldsigmask = tmp_oldsigmask;
 }
@@ -46,11 +46,6 @@
  * 80386 we must use simple test-and-set exclusive locks instead. We
  * determine which kind of lock to use by trying to execute a "cmpxchg"
  * instruction and catching the SIGILL which results on the 80386.
- *
- * These are spinlocks. When spinning we call nanosleep() for 1
- * microsecond each time around the loop. This will most likely yield
- * the CPU to other threads (including, we hope, the lockholder) allowing
- * them to make some progress.
  */
 
 #include <setjmp.h>
@@ -71,7 +66,6 @@ typedef struct Struct_Lock {
 	void *base;
 } Lock;
 
-static const struct timespec usec = { 0, 1000 };	/* 1 usec. */
 static sigset_t fullsigmask, oldsigmask;
 
 static inline int
@@ -153,7 +147,7 @@ lock80386_acquire(void *lock)
 			break;
 		sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
 		while (l->lock != 0)
-			nanosleep(&usec, NULL);
+			;	/* Spin */
 	}
 	oldsigmask = tmp_oldsigmask;
 }
@@ -177,7 +171,7 @@ rlock_acquire(void *lock)
 
 	atomic_add_int(&l->lock, RC_INCR);
 	while (l->lock & WAFLAG)
-		nanosleep(&usec, NULL);
+		;	/* Spin */
 }
 
 static void
@@ -191,7 +185,6 @@ wlock_acquire(void *lock)
 		if (cmpxchgl(0, WAFLAG, &l->lock) == 0)
 			break;
 		sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
-		nanosleep(&usec, NULL);
 	}
 	oldsigmask = tmp_oldsigmask;
 }
@@ -46,11 +46,6 @@
  * 80386 we must use simple test-and-set exclusive locks instead. We
  * determine which kind of lock to use by trying to execute a "cmpxchg"
  * instruction and catching the SIGILL which results on the 80386.
- *
- * These are spinlocks. When spinning we call nanosleep() for 1
- * microsecond each time around the loop. This will most likely yield
- * the CPU to other threads (including, we hope, the lockholder) allowing
- * them to make some progress.
  */
 
 #include <setjmp.h>
@@ -71,7 +66,6 @@ typedef struct Struct_Lock {
 	void *base;
 } Lock;
 
-static const struct timespec usec = { 0, 1000 };	/* 1 usec. */
 static sigset_t fullsigmask, oldsigmask;
 
 static inline int
@@ -153,7 +147,7 @@ lock80386_acquire(void *lock)
 			break;
 		sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
 		while (l->lock != 0)
-			nanosleep(&usec, NULL);
+			;	/* Spin */
 	}
 	oldsigmask = tmp_oldsigmask;
 }
@@ -177,7 +171,7 @@ rlock_acquire(void *lock)
 
 	atomic_add_int(&l->lock, RC_INCR);
 	while (l->lock & WAFLAG)
-		nanosleep(&usec, NULL);
+		;	/* Spin */
 }
 
 static void
@@ -191,7 +185,6 @@ wlock_acquire(void *lock)
 		if (cmpxchgl(0, WAFLAG, &l->lock) == 0)
 			break;
 		sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
-		nanosleep(&usec, NULL);
 	}
 	oldsigmask = tmp_oldsigmask;
 }
@@ -39,11 +39,6 @@
  * contain a count of readers desiring the lock. The algorithm requires
  * atomic "compare_and_store" and "add" operations, which we implement
  * using assembly language sequences in "rtld_start.S".
- *
- * These are spinlocks. When spinning we call nanosleep() for 1
- * microsecond each time around the loop. This will most likely yield
- * the CPU to other threads (including, we hope, the lockholder) allowing
- * them to make some progress.
  */
 
 #include <signal.h>
@@ -70,7 +65,6 @@ typedef struct Struct_Lock {
 	void *base;
 } Lock;
 
-static const struct timespec usec = { 0, 1000 };	/* 1 usec. */
 static sigset_t fullsigmask, oldsigmask;
 
 static void *
@@ -118,7 +112,7 @@ rlock_acquire(void *lock)
 
 	atomic_add_int(&l->lock, RC_INCR);
 	while (l->lock & WAFLAG)
-		nanosleep(&usec, NULL);
+		;	/* Spin */
 }
 
 static void
@@ -132,7 +126,6 @@ wlock_acquire(void *lock)
 		if (cmp0_and_store_int(&l->lock, WAFLAG) == 0)
 			break;
 		sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
-		nanosleep(&usec, NULL);
 	}
 	oldsigmask = tmp_oldsigmask;
 }
@@ -40,11 +40,6 @@
  * contain a count of readers desiring the lock. The algorithm requires
  * atomic "compare_and_store" and "add" operations, which we implement
  * using assembly language sequences in "rtld_start.S".
- *
- * These are spinlocks. When spinning we call nanosleep() for 1
- * microsecond each time around the loop. This will most likely yield
- * the CPU to other threads (including, we hope, the lockholder) allowing
- * them to make some progress.
  */
 
 #include <signal.h>
@@ -65,7 +60,6 @@ typedef struct Struct_Lock {
 	void *base;
 } Lock;
 
-static const struct timespec usec = { 0, 1000 };	/* 1 usec. */
 static sigset_t fullsigmask, oldsigmask;
 
 static void *
@@ -113,7 +107,7 @@ rlock_acquire(void *lock)
 
 	atomic_add_acq_int(&l->lock, RC_INCR);
 	while (l->lock & WAFLAG)
-		nanosleep(&usec, NULL);
+		;	/* Spin */
 }
 
 static void
@@ -127,7 +121,6 @@ wlock_acquire(void *lock)
 		if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG))
 			break;
 		sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
-		nanosleep(&usec, NULL);
 	}
 	oldsigmask = tmp_oldsigmask;
 }