Change the thread ID (thr_id_t) used for 1:1 threading from being a

pointer to the corresponding struct thread to the thread ID (lwpid_t)
assigned to that thread. The primary reason for this change is that
libthr now internally uses the same ID as the debugger and the kernel
when referencing a kernel thread. This allows us to implement the
support for debugging without additional translations and/or mappings.

To preserve the ABI, the 1:1 threading syscalls, including the umtx
locking API have not been changed to work on a lwpid_t. Instead the
1:1 threading syscalls operate on long and the umtx locking API has
not been changed except for the contested bit. Previously this was
the least significant bit. Now it's the most significant bit. Since
the contested bit should not be tested by userland, this change is
not expected to be visible. Just to be sure, UMTX_CONTESTED has been
removed from <sys/umtx.h>.

Reviewed by: mtm@
ABI preservation tested on: i386, ia64
This commit is contained in:
Marcel Moolenaar 2004-07-02 00:40:07 +00:00
parent c2589102b0
commit cd28f17da2
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=131431
10 changed files with 52 additions and 63 deletions

View File

@ -439,11 +439,11 @@ cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
name = pthread->name ? pthread->name : "unknown";
if ((pthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0)
_thread_printf(2, "Thread (%s:%u) already on condq\n",
pthread->name, pthread->uniqueid);
_thread_printf(2, "Thread (%s:%ld) already on condq\n",
pthread->name, pthread->thr_id);
if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0)
_thread_printf(2, "Thread (%s:%u) already on mutexq\n",
pthread->name, pthread->uniqueid);
_thread_printf(2, "Thread (%s:%ld) already on mutexq\n",
pthread->name, pthread->thr_id);
PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
/*

View File

@ -43,11 +43,8 @@
#include "thr_private.h"
#include "libc_private.h"
static u_int64_t next_uniqueid = 1;
#define OFF(f) offsetof(struct pthread, f)
int _thread_next_offset = OFF(tle.tqe_next);
int _thread_uniqueid_offset = OFF(uniqueid);
int _thread_name_offset = OFF(name);
int _thread_ctx_offset = OFF(ctx);
#undef OFF
@ -137,12 +134,6 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
}
new_thread->active_priority = new_thread->base_priority;
/*
* Initialise the unique id which GDB uses to
* track threads.
*/
new_thread->uniqueid = next_uniqueid++;
THREAD_LIST_LOCK;
/* Add the thread to the linked list of all threads: */

View File

@ -799,11 +799,11 @@ mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
name = pthread->name ? pthread->name : "unknown";
if ((pthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0)
_thread_printf(2, "Thread (%s:%u) already on condq\n",
pthread->name, pthread->uniqueid);
_thread_printf(2, "Thread (%s:%ld) already on condq\n",
pthread->name, pthread->thr_id);
if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0)
_thread_printf(2, "Thread (%s:%u) already on mutexq\n",
pthread->name, pthread->uniqueid);
_thread_printf(2, "Thread (%s:%ld) already on mutexq\n",
pthread->name, pthread->thr_id);
PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
/*
* For the common case of all threads having equal priority,

View File

@ -478,8 +478,7 @@ struct pthread {
#define PTHREAD_MAGIC ((u_int32_t) 0xd09ba115)
u_int32_t magic;
char *name;
u_int64_t uniqueid; /* for gdb */
thr_id_t thr_id;
long thr_id;
sigset_t savedsig;
int signest; /* blocked signal netsting level */
int ptdflags; /* used by other other threads

View File

@ -595,10 +595,10 @@
428 UNIMPL __acl_aclcheck_link
; XXX implement
429 UNIMPL sigwait
430 MNOPROTO { int thr_create(ucontext_t *ctx, thr_id_t *id, int flag s); }
430 MNOPROTO { int thr_create(ucontext_t *ctx, long *id, int flag s); }
431 MNOPROTO { void thr_exit(void); }
432 MNOPROTO { int thr_self(thr_id_t *id); }
433 MNOPROTO { int thr_kill(thr_id_t id, int sig); }
432 MNOPROTO { int thr_self(long *id); }
433 MNOPROTO { int thr_kill(long id, int sig); }
434 MNOPROTO { int _umtx_lock(struct umtx *umtx); }
435 MNOPROTO { int _umtx_unlock(struct umtx *umtx); }
436 MNOPROTO { int jail_attach(int jid); }
@ -608,4 +608,4 @@
440 UNIMPL kse_switchin
441 UNIMPL ksem_timedwait
442 MNOPROTO { int thr_suspend(const struct timespec *timeout); }
443 MNOPROTO { int thr_wake(thr_id_t id); }
443 MNOPROTO { int thr_wake(long id); }

View File

@ -118,11 +118,12 @@ thr_exit1(void)
*/
int
thr_create(struct thread *td, struct thr_create_args *uap)
/* ucontext_t *ctx, thr_id_t *id, int flags */
/* ucontext_t *ctx, long *id, int flags */
{
struct kse *ke0;
struct thread *td0;
ucontext_t ctx;
long id;
int error;
if ((error = copyin(uap->ctx, &ctx, sizeof(ctx))))
@ -135,7 +136,8 @@ thr_create(struct thread *td, struct thr_create_args *uap)
* Try the copyout as soon as we allocate the td so we don't have to
* tear things down in a failure case below.
*/
if ((error = copyout(&td0, uap->id, sizeof(thr_id_t)))) {
id = td0->td_tid;
if ((error = copyout(&id, uap->id, sizeof(long)))) {
thread_free(td0);
return (error);
}
@ -163,7 +165,7 @@ thr_create(struct thread *td, struct thr_create_args *uap)
kse_free(ke0);
thread_free(td0);
goto out;
}
}
/* Link the thread and kse into the ksegrp and make it runnable. */
mtx_lock_spin(&sched_lock);
@ -190,11 +192,13 @@ thr_create(struct thread *td, struct thr_create_args *uap)
int
thr_self(struct thread *td, struct thr_self_args *uap)
/* thr_id_t *id */
/* long *id */
{
long id;
int error;
if ((error = copyout(&td, uap->id, sizeof(thr_id_t))))
id = td->td_tid;
if ((error = copyout(&id, uap->id, sizeof(long))))
return (error);
return (0);
@ -223,7 +227,7 @@ thr_exit(struct thread *td, struct thr_exit_args *uap)
int
thr_kill(struct thread *td, struct thr_kill_args *uap)
/* thr_id_t id, int sig */
/* long id, int sig */
{
struct thread *ttd;
struct proc *p;
@ -233,7 +237,7 @@ thr_kill(struct thread *td, struct thr_kill_args *uap)
error = 0;
PROC_LOCK(p);
FOREACH_THREAD_IN_PROC(p, ttd) {
if (ttd == uap->id)
if (ttd->td_tid == uap->id)
break;
}
if (ttd == NULL) {
@ -291,14 +295,13 @@ thr_suspend(struct thread *td, struct thr_suspend_args *uap)
int
thr_wake(struct thread *td, struct thr_wake_args *uap)
/* thr_id_t id */
/* long id */
{
struct thread *tdsleeper, *ttd;
struct thread *ttd;
tdsleeper = ((struct thread *)uap->id);
PROC_LOCK(td->td_proc);
FOREACH_THREAD_IN_PROC(td->td_proc, ttd) {
if (ttd == tdsleeper)
if (ttd->td_tid == uap->id)
break;
}
if (ttd == NULL) {
@ -306,9 +309,9 @@ thr_wake(struct thread *td, struct thr_wake_args *uap)
return (ESRCH);
}
mtx_lock_spin(&sched_lock);
tdsleeper->td_flags |= TDF_THRWAKEUP;
ttd->td_flags |= TDF_THRWAKEUP;
mtx_unlock_spin(&sched_lock);
wakeup_one((void *)tdsleeper);
wakeup_one((void *)ttd);
PROC_UNLOCK(td->td_proc);
return (0);
}

View File

@ -29,6 +29,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
@ -62,6 +63,7 @@ MTX_SYSINIT(umtx, &umtx_lock, "umtx", MTX_DEF);
#define UMTX_LOCK() mtx_lock(&umtx_lock);
#define UMTX_UNLOCK() mtx_unlock(&umtx_lock);
#define UMTX_CONTESTED LONG_MIN
static struct umtx_q *umtx_lookup(struct thread *, struct umtx *umtx);
static struct umtx_q *umtx_insert(struct thread *, struct umtx *umtx);
@ -161,7 +163,7 @@ _umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
* Try the uncontested case. This should be done in userland.
*/
owner = casuptr((intptr_t *)&umtx->u_owner,
UMTX_UNOWNED, (intptr_t)td);
UMTX_UNOWNED, td->td_tid);
/* The address was invalid. */
if (owner == -1)
@ -174,7 +176,7 @@ _umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
/* If no one owns it but it is contested try to acquire it. */
if (owner == UMTX_CONTESTED) {
owner = casuptr((intptr_t *)&umtx->u_owner,
UMTX_CONTESTED, ((intptr_t)td | UMTX_CONTESTED));
UMTX_CONTESTED, td->td_tid | UMTX_CONTESTED);
/* The address was invalid. */
if (owner == -1)
@ -263,7 +265,7 @@ _umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
if ((owner = fuword(&umtx->u_owner)) == -1)
return (EFAULT);
if ((struct thread *)(owner & ~UMTX_CONTESTED) != td)
if ((owner & ~UMTX_CONTESTED) != td->td_tid)
return (EPERM);
/* We should only ever be in here for contested locks */

View File

@ -613,10 +613,10 @@
428 MSTD { int __acl_aclcheck_link(const char *path, \
acl_type_t type, struct acl *aclp); }
429 MSTD { int sigwait(const sigset_t *set, int *sig); }
430 MSTD { int thr_create(ucontext_t *ctx, thr_id_t *id, int flags); }
430 MSTD { int thr_create(ucontext_t *ctx, long *id, int flags); }
431 MSTD { void thr_exit(void); }
432 MSTD { int thr_self(thr_id_t *id); }
433 MSTD { int thr_kill(thr_id_t id, int sig); }
432 MSTD { int thr_self(long *id); }
433 MSTD { int thr_kill(long id, int sig); }
434 MSTD { int _umtx_lock(struct umtx *umtx); }
435 MSTD { int _umtx_unlock(struct umtx *umtx); }
436 MSTD { int jail_attach(int jid); }
@ -630,6 +630,6 @@
long val, long *loc); }
441 MNOSTD { int ksem_timedwait(semid_t id, struct timespec *abstime); }
442 MSTD { int thr_suspend(const struct timespec *timeout); }
443 MSTD { int thr_wake(thr_id_t id); }
443 MSTD { int thr_wake(long id); }
; Please copy any additions and changes to the following compatability tables:
; sys/compat/freebsd32/syscalls.master

View File

@ -30,11 +30,6 @@
#ifndef _SYS_THR_H_
#define _SYS_THR_H_
/*
* Globally unique thread id type.
*/
typedef void * thr_id_t;
#define THR_SUSPENDED 0x0001 /* Create the thread in the suspended state. */
/*
@ -42,12 +37,12 @@ typedef void * thr_id_t;
*/
#ifndef _KERNEL
int thr_create(ucontext_t *ctx, thr_id_t *id, int flags);
int thr_self(thr_id_t *id);
int thr_create(ucontext_t *ctx, long *id, int flags);
int thr_self(long *id);
void thr_exit(void);
int thr_kill(thr_id_t id, int sig);
int thr_kill(long id, int sig);
int thr_suspend(const struct timespec *timeout);
int thr_wake(thr_id_t id);
int thr_wake(long id);
#endif /* !_KERNEL */

View File

@ -35,11 +35,9 @@
*/
#define UMTX_UNOWNED 0x0
#define UMTX_CONTESTED 0x1
struct umtx {
thr_id_t u_owner; /* Owner of the mutex. */
void *u_owner; /* Owner of the mutex. */
};
#ifndef _KERNEL
@ -55,28 +53,29 @@ int _umtx_unlock(struct umtx *mtx);
* kernel to resolve failures.
*/
static __inline int
umtx_lock(struct umtx *umtx, thr_id_t id)
umtx_lock(struct umtx *umtx, long id)
{
if (atomic_cmpset_acq_ptr(&umtx->u_owner, UMTX_UNOWNED, id) == 0)
if (atomic_cmpset_acq_ptr(&umtx->u_owner, (void *)UMTX_UNOWNED,
(void *)id) == 0)
if (_umtx_lock(umtx) == -1)
return (errno);
return (0);
}
static __inline int
umtx_trylock(struct umtx *umtx, thr_id_t id)
umtx_trylock(struct umtx *umtx, long id)
{
if (atomic_cmpset_acq_ptr(&umtx->u_owner, UMTX_UNOWNED, id) == 0)
if (atomic_cmpset_acq_ptr(&umtx->u_owner, (void *)UMTX_UNOWNED,
(void *)id) == 0)
return (EBUSY);
return (0);
}
static __inline int
umtx_unlock(struct umtx *umtx, thr_id_t id)
umtx_unlock(struct umtx *umtx, long id)
{
if (atomic_cmpset_rel_ptr(&umtx->u_owner, id, UMTX_UNOWNED) == 0)
if (atomic_cmpset_rel_ptr(&umtx->u_owner, (void *)id,
(void *)UMTX_UNOWNED) == 0)
if (_umtx_unlock(umtx) == -1)
return (errno);
return (0);