Make libpthread KSE aware.
Reviewed by:	deischen, julian
Approved by:	-arch
commit	6077cee242
parent	ed825a4bd0
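The change repeated throughout the hunks below is the move from _setjmp()/___longjmp() jump buffers to the ucontext(3) API: thread and scheduler contexts are now created with getcontext() plus makecontext() and switched with swapcontext(). The following is a minimal, self-contained sketch of that pattern, not code from the tree; the identifiers (sched_ctx, thread_ctx, thread_main, STACK_SIZE) are invented for the example.

	/*
	 * Illustrative sketch of the ucontext(3) switch pattern adopted
	 * by this commit; none of these names appear in the diff.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <ucontext.h>

	#define STACK_SIZE	(64 * 1024)	/* illustrative stack size */

	static ucontext_t sched_ctx;	/* plays the role of _thread_kern_sched_ctx */
	static ucontext_t thread_ctx;	/* plays the role of a thread's ctx */

	static void
	thread_main(void)
	{
		printf("in thread context\n");
		/* Yield back to the scheduler, as _thread_kern_sched() now does. */
		swapcontext(&thread_ctx, &sched_ctx);
	}

	int
	main(void)
	{
		/* Initialize the context, then point it at its own stack. */
		getcontext(&thread_ctx);
		thread_ctx.uc_stack.ss_sp = malloc(STACK_SIZE);
		thread_ctx.uc_stack.ss_size = STACK_SIZE;
		thread_ctx.uc_link = &sched_ctx;	/* run here if thread_main returns */
		makecontext(&thread_ctx, thread_main, 0);

		/* Switch in, the way the scheduler now resumes a thread. */
		swapcontext(&sched_ctx, &thread_ctx);
		printf("back in the scheduler context\n");
		free(thread_ctx.uc_stack.ss_sp);
		return (0);
	}

Note that swapcontext() effectively returns twice, once in the scheduler and once when the thread is resumed, which is why the code after the call in _thread_kern_sched() clears _thread_kern_in_sched.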
@@ -44,20 +44,13 @@ _pthread_cancel(pthread_t pthread)
		break;

	case PS_SPINBLOCK:
	case PS_FDR_WAIT:
	case PS_FDW_WAIT:
	case PS_POLL_WAIT:
	case PS_SELECT_WAIT:
		/* Remove these threads from the work queue: */
		if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
		    != 0)
			PTHREAD_WORKQ_REMOVE(pthread);
		/* Fall through: */
	case PS_SIGTHREAD:
	case PS_SLEEP_WAIT:
	case PS_WAIT_WAIT:
	case PS_SIGSUSPEND:
	case PS_SIGWAIT:
		/* Interrupt and resume: */
		pthread->interrupted = 1;
		pthread->cancelflags |= PTHREAD_CANCELLING;

@@ -80,9 +73,6 @@ _pthread_cancel(pthread_t pthread)
	case PS_SUSPENDED:
	case PS_MUTEX_WAIT:
	case PS_COND_WAIT:
	case PS_FDLR_WAIT:
	case PS_FDLW_WAIT:
	case PS_FILE_WAIT:
		/*
		 * Threads in these states may be in queues.
		 * In order to preserve queue integrity, the
@@ -41,74 +41,13 @@
__weak_reference(__close, close);

int
_close(int fd)
{
	int	flags;
	int	ret;
	struct stat sb;
	struct fd_table_entry *entry;

	if ((fd == _thread_kern_pipe[0]) || (fd == _thread_kern_pipe[1])) {
		/*
		 * Don't allow silly programs to close the kernel pipe.
		 */
		errno = EBADF;
		ret = -1;
	}
	/*
	 * Lock the file descriptor while the file is closed and get
	 * the file descriptor status:
	 */
	else if (((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) &&
	    ((ret = __sys_fstat(fd, &sb)) == 0)) {
		/*
		 * Check if the file should be left as blocking.
		 *
		 * This is so that the file descriptors shared with a parent
		 * process aren't left set to non-blocking if the child
		 * closes them prior to exit. An example where this causes
		 * problems with /bin/sh is when a child closes stdin.
		 *
		 * Setting a file as blocking causes problems if a threaded
		 * parent accesses the file descriptor before the child exits.
		 * Once the threaded parent receives a SIGCHLD then it resets
		 * all of its files to non-blocking, and so it is then safe
		 * to access them.
		 *
		 * Pipes are not set to blocking when they are closed, as
		 * the parent and child will normally close the file
		 * descriptor of the end of the pipe that they are not
		 * using, which would then cause any reads to block
		 * indefinitely.
		 */
		if ((S_ISREG(sb.st_mode) || S_ISCHR(sb.st_mode))
		    && (_thread_fd_getflags(fd) & O_NONBLOCK) == 0) {
			/* Get the current flags: */
			flags = __sys_fcntl(fd, F_GETFL, NULL);
			/* Clear the nonblocking file descriptor flag: */
			__sys_fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
		}

		/* XXX: Assumes well behaved threads. */
		/* XXX: Defer real close to avoid race condition */
		entry = _thread_fd_table[fd];
		_thread_fd_table[fd] = NULL;
		free(entry);

		/* Close the file descriptor: */
		ret = __sys_close(fd);
	}
	return (ret);
}

int
__close(int fd)
{
	int	ret;

	_thread_enter_cancellation_point();
	ret = _close(fd);
	ret = __sys_close(fd);
	_thread_leave_cancellation_point();

	return ret;
@@ -121,27 +121,12 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
		 */
		new_thread->magic = PTHREAD_MAGIC;

		/* Initialise the thread for signals: */
		new_thread->sigmask = curthread->sigmask;
		new_thread->sigmask_seqno = 0;

		/* Initialize the signal frame: */
		new_thread->curframe = NULL;

		/* Initialise the jump buffer: */
		_setjmp(new_thread->ctx.jb);

		/*
		 * Set up new stack frame so that it looks like it
		 * returned from a longjmp() to the beginning of
		 * _thread_start().
		 */
		SET_RETURN_ADDR_JB(new_thread->ctx.jb, _thread_start);

		/* The stack starts high and builds down: */
		SET_STACK_JB(new_thread->ctx.jb,
		    (long)new_thread->stack + pattr->stacksize_attr
		    - sizeof(double));
		/* Initialise the machine context: */
		getcontext(&new_thread->ctx);
		new_thread->ctx.uc_stack.ss_sp = new_thread->stack;
		new_thread->ctx.uc_stack.ss_size =
		    pattr->stacksize_attr;
		makecontext(&new_thread->ctx, _thread_start, 1);

		/* Copy the thread attributes: */
		memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr));

@@ -182,8 +167,6 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
		new_thread->specific = NULL;
		new_thread->cleanup = NULL;
		new_thread->flags = 0;
		new_thread->poll_data.nfds = 0;
		new_thread->poll_data.fds = NULL;
		new_thread->continuation = NULL;

		/*

@@ -224,18 +207,8 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
		/* Return a pointer to the thread structure: */
		(*thread) = new_thread;

		if (f_gc != 0) {
			/* Install the scheduling timer: */
			itimer.it_interval.tv_sec = 0;
			itimer.it_interval.tv_usec = _clock_res_usec;
			itimer.it_value = itimer.it_interval;
			if (setitimer(_ITIMER_SCHED_TIMER, &itimer,
			    NULL) != 0)
				PANIC("Cannot set interval timer");
		}

		/* Schedule the new user thread: */
		_thread_kern_sched(NULL);
		_thread_kern_sched();

		/*
		 * Start a garbage collector thread

@@ -257,7 +230,7 @@ _thread_start(void)
{
	struct pthread *curthread = _get_curthread();

	/* We just left the scheduler via longjmp: */
	/* We just left the scheduler via swapcontext: */
	_thread_kern_in_sched = 0;

	/* Run the current thread's start routine with argument: */
@@ -45,42 +45,6 @@
__weak_reference(_pthread_exit, pthread_exit);

void _exit(int status)
{
	int	flags;
	int	i;
	struct itimerval itimer;

	/* Disable the interval timer: */
	itimer.it_interval.tv_sec = 0;
	itimer.it_interval.tv_usec = 0;
	itimer.it_value.tv_sec = 0;
	itimer.it_value.tv_usec = 0;
	setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL);

	/* Close the pthread kernel pipe: */
	__sys_close(_thread_kern_pipe[0]);
	__sys_close(_thread_kern_pipe[1]);

	/*
	 * Enter a loop to set all file descriptors to blocking
	 * if they were not created as non-blocking:
	 */
	for (i = 0; i < _thread_dtablesize; i++) {
		/* Check if this file descriptor is in use: */
		if (_thread_fd_table[i] != NULL &&
		    (_thread_fd_getflags(i) & O_NONBLOCK) == 0) {
			/* Get the current flags: */
			flags = __sys_fcntl(i, F_GETFL, NULL);
			/* Clear the nonblocking file descriptor flag: */
			__sys_fcntl(i, F_SETFL, flags & ~O_NONBLOCK);
		}
	}

	/* Call the _exit syscall: */
	__sys_exit(status);
}

void
_thread_exit(char *fname, int lineno, char *string)
{

@@ -120,9 +84,6 @@ _thread_exit_cleanup(void)
	 * internal to the threads library, including file and fd locks,
	 * are not visible to the application and need to be released.
	 */
	/* Unlock all owned fd locks: */
	_thread_fd_unlock_owned(curthread);

	/* Unlock all private mutexes: */
	_mutex_unlock_private(curthread);

@@ -163,12 +124,6 @@ _pthread_exit(void *status)
		_thread_cleanupspecific();
	}

	/* Free thread-specific poll_data structure, if allocated: */
	if (curthread->poll_data.fds != NULL) {
		free(curthread->poll_data.fds);
		curthread->poll_data.fds = NULL;
	}

	/*
	 * Lock the garbage collector mutex to ensure that the garbage
	 * collector is not using the dead thread list.
@@ -39,108 +39,6 @@
__weak_reference(__fcntl, fcntl);

int
_fcntl(int fd, int cmd,...)
{
	int	flags = 0;
	int	nonblock;
	int	oldfd;
	int	ret;
	va_list	ap;

	/* Lock the file descriptor: */
	if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) {
		/* Initialise the variable argument list: */
		va_start(ap, cmd);

		/* Process according to file control command type: */
		switch (cmd) {
		/* Duplicate a file descriptor: */
		case F_DUPFD:
			/*
			 * Get the file descriptor that the caller wants to
			 * use:
			 */
			oldfd = va_arg(ap, int);

			/* Initialise the file descriptor table entry: */
			if ((ret = __sys_fcntl(fd, cmd, oldfd)) < 0) {
			}
			/* Initialise the file descriptor table entry: */
			else if (_thread_fd_table_init(ret) != 0) {
				/* Quietly close the file: */
				__sys_close(ret);

				/* Reset the file descriptor: */
				ret = -1;
			} else {
				/*
				 * Save the file open flags so that they can
				 * be checked later:
				 */
				_thread_fd_setflags(ret,
				    _thread_fd_getflags(fd));
			}
			break;
		case F_SETFD:
			flags = va_arg(ap, int);
			ret = __sys_fcntl(fd, cmd, flags);
			break;
		case F_GETFD:
			ret = __sys_fcntl(fd, cmd, 0);
			break;
		case F_GETFL:
			ret = _thread_fd_getflags(fd);
			break;
		case F_SETFL:
			/*
			 * Get the file descriptor flags passed by the
			 * caller:
			 */
			flags = va_arg(ap, int);

			/*
			 * Check if the user wants a non-blocking file
			 * descriptor:
			 */
			nonblock = flags & O_NONBLOCK;

			/* Set the file descriptor flags: */
			if ((ret = __sys_fcntl(fd, cmd, flags | O_NONBLOCK)) != 0) {

			/* Get the flags so that we behave like the kernel: */
			} else if ((flags = __sys_fcntl(fd,
			    F_GETFL, 0)) == -1) {
				/* Error getting flags: */
				ret = -1;

			/*
			 * Check if the file descriptor is non-blocking
			 * with respect to the user:
			 */
			} else if (nonblock)
				/* A non-blocking descriptor: */
				_thread_fd_setflags(fd, flags | O_NONBLOCK);
			else
				/* Save the flags: */
				_thread_fd_setflags(fd, flags & ~O_NONBLOCK);
			break;
		default:
			/* Might want to make va_arg use a union */
			ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
			break;
		}

		/* Free variable arguments: */
		va_end(ap);

		/* Unlock the file descriptor: */
		_FD_UNLOCK(fd, FD_RDWR);
	}
	/* Return the completion status: */
	return (ret);
}

int
__fcntl(int fd, int cmd,...)
{

@@ -154,14 +52,14 @@ __fcntl(int fd, int cmd,...)
	case F_DUPFD:
	case F_SETFD:
	case F_SETFL:
		ret = _fcntl(fd, cmd, va_arg(ap, int));
		ret = __sys_fcntl(fd, cmd, va_arg(ap, int));
		break;
	case F_GETFD:
	case F_GETFL:
		ret = _fcntl(fd, cmd);
		ret = __sys_fcntl(fd, cmd);
		break;
	default:
		ret = _fcntl(fd, cmd, va_arg(ap, void *));
		ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
	}
	va_end(ap);
@@ -63,44 +63,8 @@ _fork(void)
	if ((ret = __sys_fork()) != 0) {
		/* Parent process or error. Nothing to do here. */
	} else {
		/* Close the pthread kernel pipe: */
		__sys_close(_thread_kern_pipe[0]);
		__sys_close(_thread_kern_pipe[1]);

		/* Reset signals pending for the running thread: */
		sigemptyset(&curthread->sigpend);

		/*
		 * Create a pipe that is written to by the signal handler to
		 * prevent signals being missed in calls to
		 * __sys_select:
		 */
		if (__sys_pipe(_thread_kern_pipe) != 0) {
			/* Cannot create pipe, so abort: */
			PANIC("Cannot create pthread kernel pipe for forked process");
		}
		/* Get the flags for the read pipe: */
		else if ((flags = __sys_fcntl(_thread_kern_pipe[0], F_GETFL, NULL)) == -1) {
			/* Abort this application: */
			abort();
		}
		/* Make the read pipe non-blocking: */
		else if (__sys_fcntl(_thread_kern_pipe[0], F_SETFL, flags | O_NONBLOCK) == -1) {
			/* Abort this application: */
			abort();
		}
		/* Get the flags for the write pipe: */
		else if ((flags = __sys_fcntl(_thread_kern_pipe[1], F_GETFL, NULL)) == -1) {
			/* Abort this application: */
			abort();
		}
		/* Make the write pipe non-blocking: */
		else if (__sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) {
			/* Abort this application: */
			abort();
		}
		/* Reinitialize the GC mutex: */
		else if (_mutex_reinit(&_gc_mutex) != 0) {
		if (_mutex_reinit(&_gc_mutex) != 0) {
			/* Abort this application: */
			PANIC("Cannot initialize GC mutex for forked process");
		}

@@ -180,32 +144,8 @@ _fork(void)
		/* No spinlocks yet: */
		_spinblock_count = 0;

		/* Don't queue signals yet: */
		_queue_signals = 0;

		/* Initialize the scheduling switch hook routine: */
		_sched_switch_hook = NULL;

		/* Clear out any locks in the file descriptor table: */
		for (i = 0; i < _thread_dtablesize; i++) {
			if (_thread_fd_table[i] != NULL) {
				/* Initialise the file locks: */
				memset(&_thread_fd_table[i]->lock, 0,
				    sizeof(_thread_fd_table[i]->lock));
				_thread_fd_table[i]->r_owner = NULL;
				_thread_fd_table[i]->w_owner = NULL;
				_thread_fd_table[i]->r_fname = NULL;
				_thread_fd_table[i]->w_fname = NULL;
				_thread_fd_table[i]->r_lineno = 0;;
				_thread_fd_table[i]->w_lineno = 0;;
				_thread_fd_table[i]->r_lockcount = 0;;
				_thread_fd_table[i]->w_lockcount = 0;;

				/* Initialise the read/write queues: */
				TAILQ_INIT(&_thread_fd_table[i]->r_queue);
				TAILQ_INIT(&_thread_fd_table[i]->w_queue);
			}
		}
	}
}

@@ -236,8 +176,5 @@ free_thread_resources(struct pthread *thread)
	if (thread->specific != NULL)
		free(thread->specific);

	if (thread->poll_data.fds != NULL)
		free(thread->poll_data.fds);

	free(thread);
}
@@ -37,25 +37,13 @@
__weak_reference(__fsync, fsync);

int
_fsync(int fd)
{
	int	ret;

	if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) {
		ret = __sys_fsync(fd);
		_FD_UNLOCK(fd, FD_RDWR);
	}
	return (ret);
}

int
__fsync(int fd)
{
	int	ret;

	_thread_enter_cancellation_point();
	ret = _fsync(fd);
	ret = __sys_fsync(fd);
	_thread_leave_cancellation_point();

	return ret;
@@ -56,20 +56,10 @@ struct s_thread_info {
/* Static variables: */
static const struct s_thread_info thread_info[] = {
	{PS_RUNNING	, "Running"},
	{PS_SIGTHREAD	, "Waiting on signal thread"},
	{PS_MUTEX_WAIT	, "Waiting on a mutex"},
	{PS_COND_WAIT	, "Waiting on a condition variable"},
	{PS_FDLR_WAIT	, "Waiting for a file read lock"},
	{PS_FDLW_WAIT	, "Waiting for a file write lock"},
	{PS_FDR_WAIT	, "Waiting for read"},
	{PS_FDW_WAIT	, "Waiting for write"},
	{PS_FILE_WAIT	, "Waiting for FILE lock"},
	{PS_POLL_WAIT	, "Waiting on poll"},
	{PS_SELECT_WAIT	, "Waiting on select"},
	{PS_SLEEP_WAIT	, "Sleeping"},
	{PS_WAIT_WAIT	, "Waiting process"},
	{PS_SIGSUSPEND	, "Suspended, waiting for a signal"},
	{PS_SIGWAIT	, "Waiting for a signal"},
	{PS_SPINBLOCK	, "Waiting for a spinlock"},
	{PS_JOIN	, "Waiting to join"},
	{PS_SUSPENDED	, "Suspended"},

@@ -169,34 +159,6 @@ _thread_dump_info(void)
		}
	}

	/* Output a header for file descriptors: */
	snprintf(s, sizeof(s), "\n\n=============\nFILE DESCRIPTOR "
	    "TABLE (table size %d)\n\n", _thread_dtablesize);
	__sys_write(fd, s, strlen(s));

	/* Enter a loop to report file descriptor lock usage: */
	for (i = 0; i < _thread_dtablesize; i++) {
		/*
		 * Check if memory is allocated for this file
		 * descriptor:
		 */
		if (_thread_fd_table[i] != NULL) {
			/* Report the file descriptor lock status: */
			snprintf(s, sizeof(s),
			    "fd[%3d] read owner %p count %d [%s:%d]\n"
			    "        write owner %p count %d [%s:%d]\n",
			    i, _thread_fd_table[i]->r_owner,
			    _thread_fd_table[i]->r_lockcount,
			    _thread_fd_table[i]->r_fname,
			    _thread_fd_table[i]->r_lineno,
			    _thread_fd_table[i]->w_owner,
			    _thread_fd_table[i]->w_lockcount,
			    _thread_fd_table[i]->w_fname,
			    _thread_fd_table[i]->w_lineno);
			__sys_write(fd, s, strlen(s));
		}
	}

	/* Close the dump file: */
	__sys_close(fd);
}

@@ -237,33 +199,6 @@ dump_thread(int fd, pthread_t pthread, int long_version)
	}
	/* Process according to thread state: */
	switch (pthread->state) {
	/* File descriptor read lock wait: */
	case PS_FDLR_WAIT:
	case PS_FDLW_WAIT:
	case PS_FDR_WAIT:
	case PS_FDW_WAIT:
		/* Write the lock details: */
		snprintf(s, sizeof(s), "fd %d[%s:%d]",
		    pthread->data.fd.fd,
		    pthread->data.fd.fname,
		    pthread->data.fd.branch);
		__sys_write(fd, s, strlen(s));
		snprintf(s, sizeof(s), "owner %pr/%pw\n",
		    _thread_fd_table[pthread->data.fd.fd]->r_owner,
		    _thread_fd_table[pthread->data.fd.fd]->w_owner);
		__sys_write(fd, s, strlen(s));
		break;
	case PS_SIGWAIT:
		snprintf(s, sizeof(s), "sigmask (hi)");
		__sys_write(fd, s, strlen(s));
		for (i = _SIG_WORDS - 1; i >= 0; i--) {
			snprintf(s, sizeof(s), "%08x\n",
			    pthread->sigmask.__bits[i]);
			__sys_write(fd, s, strlen(s));
		}
		snprintf(s, sizeof(s), "(lo)\n");
		__sys_write(fd, s, strlen(s));
		break;
	/*
	 * Trap other states that are not explicitly
	 * coded to dump information:
@@ -56,7 +56,6 @@
#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

@@ -163,7 +162,6 @@ _thread_init(void)
	int	sched_stack_size;	/* Size of scheduler stack. */

	struct clockinfo clockinfo;
	struct sigaction act;

	_pthread_page_size = getpagesize();
	_pthread_guard_default = getpagesize();

@@ -209,57 +207,9 @@ _thread_init(void)
			PANIC("Can't dup2");
	}

	/* Get the standard I/O flags before messing with them : */
	for (i = 0; i < 3; i++) {
		if (((_pthread_stdio_flags[i] =
		    __sys_fcntl(i, F_GETFL, NULL)) == -1) &&
		    (errno != EBADF))
			PANIC("Cannot get stdio flags");
	}

	/*
	 * Create a pipe that is written to by the signal handler to prevent
	 * signals being missed in calls to _select:
	 */
	if (__sys_pipe(_thread_kern_pipe) != 0) {
		/* Cannot create pipe, so abort: */
		PANIC("Cannot create kernel pipe");
	}

	/*
	 * Make sure the pipe does not get in the way of stdio:
	 */
	for (i = 0; i < 2; i++) {
		if (_thread_kern_pipe[i] < 3) {
			fd = __sys_fcntl(_thread_kern_pipe[i], F_DUPFD, 3);
			if (fd == -1)
				PANIC("Cannot create kernel pipe");
			__sys_close(_thread_kern_pipe[i]);
			_thread_kern_pipe[i] = fd;
		}
	}
	/* Get the flags for the read pipe: */
	if ((flags = __sys_fcntl(_thread_kern_pipe[0], F_GETFL, NULL)) == -1) {
		/* Abort this application: */
		PANIC("Cannot get kernel read pipe flags");
	}
	/* Make the read pipe non-blocking: */
	else if (__sys_fcntl(_thread_kern_pipe[0], F_SETFL, flags | O_NONBLOCK) == -1) {
		/* Abort this application: */
		PANIC("Cannot make kernel read pipe non-blocking");
	}
	/* Get the flags for the write pipe: */
	else if ((flags = __sys_fcntl(_thread_kern_pipe[1], F_GETFL, NULL)) == -1) {
		/* Abort this application: */
		PANIC("Cannot get kernel write pipe flags");
	}
	/* Make the write pipe non-blocking: */
	else if (__sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) {
		/* Abort this application: */
		PANIC("Cannot get kernel write pipe flags");
	}
	/* Allocate and initialize the ready queue: */
	else if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_LAST_PRIORITY) != 0) {
	if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_LAST_PRIORITY) !=
	    0) {
		/* Abort this application: */
		PANIC("Cannot allocate priority ready queue.");
	}

@@ -312,15 +262,19 @@ _thread_init(void)
		/* Set the main thread stack pointer. */
		_thread_initial->stack = _usrstack - PTHREAD_STACK_INITIAL;

		/* Set the stack attributes: */
		/* Set the stack attributes. */
		_thread_initial->attr.stackaddr_attr = _thread_initial->stack;
		_thread_initial->attr.stacksize_attr = PTHREAD_STACK_INITIAL;

		/* Setup the context for the scheduler: */
		_setjmp(_thread_kern_sched_jb);
		SET_STACK_JB(_thread_kern_sched_jb, _thread_kern_sched_stack +
		    sched_stack_size - sizeof(double));
		SET_RETURN_ADDR_JB(_thread_kern_sched_jb, _thread_kern_scheduler);
		getcontext(&_thread_kern_sched_ctx);
		_thread_kern_sched_ctx.uc_stack.ss_sp =
		    _thread_kern_sched_stack;
		_thread_kern_sched_ctx.uc_stack.ss_size = sched_stack_size;
		makecontext(&_thread_kern_sched_ctx, _thread_kern_scheduler, 1);

		/* Block all signals to the scheduler's context. */
		sigfillset(&_thread_kern_sched_ctx.uc_sigmask);

		/*
		 * Write a magic value to the thread structure

@@ -332,6 +286,11 @@ _thread_init(void)
		_thread_initial->cancelflags = PTHREAD_CANCEL_ENABLE |
		    PTHREAD_CANCEL_DEFERRED;

		/* Setup the context for initial thread. */
		getcontext(&_thread_initial->ctx);
		_thread_kern_sched_ctx.uc_stack.ss_sp = _thread_initial->stack;
		_thread_kern_sched_ctx.uc_stack.ss_size = PTHREAD_STACK_INITIAL;

		/* Default the priority of the initial thread: */
		_thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY;
		_thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY;

@@ -357,14 +316,8 @@ _thread_init(void)
		/* Initialize last active: */
		_thread_initial->last_active = (long) _sched_ticks;

		/* Initialize the initial context: */
		_thread_initial->curframe = NULL;

		/* Initialise the rest of the fields: */
		_thread_initial->poll_data.nfds = 0;
		_thread_initial->poll_data.fds = NULL;
		_thread_initial->sig_defer_count = 0;
		_thread_initial->yield_on_sig_undefer = 0;
		_thread_initial->specific = NULL;
		_thread_initial->cleanup = NULL;
		_thread_initial->flags = 0;

@@ -373,57 +326,6 @@ _thread_init(void)
		TAILQ_INSERT_HEAD(&_thread_list, _thread_initial, tle);
		_set_curthread(_thread_initial);

		/* Initialise the global signal action structure: */
		sigfillset(&act.sa_mask);
		act.sa_handler = (void (*) ()) _thread_sig_handler;
		act.sa_flags = SA_SIGINFO | SA_ONSTACK;

		/* Clear pending signals for the process: */
		sigemptyset(&_process_sigpending);

		/* Clear the signal queue: */
		memset(_thread_sigq, 0, sizeof(_thread_sigq));

		/* Enter a loop to get the existing signal status: */
		for (i = 1; i < NSIG; i++) {
			/* Check for signals which cannot be trapped: */
			if (i == SIGKILL || i == SIGSTOP) {
			}

			/* Get the signal handler details: */
			else if (__sys_sigaction(i, NULL,
			    &_thread_sigact[i - 1]) != 0) {
				/*
				 * Abort this process if signal
				 * initialisation fails:
				 */
				PANIC("Cannot read signal handler info");
			}

			/* Initialize the SIG_DFL dummy handler count. */
			_thread_dfl_count[i] = 0;
		}

		/*
		 * Install the signal handler for the most important
		 * signals that the user-thread kernel needs. Actually
		 * SIGINFO isn't really needed, but it is nice to have.
		 */
		if (__sys_sigaction(_SCHED_SIGNAL, &act, NULL) != 0 ||
		    __sys_sigaction(SIGINFO, &act, NULL) != 0 ||
		    __sys_sigaction(SIGCHLD, &act, NULL) != 0) {
			/*
			 * Abort this process if signal initialisation fails:
			 */
			PANIC("Cannot initialise signal handler");
		}
		_thread_sigact[_SCHED_SIGNAL - 1].sa_flags = SA_SIGINFO;
		_thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO;
		_thread_sigact[SIGCHLD - 1].sa_flags = SA_SIGINFO;

		/* Get the process signal mask: */
		__sys_sigprocmask(SIG_SETMASK, NULL, &_process_sigmask);

		/* Get the kernel clockrate: */
		mib[0] = CTL_KERN;
		mib[1] = KERN_CLOCKRATE;

@@ -432,50 +334,6 @@ _thread_init(void)
		_clock_res_usec = clockinfo.tick > CLOCK_RES_USEC_MIN ?
		    clockinfo.tick : CLOCK_RES_USEC_MIN;

		/* Get the table size: */
		if ((_thread_dtablesize = getdtablesize()) < 0) {
			/*
			 * Cannot get the system defined table size, so abort
			 * this process.
			 */
			PANIC("Cannot get dtablesize");
		}
		/* Allocate memory for the file descriptor table: */
		if ((_thread_fd_table = (struct fd_table_entry **) malloc(sizeof(struct fd_table_entry *) * _thread_dtablesize)) == NULL) {
			/* Avoid accesses to file descriptor table on exit: */
			_thread_dtablesize = 0;

			/*
			 * Cannot allocate memory for the file descriptor
			 * table, so abort this process.
			 */
			PANIC("Cannot allocate memory for file descriptor table");
		}
		/* Allocate memory for the pollfd table: */
		if ((_thread_pfd_table = (struct pollfd *) malloc(sizeof(struct pollfd) * _thread_dtablesize)) == NULL) {
			/*
			 * Cannot allocate memory for the file descriptor
			 * table, so abort this process.
			 */
			PANIC("Cannot allocate memory for pollfd table");
		} else {
			/*
			 * Enter a loop to initialise the file descriptor
			 * table:
			 */
			for (i = 0; i < _thread_dtablesize; i++) {
				/* Initialise the file descriptor table: */
				_thread_fd_table[i] = NULL;
			}

			/* Initialize stdio file descriptor table entries: */
			for (i = 0; i < 3; i++) {
				if ((_thread_fd_table_init(i) != 0) &&
				    (errno != EBADF))
					PANIC("Cannot initialize stdio file "
					    "descriptor table entry");
			}
		}
	}

	/* Initialise the garbage collector mutex and condition variable. */
@@ -38,7 +38,6 @@
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <setjmp.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/signalvar.h>

@@ -60,7 +59,7 @@

/* Static function prototype definitions: */
static void
thread_kern_poll(int wait_reqd);
thread_kern_idle(void);

static void
dequeue_signals(void);

@@ -70,37 +69,9 @@ thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);

/* Static variables: */
static int last_tick = 0;
static int called_from_handler = 0;

/*
 * This is called when a signal handler finishes and wants to
 * return to a previous frame.
 */
void
_thread_kern_sched_frame(struct pthread_signal_frame *psf)
{
	struct pthread *curthread = _get_curthread();

	/*
	 * Flag the pthread kernel as executing scheduler code
	 * to avoid a signal from interrupting this execution and
	 * corrupting the (soon-to-be) current frame.
	 */
	_thread_kern_in_sched = 1;

	/* Restore the signal frame: */
	_thread_sigframe_restore(curthread, psf);

	/* The signal mask was restored; check for any pending signals: */
	curthread->check_pending = 1;

	/* Switch to the thread scheduler: */
	___longjmp(_thread_kern_sched_jb, 1);
}

void
_thread_kern_sched(ucontext_t *ucp)
_thread_kern_sched(void)
{
	struct pthread *curthread = _get_curthread();

@@ -111,78 +82,40 @@ _thread_kern_sched(ucontext_t *ucp)
	 */
	_thread_kern_in_sched = 1;

	/* Check if this function was called from the signal handler: */
	if (ucp != NULL) {
		/* XXX - Save FP registers? */
		FP_SAVE_UC(ucp);
		called_from_handler = 1;
		DBG_MSG("Entering scheduler due to signal\n");
	}
	/* Switch into the scheduler's context. */
	swapcontext(&curthread->ctx, &_thread_kern_sched_ctx);
	DBG_MSG("Returned from swapcontext, thread %p\n", curthread);

	/* Save the state of the current thread: */
	if (_setjmp(curthread->ctx.jb) != 0) {
		DBG_MSG("Returned from ___longjmp, thread %p\n",
		    curthread);
	/*
	 * This point is reached when a longjmp() is called
	 * to restore the state of a thread.
	 *
	 * This is the normal way out of the scheduler.
	 */
	_thread_kern_in_sched = 0;

	if (curthread->sig_defer_count == 0) {
		if (((curthread->cancelflags &
		    PTHREAD_AT_CANCEL_POINT) == 0) &&
		    ((curthread->cancelflags &
		    PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
			/*
			 * Cancellations override signals.
			 *
			 * Stick a cancellation point at the
			 * start of each async-cancellable
			 * thread's resumption.
			 *
			 * We allow threads woken at cancel
			 * points to do their own checks.
			 */
			pthread_testcancel();
	}

	if (_sched_switch_hook != NULL) {
		/* Run the installed switch hook: */
		thread_run_switch_hook(_last_user_thread, curthread);
	}
	if (ucp == NULL)
		return;
	else {
		/* XXX - Restore FP registers? */
		FP_RESTORE_UC(ucp);
	/*
	 * This point is reached when swapcontext() is called
	 * to restore the state of a thread.
	 *
	 * This is the normal way out of the scheduler.
	 */
	_thread_kern_in_sched = 0;

	if (curthread->sig_defer_count == 0) {
		if (((curthread->cancelflags &
		    PTHREAD_AT_CANCEL_POINT) == 0) &&
		    ((curthread->cancelflags &
		    PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
			/*
			 * Set the process signal mask in the context; it
			 * could have changed by the handler.
			 * Stick a cancellation point at the
			 * start of each async-cancellable
			 * thread's resumption.
			 *
			 * We allow threads woken at cancel
			 * points to do their own checks.
			 */
			ucp->uc_sigmask = _process_sigmask;

			/* Resume the interrupted thread: */
			__sys_sigreturn(ucp);
		}
			pthread_testcancel();
	}

	if (_sched_switch_hook != NULL) {
		/* Run the installed switch hook: */
		thread_run_switch_hook(_last_user_thread, curthread);
	}
	/* Switch to the thread scheduler: */
	___longjmp(_thread_kern_sched_jb, 1);
}

void
_thread_kern_sched_sig(void)
{
	struct pthread *curthread = _get_curthread();

	curthread->check_pending = 1;
	_thread_kern_sched(NULL);
}

void
_thread_kern_scheduler(void)
{

@@ -193,48 +126,28 @@ _thread_kern_scheduler(void)
	unsigned int current_tick;
	int add_to_prioq;

	/* If the currently running thread is a user thread, save it: */
	if ((curthread->flags & PTHREAD_FLAGS_PRIVATE) == 0)
		_last_user_thread = curthread;

	if (called_from_handler != 0) {
		called_from_handler = 0;

		/*
		 * We were called from a signal handler; restore the process
		 * signal mask.
		 */
		if (__sys_sigprocmask(SIG_SETMASK,
		    &_process_sigmask, NULL) != 0)
			PANIC("Unable to restore process mask after signal");
	}

	/*
	 * Enter a scheduling loop that finds the next thread that is
	 * ready to run. This loop completes when there are no more threads
	 * in the global list or when a thread has its state restored by
	 * either a sigreturn (if the state was saved as a sigcontext) or a
	 * longjmp (if the state was saved by a setjmp).
	 * in the global list. It is interrupted each time a thread is
	 * scheduled, but will continue when we return.
	 */
	while (!(TAILQ_EMPTY(&_thread_list))) {

		/* If the currently running thread is a user thread, save it: */
		if ((curthread->flags & PTHREAD_FLAGS_PRIVATE) == 0)
			_last_user_thread = curthread;

		/* Get the current time of day: */
		GET_CURRENT_TOD(tv);
		TIMEVAL_TO_TIMESPEC(&tv, &ts);
		current_tick = _sched_ticks;

		/*
		 * Protect the scheduling queues from access by the signal
		 * handler.
		 */
		_queue_signals = 1;
		add_to_prioq = 0;

		if (curthread != &_thread_kern_thread) {
			/*
			 * This thread no longer needs to yield the CPU.
			 */
			curthread->yield_on_sig_undefer = 0;

			if (curthread->state != PS_RUNNING) {
				/*
				 * Save the current time as the time that the

@@ -278,14 +191,8 @@ _thread_kern_scheduler(void)
				 * operations or timeouts:
				 */
				case PS_DEADLOCK:
				case PS_FDLR_WAIT:
				case PS_FDLW_WAIT:
				case PS_FILE_WAIT:
				case PS_JOIN:
				case PS_MUTEX_WAIT:
				case PS_SIGSUSPEND:
				case PS_SIGTHREAD:
				case PS_SIGWAIT:
				case PS_WAIT_WAIT:
					/* No timeouts for these states: */
					curthread->wakeup_time.tv_sec = -1;

@@ -318,62 +225,9 @@ _thread_kern_scheduler(void)
					_spinblock_count++;

					/* FALLTHROUGH */
				case PS_FDR_WAIT:
				case PS_FDW_WAIT:
				case PS_POLL_WAIT:
				case PS_SELECT_WAIT:
					/* Restart the time slice: */
					curthread->slice_usec = -1;

					/* Insert into the waiting queue: */
					PTHREAD_WAITQ_INSERT(curthread);

					/* Insert into the work queue: */
					PTHREAD_WORKQ_INSERT(curthread);
					break;
				}

				/*
				 * Are there pending signals for this thread?
				 *
				 * This check has to be performed after the thread
				 * has been placed in the queue(s) appropriate for
				 * its state. The process of adding pending signals
				 * can change a threads state, which in turn will
				 * attempt to add or remove the thread from any
				 * scheduling queue to which it belongs.
				 */
				if (curthread->check_pending != 0) {
					curthread->check_pending = 0;
					_thread_sig_check_pending(curthread);
				}
			}

			/*
			 * Avoid polling file descriptors if there are none
			 * waiting:
			 */
			if (TAILQ_EMPTY(&_workq) != 0) {
			}
			/*
			 * Poll file descriptors only if a new scheduling signal
			 * has occurred or if we have no more runnable threads.
			 */
			else if (((current_tick = _sched_ticks) != last_tick) ||
			    ((curthread->state != PS_RUNNING) &&
			    (PTHREAD_PRIOQ_FIRST() == NULL))) {
				/* Unprotect the scheduling queues: */
				_queue_signals = 0;

				/*
				 * Poll file descriptors to update the state of threads
				 * waiting on file I/O where data may be available:
				 */
				thread_kern_poll(0);

				/* Protect the scheduling queues: */
				_queue_signals = 1;
			}
			last_tick = current_tick;

			/*

@@ -389,25 +243,16 @@ _thread_kern_scheduler(void)
		    (pthread->wakeup_time.tv_sec < ts.tv_sec) ||
		    ((pthread->wakeup_time.tv_sec == ts.tv_sec) &&
		    (pthread->wakeup_time.tv_nsec <= ts.tv_nsec)))) {
			switch (pthread->state) {
			case PS_POLL_WAIT:
			case PS_SELECT_WAIT:
				/* Return zero file descriptors ready: */
				pthread->data.poll_data->nfds = 0;
				/* FALLTHROUGH */
			default:
				/*
				 * Remove this thread from the waiting queue
				 * (and work queue if necessary) and place it
				 * in the ready queue.
				 */
				PTHREAD_WAITQ_CLEARACTIVE();
				if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
					PTHREAD_WORKQ_REMOVE(pthread);
				PTHREAD_NEW_STATE(pthread, PS_RUNNING);
				PTHREAD_WAITQ_SETACTIVE();
				break;
			}
			/*
			 * Remove this thread from the waiting queue
			 * (and work queue if necessary) and place it
			 * in the ready queue.
			 */
			PTHREAD_WAITQ_CLEARACTIVE();
			if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
				PTHREAD_WORKQ_REMOVE(pthread);
			PTHREAD_NEW_STATE(pthread, PS_RUNNING);
			PTHREAD_WAITQ_SETACTIVE();
			/*
			 * Flag the timeout in the thread structure:
			 */

@@ -483,14 +328,11 @@ _thread_kern_scheduler(void)
			DBG_MSG("No runnable threads, using kernel thread %p\n",
			    curthread);

			/* Unprotect the scheduling queues: */
			_queue_signals = 0;

			/*
			 * There are no threads ready to run, so wait until
			 * something happens that changes this condition:
			 */
			thread_kern_poll(1);
			thread_kern_idle();

			/*
			 * This process' usage will likely be very small

@@ -503,54 +345,13 @@ _thread_kern_scheduler(void)
			gettimeofday((struct timeval *) &_sched_tod, NULL);

			/* Check once more for a runnable thread: */
			_queue_signals = 1;
			pthread_h = PTHREAD_PRIOQ_FIRST();
			_queue_signals = 0;
		}

		if (pthread_h != NULL) {
			/* Remove the thread from the ready queue: */
			PTHREAD_PRIOQ_REMOVE(pthread_h);

			/* Unprotect the scheduling queues: */
			_queue_signals = 0;

			/*
			 * Check for signals queued while the scheduling
			 * queues were protected:
			 */
			while (_sigq_check_reqd != 0) {
				/* Clear before handling queued signals: */
				_sigq_check_reqd = 0;

				/* Protect the scheduling queues again: */
				_queue_signals = 1;

				dequeue_signals();

				/*
				 * Check for a higher priority thread that
				 * became runnable due to signal handling.
				 */
				if (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
				    (pthread->active_priority > pthread_h->active_priority)) {
					/* Remove the thread from the ready queue: */
					PTHREAD_PRIOQ_REMOVE(pthread);

					/*
					 * Insert the lower priority thread
					 * at the head of its priority list:
					 */
					PTHREAD_PRIOQ_INSERT_HEAD(pthread_h);

					/* There's a new thread in town: */
					pthread_h = pthread;
				}

				/* Unprotect the scheduling queues: */
				_queue_signals = 0;
			}

			/* Make the selected thread the current thread: */
			_set_curthread(pthread_h);
			curthread = pthread_h;

@@ -584,13 +385,7 @@ _thread_kern_scheduler(void)
			/*
			 * Continue the thread at its current frame:
			 */
#if NOT_YET
			_setcontext(&curthread->ctx.uc);
#else
			___longjmp(curthread->ctx.jb, 1);
#endif
			/* This point should not be reached. */
			PANIC("Thread has returned from sigreturn or longjmp");
			swapcontext(&_thread_kern_sched_ctx, &curthread->ctx);
		}
	}

@@ -610,19 +405,13 @@ _thread_kern_sched_state(enum pthread_state state, char *fname, int lineno)
	 */
	_thread_kern_in_sched = 1;

	/*
	 * Prevent the signal handler from fiddling with this thread
	 * before its state is set and is placed into the proper queue.
	 */
	_queue_signals = 1;

	/* Change the state of the current thread: */
	curthread->state = state;
	curthread->fname = fname;
	curthread->lineno = lineno;

	/* Schedule the next thread that is ready: */
	_thread_kern_sched(NULL);
	_thread_kern_sched();
}

void

@@ -638,13 +427,6 @@ _thread_kern_sched_state_unlock(enum pthread_state state,
	 */
	_thread_kern_in_sched = 1;

	/*
	 * Prevent the signal handler from fiddling with this thread
	 * before its state is set and it is placed into the proper
	 * queue(s).
	 */
	_queue_signals = 1;

	/* Change the state of the current thread: */
	curthread->state = state;
	curthread->fname = fname;

@@ -653,13 +435,12 @@ _thread_kern_sched_state_unlock(enum pthread_state state,
	_SPINUNLOCK(lock);

	/* Schedule the next thread that is ready: */
	_thread_kern_sched(NULL);
	_thread_kern_sched();
}

static void
thread_kern_poll(int wait_reqd)
thread_kern_idle()
{
	int	count = 0;
	int	i, found;
	int	kern_pipe_added = 0;
	int	nfds = 0;

@@ -668,57 +449,35 @@ thread_kern_poll(int wait_reqd)
	struct timespec ts;
	struct timeval tv;

	/* Check if the caller wants to wait: */
	if (wait_reqd == 0) {
		timeout_ms = 0;
	/* Get the current time of day: */
	GET_CURRENT_TOD(tv);
	TIMEVAL_TO_TIMESPEC(&tv, &ts);

	pthread = TAILQ_FIRST(&_waitingq);

	if ((pthread == NULL) || (pthread->wakeup_time.tv_sec == -1)) {
		/*
		 * Either there are no threads in the waiting queue,
		 * or there are no threads that can timeout.
		 */
		PANIC("Would idle forever");
	}
	else if (pthread->wakeup_time.tv_sec - ts.tv_sec > 60000)
		/* Limit maximum timeout to prevent rollover. */
		timeout_ms = 60000;
	else {
		/* Get the current time of day: */
		GET_CURRENT_TOD(tv);
		TIMEVAL_TO_TIMESPEC(&tv, &ts);

		_queue_signals = 1;
		pthread = TAILQ_FIRST(&_waitingq);
		_queue_signals = 0;

		if ((pthread == NULL) || (pthread->wakeup_time.tv_sec == -1)) {
			/*
			 * Either there are no threads in the waiting queue,
			 * or there are no threads that can timeout.
			 */
			timeout_ms = INFTIM;
		}
		else if (pthread->wakeup_time.tv_sec - ts.tv_sec > 60000)
			/* Limit maximum timeout to prevent rollover. */
			timeout_ms = 60000;
		else {
			/*
			 * Calculate the time left for the next thread to
			 * timeout:
			 */
			timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) *
			    1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec) /
			    1000000);
			/*
			 * Don't allow negative timeouts:
			 */
			if (timeout_ms < 0)
				timeout_ms = 0;
		}
	}

	/* Protect the scheduling queues: */
	_queue_signals = 1;

	/*
	 * Check to see if the signal queue needs to be walked to look
	 * for threads awoken by a signal while in the scheduler.
	 */
	if (_sigq_check_reqd != 0) {
		/* Reset flag before handling queued signals: */
		_sigq_check_reqd = 0;

		dequeue_signals();
		/*
		 * Calculate the time left for the next thread to
		 * timeout:
		 */
		timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) *
		    1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec) /
		    1000000);
		/*
		 * Only idle if we would be.
		 */
		if (timeout_ms <= 0)
			return;
	}

	/*

@@ -733,219 +492,11 @@ thread_kern_poll(int wait_reqd)
	}

	/*
	 * Form the poll table:
	 * Idle.
	 */
	nfds = 0;
	if (timeout_ms != 0) {
		/* Add the kernel pipe to the poll table: */
		_thread_pfd_table[nfds].fd = _thread_kern_pipe[0];
		_thread_pfd_table[nfds].events = POLLRDNORM;
		_thread_pfd_table[nfds].revents = 0;
		nfds++;
		kern_pipe_added = 1;
	}
	__sys_poll(NULL, 0, timeout_ms);

	PTHREAD_WAITQ_SETACTIVE();
	TAILQ_FOREACH(pthread, &_workq, qe) {
		switch (pthread->state) {
		case PS_SPINBLOCK:
			/*
			 * If the lock is available, let the thread run.
			 */
			if (pthread->data.spinlock->access_lock == 0) {
				PTHREAD_WAITQ_CLEARACTIVE();
				PTHREAD_WORKQ_REMOVE(pthread);
				PTHREAD_NEW_STATE(pthread,PS_RUNNING);
				PTHREAD_WAITQ_SETACTIVE();
				/* One less thread in a spinblock state: */
				_spinblock_count--;
				/*
				 * Since there is at least one runnable
				 * thread, disable the wait.
				 */
				timeout_ms = 0;
			}
			break;

		/* File descriptor read wait: */
		case PS_FDR_WAIT:
			/* Limit number of polled files to table size: */
			if (nfds < _thread_dtablesize) {
				_thread_pfd_table[nfds].events = POLLRDNORM;
				_thread_pfd_table[nfds].fd = pthread->data.fd.fd;
				nfds++;
			}
			break;

		/* File descriptor write wait: */
		case PS_FDW_WAIT:
			/* Limit number of polled files to table size: */
			if (nfds < _thread_dtablesize) {
				_thread_pfd_table[nfds].events = POLLWRNORM;
				_thread_pfd_table[nfds].fd = pthread->data.fd.fd;
				nfds++;
			}
			break;

		/* File descriptor poll or select wait: */
		case PS_POLL_WAIT:
		case PS_SELECT_WAIT:
			/* Limit number of polled files to table size: */
			if (pthread->data.poll_data->nfds + nfds <
			    _thread_dtablesize) {
				for (i = 0; i < pthread->data.poll_data->nfds; i++) {
					_thread_pfd_table[nfds + i].fd =
					    pthread->data.poll_data->fds[i].fd;
					_thread_pfd_table[nfds + i].events =
					    pthread->data.poll_data->fds[i].events;
				}
				nfds += pthread->data.poll_data->nfds;
			}
			break;

		/* Other states do not depend on file I/O. */
		default:
			break;
		}
	}
	PTHREAD_WAITQ_CLEARACTIVE();

	/*
	 * Wait for a file descriptor to be ready for read, write, or
	 * an exception, or a timeout to occur:
	 */
	count = __sys_poll(_thread_pfd_table, nfds, timeout_ms);

	if (kern_pipe_added != 0)
		/*
		 * Remove the pthread kernel pipe file descriptor
		 * from the pollfd table:
		 */
		nfds = 1;
	else
		nfds = 0;

	/*
	 * Check if it is possible that there are bytes in the kernel
	 * read pipe waiting to be read:
	 */
	if (count < 0 || ((kern_pipe_added != 0) &&
	    (_thread_pfd_table[0].revents & POLLRDNORM))) {
		/*
		 * If the kernel read pipe was included in the
		 * count:
		 */
		if (count > 0) {
			/* Decrement the count of file descriptors: */
			count--;
		}

		if (_sigq_check_reqd != 0) {
			/* Reset flag before handling signals: */
			_sigq_check_reqd = 0;

			dequeue_signals();
		}
	}

	/*
	 * Check if any file descriptors are ready:
	 */
	if (count > 0) {
		/*
		 * Enter a loop to look for threads waiting on file
		 * descriptors that are flagged as available by the
		 * _poll syscall:
		 */
		PTHREAD_WAITQ_SETACTIVE();
		TAILQ_FOREACH(pthread, &_workq, qe) {
			switch (pthread->state) {
			case PS_SPINBLOCK:
				/*
				 * If the lock is available, let the thread run.
				 */
				if (pthread->data.spinlock->access_lock == 0) {
					PTHREAD_WAITQ_CLEARACTIVE();
					PTHREAD_WORKQ_REMOVE(pthread);
					PTHREAD_NEW_STATE(pthread,PS_RUNNING);
					PTHREAD_WAITQ_SETACTIVE();

					/*
					 * One less thread in a spinblock state:
					 */
					_spinblock_count--;
				}
				break;

			/* File descriptor read wait: */
			case PS_FDR_WAIT:
				if ((nfds < _thread_dtablesize) &&
				    (_thread_pfd_table[nfds].revents
				    & (POLLRDNORM|POLLERR|POLLHUP|POLLNVAL))
				    != 0) {
					PTHREAD_WAITQ_CLEARACTIVE();
					PTHREAD_WORKQ_REMOVE(pthread);
					PTHREAD_NEW_STATE(pthread,PS_RUNNING);
					PTHREAD_WAITQ_SETACTIVE();
				}
				nfds++;
				break;

			/* File descriptor write wait: */
			case PS_FDW_WAIT:
				if ((nfds < _thread_dtablesize) &&
				    (_thread_pfd_table[nfds].revents
				    & (POLLWRNORM|POLLERR|POLLHUP|POLLNVAL))
				    != 0) {
					PTHREAD_WAITQ_CLEARACTIVE();
					PTHREAD_WORKQ_REMOVE(pthread);
					PTHREAD_NEW_STATE(pthread,PS_RUNNING);
					PTHREAD_WAITQ_SETACTIVE();
				}
				nfds++;
				break;

			/* File descriptor poll or select wait: */
			case PS_POLL_WAIT:
			case PS_SELECT_WAIT:
				if (pthread->data.poll_data->nfds + nfds <
				    _thread_dtablesize) {
					/*
					 * Enter a loop looking for I/O
					 * readiness:
					 */
					found = 0;
					for (i = 0; i < pthread->data.poll_data->nfds; i++) {
						if (_thread_pfd_table[nfds + i].revents != 0) {
							pthread->data.poll_data->fds[i].revents =
							    _thread_pfd_table[nfds + i].revents;
							found++;
						}
					}

					/* Increment before destroying: */
					nfds += pthread->data.poll_data->nfds;

					if (found != 0) {
						pthread->data.poll_data->nfds = found;
						PTHREAD_WAITQ_CLEARACTIVE();
						PTHREAD_WORKQ_REMOVE(pthread);
						PTHREAD_NEW_STATE(pthread,PS_RUNNING);
						PTHREAD_WAITQ_SETACTIVE();
					}
				}
				else
					nfds += pthread->data.poll_data->nfds;
				break;

			/* Other states do not depend on file I/O. */
			default:
				break;
			}
		}
		PTHREAD_WAITQ_CLEARACTIVE();
	}
	else if (_spinblock_count != 0) {
	if (_spinblock_count != 0) {
		/*
		 * Enter a loop to look for threads waiting on a spinlock
		 * that is now available.

@@ -971,22 +522,6 @@ thread_kern_poll(int wait_reqd)
		}
		PTHREAD_WAITQ_CLEARACTIVE();
	}

	/* Unprotect the scheduling queues: */
	_queue_signals = 0;

	while (_sigq_check_reqd != 0) {
		/* Handle queued signals: */
		_sigq_check_reqd = 0;

		/* Protect the scheduling queues: */
		_queue_signals = 1;

		dequeue_signals();

		/* Unprotect the scheduling queues: */
		_queue_signals = 0;
	}
}

void

@@ -1057,12 +592,6 @@ _thread_kern_sig_undefer(void)
	/* Reenable signals: */
	curthread->sig_defer_count = 0;

	/*
	 * Check if there are queued signals:
	 */
	if (_sigq_check_reqd != 0)
		_thread_kern_sched(NULL);

	/*
	 * Check for asynchronous cancellation before delivering any
	 * pending signals:

@@ -1070,44 +599,9 @@ _thread_kern_sig_undefer(void)
	if (((curthread->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) &&
	    ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
		pthread_testcancel();

	/*
	 * If there are pending signals or this thread has
	 * to yield the CPU, call the kernel scheduler:
	 *
	 * XXX - Come back and revisit the pending signal problem
	 */
	if ((curthread->yield_on_sig_undefer != 0) ||
	    SIGNOTEMPTY(curthread->sigpend)) {
		curthread->yield_on_sig_undefer = 0;
		_thread_kern_sched(NULL);
	}
	}
}

static void
dequeue_signals(void)
{
	char	bufr[128];
	int	num;

	/*
	 * Enter a loop to clear the pthread kernel pipe:
	 */
	while (((num = __sys_read(_thread_kern_pipe[0], bufr,
	    sizeof(bufr))) > 0) || (num == -1 && errno == EINTR)) {
	}
	if ((num < 0) && (errno != EAGAIN)) {
		/*
		 * The only error we should expect is if there is
		 * no data to read.
		 */
		PANIC("Unable to read from thread kernel pipe");
	}
	/* Handle any pending signals: */
	_thread_sig_handle_pending();
}

static inline void
thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in)
{
@@ -41,34 +41,8 @@ __weak_reference(_pthread_kill, pthread_kill);
int
_pthread_kill(pthread_t pthread, int sig)
{
	int	ret;

	/* Check for invalid signal numbers: */
	if (sig < 0 || sig >= NSIG)
		/* Invalid signal: */
		ret = EINVAL;
	/*
	 * Ensure the thread is in the list of active threads, and the
	 * signal is valid (signal 0 specifies error checking only) and
	 * not being ignored:
	 * All signals are unsupported.
	 */
	else if (((ret = _find_thread(pthread)) == 0) && (sig > 0) &&
	    (_thread_sigact[sig - 1].sa_handler != SIG_IGN)) {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		_thread_sig_send(pthread, sig);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
	}

	/* Return the completion status: */
	return (ret);
	return (EINVAL);
}
@@ -13,16 +13,6 @@
__weak_reference(__msync, msync);

int
_msync(void *addr, size_t len, int flags)
{
	int	ret;

	ret = __sys_msync(addr, len, flags);

	return (ret);
}

int
__msync(void *addr, size_t len, int flags)
{

@@ -35,7 +25,7 @@ __msync(void *addr, size_t len, int flags)
	 * a cancellation point, as per the standard. sigh.
	 */
	_thread_enter_cancellation_point();
	ret = _msync(addr, len, flags);
	ret = __sys_msync(addr, len, flags);
	_thread_leave_cancellation_point();

	return ret;
@@ -42,36 +42,6 @@
__weak_reference(__open, open);

int
_open(const char *path, int flags,...)
{
	int	fd;
	int	mode = 0;
	va_list	ap;

	/* Check if the file is being created: */
	if (flags & O_CREAT) {
		/* Get the creation mode: */
		va_start(ap, flags);
		mode = va_arg(ap, int);
		va_end(ap);
	}
	/* Open the file: */
	if ((fd = __sys_open(path, flags, mode)) < 0) {
	}
	/* Initialise the file descriptor table entry: */
	else if (_thread_fd_table_init(fd) != 0) {
		/* Quietly close the file: */
		__sys_close(fd);

		/* Reset the file descriptor: */
		fd = -1;
	}

	/* Return the file descriptor or -1 on error: */
	return (fd);
}

int
__open(const char *path, int flags,...)
{

@@ -89,7 +59,7 @@ __open(const char *path, int flags,...)
		va_end(ap);
	}

	ret = _open(path, flags, mode);
	ret = __sys_open(path, flags, mode);
	_thread_leave_cancellation_point();

	return ret;
@ -43,68 +43,13 @@

__weak_reference(__poll, poll);

int
_poll(struct pollfd *fds, unsigned int nfds, int timeout)
{
struct pthread *curthread = _get_curthread();
struct timespec ts;
int numfds = nfds;
int i, ret = 0;
struct pthread_poll_data data;

if (numfds > _thread_dtablesize) {
numfds = _thread_dtablesize;
}
/* Check if a timeout was specified: */
if (timeout == INFTIM) {
/* Wait for ever: */
_thread_kern_set_timeout(NULL);
} else if (timeout > 0) {
/* Convert the timeout in msec to a timespec: */
ts.tv_sec = timeout / 1000;
ts.tv_nsec = (timeout % 1000) * 1000000;

/* Set the wake up time: */
_thread_kern_set_timeout(&ts);
} else if (timeout < 0) {
/* a timeout less than zero but not == INFTIM is invalid */
errno = EINVAL;
return (-1);
}

if (((ret = __sys_poll(fds, numfds, 0)) == 0) && (timeout != 0)) {
data.nfds = numfds;
data.fds = fds;

/*
* Clear revents in case of a timeout which leaves fds
* unchanged:
*/
for (i = 0; i < numfds; i++) {
fds[i].revents = 0;
}

curthread->data.poll_data = &data;
curthread->interrupted = 0;
_thread_kern_sched_state(PS_POLL_WAIT, __FILE__, __LINE__);
if (curthread->interrupted) {
errno = EINTR;
ret = -1;
} else {
ret = data.nfds;
}
}

return (ret);
}

int
__poll(struct pollfd *fds, unsigned int nfds, int timeout)
{
int ret;

_thread_enter_cancellation_point();
ret = _poll(fds, nfds, timeout);
ret = __sys_poll(fds, nfds, timeout);
_thread_leave_cancellation_point();

return ret;

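For reference, the millisecond-to-timespec conversion the deleted emulation performed, pulled out as a standalone sketch (INFTIM, a negative timeout, means "wait with no timeout"):

	struct timespec ts;

	if (timeout > 0) {
		ts.tv_sec = timeout / 1000;			/* whole seconds */
		ts.tv_nsec = (timeout % 1000) * 1000000;	/* remainder, in ns */
	}
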
@ -70,8 +70,8 @@ static int _pq_active = 0;
} while (0)
#define _PQ_ASSERT_PROTECTED(msg) \
PTHREAD_ASSERT((_thread_kern_in_sched != 0) || \
((_get_curthread())->sig_defer_count > 0) ||\
(_sig_in_handler != 0), msg);
((_get_curthread())->sig_defer_count > 0), \
msg);

#else

@ -49,7 +49,6 @@
/*
* Include files.
*/
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <sys/queue.h>
@ -61,50 +60,6 @@
#include <ucontext.h>
#include <pthread_np.h>

/*
* Define machine dependent macros to get and set the stack pointer
* from the supported contexts. Also define a macro to set the return
* address in a jmp_buf context.
*
* XXX - These need to be moved into architecture dependent support files.
*/
#if defined(__i386__)
#define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2]))
#define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2]))
#define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_esp))
#define SET_STACK_JB(jb, stk) (jb)[0]._jb[2] = (int)(stk)
#define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[2] = (int)(stk)
#define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_esp = (int)(stk)
#define FP_SAVE_UC(ucp) do { \
char *fdata; \
fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \
__asm__("fnsave %0": :"m"(*fdata)); \
} while (0)
#define FP_RESTORE_UC(ucp) do { \
char *fdata; \
fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \
__asm__("frstor %0": :"m"(*fdata)); \
} while (0)
#define SET_RETURN_ADDR_JB(jb, ra) (jb)[0]._jb[0] = (int)(ra)
#elif defined(__alpha__)
#include <machine/reg.h>
#define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[R_SP + 4]))
#define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[R_SP + 4]))
#define GET_STACK_UC(ucp) ((ucp)->uc_mcontext.mc_regs[R_SP])
#define SET_STACK_JB(jb, stk) (jb)[0]._jb[R_SP + 4] = (long)(stk)
#define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[R_SP + 4] = (long)(stk)
#define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_regs[R_SP] = (unsigned long)(stk)
#define FP_SAVE_UC(ucp)
#define FP_RESTORE_UC(ucp)
#define SET_RETURN_ADDR_JB(jb, ra) do { \
(jb)[0]._jb[2] = (long)(ra); \
(jb)[0]._jb[R_RA + 4] = (long)(ra); \
(jb)[0]._jb[R_T12 + 4] = (long)(ra); \
} while (0)
#else
#error "Don't recognize this architecture!"
#endif

/*
* Kernel fatal error handler macro.
*/
@ -216,17 +171,6 @@
} while (0)
#endif

/*
* Define the signals to be used for scheduling.
*/
#if defined(_PTHREADS_COMPAT_SCHED)
#define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL
#define _SCHED_SIGNAL SIGVTALRM
#else
#define _ITIMER_SCHED_TIMER ITIMER_PROF
#define _SCHED_SIGNAL SIGPROF
#endif

/*
* Priority queues.
*
@ -487,20 +431,10 @@ struct pthread_rwlock {
*/
enum pthread_state {
PS_RUNNING,
PS_SIGTHREAD,
PS_MUTEX_WAIT,
PS_COND_WAIT,
PS_FDLR_WAIT,
PS_FDLW_WAIT,
PS_FDR_WAIT,
PS_FDW_WAIT,
PS_FILE_WAIT,
PS_POLL_WAIT,
PS_SELECT_WAIT,
PS_SLEEP_WAIT,
PS_WAIT_WAIT,
PS_SIGSUSPEND,
PS_SIGWAIT,
PS_SPINBLOCK,
PS_JOIN,
PS_SUSPENDED,
@ -517,46 +451,9 @@ enum pthread_state {
#define FD_WRITE 0x2
#define FD_RDWR (FD_READ | FD_WRITE)

/*
* File descriptor table structure.
*/
struct fd_table_entry {
/*
* Lock for accesses to this file descriptor table
* entry. This is passed to _spinlock() to provide atomic
* access to this structure. It does *not* represent the
* state of the lock on the file descriptor.
*/
spinlock_t lock;
TAILQ_HEAD(, pthread) r_queue; /* Read queue. */
TAILQ_HEAD(, pthread) w_queue; /* Write queue. */
struct pthread *r_owner; /* Ptr to thread owning read lock. */
struct pthread *w_owner; /* Ptr to thread owning write lock. */
char *r_fname; /* Ptr to read lock source file name */
int r_lineno; /* Read lock source line number. */
char *w_fname; /* Ptr to write lock source file name */
int w_lineno; /* Write lock source line number. */
int r_lockcount; /* Count for FILE read locks. */
int w_lockcount; /* Count for FILE write locks. */
int flags; /* Flags used in open. */
};

struct pthread_poll_data {
int nfds;
struct pollfd *fds;
};

union pthread_wait_data {
pthread_mutex_t mutex;
pthread_cond_t cond;
const sigset_t *sigwait; /* Waiting on a signal in sigwait */
struct {
short fd; /* Used when thread waiting on fd */
short branch; /* Line number, for debugging. */
char *fname; /* Source file name for debugging.*/
} fd;
FILE *fp;
struct pthread_poll_data *poll_data;
spinlock_t *spinlock;
struct pthread *thread;
};
@ -567,52 +464,12 @@ union pthread_wait_data {
*/
typedef void (*thread_continuation_t) (void *);

struct pthread_signal_frame;

struct pthread_state_data {
struct pthread_signal_frame *psd_curframe;
sigset_t psd_sigmask;
struct timespec psd_wakeup_time;
union pthread_wait_data psd_wait_data;
enum pthread_state psd_state;
int psd_flags;
int psd_interrupted;
int psd_longjmp_val;
int psd_sigmask_seqno;
int psd_signo;
int psd_sig_defer_count;
/* XXX - What about thread->timeout and/or thread->error? */
};

struct join_status {
struct pthread *thread;
void *ret;
int error;
};

/*
* The frame that is added to the top of a threads stack when setting up
* up the thread to run a signal handler.
*/
struct pthread_signal_frame {
/*
* This stores the threads state before the signal.
*/
struct pthread_state_data saved_state;

/*
* Threads return context; we use only jmp_buf's for now.
*/
union {
jmp_buf jb;
ucontext_t uc;
} ctx;
int signo; /* signal, arg 1 to sighandler */
int sig_has_args; /* use signal args if true */
ucontext_t uc;
siginfo_t siginfo;
};

struct pthread_specific_elem {
const void *data;
int seqno;
@ -652,19 +509,11 @@ struct pthread {
struct pthread_attr attr;

/*
* Threads return context; we use only jmp_buf's for now.
* Machine context, including signal state.
*/
union {
jmp_buf jb;
ucontext_t uc;
} ctx;
ucontext_t ctx;

/*
* Used for tracking delivery of signal handlers.
*/
struct pthread_signal_frame *curframe;

/*
* Cancelability flags - the lower 2 bits are used by cancel
* definitions in pthread.h
*/
@ -675,14 +524,6 @@ struct pthread {

thread_continuation_t continuation;

/*
* Current signal mask and pending signals.
*/
sigset_t sigmask;
sigset_t sigpend;
int sigmask_seqno;
int check_pending;

/* Thread state: */
enum pthread_state state;

@ -700,7 +541,7 @@ struct pthread {

/*
* Time to wake up thread. This is used for sleeping threads and
* for any operation which may time out (such as select).
* for any operation which may time out.
*/
struct timespec wakeup_time;

@ -752,32 +593,18 @@ struct pthread {
/* Wait data. */
union pthread_wait_data data;

/*
* Allocated for converting select into poll.
*/
struct pthread_poll_data poll_data;

/*
* Set to TRUE if a blocking operation was
* interrupted by a signal:
*/
int interrupted;

/* Signal number when in state PS_SIGWAIT: */
int signo;

/*
* Set to non-zero when this thread has deferred signals.
* We allow for recursive deferral.
*/
int sig_defer_count;

/*
* Set to TRUE if this thread should yield after undeferring
* signals.
*/
int yield_on_sig_undefer;

/* Miscellaneous flags; only set with signals deferred. */
int flags;
#define PTHREAD_FLAGS_PRIVATE 0x0001
@ -786,7 +613,7 @@ struct pthread {
#define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */
#define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */
#define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */
#define PTHREAD_FLAGS_IN_FDQ 0x0040 /* in fd lock queue using qe link */
/* 0x0040 Unused. */
#define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/
#define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */
#define PTHREAD_FLAGS_SUSPENDED 0x0200 /* thread is suspended */
@ -876,25 +703,6 @@ SCLASS TAILQ_HEAD(, pthread) _thread_list
;
#endif

/*
* Array of kernel pipe file descriptors that are used to ensure that
* no signals are missed in calls to _select.
*/
SCLASS int _thread_kern_pipe[2]
#ifdef GLOBAL_PTHREAD_PRIVATE
= {
-1,
-1
};
#else
;
#endif
SCLASS int volatile _queue_signals
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
;
#endif
SCLASS int _thread_kern_in_sched
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
@ -902,13 +710,6 @@ SCLASS int _thread_kern_in_sched
;
#endif

SCLASS int _sig_in_handler
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
;
#endif

/* Time of day at last scheduling timer signal: */
SCLASS struct timeval volatile _sched_tod
#ifdef GLOBAL_PTHREAD_PRIVATE
@ -969,42 +770,6 @@ SCLASS struct pthread_cond_attr pthread_condattr_default
;
#endif

/*
* Standard I/O file descriptors need special flag treatment since
* setting one to non-blocking does all on *BSD. Sigh. This array
* is used to store the initial flag settings.
*/
SCLASS int _pthread_stdio_flags[3];

/* File table information: */
SCLASS struct fd_table_entry **_thread_fd_table
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL;
#else
;
#endif

/* Table for polling file descriptors: */
SCLASS struct pollfd *_thread_pfd_table
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL;
#else
;
#endif

SCLASS const int dtablecount
#ifdef GLOBAL_PTHREAD_PRIVATE
= 4096/sizeof(struct fd_table_entry);
#else
;
#endif
SCLASS int _thread_dtablesize /* Descriptor table size. */
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
;
#endif

SCLASS int _clock_res_usec /* Clock resolution in usec. */
#ifdef GLOBAL_PTHREAD_PRIVATE
= CLOCK_RES_USEC;
@ -1024,28 +789,6 @@ SCLASS pthread_cond_t _gc_cond
#endif
;

/*
* Array of signal actions for this process.
*/
SCLASS struct sigaction _thread_sigact[NSIG];

/*
* Array of counts of dummy handlers for SIG_DFL signals. This is used to
* assure that there is always a dummy signal handler installed while there is a
* thread sigwait()ing on the corresponding signal.
*/
SCLASS int _thread_dfl_count[NSIG];

/*
* Pending signals and mask for this process:
*/
SCLASS sigset_t _process_sigpending;
SCLASS sigset_t _process_sigmask
#ifdef GLOBAL_PTHREAD_PRIVATE
= { {0, 0, 0, 0} }
#endif
;

/*
* Scheduling queues:
*/
@ -1064,28 +807,6 @@ SCLASS volatile int _spinblock_count
#endif
;

/* Used to maintain pending and active signals: */
struct sigstatus {
int pending; /* Is this a pending signal? */
int blocked; /*
* A handler is currently active for
* this signal; ignore subsequent
* signals until the handler is done.
*/
int signo; /* arg 1 to signal handler */
siginfo_t siginfo; /* arg 2 to signal handler */
ucontext_t uc; /* arg 3 to signal handler */
};

SCLASS struct sigstatus _thread_sigq[NSIG];

/* Indicates that the signal queue needs to be checked. */
SCLASS volatile int _sigq_check_reqd
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0
#endif
;

/* Thread switch hook. */
SCLASS pthread_switch_routine_t _sched_switch_hook
#ifdef GLOBAL_PTHREAD_PRIVATE
@ -1096,9 +817,9 @@ SCLASS pthread_switch_routine_t _sched_switch_hook
/*
* Declare the kernel scheduler jump buffer and stack:
*/
SCLASS jmp_buf _thread_kern_sched_jb;
SCLASS ucontext_t _thread_kern_sched_ctx;

SCLASS void * _thread_kern_sched_stack
SCLASS void * _thread_kern_sched_stack
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL
#endif
@ -1115,16 +836,6 @@ SCLASS int _thread_kern_new_state
/* Undefine the storage class specifier: */
#undef SCLASS

#ifdef _LOCK_DEBUG
#define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock_debug(_fd, _type, \
_ts, __FILE__, __LINE__)
#define _FD_UNLOCK(_fd,_type) _thread_fd_unlock_debug(_fd, _type, \
__FILE__, __LINE__)
#else
#define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock(_fd, _type, _ts)
#define _FD_UNLOCK(_fd,_type) _thread_fd_unlock(_fd, _type)
#endif

/*
* Function prototype definitions.
*/
@ -1133,7 +844,6 @@ char *__ttyname_basic(int);
char *__ttyname_r_basic(int, char *, size_t);
char *ttyname_r(int, char *, size_t);
void _cond_wait_backout(pthread_t);
void _fd_lock_backout(pthread_t);
int _find_thread(pthread_t);
struct pthread *_get_curthread(void);
void _set_curthread(struct pthread *);
@ -1175,35 +885,18 @@ void _waitq_clearactive(void);
#endif
void _thread_exit(char *, int, char *);
void _thread_exit_cleanup(void);
int _thread_fd_getflags(int);
int _thread_fd_lock(int, int, struct timespec *);
int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno);
void _thread_fd_setflags(int, int);
int _thread_fd_table_init(int fd);
void _thread_fd_unlock(int, int);
void _thread_fd_unlock_debug(int, int, char *, int);
void _thread_fd_unlock_owned(pthread_t);
void *_thread_cleanup(pthread_t);
void _thread_cleanupspecific(void);
void _thread_dump_info(void);
void _thread_init(void);
void _thread_kern_sched(ucontext_t *);
void _thread_kern_sched(void);
void _thread_kern_scheduler(void);
void _thread_kern_sched_frame(struct pthread_signal_frame *psf);
void _thread_kern_sched_sig(void);
void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno);
void _thread_kern_sched_state_unlock(enum pthread_state state,
spinlock_t *lock, char *fname, int lineno);
void _thread_kern_set_timeout(const struct timespec *);
void _thread_kern_sig_defer(void);
void _thread_kern_sig_undefer(void);
void _thread_sig_handler(int, siginfo_t *, ucontext_t *);
void _thread_sig_check_pending(struct pthread *pthread);
void _thread_sig_handle_pending(void);
void _thread_sig_send(struct pthread *pthread, int sig);
void _thread_sig_wrapper(void);
void _thread_sigframe_restore(struct pthread *thread,
struct pthread_signal_frame *psf);
void _thread_start(void);
void _thread_seterrno(pthread_t, int);
pthread_addr_t _thread_gc(pthread_addr_t);
@ -1211,13 +904,6 @@ void _thread_enter_cancellation_point(void);
void _thread_leave_cancellation_point(void);
void _thread_cancellation_point(void);

/* #include <sys/acl.h> */
#ifdef _SYS_ACL_H
int __sys___acl_aclcheck_fd(int, acl_type_t, struct acl *);
int __sys___acl_delete_fd(int, acl_type_t);
int __sys___acl_get_fd(int, acl_type_t, struct acl *);
int __sys___acl_set_fd(int, acl_type_t, struct acl *);
#endif

/* #include <sys/aio.h> */
#ifdef _SYS_AIO_H_
@ -1324,12 +1010,6 @@ ssize_t __sys_read(int, void *, size_t);
ssize_t __sys_write(int, const void *, size_t);
#endif

/* #include <setjmp.h> */
#ifdef _SETJMP_H_
extern void __siglongjmp(sigjmp_buf, int) __dead2;
extern void __longjmp(jmp_buf, int) __dead2;
extern void ___longjmp(jmp_buf, int) __dead2;
#endif
__END_DECLS

#endif /* !_THR_PRIVATE_H */

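pthread_private.h leans on the SCLASS trick visible above: the same header provides definitions in exactly one translation unit and extern declarations everywhere else. Reduced to its essentials (a sketch of the idiom, not a line from the commit):

	#ifdef GLOBAL_PTHREAD_PRIVATE
	#define SCLASS			/* one .c file defines the globals */
	#else
	#define SCLASS extern		/* everyone else only declares them */
	#endif

	SCLASS int _thread_kern_in_sched
	#ifdef GLOBAL_PTHREAD_PRIVATE
	= 0
	#endif
	;
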
@ -42,69 +42,13 @@

__weak_reference(__read, read);

ssize_t
_read(int fd, void *buf, size_t nbytes)
{
struct pthread *curthread = _get_curthread();
int ret;
int type;

/* POSIX says to do just this: */
if (nbytes == 0) {
return (0);
}

/* Lock the file descriptor for read: */
if ((ret = _FD_LOCK(fd, FD_READ, NULL)) == 0) {
/* Get the read/write mode type: */
type = _thread_fd_getflags(fd) & O_ACCMODE;

/* Check if the file is not open for read: */
if (type != O_RDONLY && type != O_RDWR) {
/* File is not open for read: */
errno = EBADF;
_FD_UNLOCK(fd, FD_READ);
return (-1);
}

/* Perform a non-blocking read syscall: */
while ((ret = __sys_read(fd, buf, nbytes)) < 0) {
if ((_thread_fd_getflags(fd) & O_NONBLOCK) == 0 &&
(errno == EWOULDBLOCK || errno == EAGAIN)) {
curthread->data.fd.fd = fd;
_thread_kern_set_timeout(NULL);

/* Reset the interrupted operation flag: */
curthread->interrupted = 0;

_thread_kern_sched_state(PS_FDR_WAIT,
__FILE__, __LINE__);

/*
* Check if the operation was
* interrupted by a signal
*/
if (curthread->interrupted) {
errno = EINTR;
ret = -1;
break;
}
} else {
break;
}
}
_FD_UNLOCK(fd, FD_READ);
}
return (ret);
}

ssize_t
__read(int fd, void *buf, size_t nbytes)
{
ssize_t ret;

_thread_enter_cancellation_point();
ret = _read(fd, buf, nbytes);
ret = __sys_read(fd, buf, nbytes);
_thread_leave_cancellation_point();

return ret;

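The heart of the deleted emulation, condensed: retry the non-blocking syscall, and park the thread in the user-space scheduler whenever the kernel reports it would have blocked. A condensed sketch using the same names as the surrounding diff:

	while ((ret = __sys_read(fd, buf, nbytes)) < 0) {
		if (errno != EWOULDBLOCK && errno != EAGAIN)
			break;				/* a real error: report it */
		curthread->data.fd.fd = fd;		/* record the fd being slept on */
		curthread->interrupted = 0;
		_thread_kern_sched_state(PS_FDR_WAIT, __FILE__, __LINE__);
		if (curthread->interrupted) {		/* woken early by a signal */
			errno = EINTR;
			ret = -1;
			break;
		}
	}
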
@ -42,64 +42,13 @@

__weak_reference(__readv, readv);

ssize_t
_readv(int fd, const struct iovec * iov, int iovcnt)
{
struct pthread *curthread = _get_curthread();
int ret;
int type;

/* Lock the file descriptor for read: */
if ((ret = _FD_LOCK(fd, FD_READ, NULL)) == 0) {
/* Get the read/write mode type: */
type = _thread_fd_getflags(fd) & O_ACCMODE;

/* Check if the file is not open for read: */
if (type != O_RDONLY && type != O_RDWR) {
/* File is not open for read: */
errno = EBADF;
_FD_UNLOCK(fd, FD_READ);
return (-1);
}

/* Perform a non-blocking readv syscall: */
while ((ret = __sys_readv(fd, iov, iovcnt)) < 0) {
if ((_thread_fd_getflags(fd) & O_NONBLOCK) == 0 &&
(errno == EWOULDBLOCK || errno == EAGAIN)) {
curthread->data.fd.fd = fd;
_thread_kern_set_timeout(NULL);

/* Reset the interrupted operation flag: */
curthread->interrupted = 0;

_thread_kern_sched_state(PS_FDR_WAIT,
__FILE__, __LINE__);

/*
* Check if the operation was
* interrupted by a signal
*/
if (curthread->interrupted) {
errno = EINTR;
ret = -1;
break;
}
} else {
break;
}
}
_FD_UNLOCK(fd, FD_READ);
}
return (ret);
}

ssize_t
__readv(int fd, const struct iovec *iov, int iovcnt)
{
ssize_t ret;

_thread_enter_cancellation_point();
ret = _readv(fd, iov, iovcnt);
ret = __sys_readv(fd, iov, iovcnt);
_thread_leave_cancellation_point();

return ret;

@ -45,178 +45,6 @@

__weak_reference(__select, select);

int
_select(int numfds, fd_set * readfds, fd_set * writefds, fd_set * exceptfds,
struct timeval * timeout)
{
struct pthread *curthread = _get_curthread();
struct timespec ts;
int i, ret = 0, f_wait = 1;
int pfd_index, got_events = 0, fd_count = 0;
struct pthread_poll_data data;

if (numfds > _thread_dtablesize) {
numfds = _thread_dtablesize;
}
/* Check if a timeout was specified: */
if (timeout) {
if (timeout->tv_sec < 0 ||
timeout->tv_usec < 0 || timeout->tv_usec >= 1000000) {
errno = EINVAL;
return (-1);
}

/* Convert the timeval to a timespec: */
TIMEVAL_TO_TIMESPEC(timeout, &ts);

/* Set the wake up time: */
_thread_kern_set_timeout(&ts);
if (ts.tv_sec == 0 && ts.tv_nsec == 0)
f_wait = 0;
} else {
/* Wait for ever: */
_thread_kern_set_timeout(NULL);
}

/* Count the number of file descriptors to be polled: */
if (readfds || writefds || exceptfds) {
for (i = 0; i < numfds; i++) {
if ((readfds && FD_ISSET(i, readfds)) ||
(exceptfds && FD_ISSET(i, exceptfds)) ||
(writefds && FD_ISSET(i, writefds))) {
fd_count++;
}
}
}

/*
* Allocate memory for poll data if it hasn't already been
* allocated or if previously allocated memory is insufficient.
*/
if ((curthread->poll_data.fds == NULL) ||
(curthread->poll_data.nfds < fd_count)) {
data.fds = (struct pollfd *) realloc(curthread->poll_data.fds,
sizeof(struct pollfd) * MAX(128, fd_count));
if (data.fds == NULL) {
errno = ENOMEM;
ret = -1;
}
else {
/*
* Note that the threads poll data always
* indicates what is allocated, not what is
* currently being polled.
*/
curthread->poll_data.fds = data.fds;
curthread->poll_data.nfds = MAX(128, fd_count);
}
}
if (ret == 0) {
/* Setup the wait data. */
data.fds = curthread->poll_data.fds;
data.nfds = fd_count;

/*
* Setup the array of pollfds. Optimize this by
* running the loop in reverse and stopping when
* the number of selected file descriptors is reached.
*/
for (i = numfds - 1, pfd_index = fd_count - 1;
(i >= 0) && (pfd_index >= 0); i--) {
data.fds[pfd_index].events = 0;
if (readfds && FD_ISSET(i, readfds)) {
data.fds[pfd_index].events = POLLRDNORM;
}
if (exceptfds && FD_ISSET(i, exceptfds)) {
data.fds[pfd_index].events |= POLLRDBAND;
}
if (writefds && FD_ISSET(i, writefds)) {
data.fds[pfd_index].events |= POLLWRNORM;
}
if (data.fds[pfd_index].events != 0) {
/*
* Set the file descriptor to be polled and
* clear revents in case of a timeout which
* leaves fds unchanged:
*/
data.fds[pfd_index].fd = i;
data.fds[pfd_index].revents = 0;
pfd_index--;
}
}
if (((ret = __sys_poll(data.fds, data.nfds, 0)) == 0) &&
(f_wait != 0)) {
curthread->data.poll_data = &data;
curthread->interrupted = 0;
_thread_kern_sched_state(PS_SELECT_WAIT, __FILE__, __LINE__);
if (curthread->interrupted) {
errno = EINTR;
data.nfds = 0;
ret = -1;
} else
ret = data.nfds;
}
}

if (ret >= 0) {
numfds = 0;
for (i = 0; i < fd_count; i++) {
/*
* Check the results of the poll and clear
* this file descriptor from the fdset if
* the requested event wasn't ready.
*/

/*
* First check for invalid descriptor.
* If found, set errno and return -1.
*/
if (data.fds[i].revents & POLLNVAL) {
errno = EBADF;
return -1;
}

got_events = 0;
if (readfds != NULL) {
if (FD_ISSET(data.fds[i].fd, readfds)) {
if ((data.fds[i].revents & (POLLIN
| POLLRDNORM | POLLERR
| POLLHUP | POLLNVAL)) != 0)
got_events++;
else
FD_CLR(data.fds[i].fd, readfds);
}
}
if (writefds != NULL) {
if (FD_ISSET(data.fds[i].fd, writefds)) {
if ((data.fds[i].revents & (POLLOUT
| POLLWRNORM | POLLWRBAND | POLLERR
| POLLHUP | POLLNVAL)) != 0)
got_events++;
else
FD_CLR(data.fds[i].fd,
writefds);
}
}
if (exceptfds != NULL) {
if (FD_ISSET(data.fds[i].fd, exceptfds)) {
if (data.fds[i].revents & (POLLRDBAND |
POLLPRI))
got_events++;
else
FD_CLR(data.fds[i].fd,
exceptfds);
}
}
if (got_events != 0)
numfds+=got_events;
}
ret = numfds;
}

return (ret);
}

int
__select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
struct timeval *timeout)
@ -224,7 +52,7 @@ __select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
int ret;

_thread_enter_cancellation_point();
ret = _select(numfds, readfds, writefds, exceptfds, timeout);
ret = __sys_select(numfds, readfds, writefds, exceptfds, timeout);
_thread_leave_cancellation_point();

return ret;

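The deleted _select() translated fd_set membership into poll() events; the mapping it used, isolated as a sketch:

	short events = 0;

	if (readfds && FD_ISSET(i, readfds))
		events |= POLLRDNORM;	/* readable */
	if (writefds && FD_ISSET(i, writefds))
		events |= POLLWRNORM;	/* writable */
	if (exceptfds && FD_ISSET(i, exceptfds))
		events |= POLLRDBAND;	/* exceptional condition */
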
@ -44,63 +44,6 @@ __weak_reference(_pthread_sigmask, pthread_sigmask);
int
_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
struct pthread *curthread = _get_curthread();
sigset_t sigset;
int ret = 0;

/* Check if the existing signal process mask is to be returned: */
if (oset != NULL) {
/* Return the current mask: */
*oset = curthread->sigmask;
}
/* Check if a new signal set was provided by the caller: */
if (set != NULL) {
/* Process according to what to do: */
switch (how) {
/* Block signals: */
case SIG_BLOCK:
/* Add signals to the existing mask: */
SIGSETOR(curthread->sigmask, *set);
break;

/* Unblock signals: */
case SIG_UNBLOCK:
/* Clear signals from the existing mask: */
SIGSETNAND(curthread->sigmask, *set);
break;

/* Set the signal process mask: */
case SIG_SETMASK:
/* Set the new mask: */
curthread->sigmask = *set;
break;

/* Trap invalid actions: */
default:
/* Return an invalid argument: */
errno = EINVAL;
ret = -1;
break;
}

/* Increment the sequence number: */
curthread->sigmask_seqno++;

/*
* Check if there are pending signals for the running
* thread or process that aren't blocked:
*/
sigset = curthread->sigpend;
SIGSETOR(sigset, _process_sigpending);
SIGSETNAND(sigset, curthread->sigmask);
if (SIGNOTEMPTY(sigset))
/*
* Call the kernel scheduler which will safely
* install a signal frame for the running thread:
*/
_thread_kern_sched_sig();
}

/* Return the completion status: */
return (ret);
return (sigprocmask(how, set, oset));
}

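With the per-thread mask bookkeeping gone, the interim implementation gives pthread_sigmask() process-wide sigprocmask() semantics. A short caller-side example (plain POSIX usage, not taken from the commit):

	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	/* under this interim code, the mask change is process-wide */
	pthread_sigmask(SIG_BLOCK, &set, NULL);
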
@ -40,61 +40,13 @@

__weak_reference(__sigsuspend, sigsuspend);

int
_sigsuspend(const sigset_t * set)
{
struct pthread *curthread = _get_curthread();
int ret = -1;
sigset_t oset, sigset;

/* Check if a new signal set was provided by the caller: */
if (set != NULL) {
/* Save the current signal mask: */
oset = curthread->sigmask;

/* Change the caller's mask: */
curthread->sigmask = *set;

/*
* Check if there are pending signals for the running
* thread or process that aren't blocked:
*/
sigset = curthread->sigpend;
SIGSETOR(sigset, _process_sigpending);
SIGSETNAND(sigset, curthread->sigmask);
if (SIGNOTEMPTY(sigset)) {
/*
* Call the kernel scheduler which will safely
* install a signal frame for the running thread:
*/
_thread_kern_sched_sig();
} else {
/* Wait for a signal: */
_thread_kern_sched_state(PS_SIGSUSPEND,
__FILE__, __LINE__);
}

/* Always return an interrupted error: */
errno = EINTR;

/* Restore the signal mask: */
curthread->sigmask = oset;
} else {
/* Return an invalid argument error: */
errno = EINVAL;
}

/* Return the completion status: */
return (ret);
}

int
__sigsuspend(const sigset_t * set)
{
int ret;

_thread_enter_cancellation_point();
ret = _sigsuspend(set);
ret = __sys_sigsuspend(set);
_thread_leave_cancellation_point();

return ret;

@ -1,3 +1,4 @@
//depot/projects/kse/lib/libpthread/thread/thr_sigwait.c#1 - branch change 15154 (text+ko)
/*
* Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
@ -43,132 +44,9 @@ __weak_reference(_sigwait, sigwait);
int
_sigwait(const sigset_t *set, int *sig)
{
struct pthread *curthread = _get_curthread();
int ret = 0;
int i;
sigset_t tempset, waitset;
struct sigaction act;

_thread_enter_cancellation_point();
/*
* Specify the thread kernel signal handler.
*/
act.sa_handler = (void (*) ()) _thread_sig_handler;
act.sa_flags = SA_RESTART | SA_SIGINFO;
/* Ensure the signal handler cannot be interrupted by other signals: */
sigfillset(&act.sa_mask);

/*
* Initialize the set of signals that will be waited on:
* All signals are invalid for waiting.
*/
waitset = *set;

/* These signals can't be waited on. */
sigdelset(&waitset, SIGKILL);
sigdelset(&waitset, SIGSTOP);
sigdelset(&waitset, _SCHED_SIGNAL);
sigdelset(&waitset, SIGCHLD);
sigdelset(&waitset, SIGINFO);

/* Check to see if a pending signal is in the wait mask. */
tempset = curthread->sigpend;
SIGSETOR(tempset, _process_sigpending);
SIGSETAND(tempset, waitset);
if (SIGNOTEMPTY(tempset)) {
/* Enter a loop to find a pending signal: */
for (i = 1; i < NSIG; i++) {
if (sigismember (&tempset, i))
break;
}

/* Clear the pending signal: */
if (sigismember(&curthread->sigpend,i))
sigdelset(&curthread->sigpend,i);
else
sigdelset(&_process_sigpending,i);

/* Return the signal number to the caller: */
*sig = i;

_thread_leave_cancellation_point();
return (0);
}

/*
* Access the _thread_dfl_count array under the protection of signal
* deferral.
*/
_thread_kern_sig_defer();

/*
* Enter a loop to find the signals that are SIG_DFL. For
* these signals we must install a dummy signal handler in
* order for the kernel to pass them in to us. POSIX says
* that the _application_ must explicitly install a dummy
* handler for signals that are SIG_IGN in order to sigwait
* on them. Note that SIG_IGN signals are left in the
* mask because a subsequent sigaction could enable an
* ignored signal.
*/
sigemptyset(&tempset);
for (i = 1; i < NSIG; i++) {
if (sigismember(&waitset, i) &&
(_thread_sigact[i - 1].sa_handler == SIG_DFL)) {
_thread_dfl_count[i]++;
sigaddset(&tempset, i);
if (_thread_dfl_count[i] == 1) {
if (__sys_sigaction(i,&act,NULL) != 0)
ret = -1;
}
}
}
/* Done accessing _thread_dfl_count for now. */
_thread_kern_sig_undefer();

if (ret == 0) {
/*
* Save the wait signal mask. The wait signal
* mask is independent of the threads signal mask
* and requires separate storage.
*/
curthread->data.sigwait = &waitset;

/* Wait for a signal: */
_thread_kern_sched_state(PS_SIGWAIT, __FILE__, __LINE__);

/* Return the signal number to the caller: */
*sig = curthread->signo;

/*
* Probably unnecessary, but since it's in a union struct
* we don't know how it could be used in the future.
*/
curthread->data.sigwait = NULL;
}

/*
* Access the _thread_dfl_count array under the protection of signal
* deferral.
*/
_thread_kern_sig_defer();

/* Restore the sigactions: */
act.sa_handler = SIG_DFL;
for (i = 1; i < NSIG; i++) {
if (sigismember(&tempset, i)) {
_thread_dfl_count[i]--;
if ((_thread_sigact[i - 1].sa_handler == SIG_DFL) &&
(_thread_dfl_count[i] == 0)) {
if (__sys_sigaction(i,&act,NULL) != 0)
ret = -1;
}
}
}
/* Done accessing _thread_dfl_count. */
_thread_kern_sig_undefer();

_thread_leave_cancellation_point();

/* Return the completion status: */
return (ret);
return (EINVAL);
}

@ -40,35 +40,6 @@

__weak_reference(__wait4, wait4);

pid_t
_wait4(pid_t pid, int *istat, int options, struct rusage * rusage)
{
struct pthread *curthread = _get_curthread();
pid_t ret;

_thread_kern_sig_defer();

/* Perform a non-blocking wait4 syscall: */
while ((ret = __sys_wait4(pid, istat, options | WNOHANG, rusage)) == 0 && (options & WNOHANG) == 0) {
/* Reset the interrupted operation flag: */
curthread->interrupted = 0;

/* Schedule the next thread while this one waits: */
_thread_kern_sched_state(PS_WAIT_WAIT, __FILE__, __LINE__);

/* Check if this call was interrupted by a signal: */
if (curthread->interrupted) {
errno = EINTR;
ret = -1;
break;
}
}

_thread_kern_sig_undefer();

return (ret);
}

pid_t
__wait4(pid_t pid, int *istat, int options, struct rusage *rusage)
{

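The deleted _wait4() turned a blocking wait into poll-and-yield: force WNOHANG, and sleep in the scheduler between attempts. The idea in isolation (a sketch, not current library code):

	pid_t ret;

	/* 0 from wait4+WNOHANG means children exist but none has exited yet */
	while ((ret = __sys_wait4(pid, istat, options | WNOHANG, rusage)) == 0 &&
	    (options & WNOHANG) == 0) {
		/* yield until a wakeup (e.g. SIGCHLD) makes a retry worthwhile */
		_thread_kern_sched_state(PS_WAIT_WAIT, __FILE__, __LINE__);
	}
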
@ -42,105 +42,13 @@

__weak_reference(__write, write);

ssize_t
_write(int fd, const void *buf, size_t nbytes)
{
struct pthread *curthread = _get_curthread();
int blocking;
int type;
ssize_t n;
ssize_t num = 0;
ssize_t ret;

/* POSIX says to do just this: */
if (nbytes == 0)
return (0);

/* Lock the file descriptor for write: */
if ((ret = _FD_LOCK(fd, FD_WRITE, NULL)) == 0) {
/* Get the read/write mode type: */
type = _thread_fd_getflags(fd) & O_ACCMODE;

/* Check if the file is not open for write: */
if (type != O_WRONLY && type != O_RDWR) {
/* File is not open for write: */
errno = EBADF;
_FD_UNLOCK(fd, FD_WRITE);
return (-1);
}

/* Check if file operations are to block */
blocking = ((_thread_fd_getflags(fd) & O_NONBLOCK) == 0);

/*
* Loop while no error occurs and until the expected number
* of bytes are written if performing a blocking write:
*/
while (ret == 0) {
/* Perform a non-blocking write syscall: */
n = __sys_write(fd, buf + num, nbytes - num);

/* Check if one or more bytes were written: */
if (n > 0)
/*
* Keep a count of the number of bytes
* written:
*/
num += n;

/*
* If performing a blocking write, check if the
* write would have blocked or if some bytes
* were written but there are still more to
* write:
*/
if (blocking && ((n < 0 && (errno == EWOULDBLOCK ||
errno == EAGAIN)) || (n >= 0 && num < nbytes))) {
curthread->data.fd.fd = fd;
_thread_kern_set_timeout(NULL);

/* Reset the interrupted operation flag: */
curthread->interrupted = 0;

_thread_kern_sched_state(PS_FDW_WAIT,
__FILE__, __LINE__);

/*
* Check if the operation was
* interrupted by a signal
*/
if (curthread->interrupted) {
/* Return an error: */
ret = -1;
}

/*
* If performing a non-blocking write or if an
* error occurred, just return whatever the write
* syscall did:
*/
} else if (!blocking || n < 0) {
/* A non-blocking call might return zero: */
ret = n;
break;

/* Check if the write has completed: */
} else if (num >= nbytes)
/* Return the number of bytes written: */
ret = num;
}
_FD_UNLOCK(fd, FD_WRITE);
}
return (ret);
}

ssize_t
__write(int fd, const void *buf, size_t nbytes)
{
ssize_t ret;

_thread_enter_cancellation_point();
ret = _write(fd, buf, nbytes);
ret = __sys_write(fd, buf, nbytes);
_thread_leave_cancellation_point();

return ret;

@ -44,172 +44,13 @@

__weak_reference(__writev, writev);

ssize_t
_writev(int fd, const struct iovec * iov, int iovcnt)
{
struct pthread *curthread = _get_curthread();
int blocking;
int idx = 0;
int type;
ssize_t cnt;
ssize_t n;
ssize_t num = 0;
ssize_t ret;
struct iovec liov[20];
struct iovec *p_iov = liov;

/* Check if the array size exceeds the compiled-in size: */
if (iovcnt > (sizeof(liov) / sizeof(struct iovec))) {
/* Allocate memory for the local array: */
if ((p_iov = (struct iovec *)
malloc(iovcnt * sizeof(struct iovec))) == NULL) {
/* Insufficient memory: */
errno = ENOMEM;
return (-1);
}
}

/* Copy the caller's array so that it can be modified locally: */
memcpy(p_iov,iov,iovcnt * sizeof(struct iovec));

/* Lock the file descriptor for write: */
if ((ret = _FD_LOCK(fd, FD_WRITE, NULL)) == 0) {
/* Get the read/write mode type: */
type = _thread_fd_getflags(fd) & O_ACCMODE;

/* Check if the file is not open for write: */
if (type != O_WRONLY && type != O_RDWR) {
/* File is not open for write: */
errno = EBADF;
_FD_UNLOCK(fd, FD_WRITE);
return (-1);
}

/* Check if file operations are to block */
blocking = ((_thread_fd_getflags(fd) & O_NONBLOCK) == 0);

/*
* Loop while no error occurs and until the expected number
* of bytes are written if performing a blocking write:
*/
while (ret == 0) {
/* Perform a non-blocking write syscall: */
n = __sys_writev(fd, &p_iov[idx], iovcnt - idx);

/* Check if one or more bytes were written: */
if (n > 0) {
/*
* Keep a count of the number of bytes
* written:
*/
num += n;

/*
* Enter a loop to check if a short write
* occurred and move the index to the
* array entry where the short write
* ended:
*/
cnt = n;
while (cnt > 0 && idx < iovcnt) {
/*
* If the residual count exceeds
* the size of this vector, then
* it was completely written:
*/
if (cnt >= p_iov[idx].iov_len)
/*
* Decrement the residual
* count and increment the
* index to the next array
* entry:
*/
cnt -= p_iov[idx++].iov_len;
else {
/*
* This entry was only
* partially written, so
* adjust its length
* and base pointer ready
* for the next write:
*/
p_iov[idx].iov_len -= cnt;
p_iov[idx].iov_base += cnt;
cnt = 0;
}
}
} else if (n == 0) {
/*
* Avoid an infinite loop if the last iov_len is
* 0.
*/
while (idx < iovcnt && p_iov[idx].iov_len == 0)
idx++;

if (idx == iovcnt) {
ret = num;
break;
}
}

/*
* If performing a blocking write, check if the
* write would have blocked or if some bytes
* were written but there are still more to
* write:
*/
if (blocking && ((n < 0 && (errno == EWOULDBLOCK ||
errno == EAGAIN)) || (n >= 0 && idx < iovcnt))) {
curthread->data.fd.fd = fd;
_thread_kern_set_timeout(NULL);

/* Reset the interrupted operation flag: */
curthread->interrupted = 0;

_thread_kern_sched_state(PS_FDW_WAIT,
__FILE__, __LINE__);

/*
* Check if the operation was
* interrupted by a signal
*/
if (curthread->interrupted) {
/* Return an error: */
ret = -1;
}

/*
* If performing a non-blocking write or if an
* error occurred, just return whatever the write
* syscall did:
*/
} else if (!blocking || n < 0) {
/* A non-blocking call might return zero: */
ret = n;
break;

/* Check if the write has completed: */
} else if (idx == iovcnt)
/* Return the number of bytes written: */
ret = num;
}
_FD_UNLOCK(fd, FD_RDWR);
}

/* If memory was allocated for the array, free it: */
if (p_iov != liov)
free(p_iov);

return (ret);
}

ssize_t
__writev(int fd, const struct iovec *iov, int iovcnt)
{
ssize_t ret;

_thread_enter_cancellation_point();
ret = _writev(fd, iov, iovcnt);
ret = __sys_writev(fd, iov, iovcnt);
_thread_leave_cancellation_point();

return ret;

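The trickiest part of the deleted _writev() is advancing the iovec array across a short write. The advance logic on its own, as a sketch (iov must be a modifiable copy, as the deleted code's memcpy() ensured):

	/* Skip n already-written bytes, adjusting the first partial entry. */
	while (n > 0 && idx < iovcnt) {
		if (n >= (ssize_t)iov[idx].iov_len) {
			n -= iov[idx].iov_len;		/* entry fully written */
			idx++;
		} else {
			iov[idx].iov_base = (char *)iov[idx].iov_base + n;
			iov[idx].iov_len -= n;		/* entry partially written */
			n = 0;
		}
	}
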
@ -46,7 +46,7 @@ _sched_yield(void)
curthread->slice_usec = -1;

/* Schedule the next thread: */
_thread_kern_sched(NULL);
_thread_kern_sched();

/* Always return no error. */
return(0);
@ -62,5 +62,5 @@ _pthread_yield(void)
curthread->slice_usec = -1;

/* Schedule the next thread: */
_thread_kern_sched(NULL);
_thread_kern_sched();
}

@ -121,27 +121,12 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
*/
new_thread->magic = PTHREAD_MAGIC;

/* Initialise the thread for signals: */
new_thread->sigmask = curthread->sigmask;
new_thread->sigmask_seqno = 0;

/* Initialize the signal frame: */
new_thread->curframe = NULL;

/* Initialise the jump buffer: */
_setjmp(new_thread->ctx.jb);

/*
* Set up new stack frame so that it looks like it
* returned from a longjmp() to the beginning of
* _thread_start().
*/
SET_RETURN_ADDR_JB(new_thread->ctx.jb, _thread_start);

/* The stack starts high and builds down: */
SET_STACK_JB(new_thread->ctx.jb,
(long)new_thread->stack + pattr->stacksize_attr
- sizeof(double));
/* Initialise the machine context: */
getcontext(&new_thread->ctx);
new_thread->ctx.uc_stack.ss_sp = new_thread->stack;
new_thread->ctx.uc_stack.ss_size =
pattr->stacksize_attr;
makecontext(&new_thread->ctx, _thread_start, 1);

/* Copy the thread attributes: */
memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr));
@ -182,8 +167,6 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->specific = NULL;
new_thread->cleanup = NULL;
new_thread->flags = 0;
new_thread->poll_data.nfds = 0;
new_thread->poll_data.fds = NULL;
new_thread->continuation = NULL;

/*
@ -224,18 +207,8 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Return a pointer to the thread structure: */
(*thread) = new_thread;

if (f_gc != 0) {
/* Install the scheduling timer: */
itimer.it_interval.tv_sec = 0;
itimer.it_interval.tv_usec = _clock_res_usec;
itimer.it_value = itimer.it_interval;
if (setitimer(_ITIMER_SCHED_TIMER, &itimer,
NULL) != 0)
PANIC("Cannot set interval timer");
}

/* Schedule the new user thread: */
_thread_kern_sched(NULL);
_thread_kern_sched();

/*
* Start a garbage collector thread
@ -257,7 +230,7 @@ _thread_start(void)
{
struct pthread *curthread = _get_curthread();

/* We just left the scheduler via longjmp: */
/* We just left the scheduler via swapcontext: */
_thread_kern_in_sched = 0;

/* Run the current thread's start routine with argument: */

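The jmp_buf gymnastics above give way to the portable ucontext calls. The full creation-and-switch pattern, in a generic sketch (uc_link and swapcontext() do not appear in this hunk; they are shown for completeness, and stack/stacksize/entry are assumed to exist):

	ucontext_t ctx, main_ctx;

	getcontext(&ctx);			/* start from a valid context */
	ctx.uc_stack.ss_sp = stack;		/* an already-allocated stack */
	ctx.uc_stack.ss_size = stacksize;
	ctx.uc_link = &main_ctx;		/* where to go if entry() returns */
	makecontext(&ctx, entry, 0);		/* entry() runs on the new stack */
	swapcontext(&main_ctx, &ctx);		/* save here, jump to entry() */
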
@ -45,42 +45,6 @@

__weak_reference(_pthread_exit, pthread_exit);

void _exit(int status)
{
int flags;
int i;
struct itimerval itimer;

/* Disable the interval timer: */
itimer.it_interval.tv_sec = 0;
itimer.it_interval.tv_usec = 0;
itimer.it_value.tv_sec = 0;
itimer.it_value.tv_usec = 0;
setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL);

/* Close the pthread kernel pipe: */
__sys_close(_thread_kern_pipe[0]);
__sys_close(_thread_kern_pipe[1]);

/*
* Enter a loop to set all file descriptors to blocking
* if they were not created as non-blocking:
*/
for (i = 0; i < _thread_dtablesize; i++) {
/* Check if this file descriptor is in use: */
if (_thread_fd_table[i] != NULL &&
(_thread_fd_getflags(i) & O_NONBLOCK) == 0) {
/* Get the current flags: */
flags = __sys_fcntl(i, F_GETFL, NULL);
/* Clear the nonblocking file descriptor flag: */
__sys_fcntl(i, F_SETFL, flags & ~O_NONBLOCK);
}
}

/* Call the _exit syscall: */
__sys_exit(status);
}

void
_thread_exit(char *fname, int lineno, char *string)
{
@ -120,9 +84,6 @@ _thread_exit_cleanup(void)
* internal to the threads library, including file and fd locks,
* are not visible to the application and need to be released.
*/
/* Unlock all owned fd locks: */
_thread_fd_unlock_owned(curthread);

/* Unlock all private mutexes: */
_mutex_unlock_private(curthread);

@ -163,12 +124,6 @@ _pthread_exit(void *status)
_thread_cleanupspecific();
}

/* Free thread-specific poll_data structure, if allocated: */
if (curthread->poll_data.fds != NULL) {
free(curthread->poll_data.fds);
curthread->poll_data.fds = NULL;
}

/*
* Lock the garbage collector mutex to ensure that the garbage
* collector is not using the dead thread list.

@@ -39,108 +39,6 @@

__weak_reference(__fcntl, fcntl);

int
_fcntl(int fd, int cmd,...)
{
int flags = 0;
int nonblock;
int oldfd;
int ret;
va_list ap;

/* Lock the file descriptor: */
if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) {
/* Initialise the variable argument list: */
va_start(ap, cmd);

/* Process according to file control command type: */
switch (cmd) {
/* Duplicate a file descriptor: */
case F_DUPFD:
/*
* Get the file descriptor that the caller wants to
* use:
*/
oldfd = va_arg(ap, int);

/* Initialise the file descriptor table entry: */
if ((ret = __sys_fcntl(fd, cmd, oldfd)) < 0) {
}
/* Initialise the file descriptor table entry: */
else if (_thread_fd_table_init(ret) != 0) {
/* Quietly close the file: */
__sys_close(ret);

/* Reset the file descriptor: */
ret = -1;
} else {
/*
* Save the file open flags so that they can
* be checked later:
*/
_thread_fd_setflags(ret,
_thread_fd_getflags(fd));
}
break;
case F_SETFD:
flags = va_arg(ap, int);
ret = __sys_fcntl(fd, cmd, flags);
break;
case F_GETFD:
ret = __sys_fcntl(fd, cmd, 0);
break;
case F_GETFL:
ret = _thread_fd_getflags(fd);
break;
case F_SETFL:
/*
* Get the file descriptor flags passed by the
* caller:
*/
flags = va_arg(ap, int);

/*
* Check if the user wants a non-blocking file
* descriptor:
*/
nonblock = flags & O_NONBLOCK;

/* Set the file descriptor flags: */
if ((ret = __sys_fcntl(fd, cmd, flags | O_NONBLOCK)) != 0) {

/* Get the flags so that we behave like the kernel: */
} else if ((flags = __sys_fcntl(fd,
F_GETFL, 0)) == -1) {
/* Error getting flags: */
ret = -1;

/*
* Check if the file descriptor is non-blocking
* with respect to the user:
*/
} else if (nonblock)
/* A non-blocking descriptor: */
_thread_fd_setflags(fd, flags | O_NONBLOCK);
else
/* Save the flags: */
_thread_fd_setflags(fd, flags & ~O_NONBLOCK);
break;
default:
/* Might want to make va_arg use a union */
ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
break;
}

/* Free variable arguments: */
va_end(ap);

/* Unlock the file descriptor: */
_FD_UNLOCK(fd, FD_RDWR);
}
/* Return the completion status: */
return (ret);
}

int
__fcntl(int fd, int cmd,...)
{
@@ -154,14 +52,14 @@ __fcntl(int fd, int cmd,...)
case F_DUPFD:
case F_SETFD:
case F_SETFL:
ret = _fcntl(fd, cmd, va_arg(ap, int));
ret = __sys_fcntl(fd, cmd, va_arg(ap, int));
break;
case F_GETFD:
case F_GETFL:
ret = _fcntl(fd, cmd);
ret = __sys_fcntl(fd, cmd);
break;
default:
ret = _fcntl(fd, cmd, va_arg(ap, void *));
ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
}
va_end(ap);

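The removed _fcntl() above implemented a double-bookkeeping trick: every descriptor stays O_NONBLOCK at the kernel level so the userland scheduler can never block in a syscall, while a shadow flag records what the application believes it set. A minimal sketch of that idea, with a hypothetical shadow table (user_flags and set_user_flags are illustrative names, not the library's):

#include <fcntl.h>

static int user_flags[1024];    /* hypothetical shadow of the user-visible flags */

static int
set_user_flags(int fd, int flags)
{
	/* Always run non-blocking underneath; fail the way fcntl() would. */
	if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1)
		return (-1);
	user_flags[fd] = flags;	/* remember what the caller thinks it set */
	return (0);
}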
@@ -63,44 +63,8 @@ _fork(void)
if ((ret = __sys_fork()) != 0) {
/* Parent process or error. Nothing to do here. */
} else {
/* Close the pthread kernel pipe: */
__sys_close(_thread_kern_pipe[0]);
__sys_close(_thread_kern_pipe[1]);

/* Reset signals pending for the running thread: */
sigemptyset(&curthread->sigpend);

/*
* Create a pipe that is written to by the signal handler to
* prevent signals being missed in calls to
* __sys_select:
*/
if (__sys_pipe(_thread_kern_pipe) != 0) {
/* Cannot create pipe, so abort: */
PANIC("Cannot create pthread kernel pipe for forked process");
}
/* Get the flags for the read pipe: */
else if ((flags = __sys_fcntl(_thread_kern_pipe[0], F_GETFL, NULL)) == -1) {
/* Abort this application: */
abort();
}
/* Make the read pipe non-blocking: */
else if (__sys_fcntl(_thread_kern_pipe[0], F_SETFL, flags | O_NONBLOCK) == -1) {
/* Abort this application: */
abort();
}
/* Get the flags for the write pipe: */
else if ((flags = __sys_fcntl(_thread_kern_pipe[1], F_GETFL, NULL)) == -1) {
/* Abort this application: */
abort();
}
/* Make the write pipe non-blocking: */
else if (__sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) {
/* Abort this application: */
abort();
}
/* Reinitialize the GC mutex: */
else if (_mutex_reinit(&_gc_mutex) != 0) {
if (_mutex_reinit(&_gc_mutex) != 0) {
/* Abort this application: */
PANIC("Cannot initialize GC mutex for forked process");
}
@@ -180,32 +144,8 @@ _fork(void)
/* No spinlocks yet: */
_spinblock_count = 0;

/* Don't queue signals yet: */
_queue_signals = 0;

/* Initialize the scheduling switch hook routine: */
_sched_switch_hook = NULL;

/* Clear out any locks in the file descriptor table: */
for (i = 0; i < _thread_dtablesize; i++) {
if (_thread_fd_table[i] != NULL) {
/* Initialise the file locks: */
memset(&_thread_fd_table[i]->lock, 0,
sizeof(_thread_fd_table[i]->lock));
_thread_fd_table[i]->r_owner = NULL;
_thread_fd_table[i]->w_owner = NULL;
_thread_fd_table[i]->r_fname = NULL;
_thread_fd_table[i]->w_fname = NULL;
_thread_fd_table[i]->r_lineno = 0;
_thread_fd_table[i]->w_lineno = 0;
_thread_fd_table[i]->r_lockcount = 0;
_thread_fd_table[i]->w_lockcount = 0;

/* Initialise the read/write queues: */
TAILQ_INIT(&_thread_fd_table[i]->r_queue);
TAILQ_INIT(&_thread_fd_table[i]->w_queue);
}
}
}
}

@@ -236,8 +176,5 @@ free_thread_resources(struct pthread *thread)
if (thread->specific != NULL)
free(thread->specific);

if (thread->poll_data.fds != NULL)
free(thread->poll_data.fds);

free(thread);
}

@@ -37,25 +37,13 @@

__weak_reference(__fsync, fsync);

int
_fsync(int fd)
{
int ret;

if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) {
ret = __sys_fsync(fd);
_FD_UNLOCK(fd, FD_RDWR);
}
return (ret);
}

int
__fsync(int fd)
{
int ret;

_thread_enter_cancellation_point();
ret = _fsync(fd);
ret = __sys_fsync(fd);
_thread_leave_cancellation_point();

return ret;
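With the fd-locking layer gone, __fsync() reduces to the bare cancellation-point bracket around the syscall. The same deferred-cancellation behaviour can be seen with the public API alone; a small sketch under that assumption:

#include <pthread.h>
#include <unistd.h>

/*
 * A thread with deferred cancellation enabled is only cancelled at
 * points like these; pthread_testcancel() acts on any pending request
 * before the (itself cancellable) fsync() runs.
 */
void
checkpoint_then_sync(int fd)
{
	pthread_testcancel();
	fsync(fd);
}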
@@ -56,20 +56,10 @@ struct s_thread_info {
/* Static variables: */
static const struct s_thread_info thread_info[] = {
{PS_RUNNING , "Running"},
{PS_SIGTHREAD , "Waiting on signal thread"},
{PS_MUTEX_WAIT , "Waiting on a mutex"},
{PS_COND_WAIT , "Waiting on a condition variable"},
{PS_FDLR_WAIT , "Waiting for a file read lock"},
{PS_FDLW_WAIT , "Waiting for a file write lock"},
{PS_FDR_WAIT , "Waiting for read"},
{PS_FDW_WAIT , "Waiting for write"},
{PS_FILE_WAIT , "Waiting for FILE lock"},
{PS_POLL_WAIT , "Waiting on poll"},
{PS_SELECT_WAIT , "Waiting on select"},
{PS_SLEEP_WAIT , "Sleeping"},
{PS_WAIT_WAIT , "Waiting process"},
{PS_SIGSUSPEND , "Suspended, waiting for a signal"},
{PS_SIGWAIT , "Waiting for a signal"},
{PS_SPINBLOCK , "Waiting for a spinlock"},
{PS_JOIN , "Waiting to join"},
{PS_SUSPENDED , "Suspended"},
@@ -169,34 +159,6 @@ _thread_dump_info(void)
}
}

/* Output a header for file descriptors: */
snprintf(s, sizeof(s), "\n\n=============\nFILE DESCRIPTOR "
"TABLE (table size %d)\n\n", _thread_dtablesize);
__sys_write(fd, s, strlen(s));

/* Enter a loop to report file descriptor lock usage: */
for (i = 0; i < _thread_dtablesize; i++) {
/*
* Check if memory is allocated for this file
* descriptor:
*/
if (_thread_fd_table[i] != NULL) {
/* Report the file descriptor lock status: */
snprintf(s, sizeof(s),
"fd[%3d] read owner %p count %d [%s:%d]\n"
" write owner %p count %d [%s:%d]\n",
i, _thread_fd_table[i]->r_owner,
_thread_fd_table[i]->r_lockcount,
_thread_fd_table[i]->r_fname,
_thread_fd_table[i]->r_lineno,
_thread_fd_table[i]->w_owner,
_thread_fd_table[i]->w_lockcount,
_thread_fd_table[i]->w_fname,
_thread_fd_table[i]->w_lineno);
__sys_write(fd, s, strlen(s));
}
}

/* Close the dump file: */
__sys_close(fd);
}
@@ -237,33 +199,6 @@ dump_thread(int fd, pthread_t pthread, int long_version)
}
/* Process according to thread state: */
switch (pthread->state) {
/* File descriptor read lock wait: */
case PS_FDLR_WAIT:
case PS_FDLW_WAIT:
case PS_FDR_WAIT:
case PS_FDW_WAIT:
/* Write the lock details: */
snprintf(s, sizeof(s), "fd %d[%s:%d]",
pthread->data.fd.fd,
pthread->data.fd.fname,
pthread->data.fd.branch);
__sys_write(fd, s, strlen(s));
snprintf(s, sizeof(s), "owner %pr/%pw\n",
_thread_fd_table[pthread->data.fd.fd]->r_owner,
_thread_fd_table[pthread->data.fd.fd]->w_owner);
__sys_write(fd, s, strlen(s));
break;
case PS_SIGWAIT:
snprintf(s, sizeof(s), "sigmask (hi)");
__sys_write(fd, s, strlen(s));
for (i = _SIG_WORDS - 1; i >= 0; i--) {
snprintf(s, sizeof(s), "%08x\n",
pthread->sigmask.__bits[i]);
__sys_write(fd, s, strlen(s));
}
snprintf(s, sizeof(s), "(lo)\n");
__sys_write(fd, s, strlen(s));
break;
/*
* Trap other states that are not explicitly
* coded to dump information:
@@ -56,7 +56,6 @@
#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
@@ -163,7 +162,6 @@ _thread_init(void)
int sched_stack_size; /* Size of scheduler stack. */

struct clockinfo clockinfo;
struct sigaction act;

_pthread_page_size = getpagesize();
_pthread_guard_default = getpagesize();
@@ -209,57 +207,9 @@ _thread_init(void)
PANIC("Can't dup2");
}

/* Get the standard I/O flags before messing with them : */
for (i = 0; i < 3; i++) {
if (((_pthread_stdio_flags[i] =
__sys_fcntl(i, F_GETFL, NULL)) == -1) &&
(errno != EBADF))
PANIC("Cannot get stdio flags");
}

/*
* Create a pipe that is written to by the signal handler to prevent
* signals being missed in calls to _select:
*/
if (__sys_pipe(_thread_kern_pipe) != 0) {
/* Cannot create pipe, so abort: */
PANIC("Cannot create kernel pipe");
}

/*
* Make sure the pipe does not get in the way of stdio:
*/
for (i = 0; i < 2; i++) {
if (_thread_kern_pipe[i] < 3) {
fd = __sys_fcntl(_thread_kern_pipe[i], F_DUPFD, 3);
if (fd == -1)
PANIC("Cannot create kernel pipe");
__sys_close(_thread_kern_pipe[i]);
_thread_kern_pipe[i] = fd;
}
}
/* Get the flags for the read pipe: */
if ((flags = __sys_fcntl(_thread_kern_pipe[0], F_GETFL, NULL)) == -1) {
/* Abort this application: */
PANIC("Cannot get kernel read pipe flags");
}
/* Make the read pipe non-blocking: */
else if (__sys_fcntl(_thread_kern_pipe[0], F_SETFL, flags | O_NONBLOCK) == -1) {
/* Abort this application: */
PANIC("Cannot make kernel read pipe non-blocking");
}
/* Get the flags for the write pipe: */
else if ((flags = __sys_fcntl(_thread_kern_pipe[1], F_GETFL, NULL)) == -1) {
/* Abort this application: */
PANIC("Cannot get kernel write pipe flags");
}
/* Make the write pipe non-blocking: */
else if (__sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) {
/* Abort this application: */
PANIC("Cannot get kernel write pipe flags");
}
/* Allocate and initialize the ready queue: */
else if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_LAST_PRIORITY) != 0) {
if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_LAST_PRIORITY) !=
0) {
/* Abort this application: */
PANIC("Cannot allocate priority ready queue.");
}
@@ -312,15 +262,19 @@ _thread_init(void)
/* Set the main thread stack pointer. */
_thread_initial->stack = _usrstack - PTHREAD_STACK_INITIAL;

/* Set the stack attributes: */
/* Set the stack attributes. */
_thread_initial->attr.stackaddr_attr = _thread_initial->stack;
_thread_initial->attr.stacksize_attr = PTHREAD_STACK_INITIAL;

/* Setup the context for the scheduler: */
_setjmp(_thread_kern_sched_jb);
SET_STACK_JB(_thread_kern_sched_jb, _thread_kern_sched_stack +
sched_stack_size - sizeof(double));
SET_RETURN_ADDR_JB(_thread_kern_sched_jb, _thread_kern_scheduler);
getcontext(&_thread_kern_sched_ctx);
_thread_kern_sched_ctx.uc_stack.ss_sp =
_thread_kern_sched_stack;
_thread_kern_sched_ctx.uc_stack.ss_size = sched_stack_size;
makecontext(&_thread_kern_sched_ctx, _thread_kern_scheduler, 1);

/* Block all signals to the scheduler's context. */
sigfillset(&_thread_kern_sched_ctx.uc_sigmask);

/*
* Write a magic value to the thread structure
@@ -332,6 +286,11 @@ _thread_init(void)
_thread_initial->cancelflags = PTHREAD_CANCEL_ENABLE |
PTHREAD_CANCEL_DEFERRED;

/* Setup the context for initial thread. */
getcontext(&_thread_initial->ctx);
_thread_kern_sched_ctx.uc_stack.ss_sp = _thread_initial->stack;
_thread_kern_sched_ctx.uc_stack.ss_size = PTHREAD_STACK_INITIAL;

/* Default the priority of the initial thread: */
_thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY;
_thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY;
@@ -357,14 +316,8 @@ _thread_init(void)
/* Initialize last active: */
_thread_initial->last_active = (long) _sched_ticks;

/* Initialize the initial context: */
_thread_initial->curframe = NULL;

/* Initialise the rest of the fields: */
_thread_initial->poll_data.nfds = 0;
_thread_initial->poll_data.fds = NULL;
_thread_initial->sig_defer_count = 0;
_thread_initial->yield_on_sig_undefer = 0;
_thread_initial->specific = NULL;
_thread_initial->cleanup = NULL;
_thread_initial->flags = 0;
@@ -373,57 +326,6 @@ _thread_init(void)
TAILQ_INSERT_HEAD(&_thread_list, _thread_initial, tle);
_set_curthread(_thread_initial);

/* Initialise the global signal action structure: */
sigfillset(&act.sa_mask);
act.sa_handler = (void (*) ()) _thread_sig_handler;
act.sa_flags = SA_SIGINFO | SA_ONSTACK;

/* Clear pending signals for the process: */
sigemptyset(&_process_sigpending);

/* Clear the signal queue: */
memset(_thread_sigq, 0, sizeof(_thread_sigq));

/* Enter a loop to get the existing signal status: */
for (i = 1; i < NSIG; i++) {
/* Check for signals which cannot be trapped: */
if (i == SIGKILL || i == SIGSTOP) {
}

/* Get the signal handler details: */
else if (__sys_sigaction(i, NULL,
&_thread_sigact[i - 1]) != 0) {
/*
* Abort this process if signal
* initialisation fails:
*/
PANIC("Cannot read signal handler info");
}

/* Initialize the SIG_DFL dummy handler count. */
_thread_dfl_count[i] = 0;
}

/*
* Install the signal handler for the most important
* signals that the user-thread kernel needs. Actually
* SIGINFO isn't really needed, but it is nice to have.
*/
if (__sys_sigaction(_SCHED_SIGNAL, &act, NULL) != 0 ||
__sys_sigaction(SIGINFO, &act, NULL) != 0 ||
__sys_sigaction(SIGCHLD, &act, NULL) != 0) {
/*
* Abort this process if signal initialisation fails:
*/
PANIC("Cannot initialise signal handler");
}
_thread_sigact[_SCHED_SIGNAL - 1].sa_flags = SA_SIGINFO;
_thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO;
_thread_sigact[SIGCHLD - 1].sa_flags = SA_SIGINFO;

/* Get the process signal mask: */
__sys_sigprocmask(SIG_SETMASK, NULL, &_process_sigmask);

/* Get the kernel clockrate: */
mib[0] = CTL_KERN;
mib[1] = KERN_CLOCKRATE;
@@ -432,50 +334,6 @@ _thread_init(void)
_clock_res_usec = clockinfo.tick > CLOCK_RES_USEC_MIN ?
clockinfo.tick : CLOCK_RES_USEC_MIN;

/* Get the table size: */
if ((_thread_dtablesize = getdtablesize()) < 0) {
/*
* Cannot get the system defined table size, so abort
* this process.
*/
PANIC("Cannot get dtablesize");
}
/* Allocate memory for the file descriptor table: */
if ((_thread_fd_table = (struct fd_table_entry **) malloc(sizeof(struct fd_table_entry *) * _thread_dtablesize)) == NULL) {
/* Avoid accesses to file descriptor table on exit: */
_thread_dtablesize = 0;

/*
* Cannot allocate memory for the file descriptor
* table, so abort this process.
*/
PANIC("Cannot allocate memory for file descriptor table");
}
/* Allocate memory for the pollfd table: */
if ((_thread_pfd_table = (struct pollfd *) malloc(sizeof(struct pollfd) * _thread_dtablesize)) == NULL) {
/*
* Cannot allocate memory for the file descriptor
* table, so abort this process.
*/
PANIC("Cannot allocate memory for pollfd table");
} else {
/*
* Enter a loop to initialise the file descriptor
* table:
*/
for (i = 0; i < _thread_dtablesize; i++) {
/* Initialise the file descriptor table: */
_thread_fd_table[i] = NULL;
}

/* Initialize stdio file descriptor table entries: */
for (i = 0; i < 3; i++) {
if ((_thread_fd_table_init(i) != 0) &&
(errno != EBADF))
PANIC("Cannot initialize stdio file "
"descriptor table entry");
}
}
}

/* Initialise the garbage collector mutex and condition variable. */

@@ -38,7 +38,6 @@
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <setjmp.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/signalvar.h>
@@ -60,7 +59,7 @@

/* Static function prototype definitions: */
static void
thread_kern_poll(int wait_reqd);
thread_kern_idle(void);

static void
dequeue_signals(void);
@@ -70,37 +69,9 @@ thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);

/* Static variables: */
static int last_tick = 0;
static int called_from_handler = 0;

/*
* This is called when a signal handler finishes and wants to
* return to a previous frame.
*/
void
_thread_kern_sched_frame(struct pthread_signal_frame *psf)
{
struct pthread *curthread = _get_curthread();

/*
* Flag the pthread kernel as executing scheduler code
* to avoid a signal from interrupting this execution and
* corrupting the (soon-to-be) current frame.
*/
_thread_kern_in_sched = 1;

/* Restore the signal frame: */
_thread_sigframe_restore(curthread, psf);

/* The signal mask was restored; check for any pending signals: */
curthread->check_pending = 1;

/* Switch to the thread scheduler: */
___longjmp(_thread_kern_sched_jb, 1);
}

void
_thread_kern_sched(ucontext_t *ucp)
_thread_kern_sched(void)
{
struct pthread *curthread = _get_curthread();

@@ -111,78 +82,40 @@ _thread_kern_sched(ucontext_t *ucp)
*/
_thread_kern_in_sched = 1;

/* Check if this function was called from the signal handler: */
if (ucp != NULL) {
/* XXX - Save FP registers? */
FP_SAVE_UC(ucp);
called_from_handler = 1;
DBG_MSG("Entering scheduler due to signal\n");
}
/* Switch into the scheduler's context. */
swapcontext(&curthread->ctx, &_thread_kern_sched_ctx);
DBG_MSG("Returned from swapcontext, thread %p\n", curthread);

/* Save the state of the current thread: */
if (_setjmp(curthread->ctx.jb) != 0) {
DBG_MSG("Returned from ___longjmp, thread %p\n",
curthread);
/*
* This point is reached when a longjmp() is called
* to restore the state of a thread.
*
* This is the normal way out of the scheduler.
*/
_thread_kern_in_sched = 0;

if (curthread->sig_defer_count == 0) {
if (((curthread->cancelflags &
PTHREAD_AT_CANCEL_POINT) == 0) &&
((curthread->cancelflags &
PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
/*
* Cancellations override signals.
*
* Stick a cancellation point at the
* start of each async-cancellable
* thread's resumption.
*
* We allow threads woken at cancel
* points to do their own checks.
*/
pthread_testcancel();
}

if (_sched_switch_hook != NULL) {
/* Run the installed switch hook: */
thread_run_switch_hook(_last_user_thread, curthread);
}
if (ucp == NULL)
return;
else {
/* XXX - Restore FP registers? */
FP_RESTORE_UC(ucp);
/*
* This point is reached when swapcontext() is called
* to restore the state of a thread.
*
* This is the normal way out of the scheduler.
*/
_thread_kern_in_sched = 0;

if (curthread->sig_defer_count == 0) {
if (((curthread->cancelflags &
PTHREAD_AT_CANCEL_POINT) == 0) &&
((curthread->cancelflags &
PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
/*
* Set the process signal mask in the context; it
* could have changed by the handler.
* Stick a cancellation point at the
* start of each async-cancellable
* thread's resumption.
*
* We allow threads woken at cancel
* points to do their own checks.
*/
ucp->uc_sigmask = _process_sigmask;

/* Resume the interrupted thread: */
__sys_sigreturn(ucp);
}
pthread_testcancel();
}

if (_sched_switch_hook != NULL) {
/* Run the installed switch hook: */
thread_run_switch_hook(_last_user_thread, curthread);
}
/* Switch to the thread scheduler: */
___longjmp(_thread_kern_sched_jb, 1);
}

void
_thread_kern_sched_sig(void)
{
struct pthread *curthread = _get_curthread();

curthread->check_pending = 1;
_thread_kern_sched(NULL);
}

void
_thread_kern_scheduler(void)
{
@@ -193,48 +126,28 @@ _thread_kern_scheduler(void)
unsigned int current_tick;
int add_to_prioq;

/* If the currently running thread is a user thread, save it: */
if ((curthread->flags & PTHREAD_FLAGS_PRIVATE) == 0)
_last_user_thread = curthread;

if (called_from_handler != 0) {
called_from_handler = 0;

/*
* We were called from a signal handler; restore the process
* signal mask.
*/
if (__sys_sigprocmask(SIG_SETMASK,
&_process_sigmask, NULL) != 0)
PANIC("Unable to restore process mask after signal");
}

/*
* Enter a scheduling loop that finds the next thread that is
* ready to run. This loop completes when there are no more threads
* in the global list or when a thread has its state restored by
* either a sigreturn (if the state was saved as a sigcontext) or a
* longjmp (if the state was saved by a setjmp).
* in the global list. It is interrupted each time a thread is
* scheduled, but will continue when we return.
*/
while (!(TAILQ_EMPTY(&_thread_list))) {

/* If the currently running thread is a user thread, save it: */
if ((curthread->flags & PTHREAD_FLAGS_PRIVATE) == 0)
_last_user_thread = curthread;

/* Get the current time of day: */
GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &ts);
current_tick = _sched_ticks;

/*
* Protect the scheduling queues from access by the signal
* handler.
*/
_queue_signals = 1;
add_to_prioq = 0;

if (curthread != &_thread_kern_thread) {
/*
* This thread no longer needs to yield the CPU.
*/
curthread->yield_on_sig_undefer = 0;

if (curthread->state != PS_RUNNING) {
/*
* Save the current time as the time that the
@@ -278,14 +191,8 @@ _thread_kern_scheduler(void)
* operations or timeouts:
*/
case PS_DEADLOCK:
case PS_FDLR_WAIT:
case PS_FDLW_WAIT:
case PS_FILE_WAIT:
case PS_JOIN:
case PS_MUTEX_WAIT:
case PS_SIGSUSPEND:
case PS_SIGTHREAD:
case PS_SIGWAIT:
case PS_WAIT_WAIT:
/* No timeouts for these states: */
curthread->wakeup_time.tv_sec = -1;
@@ -318,62 +225,9 @@ _thread_kern_scheduler(void)
_spinblock_count++;

/* FALLTHROUGH */
case PS_FDR_WAIT:
case PS_FDW_WAIT:
case PS_POLL_WAIT:
case PS_SELECT_WAIT:
/* Restart the time slice: */
curthread->slice_usec = -1;

/* Insert into the waiting queue: */
PTHREAD_WAITQ_INSERT(curthread);

/* Insert into the work queue: */
PTHREAD_WORKQ_INSERT(curthread);
break;
}

/*
* Are there pending signals for this thread?
*
* This check has to be performed after the thread
* has been placed in the queue(s) appropriate for
* its state. The process of adding pending signals
* can change a threads state, which in turn will
* attempt to add or remove the thread from any
* scheduling queue to which it belongs.
*/
if (curthread->check_pending != 0) {
curthread->check_pending = 0;
_thread_sig_check_pending(curthread);
}
}

/*
* Avoid polling file descriptors if there are none
* waiting:
*/
if (TAILQ_EMPTY(&_workq) != 0) {
}
/*
* Poll file descriptors only if a new scheduling signal
* has occurred or if we have no more runnable threads.
*/
else if (((current_tick = _sched_ticks) != last_tick) ||
((curthread->state != PS_RUNNING) &&
(PTHREAD_PRIOQ_FIRST() == NULL))) {
/* Unprotect the scheduling queues: */
_queue_signals = 0;

/*
* Poll file descriptors to update the state of threads
* waiting on file I/O where data may be available:
*/
thread_kern_poll(0);

/* Protect the scheduling queues: */
_queue_signals = 1;
}
last_tick = current_tick;

/*
@@ -389,25 +243,16 @@ _thread_kern_scheduler(void)
(pthread->wakeup_time.tv_sec < ts.tv_sec) ||
((pthread->wakeup_time.tv_sec == ts.tv_sec) &&
(pthread->wakeup_time.tv_nsec <= ts.tv_nsec)))) {
switch (pthread->state) {
case PS_POLL_WAIT:
case PS_SELECT_WAIT:
/* Return zero file descriptors ready: */
pthread->data.poll_data->nfds = 0;
/* FALLTHROUGH */
default:
/*
* Remove this thread from the waiting queue
* (and work queue if necessary) and place it
* in the ready queue.
*/
PTHREAD_WAITQ_CLEARACTIVE();
if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
PTHREAD_WORKQ_REMOVE(pthread);
PTHREAD_NEW_STATE(pthread, PS_RUNNING);
PTHREAD_WAITQ_SETACTIVE();
break;
}
/*
* Remove this thread from the waiting queue
* (and work queue if necessary) and place it
* in the ready queue.
*/
PTHREAD_WAITQ_CLEARACTIVE();
if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
PTHREAD_WORKQ_REMOVE(pthread);
PTHREAD_NEW_STATE(pthread, PS_RUNNING);
PTHREAD_WAITQ_SETACTIVE();
/*
* Flag the timeout in the thread structure:
*/
@@ -483,14 +328,11 @@ _thread_kern_scheduler(void)
DBG_MSG("No runnable threads, using kernel thread %p\n",
curthread);

/* Unprotect the scheduling queues: */
_queue_signals = 0;

/*
* There are no threads ready to run, so wait until
* something happens that changes this condition:
*/
thread_kern_poll(1);
thread_kern_idle();

/*
* This process' usage will likely be very small
@@ -503,54 +345,13 @@ _thread_kern_scheduler(void)
gettimeofday((struct timeval *) &_sched_tod, NULL);

/* Check once more for a runnable thread: */
_queue_signals = 1;
pthread_h = PTHREAD_PRIOQ_FIRST();
_queue_signals = 0;
}

if (pthread_h != NULL) {
/* Remove the thread from the ready queue: */
PTHREAD_PRIOQ_REMOVE(pthread_h);

/* Unprotect the scheduling queues: */
_queue_signals = 0;

/*
* Check for signals queued while the scheduling
* queues were protected:
*/
while (_sigq_check_reqd != 0) {
/* Clear before handling queued signals: */
_sigq_check_reqd = 0;

/* Protect the scheduling queues again: */
_queue_signals = 1;

dequeue_signals();

/*
* Check for a higher priority thread that
* became runnable due to signal handling.
*/
if (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
(pthread->active_priority > pthread_h->active_priority)) {
/* Remove the thread from the ready queue: */
PTHREAD_PRIOQ_REMOVE(pthread);

/*
* Insert the lower priority thread
* at the head of its priority list:
*/
PTHREAD_PRIOQ_INSERT_HEAD(pthread_h);

/* There's a new thread in town: */
pthread_h = pthread;
}

/* Unprotect the scheduling queues: */
_queue_signals = 0;
}

/* Make the selected thread the current thread: */
_set_curthread(pthread_h);
curthread = pthread_h;
@@ -584,13 +385,7 @@ _thread_kern_scheduler(void)
/*
* Continue the thread at its current frame:
*/
#if NOT_YET
_setcontext(&curthread->ctx.uc);
#else
___longjmp(curthread->ctx.jb, 1);
#endif
/* This point should not be reached. */
PANIC("Thread has returned from sigreturn or longjmp");
swapcontext(&_thread_kern_sched_ctx, &curthread->ctx);
}
}

@@ -610,19 +405,13 @@ _thread_kern_sched_state(enum pthread_state state, char *fname, int lineno)
*/
_thread_kern_in_sched = 1;

/*
* Prevent the signal handler from fiddling with this thread
* before its state is set and is placed into the proper queue.
*/
_queue_signals = 1;

/* Change the state of the current thread: */
curthread->state = state;
curthread->fname = fname;
curthread->lineno = lineno;

/* Schedule the next thread that is ready: */
_thread_kern_sched(NULL);
_thread_kern_sched();
}

void
@@ -638,13 +427,6 @@ _thread_kern_sched_state_unlock(enum pthread_state state,
*/
_thread_kern_in_sched = 1;

/*
* Prevent the signal handler from fiddling with this thread
* before its state is set and it is placed into the proper
* queue(s).
*/
_queue_signals = 1;

/* Change the state of the current thread: */
curthread->state = state;
curthread->fname = fname;
@@ -653,13 +435,12 @@ _thread_kern_sched_state_unlock(enum pthread_state state,
_SPINUNLOCK(lock);

/* Schedule the next thread that is ready: */
_thread_kern_sched(NULL);
_thread_kern_sched();
}

static void
thread_kern_poll(int wait_reqd)
thread_kern_idle()
{
int count = 0;
int i, found;
int kern_pipe_added = 0;
int nfds = 0;
@@ -668,57 +449,35 @@ thread_kern_poll(int wait_reqd)
struct timespec ts;
struct timeval tv;

/* Check if the caller wants to wait: */
if (wait_reqd == 0) {
timeout_ms = 0;
/* Get the current time of day: */
GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &ts);

pthread = TAILQ_FIRST(&_waitingq);

if ((pthread == NULL) || (pthread->wakeup_time.tv_sec == -1)) {
/*
* Either there are no threads in the waiting queue,
* or there are no threads that can timeout.
*/
PANIC("Would idle forever");
}
else if (pthread->wakeup_time.tv_sec - ts.tv_sec > 60000)
/* Limit maximum timeout to prevent rollover. */
timeout_ms = 60000;
else {
/* Get the current time of day: */
GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &ts);

_queue_signals = 1;
pthread = TAILQ_FIRST(&_waitingq);
_queue_signals = 0;

if ((pthread == NULL) || (pthread->wakeup_time.tv_sec == -1)) {
/*
* Either there are no threads in the waiting queue,
* or there are no threads that can timeout.
*/
timeout_ms = INFTIM;
}
else if (pthread->wakeup_time.tv_sec - ts.tv_sec > 60000)
/* Limit maximum timeout to prevent rollover. */
timeout_ms = 60000;
else {
/*
* Calculate the time left for the next thread to
* timeout:
*/
timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) *
1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec) /
1000000);
/*
* Don't allow negative timeouts:
*/
if (timeout_ms < 0)
timeout_ms = 0;
}
}

/* Protect the scheduling queues: */
_queue_signals = 1;

/*
* Check to see if the signal queue needs to be walked to look
* for threads awoken by a signal while in the scheduler.
*/
if (_sigq_check_reqd != 0) {
/* Reset flag before handling queued signals: */
_sigq_check_reqd = 0;

dequeue_signals();
/*
* Calculate the time left for the next thread to
* timeout:
*/
timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) *
1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec) /
1000000);
/*
* Only idle if we would be.
*/
if (timeout_ms <= 0)
return;
}

/*
@@ -733,219 +492,11 @@ thread_kern_poll(int wait_reqd)
}

/*
* Form the poll table:
* Idle.
*/
nfds = 0;
if (timeout_ms != 0) {
/* Add the kernel pipe to the poll table: */
_thread_pfd_table[nfds].fd = _thread_kern_pipe[0];
_thread_pfd_table[nfds].events = POLLRDNORM;
_thread_pfd_table[nfds].revents = 0;
nfds++;
kern_pipe_added = 1;
}
__sys_poll(NULL, 0, timeout_ms);

PTHREAD_WAITQ_SETACTIVE();
TAILQ_FOREACH(pthread, &_workq, qe) {
switch (pthread->state) {
case PS_SPINBLOCK:
/*
* If the lock is available, let the thread run.
*/
if (pthread->data.spinlock->access_lock == 0) {
PTHREAD_WAITQ_CLEARACTIVE();
PTHREAD_WORKQ_REMOVE(pthread);
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
PTHREAD_WAITQ_SETACTIVE();
/* One less thread in a spinblock state: */
_spinblock_count--;
/*
* Since there is at least one runnable
* thread, disable the wait.
*/
timeout_ms = 0;
}
break;

/* File descriptor read wait: */
case PS_FDR_WAIT:
/* Limit number of polled files to table size: */
if (nfds < _thread_dtablesize) {
_thread_pfd_table[nfds].events = POLLRDNORM;
_thread_pfd_table[nfds].fd = pthread->data.fd.fd;
nfds++;
}
break;

/* File descriptor write wait: */
case PS_FDW_WAIT:
/* Limit number of polled files to table size: */
if (nfds < _thread_dtablesize) {
_thread_pfd_table[nfds].events = POLLWRNORM;
_thread_pfd_table[nfds].fd = pthread->data.fd.fd;
nfds++;
}
break;

/* File descriptor poll or select wait: */
case PS_POLL_WAIT:
case PS_SELECT_WAIT:
/* Limit number of polled files to table size: */
if (pthread->data.poll_data->nfds + nfds <
_thread_dtablesize) {
for (i = 0; i < pthread->data.poll_data->nfds; i++) {
_thread_pfd_table[nfds + i].fd =
pthread->data.poll_data->fds[i].fd;
_thread_pfd_table[nfds + i].events =
pthread->data.poll_data->fds[i].events;
}
nfds += pthread->data.poll_data->nfds;
}
break;

/* Other states do not depend on file I/O. */
default:
break;
}
}
PTHREAD_WAITQ_CLEARACTIVE();

/*
* Wait for a file descriptor to be ready for read, write, or
* an exception, or a timeout to occur:
*/
count = __sys_poll(_thread_pfd_table, nfds, timeout_ms);

if (kern_pipe_added != 0)
/*
* Remove the pthread kernel pipe file descriptor
* from the pollfd table:
*/
nfds = 1;
else
nfds = 0;

/*
* Check if it is possible that there are bytes in the kernel
* read pipe waiting to be read:
*/
if (count < 0 || ((kern_pipe_added != 0) &&
(_thread_pfd_table[0].revents & POLLRDNORM))) {
/*
* If the kernel read pipe was included in the
* count:
*/
if (count > 0) {
/* Decrement the count of file descriptors: */
count--;
}

if (_sigq_check_reqd != 0) {
/* Reset flag before handling signals: */
_sigq_check_reqd = 0;

dequeue_signals();
}
}

/*
* Check if any file descriptors are ready:
*/
if (count > 0) {
/*
* Enter a loop to look for threads waiting on file
* descriptors that are flagged as available by the
* _poll syscall:
*/
PTHREAD_WAITQ_SETACTIVE();
TAILQ_FOREACH(pthread, &_workq, qe) {
switch (pthread->state) {
case PS_SPINBLOCK:
/*
* If the lock is available, let the thread run.
*/
if (pthread->data.spinlock->access_lock == 0) {
PTHREAD_WAITQ_CLEARACTIVE();
PTHREAD_WORKQ_REMOVE(pthread);
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
PTHREAD_WAITQ_SETACTIVE();

/*
* One less thread in a spinblock state:
*/
_spinblock_count--;
}
break;

/* File descriptor read wait: */
case PS_FDR_WAIT:
if ((nfds < _thread_dtablesize) &&
(_thread_pfd_table[nfds].revents
& (POLLRDNORM|POLLERR|POLLHUP|POLLNVAL))
!= 0) {
PTHREAD_WAITQ_CLEARACTIVE();
PTHREAD_WORKQ_REMOVE(pthread);
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
PTHREAD_WAITQ_SETACTIVE();
}
nfds++;
break;

/* File descriptor write wait: */
case PS_FDW_WAIT:
if ((nfds < _thread_dtablesize) &&
(_thread_pfd_table[nfds].revents
& (POLLWRNORM|POLLERR|POLLHUP|POLLNVAL))
!= 0) {
PTHREAD_WAITQ_CLEARACTIVE();
PTHREAD_WORKQ_REMOVE(pthread);
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
PTHREAD_WAITQ_SETACTIVE();
}
nfds++;
break;

/* File descriptor poll or select wait: */
case PS_POLL_WAIT:
case PS_SELECT_WAIT:
if (pthread->data.poll_data->nfds + nfds <
_thread_dtablesize) {
/*
* Enter a loop looking for I/O
* readiness:
*/
found = 0;
for (i = 0; i < pthread->data.poll_data->nfds; i++) {
if (_thread_pfd_table[nfds + i].revents != 0) {
pthread->data.poll_data->fds[i].revents =
_thread_pfd_table[nfds + i].revents;
found++;
}
}

/* Increment before destroying: */
nfds += pthread->data.poll_data->nfds;

if (found != 0) {
pthread->data.poll_data->nfds = found;
PTHREAD_WAITQ_CLEARACTIVE();
PTHREAD_WORKQ_REMOVE(pthread);
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
PTHREAD_WAITQ_SETACTIVE();
}
}
else
nfds += pthread->data.poll_data->nfds;
break;

/* Other states do not depend on file I/O. */
default:
break;
}
}
PTHREAD_WAITQ_CLEARACTIVE();
}
else if (_spinblock_count != 0) {
if (_spinblock_count != 0) {
/*
* Enter a loop to look for threads waiting on a spinlock
* that is now available.
@@ -971,22 +522,6 @@ thread_kern_poll(int wait_reqd)
}
PTHREAD_WAITQ_CLEARACTIVE();
}

/* Unprotect the scheduling queues: */
_queue_signals = 0;

while (_sigq_check_reqd != 0) {
/* Handle queued signals: */
_sigq_check_reqd = 0;

/* Protect the scheduling queues: */
_queue_signals = 1;

dequeue_signals();

/* Unprotect the scheduling queues: */
_queue_signals = 0;
}
}

void
@@ -1057,12 +592,6 @@ _thread_kern_sig_undefer(void)
/* Reenable signals: */
curthread->sig_defer_count = 0;

/*
* Check if there are queued signals:
*/
if (_sigq_check_reqd != 0)
_thread_kern_sched(NULL);

/*
* Check for asynchronous cancellation before delivering any
* pending signals:
@@ -1070,44 +599,9 @@ _thread_kern_sig_undefer(void)
if (((curthread->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) &&
((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
pthread_testcancel();

/*
* If there are pending signals or this thread has
* to yield the CPU, call the kernel scheduler:
*
* XXX - Come back and revisit the pending signal problem
*/
if ((curthread->yield_on_sig_undefer != 0) ||
SIGNOTEMPTY(curthread->sigpend)) {
curthread->yield_on_sig_undefer = 0;
_thread_kern_sched(NULL);
}
}
}

static void
dequeue_signals(void)
{
char bufr[128];
int num;

/*
* Enter a loop to clear the pthread kernel pipe:
*/
while (((num = __sys_read(_thread_kern_pipe[0], bufr,
sizeof(bufr))) > 0) || (num == -1 && errno == EINTR)) {
}
if ((num < 0) && (errno != EAGAIN)) {
/*
* The only error we should expect is if there is
* no data to read.
*/
PANIC("Unable to read from thread kernel pipe");
}
/* Handle any pending signals: */
_thread_sig_handle_pending();
}

static inline void
thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in)
{

@@ -41,34 +41,8 @@ __weak_reference(_pthread_kill, pthread_kill);
int
_pthread_kill(pthread_t pthread, int sig)
{
int ret;

/* Check for invalid signal numbers: */
if (sig < 0 || sig >= NSIG)
/* Invalid signal: */
ret = EINVAL;
/*
* Ensure the thread is in the list of active threads, and the
* signal is valid (signal 0 specifies error checking only) and
* not being ignored:
* All signals are unsupported.
*/
else if (((ret = _find_thread(pthread)) == 0) && (sig > 0) &&
(_thread_sigact[sig - 1].sa_handler != SIG_IGN)) {
/*
* Defer signals to protect the scheduling queues from
* access by the signal handler:
*/
_thread_kern_sig_defer();

_thread_sig_send(pthread, sig);

/*
* Undefer and handle pending signals, yielding if
* necessary:
*/
_thread_kern_sig_undefer();
}

/* Return the completion status: */
return (ret);
return (EINVAL);
}

@@ -13,16 +13,6 @@

__weak_reference(__msync, msync);

int
_msync(void *addr, size_t len, int flags)
{
int ret;

ret = __sys_msync(addr, len, flags);

return (ret);
}

int
__msync(void *addr, size_t len, int flags)
{
@@ -35,7 +25,7 @@ __msync(void *addr, size_t len, int flags)
* a cancellation point, as per the standard. sigh.
*/
_thread_enter_cancellation_point();
ret = _msync(addr, len, flags);
ret = __sys_msync(addr, len, flags);
_thread_leave_cancellation_point();

return ret;
@@ -42,36 +42,6 @@

__weak_reference(__open, open);

int
_open(const char *path, int flags,...)
{
int fd;
int mode = 0;
va_list ap;

/* Check if the file is being created: */
if (flags & O_CREAT) {
/* Get the creation mode: */
va_start(ap, flags);
mode = va_arg(ap, int);
va_end(ap);
}
/* Open the file: */
if ((fd = __sys_open(path, flags, mode)) < 0) {
}
/* Initialise the file descriptor table entry: */
else if (_thread_fd_table_init(fd) != 0) {
/* Quietly close the file: */
__sys_close(fd);

/* Reset the file descriptor: */
fd = -1;
}

/* Return the file descriptor or -1 on error: */
return (fd);
}

int
__open(const char *path, int flags,...)
{
@@ -89,7 +59,7 @@ __open(const char *path, int flags,...)
va_end(ap);
}

ret = _open(path, flags, mode);
ret = __sys_open(path, flags, mode);
_thread_leave_cancellation_point();

return ret;
@@ -43,68 +43,13 @@

__weak_reference(__poll, poll);

int
_poll(struct pollfd *fds, unsigned int nfds, int timeout)
{
struct pthread *curthread = _get_curthread();
struct timespec ts;
int numfds = nfds;
int i, ret = 0;
struct pthread_poll_data data;

if (numfds > _thread_dtablesize) {
numfds = _thread_dtablesize;
}
/* Check if a timeout was specified: */
if (timeout == INFTIM) {
/* Wait for ever: */
_thread_kern_set_timeout(NULL);
} else if (timeout > 0) {
/* Convert the timeout in msec to a timespec: */
ts.tv_sec = timeout / 1000;
ts.tv_nsec = (timeout % 1000) * 1000000;

/* Set the wake up time: */
_thread_kern_set_timeout(&ts);
} else if (timeout < 0) {
/* a timeout less than zero but not == INFTIM is invalid */
errno = EINVAL;
return (-1);
}

if (((ret = __sys_poll(fds, numfds, 0)) == 0) && (timeout != 0)) {
data.nfds = numfds;
data.fds = fds;

/*
* Clear revents in case of a timeout which leaves fds
* unchanged:
*/
for (i = 0; i < numfds; i++) {
fds[i].revents = 0;
}

curthread->data.poll_data = &data;
curthread->interrupted = 0;
_thread_kern_sched_state(PS_POLL_WAIT, __FILE__, __LINE__);
if (curthread->interrupted) {
errno = EINTR;
ret = -1;
} else {
ret = data.nfds;
}
}

return (ret);
}

int
__poll(struct pollfd *fds, unsigned int nfds, int timeout)
{
int ret;

_thread_enter_cancellation_point();
ret = _poll(fds, nfds, timeout);
ret = __sys_poll(fds, nfds, timeout);
_thread_leave_cancellation_point();

return ret;
@@ -70,8 +70,8 @@ static int _pq_active = 0;
} while (0)
#define _PQ_ASSERT_PROTECTED(msg) \
PTHREAD_ASSERT((_thread_kern_in_sched != 0) || \
((_get_curthread())->sig_defer_count > 0) ||\
(_sig_in_handler != 0), msg);
((_get_curthread())->sig_defer_count > 0), \
msg);

#else

@@ -49,7 +49,6 @@
/*
* Include files.
*/
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <sys/queue.h>
@@ -61,50 +60,6 @@
#include <ucontext.h>
#include <pthread_np.h>

/*
* Define machine dependent macros to get and set the stack pointer
* from the supported contexts. Also define a macro to set the return
* address in a jmp_buf context.
*
* XXX - These need to be moved into architecture dependent support files.
*/
#if defined(__i386__)
#define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2]))
#define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2]))
#define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_esp))
#define SET_STACK_JB(jb, stk) (jb)[0]._jb[2] = (int)(stk)
#define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[2] = (int)(stk)
#define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_esp = (int)(stk)
#define FP_SAVE_UC(ucp) do { \
char *fdata; \
fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \
__asm__("fnsave %0": :"m"(*fdata)); \
} while (0)
#define FP_RESTORE_UC(ucp) do { \
char *fdata; \
fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \
__asm__("frstor %0": :"m"(*fdata)); \
} while (0)
#define SET_RETURN_ADDR_JB(jb, ra) (jb)[0]._jb[0] = (int)(ra)
#elif defined(__alpha__)
#include <machine/reg.h>
#define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[R_SP + 4]))
#define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[R_SP + 4]))
#define GET_STACK_UC(ucp) ((ucp)->uc_mcontext.mc_regs[R_SP])
#define SET_STACK_JB(jb, stk) (jb)[0]._jb[R_SP + 4] = (long)(stk)
#define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[R_SP + 4] = (long)(stk)
#define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_regs[R_SP] = (unsigned long)(stk)
#define FP_SAVE_UC(ucp)
#define FP_RESTORE_UC(ucp)
#define SET_RETURN_ADDR_JB(jb, ra) do { \
(jb)[0]._jb[2] = (long)(ra); \
(jb)[0]._jb[R_RA + 4] = (long)(ra); \
(jb)[0]._jb[R_T12 + 4] = (long)(ra); \
} while (0)
#else
#error "Don't recognize this architecture!"
#endif

/*
* Kernel fatal error handler macro.
*/
@@ -216,17 +171,6 @@
} while (0)
#endif

/*
* Define the signals to be used for scheduling.
*/
#if defined(_PTHREADS_COMPAT_SCHED)
#define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL
#define _SCHED_SIGNAL SIGVTALRM
#else
#define _ITIMER_SCHED_TIMER ITIMER_PROF
#define _SCHED_SIGNAL SIGPROF
#endif

/*
* Priority queues.
*
@@ -487,20 +431,10 @@ struct pthread_rwlock {
*/
enum pthread_state {
PS_RUNNING,
PS_SIGTHREAD,
PS_MUTEX_WAIT,
PS_COND_WAIT,
PS_FDLR_WAIT,
PS_FDLW_WAIT,
PS_FDR_WAIT,
PS_FDW_WAIT,
PS_FILE_WAIT,
PS_POLL_WAIT,
PS_SELECT_WAIT,
PS_SLEEP_WAIT,
PS_WAIT_WAIT,
PS_SIGSUSPEND,
PS_SIGWAIT,
PS_SPINBLOCK,
PS_JOIN,
PS_SUSPENDED,
@@ -517,46 +451,9 @@ enum pthread_state {
#define FD_WRITE 0x2
#define FD_RDWR (FD_READ | FD_WRITE)

/*
* File descriptor table structure.
*/
struct fd_table_entry {
/*
* Lock for accesses to this file descriptor table
* entry. This is passed to _spinlock() to provide atomic
* access to this structure. It does *not* represent the
* state of the lock on the file descriptor.
*/
spinlock_t lock;
TAILQ_HEAD(, pthread) r_queue; /* Read queue. */
TAILQ_HEAD(, pthread) w_queue; /* Write queue. */
struct pthread *r_owner; /* Ptr to thread owning read lock. */
struct pthread *w_owner; /* Ptr to thread owning write lock. */
char *r_fname; /* Ptr to read lock source file name */
int r_lineno; /* Read lock source line number. */
char *w_fname; /* Ptr to write lock source file name */
int w_lineno; /* Write lock source line number. */
int r_lockcount; /* Count for FILE read locks. */
int w_lockcount; /* Count for FILE write locks. */
int flags; /* Flags used in open. */
};

struct pthread_poll_data {
int nfds;
struct pollfd *fds;
};

union pthread_wait_data {
pthread_mutex_t mutex;
pthread_cond_t cond;
const sigset_t *sigwait; /* Waiting on a signal in sigwait */
struct {
short fd; /* Used when thread waiting on fd */
short branch; /* Line number, for debugging. */
char *fname; /* Source file name for debugging.*/
} fd;
FILE *fp;
struct pthread_poll_data *poll_data;
spinlock_t *spinlock;
struct pthread *thread;
};
@@ -567,52 +464,12 @@ union pthread_wait_data {
*/
typedef void (*thread_continuation_t) (void *);

struct pthread_signal_frame;

struct pthread_state_data {
struct pthread_signal_frame *psd_curframe;
sigset_t psd_sigmask;
struct timespec psd_wakeup_time;
union pthread_wait_data psd_wait_data;
enum pthread_state psd_state;
int psd_flags;
int psd_interrupted;
int psd_longjmp_val;
int psd_sigmask_seqno;
int psd_signo;
int psd_sig_defer_count;
/* XXX - What about thread->timeout and/or thread->error? */
};

struct join_status {
struct pthread *thread;
void *ret;
int error;
};

/*
* The frame that is added to the top of a threads stack when setting up
* up the thread to run a signal handler.
*/
struct pthread_signal_frame {
/*
* This stores the threads state before the signal.
*/
struct pthread_state_data saved_state;

/*
* Threads return context; we use only jmp_buf's for now.
*/
union {
jmp_buf jb;
ucontext_t uc;
} ctx;
int signo; /* signal, arg 1 to sighandler */
int sig_has_args; /* use signal args if true */
ucontext_t uc;
siginfo_t siginfo;
};

struct pthread_specific_elem {
const void *data;
int seqno;
@@ -652,19 +509,11 @@ struct pthread {
struct pthread_attr attr;

/*
* Threads return context; we use only jmp_buf's for now.
* Machine context, including signal state.
*/
union {
jmp_buf jb;
ucontext_t uc;
} ctx;
ucontext_t ctx;

/*
* Used for tracking delivery of signal handlers.
*/
struct pthread_signal_frame *curframe;

/*
* Cancelability flags - the lower 2 bits are used by cancel
* definitions in pthread.h
*/
@@ -675,14 +524,6 @@ struct pthread {

thread_continuation_t continuation;

/*
* Current signal mask and pending signals.
*/
sigset_t sigmask;
sigset_t sigpend;
int sigmask_seqno;
int check_pending;

/* Thread state: */
enum pthread_state state;

@@ -700,7 +541,7 @@ struct pthread {

/*
* Time to wake up thread. This is used for sleeping threads and
* for any operation which may time out (such as select).
* for any operation which may time out.
*/
struct timespec wakeup_time;

@@ -752,32 +593,18 @@ struct pthread {
/* Wait data. */
union pthread_wait_data data;

/*
* Allocated for converting select into poll.
*/
struct pthread_poll_data poll_data;

/*
* Set to TRUE if a blocking operation was
* interrupted by a signal:
*/
int interrupted;

/* Signal number when in state PS_SIGWAIT: */
int signo;

/*
* Set to non-zero when this thread has deferred signals.
* We allow for recursive deferral.
*/
int sig_defer_count;

/*
* Set to TRUE if this thread should yield after undeferring
* signals.
*/
int yield_on_sig_undefer;

/* Miscellaneous flags; only set with signals deferred. */
int flags;
#define PTHREAD_FLAGS_PRIVATE 0x0001
@@ -786,7 +613,7 @@ struct pthread {
#define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */
#define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */
#define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */
#define PTHREAD_FLAGS_IN_FDQ 0x0040 /* in fd lock queue using qe link */
/* 0x0040 Unused. */
#define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/
#define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */
#define PTHREAD_FLAGS_SUSPENDED 0x0200 /* thread is suspended */
@@ -876,25 +703,6 @@ SCLASS TAILQ_HEAD(, pthread) _thread_list
;
#endif

/*
* Array of kernel pipe file descriptors that are used to ensure that
* no signals are missed in calls to _select.
*/
SCLASS int _thread_kern_pipe[2]
#ifdef GLOBAL_PTHREAD_PRIVATE
= {
-1,
-1
};
#else
;
#endif
SCLASS int volatile _queue_signals
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
;
#endif
SCLASS int _thread_kern_in_sched
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
@@ -902,13 +710,6 @@ SCLASS int _thread_kern_in_sched
;
#endif

SCLASS int _sig_in_handler
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
;
#endif

/* Time of day at last scheduling timer signal: */
SCLASS struct timeval volatile _sched_tod
#ifdef GLOBAL_PTHREAD_PRIVATE

@ -969,42 +770,6 @@ SCLASS struct pthread_cond_attr pthread_condattr_default
|
||||
;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Standard I/O file descriptors need special flag treatment since
|
||||
* setting one to non-blocking does all on *BSD. Sigh. This array
|
||||
* is used to store the initial flag settings.
|
||||
*/
|
||||
SCLASS int _pthread_stdio_flags[3];
|
||||
|
||||
/* File table information: */
|
||||
SCLASS struct fd_table_entry **_thread_fd_table
|
||||
#ifdef GLOBAL_PTHREAD_PRIVATE
|
||||
= NULL;
|
||||
#else
|
||||
;
|
||||
#endif
|
||||
|
||||
/* Table for polling file descriptors: */
|
||||
SCLASS struct pollfd *_thread_pfd_table
|
||||
#ifdef GLOBAL_PTHREAD_PRIVATE
|
||||
= NULL;
|
||||
#else
|
||||
;
|
||||
#endif
|
||||
|
||||
SCLASS const int dtablecount
|
||||
#ifdef GLOBAL_PTHREAD_PRIVATE
|
||||
= 4096/sizeof(struct fd_table_entry);
|
||||
#else
|
||||
;
|
||||
#endif
|
||||
SCLASS int _thread_dtablesize /* Descriptor table size. */
|
||||
#ifdef GLOBAL_PTHREAD_PRIVATE
|
||||
= 0;
|
||||
#else
|
||||
;
|
||||
#endif
|
||||
|
||||
SCLASS int _clock_res_usec /* Clock resolution in usec. */
|
||||
#ifdef GLOBAL_PTHREAD_PRIVATE
|
||||
= CLOCK_RES_USEC;
|
||||
@ -1024,28 +789,6 @@ SCLASS pthread_cond_t _gc_cond
|
||||
#endif
|
||||
;
|
||||
|
||||
/*
|
||||
* Array of signal actions for this process.
|
||||
*/
|
||||
SCLASS struct sigaction _thread_sigact[NSIG];
|
||||
|
||||
/*
|
||||
* Array of counts of dummy handlers for SIG_DFL signals. This is used to
|
||||
* assure that there is always a dummy signal handler installed while there is a
|
||||
* thread sigwait()ing on the corresponding signal.
|
||||
*/
|
||||
SCLASS int _thread_dfl_count[NSIG];
|
||||
|
||||
/*
|
||||
* Pending signals and mask for this process:
|
||||
*/
|
||||
SCLASS sigset_t _process_sigpending;
|
||||
SCLASS sigset_t _process_sigmask
|
||||
#ifdef GLOBAL_PTHREAD_PRIVATE
|
||||
= { {0, 0, 0, 0} }
|
||||
#endif
|
||||
;
|
||||
|
||||
/*
|
||||
* Scheduling queues:
|
||||
*/
|
||||
@ -1064,28 +807,6 @@ SCLASS volatile int _spinblock_count
|
||||
#endif
|
||||
;
|
||||
|
||||
/* Used to maintain pending and active signals: */
|
||||
struct sigstatus {
|
||||
int pending; /* Is this a pending signal? */
|
||||
int blocked; /*
|
||||
* A handler is currently active for
|
||||
* this signal; ignore subsequent
|
||||
* signals until the handler is done.
|
||||
*/
|
||||
int signo; /* arg 1 to signal handler */
|
||||
siginfo_t siginfo; /* arg 2 to signal handler */
|
||||
ucontext_t uc; /* arg 3 to signal handler */
|
||||
};
|
||||
|
||||
SCLASS struct sigstatus _thread_sigq[NSIG];
|
||||
|
||||
/* Indicates that the signal queue needs to be checked. */
|
||||
SCLASS volatile int _sigq_check_reqd
|
||||
#ifdef GLOBAL_PTHREAD_PRIVATE
|
||||
= 0
|
||||
#endif
|
||||
;
|
||||
|
||||
/* Thread switch hook. */
|
||||
SCLASS pthread_switch_routine_t _sched_switch_hook
|
||||
#ifdef GLOBAL_PTHREAD_PRIVATE
|
||||
@ -1096,9 +817,9 @@ SCLASS pthread_switch_routine_t _sched_switch_hook
|
||||
/*
|
||||
* Declare the kernel scheduler jump buffer and stack:
|
||||
*/
|
||||
SCLASS jmp_buf _thread_kern_sched_jb;
|
||||
SCLASS ucontext_t _thread_kern_sched_ctx;
|
||||
|
||||
SCLASS void * _thread_kern_sched_stack
|
||||
SCLASS void * _thread_kern_sched_stack
|
||||
#ifdef GLOBAL_PTHREAD_PRIVATE
|
||||
= NULL
|
||||
#endif
|
||||
@ -1115,16 +836,6 @@ SCLASS int _thread_kern_new_state
|
||||
/* Undefine the storage class specifier: */
|
||||
#undef SCLASS
|
||||
|
||||
#ifdef _LOCK_DEBUG
|
||||
#define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock_debug(_fd, _type, \
|
||||
_ts, __FILE__, __LINE__)
|
||||
#define _FD_UNLOCK(_fd,_type) _thread_fd_unlock_debug(_fd, _type, \
|
||||
__FILE__, __LINE__)
|
||||
#else
|
||||
#define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock(_fd, _type, _ts)
|
||||
#define _FD_UNLOCK(_fd,_type) _thread_fd_unlock(_fd, _type)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Function prototype definitions.
|
||||
*/
|
||||
@ -1133,7 +844,6 @@ char *__ttyname_basic(int);
|
||||
char *__ttyname_r_basic(int, char *, size_t);
|
||||
char *ttyname_r(int, char *, size_t);
|
||||
void _cond_wait_backout(pthread_t);
|
||||
void _fd_lock_backout(pthread_t);
|
||||
int _find_thread(pthread_t);
|
||||
struct pthread *_get_curthread(void);
|
||||
void _set_curthread(struct pthread *);
|
||||
@ -1175,35 +885,18 @@ void _waitq_clearactive(void);
|
||||
#endif
|
||||
void _thread_exit(char *, int, char *);
|
||||
void _thread_exit_cleanup(void);
|
||||
int _thread_fd_getflags(int);
|
||||
int _thread_fd_lock(int, int, struct timespec *);
|
||||
int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno);
|
||||
void _thread_fd_setflags(int, int);
|
||||
int _thread_fd_table_init(int fd);
|
||||
void _thread_fd_unlock(int, int);
|
||||
void _thread_fd_unlock_debug(int, int, char *, int);
|
||||
void _thread_fd_unlock_owned(pthread_t);
|
||||
void *_thread_cleanup(pthread_t);
|
||||
void _thread_cleanupspecific(void);
|
||||
void _thread_dump_info(void);
|
||||
void _thread_init(void);
|
||||
void _thread_kern_sched(ucontext_t *);
|
||||
void _thread_kern_sched(void);
|
||||
void _thread_kern_scheduler(void);
|
||||
void _thread_kern_sched_frame(struct pthread_signal_frame *psf);
|
||||
void _thread_kern_sched_sig(void);
|
||||
void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno);
|
||||
void _thread_kern_sched_state_unlock(enum pthread_state state,
|
||||
spinlock_t *lock, char *fname, int lineno);
|
||||
void _thread_kern_set_timeout(const struct timespec *);
|
||||
void _thread_kern_sig_defer(void);
|
||||
void _thread_kern_sig_undefer(void);
|
||||
void _thread_sig_handler(int, siginfo_t *, ucontext_t *);
|
||||
void _thread_sig_check_pending(struct pthread *pthread);
|
||||
void _thread_sig_handle_pending(void);
|
||||
void _thread_sig_send(struct pthread *pthread, int sig);
|
||||
void _thread_sig_wrapper(void);
|
||||
void _thread_sigframe_restore(struct pthread *thread,
|
||||
struct pthread_signal_frame *psf);
|
||||
void _thread_start(void);
|
||||
void _thread_seterrno(pthread_t, int);
|
||||
pthread_addr_t _thread_gc(pthread_addr_t);
|
||||
@ -1211,13 +904,6 @@ void _thread_enter_cancellation_point(void);
|
||||
void _thread_leave_cancellation_point(void);
|
||||
void _thread_cancellation_point(void);
|
||||
|
||||
/* #include <sys/acl.h> */
|
||||
#ifdef _SYS_ACL_H
|
||||
int __sys___acl_aclcheck_fd(int, acl_type_t, struct acl *);
|
||||
int __sys___acl_delete_fd(int, acl_type_t);
|
||||
int __sys___acl_get_fd(int, acl_type_t, struct acl *);
|
||||
int __sys___acl_set_fd(int, acl_type_t, struct acl *);
|
||||
#endif
|
||||
|
||||
/* #include <sys/aio.h> */
|
||||
#ifdef _SYS_AIO_H_
|
||||
@ -1324,12 +1010,6 @@ ssize_t __sys_read(int, void *, size_t);
|
||||
ssize_t __sys_write(int, const void *, size_t);
|
||||
#endif
|
||||
|
||||
/* #include <setjmp.h> */
|
||||
#ifdef _SETJMP_H_
|
||||
extern void __siglongjmp(sigjmp_buf, int) __dead2;
|
||||
extern void __longjmp(jmp_buf, int) __dead2;
|
||||
extern void ___longjmp(jmp_buf, int) __dead2;
|
||||
#endif
|
||||
__END_DECLS
|
||||
|
||||
#endif /* !_THR_PRIVATE_H */
|
||||
|
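The switch above from a jmp_buf/ucontext_t union to a plain ucontext_t is the heart of the KSE rework: context is now full machine state, signal mask included, rather than a setjmp-style register snapshot. As a rough, self-contained sketch of what ucontext-based switching looks like (the names main_ctx, thr_ctx and thread_a are illustrative only, not part of libpthread):

    #include <stdio.h>
    #include <stdlib.h>
    #include <ucontext.h>

    static ucontext_t main_ctx, thr_ctx;    /* illustrative contexts */
    static char thr_stack[32 * 1024];       /* stack for the second context */

    static void
    thread_a(void)
    {
        printf("in thread_a\n");
        /* Returning here resumes uc_link, i.e. main_ctx. */
    }

    int
    main(void)
    {
        if (getcontext(&thr_ctx) == -1)
            abort();
        thr_ctx.uc_stack.ss_sp = thr_stack;
        thr_ctx.uc_stack.ss_size = sizeof(thr_stack);
        thr_ctx.uc_link = &main_ctx;        /* resume main when thread_a returns */
        makecontext(&thr_ctx, thread_a, 0);

        /* Save the current context and run thread_a, as a scheduler would. */
        if (swapcontext(&main_ctx, &thr_ctx) == -1)
            abort();
        printf("back in main\n");
        return (0);
    }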
@ -42,69 +42,13 @@

__weak_reference(__read, read);

ssize_t
_read(int fd, void *buf, size_t nbytes)
{
    struct pthread *curthread = _get_curthread();
    int ret;
    int type;

    /* POSIX says to do just this: */
    if (nbytes == 0) {
        return (0);
    }

    /* Lock the file descriptor for read: */
    if ((ret = _FD_LOCK(fd, FD_READ, NULL)) == 0) {
        /* Get the read/write mode type: */
        type = _thread_fd_getflags(fd) & O_ACCMODE;

        /* Check if the file is not open for read: */
        if (type != O_RDONLY && type != O_RDWR) {
            /* File is not open for read: */
            errno = EBADF;
            _FD_UNLOCK(fd, FD_READ);
            return (-1);
        }

        /* Perform a non-blocking read syscall: */
        while ((ret = __sys_read(fd, buf, nbytes)) < 0) {
            if ((_thread_fd_getflags(fd) & O_NONBLOCK) == 0 &&
                (errno == EWOULDBLOCK || errno == EAGAIN)) {
                curthread->data.fd.fd = fd;
                _thread_kern_set_timeout(NULL);

                /* Reset the interrupted operation flag: */
                curthread->interrupted = 0;

                _thread_kern_sched_state(PS_FDR_WAIT,
                    __FILE__, __LINE__);

                /*
                 * Check if the operation was
                 * interrupted by a signal
                 */
                if (curthread->interrupted) {
                    errno = EINTR;
                    ret = -1;
                    break;
                }
            } else {
                break;
            }
        }
        _FD_UNLOCK(fd, FD_READ);
    }
    return (ret);
}

ssize_t
__read(int fd, void *buf, size_t nbytes)
{
    ssize_t ret;

    _thread_enter_cancellation_point();
    ret = _read(fd, buf, nbytes);
    ret = __sys_read(fd, buf, nbytes);
    _thread_leave_cancellation_point();

    return ret;
|
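With the userland retry loop gone, each syscall wrapper reduces to a cancellation bracket around the native syscall: the kernel now handles blocking per-thread. A hedged, standalone approximation using only public APIs (read_as_cancellation_point is a hypothetical name; the library itself uses the internal _thread_enter/leave_cancellation_point calls shown above):

    #include <pthread.h>
    #include <unistd.h>

    /*
     * Hypothetical illustration only: bracket a plain syscall with a
     * cancellation window, approximating what the simplified __read
     * wrapper does.
     */
    static ssize_t
    read_as_cancellation_point(int fd, void *buf, size_t nbytes)
    {
        int oldstate;
        ssize_t ret;

        /* Allow cancellation while (possibly) blocked in the kernel... */
        pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
        ret = read(fd, buf, nbytes);
        /* ...then restore the caller's cancel state. */
        pthread_setcancelstate(oldstate, NULL);
        return (ret);
    }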
@ -42,64 +42,13 @@

__weak_reference(__readv, readv);

ssize_t
_readv(int fd, const struct iovec * iov, int iovcnt)
{
    struct pthread *curthread = _get_curthread();
    int ret;
    int type;

    /* Lock the file descriptor for read: */
    if ((ret = _FD_LOCK(fd, FD_READ, NULL)) == 0) {
        /* Get the read/write mode type: */
        type = _thread_fd_getflags(fd) & O_ACCMODE;

        /* Check if the file is not open for read: */
        if (type != O_RDONLY && type != O_RDWR) {
            /* File is not open for read: */
            errno = EBADF;
            _FD_UNLOCK(fd, FD_READ);
            return (-1);
        }

        /* Perform a non-blocking readv syscall: */
        while ((ret = __sys_readv(fd, iov, iovcnt)) < 0) {
            if ((_thread_fd_getflags(fd) & O_NONBLOCK) == 0 &&
                (errno == EWOULDBLOCK || errno == EAGAIN)) {
                curthread->data.fd.fd = fd;
                _thread_kern_set_timeout(NULL);

                /* Reset the interrupted operation flag: */
                curthread->interrupted = 0;

                _thread_kern_sched_state(PS_FDR_WAIT,
                    __FILE__, __LINE__);

                /*
                 * Check if the operation was
                 * interrupted by a signal
                 */
                if (curthread->interrupted) {
                    errno = EINTR;
                    ret = -1;
                    break;
                }
            } else {
                break;
            }
        }
        _FD_UNLOCK(fd, FD_READ);
    }
    return (ret);
}

ssize_t
__readv(int fd, const struct iovec *iov, int iovcnt)
{
    ssize_t ret;

    _thread_enter_cancellation_point();
    ret = _readv(fd, iov, iovcnt);
    ret = __sys_readv(fd, iov, iovcnt);
    _thread_leave_cancellation_point();

    return ret;
|
@ -45,178 +45,6 @@

__weak_reference(__select, select);

int
_select(int numfds, fd_set * readfds, fd_set * writefds, fd_set * exceptfds,
    struct timeval * timeout)
{
    struct pthread *curthread = _get_curthread();
    struct timespec ts;
    int i, ret = 0, f_wait = 1;
    int pfd_index, got_events = 0, fd_count = 0;
    struct pthread_poll_data data;

    if (numfds > _thread_dtablesize) {
        numfds = _thread_dtablesize;
    }
    /* Check if a timeout was specified: */
    if (timeout) {
        if (timeout->tv_sec < 0 ||
            timeout->tv_usec < 0 || timeout->tv_usec >= 1000000) {
            errno = EINVAL;
            return (-1);
        }

        /* Convert the timeval to a timespec: */
        TIMEVAL_TO_TIMESPEC(timeout, &ts);

        /* Set the wake up time: */
        _thread_kern_set_timeout(&ts);
        if (ts.tv_sec == 0 && ts.tv_nsec == 0)
            f_wait = 0;
    } else {
        /* Wait for ever: */
        _thread_kern_set_timeout(NULL);
    }

    /* Count the number of file descriptors to be polled: */
    if (readfds || writefds || exceptfds) {
        for (i = 0; i < numfds; i++) {
            if ((readfds && FD_ISSET(i, readfds)) ||
                (exceptfds && FD_ISSET(i, exceptfds)) ||
                (writefds && FD_ISSET(i, writefds))) {
                fd_count++;
            }
        }
    }

    /*
     * Allocate memory for poll data if it hasn't already been
     * allocated or if previously allocated memory is insufficient.
     */
    if ((curthread->poll_data.fds == NULL) ||
        (curthread->poll_data.nfds < fd_count)) {
        data.fds = (struct pollfd *) realloc(curthread->poll_data.fds,
            sizeof(struct pollfd) * MAX(128, fd_count));
        if (data.fds == NULL) {
            errno = ENOMEM;
            ret = -1;
        }
        else {
            /*
             * Note that the threads poll data always
             * indicates what is allocated, not what is
             * currently being polled.
             */
            curthread->poll_data.fds = data.fds;
            curthread->poll_data.nfds = MAX(128, fd_count);
        }
    }
    if (ret == 0) {
        /* Setup the wait data. */
        data.fds = curthread->poll_data.fds;
        data.nfds = fd_count;

        /*
         * Setup the array of pollfds. Optimize this by
         * running the loop in reverse and stopping when
         * the number of selected file descriptors is reached.
         */
        for (i = numfds - 1, pfd_index = fd_count - 1;
            (i >= 0) && (pfd_index >= 0); i--) {
            data.fds[pfd_index].events = 0;
            if (readfds && FD_ISSET(i, readfds)) {
                data.fds[pfd_index].events = POLLRDNORM;
            }
            if (exceptfds && FD_ISSET(i, exceptfds)) {
                data.fds[pfd_index].events |= POLLRDBAND;
            }
            if (writefds && FD_ISSET(i, writefds)) {
                data.fds[pfd_index].events |= POLLWRNORM;
            }
            if (data.fds[pfd_index].events != 0) {
                /*
                 * Set the file descriptor to be polled and
                 * clear revents in case of a timeout which
                 * leaves fds unchanged:
                 */
                data.fds[pfd_index].fd = i;
                data.fds[pfd_index].revents = 0;
                pfd_index--;
            }
        }
        if (((ret = __sys_poll(data.fds, data.nfds, 0)) == 0) &&
            (f_wait != 0)) {
            curthread->data.poll_data = &data;
            curthread->interrupted = 0;
            _thread_kern_sched_state(PS_SELECT_WAIT, __FILE__, __LINE__);
            if (curthread->interrupted) {
                errno = EINTR;
                data.nfds = 0;
                ret = -1;
            } else
                ret = data.nfds;
        }
    }

    if (ret >= 0) {
        numfds = 0;
        for (i = 0; i < fd_count; i++) {
            /*
             * Check the results of the poll and clear
             * this file descriptor from the fdset if
             * the requested event wasn't ready.
             */

            /*
             * First check for invalid descriptor.
             * If found, set errno and return -1.
             */
            if (data.fds[i].revents & POLLNVAL) {
                errno = EBADF;
                return -1;
            }

            got_events = 0;
            if (readfds != NULL) {
                if (FD_ISSET(data.fds[i].fd, readfds)) {
                    if ((data.fds[i].revents & (POLLIN
                        | POLLRDNORM | POLLERR
                        | POLLHUP | POLLNVAL)) != 0)
                        got_events++;
                    else
                        FD_CLR(data.fds[i].fd, readfds);
                }
            }
            if (writefds != NULL) {
                if (FD_ISSET(data.fds[i].fd, writefds)) {
                    if ((data.fds[i].revents & (POLLOUT
                        | POLLWRNORM | POLLWRBAND | POLLERR
                        | POLLHUP | POLLNVAL)) != 0)
                        got_events++;
                    else
                        FD_CLR(data.fds[i].fd,
                            writefds);
                }
            }
            if (exceptfds != NULL) {
                if (FD_ISSET(data.fds[i].fd, exceptfds)) {
                    if (data.fds[i].revents & (POLLRDBAND |
                        POLLPRI))
                        got_events++;
                    else
                        FD_CLR(data.fds[i].fd,
                            exceptfds);
                }
            }
            if (got_events != 0)
                numfds += got_events;
        }
        ret = numfds;
    }

    return (ret);
}

int
__select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
    struct timeval *timeout)
@ -224,7 +52,7 @@ __select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
    int ret;

    _thread_enter_cancellation_point();
    ret = _select(numfds, readfds, writefds, exceptfds, timeout);
    ret = __sys_select(numfds, readfds, writefds, exceptfds, timeout);
    _thread_leave_cancellation_point();

    return ret;
|
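The deleted _select body carried the whole select-to-poll translation, so it is worth recording how fd_sets mapped onto pollfd events. A small, self-contained helper in the same spirit (fdsets_to_pollfds is a hypothetical name; the event-bit choices mirror the removed code above):

    #include <sys/select.h>
    #include <poll.h>

    /*
     * Hypothetical helper mirroring the removed conversion: build a
     * pollfd array from three fd_sets. Returns the number of entries
     * filled; pfds must have room for numfds entries.
     */
    static int
    fdsets_to_pollfds(int numfds, fd_set *r, fd_set *w, fd_set *e,
        struct pollfd *pfds)
    {
        int i, n = 0;

        for (i = 0; i < numfds; i++) {
            short events = 0;

            if (r != NULL && FD_ISSET(i, r))
                events |= POLLRDNORM;   /* readable */
            if (w != NULL && FD_ISSET(i, w))
                events |= POLLWRNORM;   /* writable */
            if (e != NULL && FD_ISSET(i, e))
                events |= POLLRDBAND;   /* exceptional condition */
            if (events != 0) {
                pfds[n].fd = i;
                pfds[n].events = events;
                pfds[n].revents = 0;    /* clear in case of timeout */
                n++;
            }
        }
        return (n);
    }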
@ -44,63 +44,6 @@ __weak_reference(_pthread_sigmask, pthread_sigmask);
int
_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
    struct pthread *curthread = _get_curthread();
    sigset_t sigset;
    int ret = 0;

    /* Check if the existing signal process mask is to be returned: */
    if (oset != NULL) {
        /* Return the current mask: */
        *oset = curthread->sigmask;
    }
    /* Check if a new signal set was provided by the caller: */
    if (set != NULL) {
        /* Process according to what to do: */
        switch (how) {
        /* Block signals: */
        case SIG_BLOCK:
            /* Add signals to the existing mask: */
            SIGSETOR(curthread->sigmask, *set);
            break;

        /* Unblock signals: */
        case SIG_UNBLOCK:
            /* Clear signals from the existing mask: */
            SIGSETNAND(curthread->sigmask, *set);
            break;

        /* Set the signal process mask: */
        case SIG_SETMASK:
            /* Set the new mask: */
            curthread->sigmask = *set;
            break;

        /* Trap invalid actions: */
        default:
            /* Return an invalid argument: */
            errno = EINVAL;
            ret = -1;
            break;
        }

        /* Increment the sequence number: */
        curthread->sigmask_seqno++;

        /*
         * Check if there are pending signals for the running
         * thread or process that aren't blocked:
         */
        sigset = curthread->sigpend;
        SIGSETOR(sigset, _process_sigpending);
        SIGSETNAND(sigset, curthread->sigmask);
        if (SIGNOTEMPTY(sigset))
            /*
             * Call the kernel scheduler which will safely
             * install a signal frame for the running thread:
             */
            _thread_kern_sched_sig();
    }

    /* Return the completion status: */
    return (ret);
    return (sigprocmask(how, set, oset));
}
|
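Delegating straight to sigprocmask() is only coherent because threads now map one-to-one onto kernel-visible entities, so the kernel's mask is effectively the thread's mask. A minimal sketch of the resulting behaviour, assuming that interim 1:1 model:

    #include <signal.h>
    #include <stdio.h>

    /*
     * Sketch of the new behaviour: block SIGUSR1 through the process
     * mask, as pthread_sigmask now does via sigprocmask.
     */
    int
    main(void)
    {
        sigset_t set, old;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        if (sigprocmask(SIG_BLOCK, &set, &old) == -1) {
            perror("sigprocmask");
            return (1);
        }
        printf("SIGUSR1 %s blocked before\n",
            sigismember(&old, SIGUSR1) ? "was" : "was not");
        return (0);
    }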
@ -40,61 +40,13 @@

__weak_reference(__sigsuspend, sigsuspend);

int
_sigsuspend(const sigset_t * set)
{
    struct pthread *curthread = _get_curthread();
    int ret = -1;
    sigset_t oset, sigset;

    /* Check if a new signal set was provided by the caller: */
    if (set != NULL) {
        /* Save the current signal mask: */
        oset = curthread->sigmask;

        /* Change the caller's mask: */
        curthread->sigmask = *set;

        /*
         * Check if there are pending signals for the running
         * thread or process that aren't blocked:
         */
        sigset = curthread->sigpend;
        SIGSETOR(sigset, _process_sigpending);
        SIGSETNAND(sigset, curthread->sigmask);
        if (SIGNOTEMPTY(sigset)) {
            /*
             * Call the kernel scheduler which will safely
             * install a signal frame for the running thread:
             */
            _thread_kern_sched_sig();
        } else {
            /* Wait for a signal: */
            _thread_kern_sched_state(PS_SIGSUSPEND,
                __FILE__, __LINE__);
        }

        /* Always return an interrupted error: */
        errno = EINTR;

        /* Restore the signal mask: */
        curthread->sigmask = oset;
    } else {
        /* Return an invalid argument error: */
        errno = EINVAL;
    }

    /* Return the completion status: */
    return (ret);
}

int
__sigsuspend(const sigset_t * set)
{
    int ret;

    _thread_enter_cancellation_point();
    ret = _sigsuspend(set);
    ret = __sys_sigsuspend(set);
    _thread_leave_cancellation_point();

    return ret;
|
@ -1,3 +1,4 @@
//depot/projects/kse/lib/libpthread/thread/thr_sigwait.c#1 - branch change 15154 (text+ko)
/*
 * Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
@ -43,132 +44,9 @@ __weak_reference(_sigwait, sigwait);
int
_sigwait(const sigset_t *set, int *sig)
{
    struct pthread *curthread = _get_curthread();
    int ret = 0;
    int i;
    sigset_t tempset, waitset;
    struct sigaction act;

    _thread_enter_cancellation_point();
    /*
     * Specify the thread kernel signal handler.
     */
    act.sa_handler = (void (*) ()) _thread_sig_handler;
    act.sa_flags = SA_RESTART | SA_SIGINFO;
    /* Ensure the signal handler cannot be interrupted by other signals: */
    sigfillset(&act.sa_mask);

    /*
     * Initialize the set of signals that will be waited on:
     * All signals are invalid for waiting.
     */
    waitset = *set;

    /* These signals can't be waited on. */
    sigdelset(&waitset, SIGKILL);
    sigdelset(&waitset, SIGSTOP);
    sigdelset(&waitset, _SCHED_SIGNAL);
    sigdelset(&waitset, SIGCHLD);
    sigdelset(&waitset, SIGINFO);

    /* Check to see if a pending signal is in the wait mask. */
    tempset = curthread->sigpend;
    SIGSETOR(tempset, _process_sigpending);
    SIGSETAND(tempset, waitset);
    if (SIGNOTEMPTY(tempset)) {
        /* Enter a loop to find a pending signal: */
        for (i = 1; i < NSIG; i++) {
            if (sigismember (&tempset, i))
                break;
        }

        /* Clear the pending signal: */
        if (sigismember(&curthread->sigpend,i))
            sigdelset(&curthread->sigpend,i);
        else
            sigdelset(&_process_sigpending,i);

        /* Return the signal number to the caller: */
        *sig = i;

        _thread_leave_cancellation_point();
        return (0);
    }

    /*
     * Access the _thread_dfl_count array under the protection of signal
     * deferral.
     */
    _thread_kern_sig_defer();

    /*
     * Enter a loop to find the signals that are SIG_DFL. For
     * these signals we must install a dummy signal handler in
     * order for the kernel to pass them in to us. POSIX says
     * that the _application_ must explicitly install a dummy
     * handler for signals that are SIG_IGN in order to sigwait
     * on them. Note that SIG_IGN signals are left in the
     * mask because a subsequent sigaction could enable an
     * ignored signal.
     */
    sigemptyset(&tempset);
    for (i = 1; i < NSIG; i++) {
        if (sigismember(&waitset, i) &&
            (_thread_sigact[i - 1].sa_handler == SIG_DFL)) {
            _thread_dfl_count[i]++;
            sigaddset(&tempset, i);
            if (_thread_dfl_count[i] == 1) {
                if (__sys_sigaction(i,&act,NULL) != 0)
                    ret = -1;
            }
        }
    }
    /* Done accessing _thread_dfl_count for now. */
    _thread_kern_sig_undefer();

    if (ret == 0) {
        /*
         * Save the wait signal mask. The wait signal
         * mask is independent of the threads signal mask
         * and requires separate storage.
         */
        curthread->data.sigwait = &waitset;

        /* Wait for a signal: */
        _thread_kern_sched_state(PS_SIGWAIT, __FILE__, __LINE__);

        /* Return the signal number to the caller: */
        *sig = curthread->signo;

        /*
         * Probably unnecessary, but since it's in a union struct
         * we don't know how it could be used in the future.
         */
        curthread->data.sigwait = NULL;
    }

    /*
     * Access the _thread_dfl_count array under the protection of signal
     * deferral.
     */
    _thread_kern_sig_defer();

    /* Restore the sigactions: */
    act.sa_handler = SIG_DFL;
    for (i = 1; i < NSIG; i++) {
        if (sigismember(&tempset, i)) {
            _thread_dfl_count[i]--;
            if ((_thread_sigact[i - 1].sa_handler == SIG_DFL) &&
                (_thread_dfl_count[i] == 0)) {
                if (__sys_sigaction(i,&act,NULL) != 0)
                    ret = -1;
            }
        }
    }
    /* Done accessing _thread_dfl_count. */
    _thread_kern_sig_undefer();

    _thread_leave_cancellation_point();

    /* Return the completion status: */
    return (ret);
    return (EINVAL);
}
|
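The removed fast path scanned for a signal that was already pending and also in the wait set before ever sleeping. A self-contained helper capturing that scan (first_pending is a hypothetical name; NSIG comes from <signal.h>):

    #include <signal.h>

    /*
     * Hypothetical helper echoing the removed pending-signal scan:
     * return the lowest-numbered signal that is both pending in 'pend'
     * and wanted in 'want', or 0 if none is deliverable immediately.
     */
    static int
    first_pending(const sigset_t *pend, const sigset_t *want)
    {
        int i;

        for (i = 1; i < NSIG; i++) {
            if (sigismember(pend, i) && sigismember(want, i))
                return (i);     /* deliverable without sleeping */
        }
        return (0);
    }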
@ -40,35 +40,6 @@

__weak_reference(__wait4, wait4);

pid_t
_wait4(pid_t pid, int *istat, int options, struct rusage * rusage)
{
    struct pthread *curthread = _get_curthread();
    pid_t ret;

    _thread_kern_sig_defer();

    /* Perform a non-blocking wait4 syscall: */
    while ((ret = __sys_wait4(pid, istat, options | WNOHANG, rusage)) == 0 &&
        (options & WNOHANG) == 0) {
        /* Reset the interrupted operation flag: */
        curthread->interrupted = 0;

        /* Schedule the next thread while this one waits: */
        _thread_kern_sched_state(PS_WAIT_WAIT, __FILE__, __LINE__);

        /* Check if this call was interrupted by a signal: */
        if (curthread->interrupted) {
            errno = EINTR;
            ret = -1;
            break;
        }
    }

    _thread_kern_sig_undefer();

    return (ret);
}

pid_t
__wait4(pid_t pid, int *istat, int options, struct rusage *rusage)
{
|
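The deleted _wait4 shows the old strategy in miniature: force WNOHANG so the syscall can never block the whole process, then reschedule between attempts. A hedged userland approximation (polling_wait4 is a hypothetical name; usleep stands in for the library's thread reschedule):

    #include <sys/types.h>
    #include <sys/wait.h>
    #include <sys/resource.h>
    #include <unistd.h>

    /*
     * Sketch of the removed userland strategy: never let wait4(2)
     * block, and yield between attempts instead.
     */
    static pid_t
    polling_wait4(pid_t pid, int *status, int options, struct rusage *ru)
    {
        pid_t ret;

        while ((ret = wait4(pid, status, options | WNOHANG, ru)) == 0 &&
            (options & WNOHANG) == 0)
            usleep(1000);   /* stand-in for a thread reschedule */
        return (ret);
    }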
@ -42,105 +42,13 @@

__weak_reference(__write, write);

ssize_t
_write(int fd, const void *buf, size_t nbytes)
{
    struct pthread *curthread = _get_curthread();
    int blocking;
    int type;
    ssize_t n;
    ssize_t num = 0;
    ssize_t ret;

    /* POSIX says to do just this: */
    if (nbytes == 0)
        return (0);

    /* Lock the file descriptor for write: */
    if ((ret = _FD_LOCK(fd, FD_WRITE, NULL)) == 0) {
        /* Get the read/write mode type: */
        type = _thread_fd_getflags(fd) & O_ACCMODE;

        /* Check if the file is not open for write: */
        if (type != O_WRONLY && type != O_RDWR) {
            /* File is not open for write: */
            errno = EBADF;
            _FD_UNLOCK(fd, FD_WRITE);
            return (-1);
        }

        /* Check if file operations are to block */
        blocking = ((_thread_fd_getflags(fd) & O_NONBLOCK) == 0);

        /*
         * Loop while no error occurs and until the expected number
         * of bytes are written if performing a blocking write:
         */
        while (ret == 0) {
            /* Perform a non-blocking write syscall: */
            n = __sys_write(fd, buf + num, nbytes - num);

            /* Check if one or more bytes were written: */
            if (n > 0)
                /*
                 * Keep a count of the number of bytes
                 * written:
                 */
                num += n;

            /*
             * If performing a blocking write, check if the
             * write would have blocked or if some bytes
             * were written but there are still more to
             * write:
             */
            if (blocking && ((n < 0 && (errno == EWOULDBLOCK ||
                errno == EAGAIN)) || (n >= 0 && num < nbytes))) {
                curthread->data.fd.fd = fd;
                _thread_kern_set_timeout(NULL);

                /* Reset the interrupted operation flag: */
                curthread->interrupted = 0;

                _thread_kern_sched_state(PS_FDW_WAIT,
                    __FILE__, __LINE__);

                /*
                 * Check if the operation was
                 * interrupted by a signal
                 */
                if (curthread->interrupted) {
                    /* Return an error: */
                    ret = -1;
                }

            /*
             * If performing a non-blocking write or if an
             * error occurred, just return whatever the write
             * syscall did:
             */
            } else if (!blocking || n < 0) {
                /* A non-blocking call might return zero: */
                ret = n;
                break;

            /* Check if the write has completed: */
            } else if (num >= nbytes)
                /* Return the number of bytes written: */
                ret = num;
        }
        _FD_UNLOCK(fd, FD_WRITE);
    }
    return (ret);
}

ssize_t
__write(int fd, const void *buf, size_t nbytes)
{
    ssize_t ret;

    _thread_enter_cancellation_point();
    ret = _write(fd, buf, nbytes);
    ret = __sys_write(fd, buf, nbytes);
    _thread_leave_cancellation_point();

    return ret;
|
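The removed _write did short-write accounting so a blocking write appeared atomic to the caller. A compact sketch of that accounting, assuming a blocking descriptor (write_all is a hypothetical name):

    #include <unistd.h>
    #include <errno.h>

    /*
     * Keep issuing write(2) until all bytes are out or a hard error
     * occurs; retries on EINTR, mirroring the spirit of the removed
     * loop. Assumes fd is in blocking mode.
     */
    static ssize_t
    write_all(int fd, const char *buf, size_t nbytes)
    {
        size_t num = 0;
        ssize_t n;

        while (num < nbytes) {
            n = write(fd, buf + num, nbytes - num);
            if (n > 0)
                num += n;               /* count what went out */
            else if (n < 0 && errno != EINTR)
                return (-1);            /* hard error */
        }
        return ((ssize_t)num);
    }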
@ -44,172 +44,13 @@

__weak_reference(__writev, writev);

ssize_t
_writev(int fd, const struct iovec * iov, int iovcnt)
{
    struct pthread *curthread = _get_curthread();
    int blocking;
    int idx = 0;
    int type;
    ssize_t cnt;
    ssize_t n;
    ssize_t num = 0;
    ssize_t ret;
    struct iovec liov[20];
    struct iovec *p_iov = liov;

    /* Check if the array size exceeds to compiled in size: */
    if (iovcnt > (sizeof(liov) / sizeof(struct iovec))) {
        /* Allocate memory for the local array: */
        if ((p_iov = (struct iovec *)
            malloc(iovcnt * sizeof(struct iovec))) == NULL) {
            /* Insufficient memory: */
            errno = ENOMEM;
            return (-1);
        }
    }

    /* Copy the caller's array so that it can be modified locally: */
    memcpy(p_iov,iov,iovcnt * sizeof(struct iovec));

    /* Lock the file descriptor for write: */
    if ((ret = _FD_LOCK(fd, FD_WRITE, NULL)) == 0) {
        /* Get the read/write mode type: */
        type = _thread_fd_getflags(fd) & O_ACCMODE;

        /* Check if the file is not open for write: */
        if (type != O_WRONLY && type != O_RDWR) {
            /* File is not open for write: */
            errno = EBADF;
            _FD_UNLOCK(fd, FD_WRITE);
            return (-1);
        }

        /* Check if file operations are to block */
        blocking = ((_thread_fd_getflags(fd) & O_NONBLOCK) == 0);

        /*
         * Loop while no error occurs and until the expected number
         * of bytes are written if performing a blocking write:
         */
        while (ret == 0) {
            /* Perform a non-blocking write syscall: */
            n = __sys_writev(fd, &p_iov[idx], iovcnt - idx);

            /* Check if one or more bytes were written: */
            if (n > 0) {
                /*
                 * Keep a count of the number of bytes
                 * written:
                 */
                num += n;

                /*
                 * Enter a loop to check if a short write
                 * occurred and move the index to the
                 * array entry where the short write
                 * ended:
                 */
                cnt = n;
                while (cnt > 0 && idx < iovcnt) {
                    /*
                     * If the residual count exceeds
                     * the size of this vector, then
                     * it was completely written:
                     */
                    if (cnt >= p_iov[idx].iov_len)
                        /*
                         * Decrement the residual
                         * count and increment the
                         * index to the next array
                         * entry:
                         */
                        cnt -= p_iov[idx++].iov_len;
                    else {
                        /*
                         * This entry was only
                         * partially written, so
                         * adjust it's length
                         * and base pointer ready
                         * for the next write:
                         */
                        p_iov[idx].iov_len -= cnt;
                        p_iov[idx].iov_base += cnt;
                        cnt = 0;
                    }
                }
            } else if (n == 0) {
                /*
                 * Avoid an infinite loop if the last iov_len is
                 * 0.
                 */
                while (idx < iovcnt && p_iov[idx].iov_len == 0)
                    idx++;

                if (idx == iovcnt) {
                    ret = num;
                    break;
                }
            }

            /*
             * If performing a blocking write, check if the
             * write would have blocked or if some bytes
             * were written but there are still more to
             * write:
             */
            if (blocking && ((n < 0 && (errno == EWOULDBLOCK ||
                errno == EAGAIN)) || (n >= 0 && idx < iovcnt))) {
                curthread->data.fd.fd = fd;
                _thread_kern_set_timeout(NULL);

                /* Reset the interrupted operation flag: */
                curthread->interrupted = 0;

                _thread_kern_sched_state(PS_FDW_WAIT,
                    __FILE__, __LINE__);

                /*
                 * Check if the operation was
                 * interrupted by a signal
                 */
                if (curthread->interrupted) {
                    /* Return an error: */
                    ret = -1;
                }

            /*
             * If performing a non-blocking write or if an
             * error occurred, just return whatever the write
             * syscall did:
             */
            } else if (!blocking || n < 0) {
                /* A non-blocking call might return zero: */
                ret = n;
                break;

            /* Check if the write has completed: */
            } else if (idx == iovcnt)
                /* Return the number of bytes written: */
                ret = num;
        }
        _FD_UNLOCK(fd, FD_RDWR);
    }

    /* If memory was allocated for the array, free it: */
    if (p_iov != liov)
        free(p_iov);

    return (ret);
}

ssize_t
__writev(int fd, const struct iovec *iov, int iovcnt)
{
    ssize_t ret;

    _thread_enter_cancellation_point();
    ret = _writev(fd, iov, iovcnt);
    ret = __sys_writev(fd, iov, iovcnt);
    _thread_leave_cancellation_point();

    return ret;
|
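The trickiest part of the removed _writev is resuming after a short write: the iovec array must be advanced past fully written entries and the partially written one shrunk in place. A standalone helper doing just that bookkeeping (advance_iov is a hypothetical name):

    #include <stddef.h>
    #include <sys/uio.h>

    /*
     * Hypothetical helper matching the removed short-write bookkeeping
     * for writev: consume 'cnt' bytes from the iovec array, adjusting
     * the entry where the short write ended. Returns the new starting
     * index for the next writev call.
     */
    static int
    advance_iov(struct iovec *iov, int idx, int iovcnt, size_t cnt)
    {
        while (cnt > 0 && idx < iovcnt) {
            if (cnt >= iov[idx].iov_len) {
                /* This entry was written completely. */
                cnt -= iov[idx].iov_len;
                idx++;
            } else {
                /* Partial write: shrink the entry in place. */
                iov[idx].iov_len -= cnt;
                iov[idx].iov_base = (char *)iov[idx].iov_base + cnt;
                cnt = 0;
            }
        }
        return (idx);
    }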
@ -46,7 +46,7 @@ _sched_yield(void)
    curthread->slice_usec = -1;

    /* Schedule the next thread: */
    _thread_kern_sched(NULL);
    _thread_kern_sched();

    /* Always return no error. */
    return(0);
@ -62,5 +62,5 @@ _pthread_yield(void)
    curthread->slice_usec = -1;

    /* Schedule the next thread: */
    _thread_kern_sched(NULL);
    _thread_kern_sched();
}