/*-
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_compat.h"
#include "opt_kdtrace.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/condvar.h>
#include <sys/event.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/posix4.h>
#include <sys/pioctl.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/sbuf.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>
#include <sys/timers.h>
#include <sys/unistd.h>
#include <sys/wait.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <machine/cpu.h>

#include <security/audit/audit.h>

#define ONSIG	32		/* NSIG for osig* syscalls.  XXX. */
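/*
 * DTrace SDT probes under the "proc" provider for signal delivery:
 * signal_send, signal_clear and signal_discard, with the argument
 * types declared for each probe below.
 */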
SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, kernel, , signal_send);
SDT_PROBE_ARGTYPE(proc, kernel, , signal_send, 0, "struct thread *");
SDT_PROBE_ARGTYPE(proc, kernel, , signal_send, 1, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, , signal_send, 2, "int");
SDT_PROBE_DEFINE(proc, kernel, , signal_clear);
SDT_PROBE_ARGTYPE(proc, kernel, , signal_clear, 0, "int");
SDT_PROBE_ARGTYPE(proc, kernel, , signal_clear, 1, "ksiginfo_t *");
SDT_PROBE_DEFINE(proc, kernel, , signal_discard);
SDT_PROBE_ARGTYPE(proc, kernel, , signal_discard, 0, "struct thread *");
SDT_PROBE_ARGTYPE(proc, kernel, , signal_discard, 1, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, , signal_discard, 2, "int");
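/* Forward declarations for local functions. */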
static int	coredump(struct thread *);
static char	*expand_name(const char *, uid_t, pid_t);
static int	killpg1(struct thread *td, int sig, int pgid, int all);
static int	issignal(struct thread *td, int stop_allowed);
static int	sigprop(int sig);
static void	tdsigwakeup(struct thread *, int, sig_t, int);
static void	sig_suspend_threads(struct thread *, struct proc *, int);
static int	filt_sigattach(struct knote *kn);
static void	filt_sigdetach(struct knote *kn);
static int	filt_signal(struct knote *kn, long hint);
static struct thread *sigtd(struct proc *p, int sig, int prop);
static void	sigqueue_start(void);

static uma_zone_t	ksiginfo_zone = NULL;
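/* kqueue filter operations used to post signal events (EVFILT_SIGNAL). */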
struct filterops sig_filtops = {
        .f_isfd = 0,
        .f_attach = filt_sigattach,
        .f_detach = filt_sigdetach,
        .f_event = filt_signal,
};
int	kern_logsigexit = 1;
SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
    &kern_logsigexit, 0,
    "Log processes quitting on abnormal signals to syslog(3)");

static int	kern_forcesigexit = 1;
SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
    &kern_forcesigexit, 0, "Force trap signal to be handled");
SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW, 0, "POSIX real time signal");

static int	max_pending_per_proc = 128;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
    &max_pending_per_proc, 0, "Max pending signals per proc");

static int	preallocate_siginfo = 1024;
TUNABLE_INT("kern.sigqueue.preallocate", &preallocate_siginfo);
SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RD,
    &preallocate_siginfo, 0, "Preallocated signal memory size");

static int	signal_overflow = 0;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
    &signal_overflow, 0, "Number of signals overflowed");
static int	signal_alloc_fail = 0;
SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
    &signal_alloc_fail, 0, "Number of signals that failed to be allocated");
SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);
/*
 * Policy -- Can ucred cr1 send SIGIO to process cr2?
 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
 * in the right situations.
 */
#define CANSIGIO(cr1, cr2) \
        ((cr1)->cr_uid == 0 || \
            (cr1)->cr_ruid == (cr2)->cr_ruid || \
            (cr1)->cr_uid == (cr2)->cr_ruid || \
            (cr1)->cr_ruid == (cr2)->cr_uid || \
            (cr1)->cr_uid == (cr2)->cr_uid)
int sugid_coredump;
SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
    &sugid_coredump, 0, "Allow setuid and setgid processes to dump core");

static int	do_coredump = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
    &do_coredump, 0, "Enable/Disable coredumps");

static int	set_core_nodump_flag = 0;
SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
    0, "Enable setting the NODUMP flag on coredump files");
/*
 * Signal properties and actions.
 * The array below categorizes the signals and their default actions
 * according to the following properties:
 */
#define SA_KILL      0x01   /* terminates process by default */
#define SA_CORE      0x02   /* ditto and coredumps */
#define SA_STOP      0x04   /* suspend process */
#define SA_TTYSTOP   0x08   /* ditto, from tty */
#define SA_IGNORE    0x10   /* ignore by default */
#define SA_CONT      0x20   /* continue if suspended */
#define SA_CANTMASK  0x40   /* non-maskable, catchable */
#define SA_PROC      0x80   /* deliverable to any thread */
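/* Default properties for each signal, in signal-number order (SIGHUP is 1). */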
static int sigproptbl[NSIG] = {
        SA_KILL|SA_PROC,                /* SIGHUP */
        SA_KILL|SA_PROC,                /* SIGINT */
        SA_KILL|SA_CORE|SA_PROC,        /* SIGQUIT */
        SA_KILL|SA_CORE,                /* SIGILL */
        SA_KILL|SA_CORE,                /* SIGTRAP */
        SA_KILL|SA_CORE,                /* SIGABRT */
        SA_KILL|SA_CORE|SA_PROC,        /* SIGEMT */
        SA_KILL|SA_CORE,                /* SIGFPE */
        SA_KILL|SA_PROC,                /* SIGKILL */
        SA_KILL|SA_CORE,                /* SIGBUS */
        SA_KILL|SA_CORE,                /* SIGSEGV */
        SA_KILL|SA_CORE,                /* SIGSYS */
        SA_KILL|SA_PROC,                /* SIGPIPE */
        SA_KILL|SA_PROC,                /* SIGALRM */
        SA_KILL|SA_PROC,                /* SIGTERM */
        SA_IGNORE|SA_PROC,              /* SIGURG */
        SA_STOP|SA_PROC,                /* SIGSTOP */
        SA_STOP|SA_TTYSTOP|SA_PROC,     /* SIGTSTP */
        SA_IGNORE|SA_CONT|SA_PROC,      /* SIGCONT */
        SA_IGNORE|SA_PROC,              /* SIGCHLD */
        SA_STOP|SA_TTYSTOP|SA_PROC,     /* SIGTTIN */
        SA_STOP|SA_TTYSTOP|SA_PROC,     /* SIGTTOU */
        SA_IGNORE|SA_PROC,              /* SIGIO */
        SA_KILL,                        /* SIGXCPU */
        SA_KILL,                        /* SIGXFSZ */
        SA_KILL|SA_PROC,                /* SIGVTALRM */
        SA_KILL|SA_PROC,                /* SIGPROF */
        SA_IGNORE|SA_PROC,              /* SIGWINCH */
        SA_IGNORE|SA_PROC,              /* SIGINFO */
        SA_KILL|SA_PROC,                /* SIGUSR1 */
        SA_KILL|SA_PROC,                /* SIGUSR2 */
};
static void reschedule_signals(struct proc *p, sigset_t block, int flags);
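/*
 * Set up signal queue support at boot (via the SYSINIT above): create and
 * pre-populate the UMA zone used for ksiginfo_t allocations and publish the
 * POSIX.1b realtime signal configuration values.
 */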
static void
sigqueue_start(void)
{
        ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        uma_prealloc(ksiginfo_zone, preallocate_siginfo);
        p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
        p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
        p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
}
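/*
 * Allocate a zeroed ksiginfo_t from the zone.  If "wait" is zero the
 * allocation will not sleep and may fail; NULL is also returned when the
 * zone has not been created yet.
 */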
ksiginfo_t *
ksiginfo_alloc(int wait)
{
        int flags;

        flags = M_ZERO;
        if (! wait)
                flags |= M_NOWAIT;
        if (ksiginfo_zone != NULL)
                return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, flags));
        return (NULL);
}
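/* Return a ksiginfo_t to the zone. */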
void
ksiginfo_free(ksiginfo_t *ksi)
{
        uma_zfree(ksiginfo_zone, ksi);
}
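/*
 * Free a ksiginfo_t unless it is externally managed (KSI_EXT).
 * Returns 1 if the structure was freed, 0 otherwise.
 */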
static __inline int
ksiginfo_tryfree(ksiginfo_t *ksi)
{
        if (!(ksi->ksi_flags & KSI_EXT)) {
                uma_zfree(ksiginfo_zone, ksi);
                return (1);
        }
        return (0);
}
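/* Initialize a signal queue: empty signal sets, empty list, owning process p. */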
void
sigqueue_init(sigqueue_t *list, struct proc *p)
{
        SIGEMPTYSET(list->sq_signals);
        SIGEMPTYSET(list->sq_kill);
        TAILQ_INIT(&list->sq_list);
        list->sq_proc = p;
        list->sq_flags = SQ_INIT;
}
/*
 * Get a signal's ksiginfo.
 * Return:
 *	0      - signal not found
 *	others - signal number
 */
int
sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
{
        struct proc *p = sq->sq_proc;
        struct ksiginfo *ksi, *next;
        int count = 0;

        KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));

        if (!SIGISMEMBER(sq->sq_signals, signo))
                return (0);

        if (SIGISMEMBER(sq->sq_kill, signo)) {
                count++;
                SIGDELSET(sq->sq_kill, signo);
        }

        TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
                if (ksi->ksi_signo == signo) {
                        if (count == 0) {
                                TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
                                ksi->ksi_sigq = NULL;
                                ksiginfo_copy(ksi, si);
                                if (ksiginfo_tryfree(ksi) && p != NULL)
                                        p->p_pendingcnt--;
                        }
                        if (++count > 1)
                                break;
                }
        }

        if (count <= 1)
                SIGDELSET(sq->sq_signals, signo);
        si->ksi_signo = signo;
        return (signo);
}
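/*
 * Remove a queued ksiginfo from its signal queue.  The pending bit for the
 * signal is cleared only if no other instance of that signal remains queued,
 * either as another ksiginfo or in sq_kill.
 */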
void
sigqueue_take(ksiginfo_t *ksi)
{
        struct ksiginfo *kp;
        struct proc *p;
        sigqueue_t *sq;

        if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
                return;

        p = sq->sq_proc;
        TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
        ksi->ksi_sigq = NULL;
        if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
                p->p_pendingcnt--;

        for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
             kp = TAILQ_NEXT(kp, ksi_link)) {
                if (kp->ksi_signo == ksi->ksi_signo)
                        break;
        }
        if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo))
                SIGDELSET(sq->sq_signals, ksi->ksi_signo);
}
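/*
 * Queue a signal on the given signal queue.  SIGKILL, SIGSTOP and signals
 * delivered without siginfo only set a bit in sq_kill; a ksiginfo marked
 * KSI_INS is linked onto the queue directly instead of being copied.
 */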
int
|
|
|
|
sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
|
|
|
|
{
|
|
|
|
struct proc *p = sq->sq_proc;
|
|
|
|
struct ksiginfo *ksi;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
|
|
|
|
|
2006-03-02 14:06:40 +00:00
|
|
|
if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
|
|
|
|
SIGADDSET(sq->sq_kill, signo);
|
1. Change prototype of trapsignal and sendsig to use ksiginfo_t *, most
changes in MD code are trivial, before this change, trapsignal and
sendsig use discrete parameters, now they uses member fields of
ksiginfo_t structure. For sendsig, this change allows us to pass
POSIX realtime signal value to user code.
2. Remove cpu_thread_siginfo, it is no longer needed because we now always
generate ksiginfo_t data and feed it to libpthread.
3. Add p_sigqueue to proc structure to hold shared signals which were
blocked by all threads in the proc.
4. Add td_sigqueue to thread structure to hold all signals delivered to
thread.
5. i386 and amd64 now return POSIX standard si_code, other arches will
be fixed.
6. In this sigqueue implementation, pending signal set is kept as before,
an extra siginfo list holds additional siginfo_t data for signals.
kernel code uses psignal() still behavior as before, it won't be failed
even under memory pressure, only exception is when deleting a signal,
we should call sigqueue_delete to remove signal from sigqueue but
not SIGDELSET. Current there is no kernel code will deliver a signal
with additional data, so kernel should be as stable as before,
a ksiginfo can carry more information, for example, allow signal to
be delivered but throw away siginfo data if memory is not enough.
SIGKILL and SIGSTOP have fast path in sigqueue_add, because they can
not be caught or masked.
The sigqueue() syscall allows user code to queue a signal to target
process, if resource is unavailable, EAGAIN will be returned as
specification said.
Just before thread exits, signal queue memory will be freed by
sigqueue_flush.
Current, all signals are allowed to be queued, not only realtime signals.
Earlier patch reviewed by: jhb, deischen
Tested on: i386, amd64
2005-10-14 12:43:47 +00:00
|
|
|
goto out_set_bit;
|
2006-03-02 14:06:40 +00:00
|
|
|
}
|
1. Change prototype of trapsignal and sendsig to use ksiginfo_t *, most
changes in MD code are trivial, before this change, trapsignal and
sendsig use discrete parameters, now they uses member fields of
ksiginfo_t structure. For sendsig, this change allows us to pass
POSIX realtime signal value to user code.
2. Remove cpu_thread_siginfo, it is no longer needed because we now always
generate ksiginfo_t data and feed it to libpthread.
3. Add p_sigqueue to proc structure to hold shared signals which were
blocked by all threads in the proc.
4. Add td_sigqueue to thread structure to hold all signals delivered to
thread.
5. i386 and amd64 now return POSIX standard si_code, other arches will
be fixed.
6. In this sigqueue implementation, pending signal set is kept as before,
an extra siginfo list holds additional siginfo_t data for signals.
kernel code uses psignal() still behavior as before, it won't be failed
even under memory pressure, only exception is when deleting a signal,
we should call sigqueue_delete to remove signal from sigqueue but
not SIGDELSET. Current there is no kernel code will deliver a signal
with additional data, so kernel should be as stable as before,
a ksiginfo can carry more information, for example, allow signal to
be delivered but throw away siginfo data if memory is not enough.
SIGKILL and SIGSTOP have fast path in sigqueue_add, because they can
not be caught or masked.
The sigqueue() syscall allows user code to queue a signal to target
process, if resource is unavailable, EAGAIN will be returned as
specification said.
Just before thread exits, signal queue memory will be freed by
sigqueue_flush.
Current, all signals are allowed to be queued, not only realtime signals.
Earlier patch reviewed by: jhb, deischen
Tested on: i386, amd64
2005-10-14 12:43:47 +00:00
|
|
|
|
2005-10-23 04:12:26 +00:00
|
|
|
/* directly insert the ksi, don't copy it */
|
|
|
|
if (si->ksi_flags & KSI_INS) {
|
|
|
|
TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
|
|
|
|
si->ksi_sigq = sq;
|
|
|
|
goto out_set_bit;
|
|
|
|
}
|
|
|
|
|
2006-03-02 14:06:40 +00:00
|
|
|
if (__predict_false(ksiginfo_zone == NULL)) {
|
|
|
|
SIGADDSET(sq->sq_kill, signo);
|
2005-10-14 12:43:47 +00:00
|
|
|
goto out_set_bit;
|
2006-03-02 14:06:40 +00:00
|
|
|
}
|
2005-10-14 12:43:47 +00:00
|
|
|
|
2005-11-08 09:09:26 +00:00
|
|
|
if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
|
2005-10-14 12:43:47 +00:00
|
|
|
signal_overflow++;
|
|
|
|
ret = EAGAIN;
|
2005-11-08 09:09:26 +00:00
|
|
|
} else if ((ksi = ksiginfo_alloc(0)) == NULL) {
|
2005-10-14 12:43:47 +00:00
|
|
|
signal_alloc_fail++;
|
|
|
|
ret = EAGAIN;
|
|
|
|
} else {
|
|
|
|
if (p != NULL)
|
|
|
|
p->p_pendingcnt++;
|
|
|
|
ksiginfo_copy(si, ksi);
|
|
|
|
ksi->ksi_signo = signo;
|
|
|
|
TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
|
2005-10-23 04:12:26 +00:00
|
|
|
ksi->ksi_sigq = sq;
|
2005-10-14 12:43:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if ((si->ksi_flags & KSI_TRAP) != 0) {
|
2006-03-02 14:06:40 +00:00
|
|
|
if (ret != 0)
|
|
|
|
SIGADDSET(sq->sq_kill, signo);
|
2005-10-14 12:43:47 +00:00
|
|
|
ret = 0;
|
|
|
|
goto out_set_bit;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ret != 0)
|
|
|
|
return (ret);
|
|
|
|
|
|
|
|
out_set_bit:
|
|
|
|
SIGADDSET(sq->sq_signals, signo);
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
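/*
 * Release every queued ksiginfo in the queue and clear both pending
 * signal sets; the per-process pending count is dropped for each entry
 * that could be returned to the zone.  Called, among other places, just
 * before a thread exits.
 */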
void
|
|
|
|
sigqueue_flush(sigqueue_t *sq)
|
|
|
|
{
|
|
|
|
struct proc *p = sq->sq_proc;
|
|
|
|
ksiginfo_t *ksi;
|
|
|
|
|
|
|
|
KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
|
|
|
|
|
2005-10-23 04:12:26 +00:00
|
|
|
if (p != NULL)
|
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
|
|
|
|
2005-10-14 12:43:47 +00:00
|
|
|
while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
|
|
|
|
TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
|
2005-10-23 04:12:26 +00:00
|
|
|
ksi->ksi_sigq = NULL;
|
|
|
|
if (ksiginfo_tryfree(ksi) && p != NULL)
|
2005-10-14 12:43:47 +00:00
|
|
|
p->p_pendingcnt--;
|
|
|
|
}
|
|
|
|
|
|
|
|
SIGEMPTYSET(sq->sq_signals);
|
2006-03-02 14:06:40 +00:00
|
|
|
SIGEMPTYSET(sq->sq_kill);
|
2005-10-14 12:43:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
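/*
 * OR the signal number of every queued ksiginfo, plus the non-queued
 * sq_kill bits, into *set.  Used to recompute a queue's pending mask.
 */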
void
|
|
|
|
sigqueue_collect_set(sigqueue_t *sq, sigset_t *set)
|
|
|
|
{
|
|
|
|
ksiginfo_t *ksi;
|
|
|
|
|
|
|
|
KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
|
|
|
|
|
|
|
|
TAILQ_FOREACH(ksi, &sq->sq_list, ksi_link)
|
|
|
|
SIGADDSET(*set, ksi->ksi_signo);
|
2006-03-02 14:06:40 +00:00
|
|
|
SIGSETOR(*set, sq->sq_kill);
|
2005-10-14 12:43:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
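/*
 * Move the signals selected by *setp, both their ksiginfo entries and
 * their pending bits, from the src queue to the dst queue, adjusting
 * each owning process's pending count along the way.
 */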
void
|
|
|
|
sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, sigset_t *setp)
|
|
|
|
{
|
|
|
|
sigset_t tmp, set;
|
|
|
|
struct proc *p1, *p2;
|
|
|
|
ksiginfo_t *ksi, *next;
|
|
|
|
|
|
|
|
KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
|
|
|
|
KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
|
|
|
|
/*
|
|
|
|
* make a copy, this allows setp to point to src or dst
|
|
|
|
* sq_signals without trouble.
|
|
|
|
*/
|
|
|
|
set = *setp;
|
|
|
|
p1 = src->sq_proc;
|
|
|
|
p2 = dst->sq_proc;
|
|
|
|
/* Move siginfo to target list */
|
2006-10-22 00:09:41 +00:00
|
|
|
TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) {
|
2005-10-14 12:43:47 +00:00
|
|
|
if (SIGISMEMBER(set, ksi->ksi_signo)) {
|
|
|
|
TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
|
|
|
|
if (p1 != NULL)
|
|
|
|
p1->p_pendingcnt--;
|
|
|
|
TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
|
2005-10-23 04:12:26 +00:00
|
|
|
ksi->ksi_sigq = dst;
|
2005-10-14 12:43:47 +00:00
|
|
|
if (p2 != NULL)
|
|
|
|
p2->p_pendingcnt++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Move pending bits to target list */
|
2006-03-02 14:06:40 +00:00
|
|
|
tmp = src->sq_kill;
|
|
|
|
SIGSETAND(tmp, set);
|
|
|
|
SIGSETOR(dst->sq_kill, tmp);
|
|
|
|
SIGSETNAND(src->sq_kill, tmp);
|
|
|
|
|
2005-10-14 12:43:47 +00:00
|
|
|
tmp = src->sq_signals;
|
|
|
|
SIGSETAND(tmp, set);
|
|
|
|
SIGSETOR(dst->sq_signals, tmp);
|
|
|
|
SIGSETNAND(src->sq_signals, tmp);
|
|
|
|
|
|
|
|
/* Finally, rescan src queue and set pending bits for it */
|
|
|
|
sigqueue_collect_set(src, &src->sq_signals);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
|
|
|
|
{
|
|
|
|
sigset_t set;
|
|
|
|
|
|
|
|
SIGEMPTYSET(set);
|
|
|
|
SIGADDSET(set, signo);
|
|
|
|
sigqueue_move_set(src, dst, &set);
|
|
|
|
}
|
|
|
|
|
|
|
|
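/*
 * Discard the signals in *set from the queue: free their ksiginfo
 * entries, clear their pending bits, then recompute sq_signals from
 * whatever is still queued.
 */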
void
|
|
|
|
sigqueue_delete_set(sigqueue_t *sq, sigset_t *set)
|
|
|
|
{
|
|
|
|
struct proc *p = sq->sq_proc;
|
|
|
|
ksiginfo_t *ksi, *next;
|
|
|
|
|
|
|
|
KASSERT(sq->sq_flags & SQ_INIT, ("src sigqueue not inited"));
|
|
|
|
|
|
|
|
/* Remove siginfo queue */
|
2006-10-22 00:09:41 +00:00
|
|
|
TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
|
2005-10-14 12:43:47 +00:00
|
|
|
if (SIGISMEMBER(*set, ksi->ksi_signo)) {
|
|
|
|
TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
|
2005-10-23 04:12:26 +00:00
|
|
|
ksi->ksi_sigq = NULL;
|
|
|
|
if (ksiginfo_tryfree(ksi) && p != NULL)
|
2005-10-14 12:43:47 +00:00
|
|
|
p->p_pendingcnt--;
|
|
|
|
}
|
|
|
|
}
|
2006-03-02 14:06:40 +00:00
|
|
|
SIGSETNAND(sq->sq_kill, *set);
|
2005-10-14 12:43:47 +00:00
|
|
|
SIGSETNAND(sq->sq_signals, *set);
|
|
|
|
/* Finally, rescan queue and set pending bits for it */
|
|
|
|
sigqueue_collect_set(sq, &sq->sq_signals);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
sigqueue_delete(sigqueue_t *sq, int signo)
|
|
|
|
{
|
|
|
|
sigset_t set;
|
|
|
|
|
|
|
|
SIGEMPTYSET(set);
|
|
|
|
SIGADDSET(set, signo);
|
|
|
|
sigqueue_delete_set(sq, &set);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Remove a set of signals for a process */
|
|
|
|
void
|
|
|
|
sigqueue_delete_set_proc(struct proc *p, sigset_t *set)
|
|
|
|
{
|
|
|
|
sigqueue_t worklist;
|
|
|
|
struct thread *td0;
|
|
|
|
|
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
|
|
|
|
|
|
|
sigqueue_init(&worklist, NULL);
|
|
|
|
sigqueue_move_set(&p->p_sigqueue, &worklist, set);
|
|
|
|
|
|
|
|
FOREACH_THREAD_IN_PROC(p, td0)
|
|
|
|
sigqueue_move_set(&td0->td_sigqueue, &worklist, set);
|
|
|
|
|
|
|
|
sigqueue_flush(&worklist);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
sigqueue_delete_proc(struct proc *p, int signo)
|
|
|
|
{
|
|
|
|
sigset_t set;
|
|
|
|
|
|
|
|
SIGEMPTYSET(set);
|
|
|
|
SIGADDSET(set, signo);
|
|
|
|
sigqueue_delete_set_proc(p, &set);
|
|
|
|
}
|
|
|
|
|
|
|
|
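/*
 * Discard all stop signals (SIGSTOP, SIGTSTP, SIGTTIN, SIGTTOU) pending
 * on the process and on every one of its threads.
 */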
void
|
|
|
|
sigqueue_delete_stopmask_proc(struct proc *p)
|
|
|
|
{
|
|
|
|
sigset_t set;
|
|
|
|
|
|
|
|
SIGEMPTYSET(set);
|
|
|
|
SIGADDSET(set, SIGSTOP);
|
|
|
|
SIGADDSET(set, SIGTSTP);
|
|
|
|
SIGADDSET(set, SIGTTIN);
|
|
|
|
SIGADDSET(set, SIGTTOU);
|
|
|
|
sigqueue_delete_set_proc(p, &set);
|
|
|
|
}
|
|
|
|
|
2000-09-17 14:28:33 +00:00
|
|
|
/*
|
|
|
|
* Determine signal that should be delivered to process p, the current
|
|
|
|
* process, 0 if none. If there is a pending stop signal with default
|
|
|
|
* action, the process stops in issignal().
|
|
|
|
*/
|
|
|
|
int
|
2009-07-14 22:52:46 +00:00
|
|
|
cursig(struct thread *td, int stop_allowed)
|
2000-09-17 14:28:33 +00:00
|
|
|
{
|
2003-04-01 09:07:36 +00:00
|
|
|
PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
|
2009-07-14 22:52:46 +00:00
|
|
|
KASSERT(stop_allowed == SIG_STOP_ALLOWED ||
|
|
|
|
stop_allowed == SIG_STOP_NOT_ALLOWED, ("cursig: stop_allowed"));
|
- Merge struct procsig with struct sigacts.
- Move struct sigacts out of the u-area and malloc() it using the
M_SUBPROC malloc bucket.
- Add a small sigacts_*() API for managing sigacts structures: sigacts_alloc(),
sigacts_free(), sigacts_copy(), sigacts_share(), and sigacts_shared().
- Remove the p_sigignore, p_sigacts, and p_sigcatch macros.
- Add a mutex to struct sigacts that protects all the members of the struct.
- Add sigacts locking.
- Remove Giant from nosys(), kill(), killpg(), and kern_sigaction() now
that sigacts is locked.
- Several in-kernel functions such as psignal(), tdsignal(), trapsignal(),
and thread_stopped() are now MP safe.
Reviewed by: arch@
Approved by: re (rwatson)
2003-05-13 20:36:02 +00:00
|
|
|
mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
|
Commit 4/14 of sched_lock decomposition.
- Use thread_lock() rather than sched_lock for per-thread scheduling
synchronization.
- Use the per-process spinlock rather than the sched_lock for per-process
scheduling synchronization.
- Move some common code into thread_suspend_switch() to handle the
mechanics of suspending a thread. The locking here is incredibly
convoluted and should be simplified.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 23:52:24 +00:00
|
|
|
THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
|
2009-07-14 22:52:46 +00:00
|
|
|
return (SIGPENDING(td) ? issignal(td, stop_allowed) : 0);
|
2000-09-17 14:28:33 +00:00
|
|
|
}
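The sigacts commit note above introduces the ps_mtx locking discipline that the assertions in cursig() check and that kern_sigaction() below follows. A minimal hedged sketch of that pattern, assuming a caller that holds no relevant locks (sig_is_ignored is a hypothetical helper, not part of this file):

static int
sig_is_ignored(struct proc *p, int sig)
{
	struct sigacts *ps;
	int ret;

	PROC_LOCK(p);			/* stabilizes p->p_sigacts */
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);		/* ps_mtx protects every sigacts field */
	ret = SIGISMEMBER(ps->ps_sigignore, sig);
	mtx_unlock(&ps->ps_mtx);
	PROC_UNLOCK(p);
	return (ret);
}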
|
|
|
|
|
2002-04-04 17:49:48 +00:00
|
|
|
/*
|
|
|
|
* Arrange for ast() to handle unmasked pending signals on return to user
|
2005-10-14 12:43:47 +00:00
|
|
|
* mode. This must be called whenever a signal is added to td_sigqueue or
|
2003-03-31 22:49:17 +00:00
|
|
|
* unmasked in td_sigmask.
|
2002-04-04 17:49:48 +00:00
|
|
|
*/
|
|
|
|
void
|
2003-03-31 22:49:17 +00:00
|
|
|
signotify(struct thread *td)
|
2002-04-04 17:49:48 +00:00
|
|
|
{
|
2003-03-31 22:49:17 +00:00
|
|
|
struct proc *p;
|
|
|
|
|
|
|
|
p = td->td_proc;
|
2002-04-04 17:49:48 +00:00
|
|
|
|
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
2003-03-31 22:49:17 +00:00
|
|
|
|
2003-04-18 20:59:05 +00:00
|
|
|
if (SIGPENDING(td)) {
|
2007-06-04 23:52:24 +00:00
|
|
|
thread_lock(td);
|
2003-03-31 22:49:17 +00:00
|
|
|
td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
|
2007-06-04 23:52:24 +00:00
|
|
|
thread_unlock(td);
|
2003-04-18 20:59:05 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
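/*
 * Return non-zero if the stack pointer sp lies on the thread's alternate
 * signal stack (or, for COMPAT_43, if SS_ONSTACK is set when no size was
 * given); return 0 when no alternate stack is in use.
 */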
int
|
|
|
|
sigonstack(size_t sp)
|
|
|
|
{
|
2004-01-03 02:02:26 +00:00
|
|
|
struct thread *td = curthread;
|
2003-04-18 20:59:05 +00:00
|
|
|
|
2004-01-03 02:02:26 +00:00
|
|
|
return ((td->td_pflags & TDP_ALTSTACK) ?
|
2004-06-11 11:16:26 +00:00
|
|
|
#if defined(COMPAT_43)
|
2004-01-03 02:02:26 +00:00
|
|
|
((td->td_sigstk.ss_size == 0) ?
|
|
|
|
(td->td_sigstk.ss_flags & SS_ONSTACK) :
|
|
|
|
((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size))
|
2003-04-18 20:59:05 +00:00
|
|
|
#else
|
2004-01-03 02:02:26 +00:00
|
|
|
((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size)
|
2003-04-18 20:59:05 +00:00
|
|
|
#endif
|
|
|
|
: 0);
|
2002-04-04 17:49:48 +00:00
|
|
|
}
|
|
|
|
|
1999-10-12 13:14:18 +00:00
|
|
|
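/*
 * Return the property flags recorded for sig in sigproptbl (e.g. the
 * SA_IGNORE bit tested in kern_sigaction() below), or 0 if sig is out
 * of range.
 */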
static __inline int
|
|
|
|
sigprop(int sig)
|
1999-09-29 15:03:48 +00:00
|
|
|
{
|
1999-10-12 13:14:18 +00:00
|
|
|
|
1999-09-29 15:03:48 +00:00
|
|
|
if (sig > 0 && sig < NSIG)
|
|
|
|
return (sigproptbl[_SIG_IDX(sig)]);
|
1999-10-12 13:14:18 +00:00
|
|
|
return (0);
|
1999-09-29 15:03:48 +00:00
|
|
|
}
|
|
|
|
|
2003-03-31 22:49:17 +00:00
|
|
|
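/*
 * Return the number of the lowest signal present in *set, or 0 if the
 * set is empty (analogous to ffs(3) over the whole sigset).
 */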
int
|
1999-10-12 13:14:18 +00:00
|
|
|
sig_ffs(sigset_t *set)
|
1999-09-29 15:03:48 +00:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
1999-10-12 13:14:18 +00:00
|
|
|
for (i = 0; i < _SIG_WORDS; i++)
|
1999-09-29 15:03:48 +00:00
|
|
|
if (set->__bits[i])
|
|
|
|
return (ffs(set->__bits[i]) + (i * 32));
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2002-09-01 20:37:28 +00:00
|
|
|
* kern_sigaction
|
1999-09-29 15:03:48 +00:00
|
|
|
* sigaction
|
2002-10-25 19:10:58 +00:00
|
|
|
* freebsd4_sigaction
|
1999-09-29 15:03:48 +00:00
|
|
|
* osigaction
|
|
|
|
*/
|
2002-09-01 20:37:28 +00:00
|
|
|
int
|
2002-10-25 19:10:58 +00:00
|
|
|
kern_sigaction(td, sig, act, oact, flags)
|
2002-09-01 20:37:28 +00:00
|
|
|
struct thread *td;
|
1999-09-29 15:03:48 +00:00
|
|
|
register int sig;
|
|
|
|
struct sigaction *act, *oact;
|
2002-10-25 19:10:58 +00:00
|
|
|
int flags;
|
1999-09-29 15:03:48 +00:00
|
|
|
{
|
2003-05-13 20:36:02 +00:00
|
|
|
struct sigacts *ps;
|
2002-09-01 20:37:28 +00:00
|
|
|
struct proc *p = td->td_proc;
|
1999-09-29 15:03:48 +00:00
|
|
|
|
2001-11-02 23:50:00 +00:00
|
|
|
if (!_SIG_VALID(sig))
|
1999-09-29 15:03:48 +00:00
|
|
|
return (EINVAL);
|
|
|
|
|
2001-03-07 02:59:54 +00:00
|
|
|
PROC_LOCK(p);
|
|
|
|
ps = p->p_sigacts;
|
2003-05-13 20:36:02 +00:00
|
|
|
mtx_lock(&ps->ps_mtx);
|
1999-09-29 15:03:48 +00:00
|
|
|
if (oact) {
|
|
|
|
oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
|
|
|
|
oact->sa_flags = 0;
|
|
|
|
if (SIGISMEMBER(ps->ps_sigonstack, sig))
|
|
|
|
oact->sa_flags |= SA_ONSTACK;
|
|
|
|
if (!SIGISMEMBER(ps->ps_sigintr, sig))
|
|
|
|
oact->sa_flags |= SA_RESTART;
|
|
|
|
if (SIGISMEMBER(ps->ps_sigreset, sig))
|
|
|
|
oact->sa_flags |= SA_RESETHAND;
|
|
|
|
if (SIGISMEMBER(ps->ps_signodefer, sig))
|
|
|
|
oact->sa_flags |= SA_NODEFER;
|
2007-12-18 20:39:13 +00:00
|
|
|
if (SIGISMEMBER(ps->ps_siginfo, sig)) {
|
1999-09-29 15:03:48 +00:00
|
|
|
oact->sa_flags |= SA_SIGINFO;
|
2007-12-18 20:39:13 +00:00
|
|
|
oact->sa_sigaction =
|
|
|
|
(__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)];
|
|
|
|
} else
|
|
|
|
oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
|
2003-05-13 20:36:02 +00:00
|
|
|
if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
|
1999-09-29 15:03:48 +00:00
|
|
|
oact->sa_flags |= SA_NOCLDSTOP;
|
2003-05-13 20:36:02 +00:00
|
|
|
if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
|
1999-09-29 15:03:48 +00:00
|
|
|
oact->sa_flags |= SA_NOCLDWAIT;
|
|
|
|
}
|
|
|
|
if (act) {
|
|
|
|
if ((sig == SIGKILL || sig == SIGSTOP) &&
|
2001-03-07 02:59:54 +00:00
|
|
|
act->sa_handler != SIG_DFL) {
|
2003-05-13 20:36:02 +00:00
|
|
|
mtx_unlock(&ps->ps_mtx);
|
2001-03-07 02:59:54 +00:00
|
|
|
PROC_UNLOCK(p);
|
1999-09-29 15:03:48 +00:00
|
|
|
return (EINVAL);
|
2001-03-07 02:59:54 +00:00
|
|
|
}
|
1999-09-29 15:03:48 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Change setting atomically.
|
|
|
|
*/
|
|
|
|
|
|
|
|
ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
|
|
|
|
SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
|
|
|
|
if (act->sa_flags & SA_SIGINFO) {
|
2001-08-01 20:35:24 +00:00
|
|
|
ps->ps_sigact[_SIG_IDX(sig)] =
|
|
|
|
(__sighandler_t *)act->sa_sigaction;
|
2001-10-07 16:11:37 +00:00
|
|
|
SIGADDSET(ps->ps_siginfo, sig);
|
|
|
|
} else {
|
|
|
|
ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
|
1999-09-29 15:03:48 +00:00
|
|
|
SIGDELSET(ps->ps_siginfo, sig);
|
|
|
|
}
|
|
|
|
if (!(act->sa_flags & SA_RESTART))
|
|
|
|
SIGADDSET(ps->ps_sigintr, sig);
|
|
|
|
else
|
|
|
|
SIGDELSET(ps->ps_sigintr, sig);
|
|
|
|
if (act->sa_flags & SA_ONSTACK)
|
|
|
|
SIGADDSET(ps->ps_sigonstack, sig);
|
|
|
|
else
|
|
|
|
SIGDELSET(ps->ps_sigonstack, sig);
|
|
|
|
if (act->sa_flags & SA_RESETHAND)
|
|
|
|
SIGADDSET(ps->ps_sigreset, sig);
|
|
|
|
else
|
|
|
|
SIGDELSET(ps->ps_sigreset, sig);
|
|
|
|
if (act->sa_flags & SA_NODEFER)
|
|
|
|
SIGADDSET(ps->ps_signodefer, sig);
|
|
|
|
else
|
|
|
|
SIGDELSET(ps->ps_signodefer, sig);
|
|
|
|
if (sig == SIGCHLD) {
|
|
|
|
if (act->sa_flags & SA_NOCLDSTOP)
|
2003-05-13 20:36:02 +00:00
|
|
|
ps->ps_flag |= PS_NOCLDSTOP;
|
1999-09-29 15:03:48 +00:00
|
|
|
else
|
2003-05-13 20:36:02 +00:00
|
|
|
ps->ps_flag &= ~PS_NOCLDSTOP;
|
2002-04-27 22:41:41 +00:00
|
|
|
if (act->sa_flags & SA_NOCLDWAIT) {
|
1999-09-29 15:03:48 +00:00
|
|
|
/*
|
|
|
|
* Paranoia: since SA_NOCLDWAIT is implemented
|
|
|
|
* by reparenting the dying child to PID 1 (and
|
|
|
|
* trust it to reap the zombie), PID 1 itself
|
|
|
|
* is forbidden to set SA_NOCLDWAIT.
|
|
|
|
*/
|
|
|
|
if (p->p_pid == 1)
|
2003-05-13 20:36:02 +00:00
|
|
|
ps->ps_flag &= ~PS_NOCLDWAIT;
|
1999-09-29 15:03:48 +00:00
|
|
|
else
|
2003-05-13 20:36:02 +00:00
|
|
|
ps->ps_flag |= PS_NOCLDWAIT;
|
1999-09-29 15:03:48 +00:00
|
|
|
} else
|
2003-05-13 20:36:02 +00:00
|
|
|
ps->ps_flag &= ~PS_NOCLDWAIT;
|
2002-04-27 22:41:41 +00:00
|
|
|
if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
|
2003-05-13 20:36:02 +00:00
|
|
|
ps->ps_flag |= PS_CLDSIGIGN;
|
2002-04-27 22:41:41 +00:00
|
|
|
else
|
2003-05-13 20:36:02 +00:00
|
|
|
ps->ps_flag &= ~PS_CLDSIGIGN;
|
1999-09-29 15:03:48 +00:00
|
|
|
}
|
|
|
|
/*
|
2003-05-13 20:36:02 +00:00
|
|
|
* Set bit in ps_sigignore for signals that are set to SIG_IGN,
|
1999-09-29 15:03:48 +00:00
|
|
|
* and for signals set to SIG_DFL where the default is to
|
2003-05-13 20:36:02 +00:00
|
|
|
* ignore. However, don't put SIGCONT in ps_sigignore, as we
|
1999-09-29 15:03:48 +00:00
|
|
|
* have to restart the process.
|
|
|
|
*/
|
|
|
|
if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
|
|
|
|
(sigprop(sig) & SA_IGNORE &&
|
|
|
|
ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
|
|
|
|
/* never to be seen again */
|
1. Change the prototypes of trapsignal and sendsig to use ksiginfo_t *. Most
changes in MD code are trivial; before this change, trapsignal and
sendsig used discrete parameters, now they use the member fields of the
ksiginfo_t structure. For sendsig, this change allows us to pass a
POSIX realtime signal value to user code.
2. Remove cpu_thread_siginfo; it is no longer needed because we now always
generate ksiginfo_t data and feed it to libpthread.
3. Add p_sigqueue to the proc structure to hold shared signals which were
blocked by all threads in the proc.
4. Add td_sigqueue to the thread structure to hold all signals delivered to
the thread.
5. i386 and amd64 now return the POSIX standard si_code; other arches will
be fixed.
6. In this sigqueue implementation, the pending signal set is kept as before;
an extra siginfo list holds additional siginfo_t data for signals.
Kernel code using psignal() still behaves as before: it will not fail
even under memory pressure. The only exception is when deleting a signal,
where sigqueue_delete should be called to remove the signal from the
sigqueue rather than SIGDELSET. Currently no kernel code delivers a signal
with additional data, so the kernel should be as stable as before;
a ksiginfo can carry more information, for example allowing a signal to
be delivered but its siginfo data discarded if memory is short.
SIGKILL and SIGSTOP have a fast path in sigqueue_add because they can
not be caught or masked.
The sigqueue() syscall allows user code to queue a signal to a target
process; if resources are unavailable, EAGAIN is returned as the
specification requires.
Just before a thread exits, its signal queue memory is freed by
sigqueue_flush.
Currently all signals are allowed to be queued, not only realtime signals.
Earlier patch reviewed by: jhb, deischen
Tested on: i386, amd64
2005-10-14 12:43:47 +00:00
|
|
|
sigqueue_delete_proc(p, sig);
|
1999-09-29 15:03:48 +00:00
|
|
|
if (sig != SIGCONT)
|
|
|
|
/* easier in psignal */
|
2003-05-13 20:36:02 +00:00
|
|
|
SIGADDSET(ps->ps_sigignore, sig);
|
|
|
|
SIGDELSET(ps->ps_sigcatch, sig);
|
1999-10-11 20:33:17 +00:00
|
|
|
} else {
|
2003-05-13 20:36:02 +00:00
|
|
|
SIGDELSET(ps->ps_sigignore, sig);
|
1999-09-29 15:03:48 +00:00
|
|
|
if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
|
2003-05-13 20:36:02 +00:00
|
|
|
SIGDELSET(ps->ps_sigcatch, sig);
|
1999-09-29 15:03:48 +00:00
|
|
|
else
|
2003-05-13 20:36:02 +00:00
|
|
|
SIGADDSET(ps->ps_sigcatch, sig);
|
1999-09-29 15:03:48 +00:00
|
|
|
}
|
2002-10-25 19:10:58 +00:00
|
|
|
#ifdef COMPAT_FREEBSD4
|
|
|
|
if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
|
|
|
|
ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
|
|
|
|
(flags & KSA_FREEBSD4) == 0)
|
|
|
|
SIGDELSET(ps->ps_freebsd4, sig);
|
|
|
|
else
|
|
|
|
SIGADDSET(ps->ps_freebsd4, sig);
|
|
|
|
#endif
|
2001-08-21 02:32:59 +00:00
|
|
|
#ifdef COMPAT_43
|
1999-10-11 20:33:17 +00:00
|
|
|
if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
|
2002-10-25 19:10:58 +00:00
|
|
|
ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
|
|
|
|
(flags & KSA_OSIGSET) == 0)
|
1999-10-11 20:33:17 +00:00
|
|
|
SIGDELSET(ps->ps_osigset, sig);
|
|
|
|
else
|
|
|
|
SIGADDSET(ps->ps_osigset, sig);
|
2001-08-21 02:32:59 +00:00
|
|
|
#endif
|
1999-09-29 15:03:48 +00:00
|
|
|
}
|
2003-05-13 20:36:02 +00:00
|
|
|
mtx_unlock(&ps->ps_mtx);
|
2001-03-07 02:59:54 +00:00
|
|
|
PROC_UNLOCK(p);
|
1999-09-29 15:03:48 +00:00
|
|
|
return (0);
|
|
|
|
}
|
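The sigqueue commit message above describes signals that carry extra siginfo data and the sigqueue() syscall that can fail with EAGAIN when resources run out. A minimal userland sketch (illustration only, not part of kern_sig.c), assuming a POSIX environment with sigqueue(2), showing a queued value travelling through the kernel queue into an SA_SIGINFO handler:

#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_value;

static void
handler(int sig, siginfo_t *si, void *ctx)
{
	(void)sig;
	(void)ctx;
	got_value = si->si_value.sival_int;	/* value queued by sigqueue(2) */
}

int
main(void)
{
	struct sigaction sa;
	sigset_t block, wait_mask;
	union sigval val;

	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, NULL) == -1)
		err(1, "sigaction");

	/* Keep SIGUSR1 pending until we are ready to take it. */
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &wait_mask);

	val.sival_int = 42;
	if (sigqueue(getpid(), SIGUSR1, val) == -1)	/* EAGAIN if the queue is full */
		err(1, "sigqueue");

	sigsuspend(&wait_mask);		/* atomically unblock SIGUSR1 and wait */
	printf("received queued value %d\n", (int)got_value);
	return (0);
}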
|
|
|
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct sigaction_args {
|
1999-09-29 15:03:48 +00:00
|
|
|
int sig;
|
|
|
|
struct sigaction *act;
|
|
|
|
struct sigaction *oact;
|
1994-05-24 10:09:53 +00:00
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
sigaction(td, uap)
|
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
register struct sigaction_args *uap;
|
|
|
|
{
|
1999-09-29 15:03:48 +00:00
|
|
|
struct sigaction act, oact;
|
|
|
|
register struct sigaction *actp, *oactp;
|
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1999-10-12 13:14:18 +00:00
|
|
|
actp = (uap->act != NULL) ? &act : NULL;
|
|
|
|
oactp = (uap->oact != NULL) ? &oact : NULL;
|
1999-09-29 15:03:48 +00:00
|
|
|
if (actp) {
|
1999-10-12 13:14:18 +00:00
|
|
|
error = copyin(uap->act, actp, sizeof(act));
|
1999-09-29 15:03:48 +00:00
|
|
|
if (error)
|
2003-02-15 09:56:09 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2002-09-01 20:37:28 +00:00
|
|
|
error = kern_sigaction(td, uap->sig, actp, oactp, 0);
|
2003-04-25 20:01:19 +00:00
|
|
|
if (oactp && !error)
|
1999-10-12 13:14:18 +00:00
|
|
|
error = copyout(oactp, uap->oact, sizeof(oact));
|
1999-09-29 15:03:48 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
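sigaction() above is only a thin copyin/copyout wrapper: a NULL act pointer makes kern_sigaction() report the current disposition without changing it. A small userland sketch (illustration only) of that query-only path:

#include <err.h>
#include <signal.h>
#include <stdio.h>

int
main(void)
{
	struct sigaction osa;

	/* act == NULL: kern_sigaction() only reports the old disposition. */
	if (sigaction(SIGTERM, NULL, &osa) == -1)
		err(1, "sigaction");
	if (osa.sa_handler == SIG_DFL)
		printf("SIGTERM: default action\n");
	else if (osa.sa_handler == SIG_IGN)
		printf("SIGTERM: ignored\n");
	else
		printf("SIGTERM: handler installed\n");
	return (0);
}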
|
|
|
|
2002-10-25 19:10:58 +00:00
|
|
|
#ifdef COMPAT_FREEBSD4
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct freebsd4_sigaction_args {
|
|
|
|
int sig;
|
|
|
|
struct sigaction *act;
|
|
|
|
struct sigaction *oact;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
int
|
|
|
|
freebsd4_sigaction(td, uap)
|
|
|
|
struct thread *td;
|
|
|
|
register struct freebsd4_sigaction_args *uap;
|
|
|
|
{
|
|
|
|
struct sigaction act, oact;
|
|
|
|
register struct sigaction *actp, *oactp;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
|
|
|
|
actp = (uap->act != NULL) ? &act : NULL;
|
|
|
|
oactp = (uap->oact != NULL) ? &oact : NULL;
|
|
|
|
if (actp) {
|
|
|
|
error = copyin(uap->act, actp, sizeof(act));
|
|
|
|
if (error)
|
2003-02-15 09:56:09 +00:00
|
|
|
return (error);
|
2002-10-25 19:10:58 +00:00
|
|
|
}
|
|
|
|
error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
|
2003-04-30 19:45:13 +00:00
|
|
|
if (oactp && !error)
|
2002-10-25 19:10:58 +00:00
|
|
|
error = copyout(oactp, uap->oact, sizeof(oact));
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
#endif /* COMPAT_FREEBSD4 */
|
|
|
|
|
2000-08-26 02:27:01 +00:00
|
|
|
#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
|
1999-09-29 15:03:48 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct osigaction_args {
|
|
|
|
int signum;
|
|
|
|
struct osigaction *nsa;
|
|
|
|
struct osigaction *osa;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
osigaction(td, uap)
|
|
|
|
struct thread *td;
|
1999-09-29 15:03:48 +00:00
|
|
|
register struct osigaction_args *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1999-09-29 15:03:48 +00:00
|
|
|
struct osigaction sa;
|
|
|
|
struct sigaction nsa, osa;
|
|
|
|
register struct sigaction *nsap, *osap;
|
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1999-10-12 13:14:18 +00:00
|
|
|
if (uap->signum <= 0 || uap->signum >= ONSIG)
|
|
|
|
return (EINVAL);
|
2001-09-01 18:19:21 +00:00
|
|
|
|
1999-10-12 13:14:18 +00:00
|
|
|
nsap = (uap->nsa != NULL) ? &nsa : NULL;
|
|
|
|
osap = (uap->osa != NULL) ? &osa : NULL;
|
2001-09-01 18:19:21 +00:00
|
|
|
|
1999-09-29 15:03:48 +00:00
|
|
|
if (nsap) {
|
1999-10-12 13:14:18 +00:00
|
|
|
error = copyin(uap->nsa, &sa, sizeof(sa));
|
1999-09-29 15:03:48 +00:00
|
|
|
if (error)
|
2003-02-15 09:56:09 +00:00
|
|
|
return (error);
|
1999-09-29 15:03:48 +00:00
|
|
|
nsap->sa_handler = sa.sa_handler;
|
|
|
|
nsap->sa_flags = sa.sa_flags;
|
|
|
|
OSIG2SIG(sa.sa_mask, nsap->sa_mask);
|
Implement SA_SIGINFO for i386. Thanks to Bruce Evans for much more
than a review, this was a nice puzzle.
This is supposed to be binary and source compatible with older
applications that access the old FreeBSD-style three arguments to a
signal handler.
Except those applications that access hidden signal handler arguments
beyond the documented third one. If you have applications that do,
please let me know so that we can take the opportunity to provide the
functionality they need in a documented manner.
Also except applications that use 'struct sigframe' directly. You need
to recompile gdb and doscmd. `make world` is recommended.
Example program that demonstrates how SA_SIGINFO and old-style FreeBSD
handlers (with their three args) may be used in the same process is at
http://www3.cons.org/tmp/fbsd-siginfo.c
Programs that use the old FreeBSD-style three arguments are easy to
change to SA_SIGINFO (although they don't need to, since the old style
will still work):
Old args to signal handler:
void handler_sn(int sig, int code, struct sigcontext *scp)
New args:
void handler_si(int sig, siginfo_t *si, void *third)
where:
old:code == new:second->si_code
old:scp == &(new:si->si_scp) /* Passed by value! */
The latter is also pointed to by new:third, but accessing via
si->si_scp is preferred because it is type-safe.
FreeBSD implementation notes:
- This is just the framework to make the interface POSIX compatible.
For now, no additional functionality is provided. This is supposed
to happen now, starting with floating point values.
- We don't use 'sigcontext_t.si_value' for now (POSIX meant it for
realtime-related values).
- Documentation will be updated when new functionality is added and
the exact arguments passed are determined. The comments in
sys/signal.h are meant to be useful.
Reviewed by: BDE
1999-07-06 07:13:48 +00:00
|
|
|
}
|
2002-10-25 19:10:58 +00:00
|
|
|
error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
|
1999-09-29 15:03:48 +00:00
|
|
|
if (osap && !error) {
|
|
|
|
sa.sa_handler = osap->sa_handler;
|
|
|
|
sa.sa_flags = osap->sa_flags;
|
|
|
|
SIG2OSIG(osap->sa_mask, sa.sa_mask);
|
1999-10-12 13:14:18 +00:00
|
|
|
error = copyout(&sa, uap->osa, sizeof(sa));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
1999-09-29 15:03:48 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
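The SA_SIGINFO commit message above maps the old FreeBSD three-argument handler onto the POSIX form. A userland sketch (illustration only) of the new-style handler; note that on current systems the third argument is a ucontext_t pointer, whereas the message above describes the original si_scp arrangement:

#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <ucontext.h>
#include <unistd.h>

/*
 * New-style handler: si->si_code carries what the old three-argument
 * FreeBSD handler received as "code"; the third argument points at the
 * interrupted context.
 */
static void
handler_si(int sig, siginfo_t *si, void *third)
{
	ucontext_t *uc = third;

	(void)sig;
	(void)si;
	(void)uc;
	write(STDOUT_FILENO, "caught\n", 7);	/* async-signal-safe */
}

int
main(void)
{
	struct sigaction sa;

	sa.sa_sigaction = handler_si;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, NULL) == -1)
		err(1, "sigaction");
	raise(SIGUSR1);
	return (0);
}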
2002-10-25 19:10:58 +00:00
|
|
|
|
2006-05-12 05:04:46 +00:00
|
|
|
#if !defined(__i386__)
|
2002-10-25 19:10:58 +00:00
|
|
|
/* Avoid replicating the same stub everywhere */
|
|
|
|
int
|
|
|
|
osigreturn(td, uap)
|
|
|
|
struct thread *td;
|
|
|
|
struct osigreturn_args *uap;
|
|
|
|
{
|
|
|
|
|
|
|
|
return (nosys(td, (struct nosys_args *)uap));
|
|
|
|
}
|
|
|
|
#endif
|
2000-08-26 02:27:01 +00:00
|
|
|
#endif /* COMPAT_43 */
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize signal state for process 0;
|
|
|
|
* set to ignore signals that are ignored by default.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
siginit(p)
|
|
|
|
struct proc *p;
|
|
|
|
{
|
|
|
|
register int i;
|
2003-05-13 20:36:02 +00:00
|
|
|
struct sigacts *ps;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2001-03-07 02:59:54 +00:00
|
|
|
PROC_LOCK(p);
|
2003-05-13 20:36:02 +00:00
|
|
|
ps = p->p_sigacts;
|
|
|
|
mtx_lock(&ps->ps_mtx);
|
1999-09-29 15:03:48 +00:00
|
|
|
for (i = 1; i <= NSIG; i++)
|
|
|
|
if (sigprop(i) & SA_IGNORE && i != SIGCONT)
|
2003-05-13 20:36:02 +00:00
|
|
|
SIGADDSET(ps->ps_sigignore, i);
|
|
|
|
mtx_unlock(&ps->ps_mtx);
|
2001-03-07 02:59:54 +00:00
|
|
|
PROC_UNLOCK(p);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reset signals for an exec of the specified process.
|
|
|
|
*/
|
|
|
|
void
|
2004-01-03 02:02:26 +00:00
|
|
|
execsigs(struct proc *p)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2004-01-03 02:02:26 +00:00
|
|
|
struct sigacts *ps;
|
|
|
|
int sig;
|
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Reset caught signals. Held signals remain held
|
2003-03-31 22:49:17 +00:00
|
|
|
* through td_sigmask (unless they were caught,
|
1994-05-24 10:09:53 +00:00
|
|
|
* and are now ignored by default).
|
|
|
|
*/
|
2002-05-02 15:00:14 +00:00
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
2004-01-03 02:02:26 +00:00
|
|
|
td = FIRST_THREAD_IN_PROC(p);
|
2001-03-07 02:59:54 +00:00
|
|
|
ps = p->p_sigacts;
|
2003-05-13 20:36:02 +00:00
|
|
|
mtx_lock(&ps->ps_mtx);
|
|
|
|
while (SIGNOTEMPTY(ps->ps_sigcatch)) {
|
|
|
|
sig = sig_ffs(&ps->ps_sigcatch);
|
|
|
|
SIGDELSET(ps->ps_sigcatch, sig);
|
1999-09-29 15:03:48 +00:00
|
|
|
if (sigprop(sig) & SA_IGNORE) {
|
|
|
|
if (sig != SIGCONT)
|
2003-05-13 20:36:02 +00:00
|
|
|
SIGADDSET(ps->ps_sigignore, sig);
|
2005-10-14 12:43:47 +00:00
|
|
|
sigqueue_delete_proc(p, sig);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
1999-09-29 15:03:48 +00:00
|
|
|
ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Reset stack state to the user stack.
|
|
|
|
* Clear set of signals caught on the signal stack.
|
|
|
|
*/
|
2004-01-03 02:02:26 +00:00
|
|
|
td->td_sigstk.ss_flags = SS_DISABLE;
|
|
|
|
td->td_sigstk.ss_size = 0;
|
|
|
|
td->td_sigstk.ss_sp = 0;
|
|
|
|
td->td_pflags &= ~TDP_ALTSTACK;
|
1999-07-18 13:40:11 +00:00
|
|
|
/*
|
|
|
|
* Reset the "no zombies if child dies" flag, as Solaris does.
|
|
|
|
*/
|
2003-05-13 20:36:02 +00:00
|
|
|
ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
|
2001-06-11 09:15:41 +00:00
|
|
|
if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
|
|
|
|
ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
|
2003-05-13 20:36:02 +00:00
|
|
|
mtx_unlock(&ps->ps_mtx);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
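execsigs() above resets caught signals to SIG_DFL while leaving ignored signals ignored, which is exactly what a process observes across execve(). A userland sketch (illustration only) that re-executes itself to show the difference:

#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void
handler(int sig)
{
	(void)sig;
}

int
main(int argc, char *argv[])
{
	struct sigaction sa;

	if (argc > 1 && strcmp(argv[1], "child") == 0) {
		/* Post-exec image: see what execsigs() left behind. */
		sigaction(SIGUSR1, NULL, &sa);
		printf("SIGUSR1 %s\n",
		    sa.sa_handler == SIG_IGN ? "still ignored" : "reset");
		sigaction(SIGUSR2, NULL, &sa);
		printf("SIGUSR2 %s\n",
		    sa.sa_handler == SIG_DFL ? "back to default" : "unexpected");
		return (0);
	}
	signal(SIGUSR1, SIG_IGN);		/* ignored: survives exec */
	sa.sa_handler = handler;		/* caught: reset to SIG_DFL by exec */
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR2, &sa, NULL) == -1)
		err(1, "sigaction");
	execl(argv[0], argv[0], "child", (char *)NULL);
	err(1, "execl");
}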
|
|
|
|
|
|
|
/*
|
2003-04-18 20:18:44 +00:00
|
|
|
* kern_sigprocmask()
|
2000-04-02 17:52:43 +00:00
|
|
|
*
|
2001-03-07 02:59:54 +00:00
|
|
|
* Manipulate signal mask.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2003-04-18 20:18:44 +00:00
|
|
|
int
|
Currently, when a signal is delivered to the process and there is a thread
not blocking the signal, the signal is placed on that thread's sigqueue. If
the selected thread is in the kernel executing the thr_exit() or sigprocmask()
syscalls, the signal might not be delivered to usermode for an arbitrary
amount of time, and for an exiting thread it is lost.
Put process-directed signals on the process queue unconditionally,
selecting the thread to deliver the signal only when a thread returns
to usermode, since only then can the thread handle delivery of the signal
reliably. For an exiting thread, or a thread that has blocked some signals,
check whether the newly blocked signal is queued for the process, and
try to find a thread to wake up for delivery, in reschedule_signals(). For
an exiting thread, assume that all signals are blocked.
Change cursig() and postsig() to look into both the thread and process
signal queues. When there is a signal that a thread returning to usermode
could consume, the TDF_NEEDSIGCHK flag is not necessarily set now. Do an
unlocked read of p_siglist and p_pendingcnt to check for queued signals.
Note that a thread that has a signal unblocked might now get a spurious
wakeup and EINTR from an interruptible system call, due to the possibility
of being selected by reschedule_signals() while another thread returned
to usermode earlier and removed the signal from the process queue. This
should not cause compliance issues, since the thread has not blocked the
signal and thus should be ready to receive it anyway.
Reported by: Justin Teller <justin.teller gmail com>
Reviewed by: davidxu, jilles
MFC after: 1 month
2009-10-11 16:49:30 +00:00
|
|
|
kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
|
2009-10-27 10:42:24 +00:00
|
|
|
int flags)
|
1999-09-29 15:03:48 +00:00
|
|
|
{
|
2009-10-11 16:49:30 +00:00
|
|
|
sigset_t new_block, oset1;
|
|
|
|
struct proc *p;
|
1999-09-29 15:03:48 +00:00
|
|
|
int error;
|
|
|
|
|
2009-10-11 16:49:30 +00:00
|
|
|
p = td->td_proc;
|
2009-10-27 10:42:24 +00:00
|
|
|
if (!(flags & SIGPROCMASK_PROC_LOCKED))
|
|
|
|
PROC_LOCK(p);
|
1999-09-29 15:03:48 +00:00
|
|
|
if (oset != NULL)
|
2003-03-31 22:49:17 +00:00
|
|
|
*oset = td->td_sigmask;
|
1999-09-29 15:03:48 +00:00
|
|
|
|
|
|
|
error = 0;
|
2009-10-11 16:49:30 +00:00
|
|
|
SIGEMPTYSET(new_block);
|
1999-09-29 15:03:48 +00:00
|
|
|
if (set != NULL) {
|
|
|
|
switch (how) {
|
|
|
|
case SIG_BLOCK:
|
1999-10-11 20:33:17 +00:00
|
|
|
SIG_CANTMASK(*set);
|
2009-10-11 16:49:30 +00:00
|
|
|
oset1 = td->td_sigmask;
|
2003-03-31 22:49:17 +00:00
|
|
|
SIGSETOR(td->td_sigmask, *set);
|
2009-10-11 16:49:30 +00:00
|
|
|
new_block = td->td_sigmask;
|
|
|
|
SIGSETNAND(new_block, oset1);
|
1999-09-29 15:03:48 +00:00
|
|
|
break;
|
|
|
|
case SIG_UNBLOCK:
|
2003-03-31 22:49:17 +00:00
|
|
|
SIGSETNAND(td->td_sigmask, *set);
|
|
|
|
signotify(td);
|
1999-09-29 15:03:48 +00:00
|
|
|
break;
|
|
|
|
case SIG_SETMASK:
|
1999-10-11 20:33:17 +00:00
|
|
|
SIG_CANTMASK(*set);
|
2009-10-11 16:49:30 +00:00
|
|
|
oset1 = td->td_sigmask;
|
2009-10-27 10:42:24 +00:00
|
|
|
if (flags & SIGPROCMASK_OLD)
|
2003-03-31 22:49:17 +00:00
|
|
|
SIGSETLO(td->td_sigmask, *set);
|
1999-10-11 20:33:17 +00:00
|
|
|
else
|
2003-03-31 22:49:17 +00:00
|
|
|
td->td_sigmask = *set;
|
2009-10-11 16:49:30 +00:00
|
|
|
new_block = td->td_sigmask;
|
|
|
|
SIGSETNAND(new_block, oset1);
|
2003-03-31 22:49:17 +00:00
|
|
|
signotify(td);
|
1999-09-29 15:03:48 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2009-10-11 16:49:30 +00:00
|
|
|
|
|
|
|
/*
|
2009-10-12 10:09:48 +00:00
|
|
|
* The new_block set contains signals that were not previously
|
2009-10-11 16:49:30 +00:00
|
|
|
* blocked, but are blocked now.
|
|
|
|
*
|
|
|
|
* In case we block any signal that was not previously blocked
|
|
|
|
* for td, and process has the signal pending, try to schedule
|
|
|
|
* signal delivery to some thread that does not block the signal,
|
|
|
|
* possibly waking it up.
|
|
|
|
*/
|
|
|
|
if (p->p_numthreads != 1)
|
2009-10-30 10:10:39 +00:00
|
|
|
reschedule_signals(p, new_block, flags);
|
2009-10-11 16:49:30 +00:00
|
|
|
|
2009-10-27 10:42:24 +00:00
|
|
|
if (!(flags & SIGPROCMASK_PROC_LOCKED))
|
|
|
|
PROC_UNLOCK(p);
|
1999-09-29 15:03:48 +00:00
|
|
|
return (error);
|
|
|
|
}
|
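The commit message above explains that a process-directed signal is handed to a thread that does not have it blocked, and kern_sigprocmask() reschedules pending signals when a thread blocks more of them. A userland sketch (illustration only; demo-quality synchronization via sleep(), compile with -lpthread) showing the per-thread mask steering delivery:

#include <err.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static pthread_t handled_by;

static void
handler(int sig)
{
	(void)sig;
	handled_by = pthread_self();	/* demo only: record the receiving thread */
}

static void *
worker(void *arg)
{
	sigset_t set;

	/* Block SIGUSR1 in this thread only; the main thread leaves it open. */
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	pthread_sigmask(SIG_BLOCK, &set, NULL);
	sleep(2);
	return (arg);
}

int
main(void)
{
	pthread_t t;

	signal(SIGUSR1, handler);
	if (pthread_create(&t, NULL, worker, NULL) != 0)
		errx(1, "pthread_create");
	sleep(1);			/* crude: let the worker set its mask */
	kill(getpid(), SIGUSR1);	/* process-directed signal */
	pthread_join(t, NULL);
	printf("SIGUSR1 was handled by the %s thread\n",
	    pthread_equal(handled_by, pthread_self()) ? "main" : "worker");
	return (0);
}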
|
|
|
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct sigprocmask_args {
|
|
|
|
int how;
|
1999-09-29 15:03:48 +00:00
|
|
|
const sigset_t *set;
|
|
|
|
sigset_t *oset;
|
1994-05-24 10:09:53 +00:00
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
sigprocmask(td, uap)
|
|
|
|
register struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct sigprocmask_args *uap;
|
|
|
|
{
|
1999-09-29 15:03:48 +00:00
|
|
|
sigset_t set, oset;
|
|
|
|
sigset_t *setp, *osetp;
|
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1999-10-12 13:14:18 +00:00
|
|
|
setp = (uap->set != NULL) ? &set : NULL;
|
|
|
|
osetp = (uap->oset != NULL) ? &oset : NULL;
|
1999-09-29 15:03:48 +00:00
|
|
|
if (setp) {
|
1999-10-12 13:14:18 +00:00
|
|
|
error = copyin(uap->set, setp, sizeof(set));
|
1999-09-29 15:03:48 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
}
|
2003-04-18 20:18:44 +00:00
|
|
|
error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
|
1999-09-29 15:03:48 +00:00
|
|
|
if (osetp && !error) {
|
1999-10-12 13:14:18 +00:00
|
|
|
error = copyout(osetp, uap->oset, sizeof(oset));
|
1999-09-29 15:03:48 +00:00
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
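sigprocmask() above is the user-visible entry point for the SIG_BLOCK, SIG_UNBLOCK, and SIG_SETMASK cases handled in kern_sigprocmask(). A minimal userland sketch (illustration only) of the common block/restore pattern:

#include <err.h>
#include <signal.h>
#include <stdio.h>

int
main(void)
{
	sigset_t block, saved;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);
	/* how == SIG_BLOCK: add SIGINT to the thread's mask. */
	if (sigprocmask(SIG_BLOCK, &block, &saved) == -1)
		err(1, "sigprocmask");
	/* ... critical section: an arriving SIGINT stays pending ... */
	printf("SIGINT blocked\n");
	/* how == SIG_SETMASK: restore the previous mask exactly. */
	if (sigprocmask(SIG_SETMASK, &saved, NULL) == -1)
		err(1, "sigprocmask");
	return (0);
}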
1994-05-24 10:09:53 +00:00
|
|
|
|
2000-08-26 02:27:01 +00:00
|
|
|
#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
|
1999-09-29 15:03:48 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct osigprocmask_args {
|
|
|
|
int how;
|
|
|
|
osigset_t mask;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
osigprocmask(td, uap)
|
|
|
|
register struct thread *td;
|
1999-09-29 15:03:48 +00:00
|
|
|
struct osigprocmask_args *uap;
|
|
|
|
{
|
|
|
|
sigset_t set, oset;
|
|
|
|
int error;
|
1995-05-30 08:16:23 +00:00
|
|
|
|
1999-09-29 15:03:48 +00:00
|
|
|
OSIG2SIG(uap->mask, set);
|
2003-04-18 20:18:44 +00:00
|
|
|
error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
|
2001-09-12 08:38:13 +00:00
|
|
|
SIG2OSIG(oset, td->td_retval[0]);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
2000-08-26 02:27:01 +00:00
|
|
|
#endif /* COMPAT_43 */
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2003-03-31 23:30:41 +00:00
|
|
|
int
|
|
|
|
sigwait(struct thread *td, struct sigwait_args *uap)
|
|
|
|
{
|
2005-10-14 12:43:47 +00:00
|
|
|
ksiginfo_t ksi;
|
2003-03-31 23:30:41 +00:00
|
|
|
sigset_t set;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = copyin(uap->set, &set, sizeof(set));
|
2004-06-07 13:35:02 +00:00
|
|
|
if (error) {
|
|
|
|
td->td_retval[0] = error;
|
|
|
|
return (0);
|
|
|
|
}
|
2003-03-31 23:30:41 +00:00
|
|
|
|
2005-10-14 12:43:47 +00:00
|
|
|
error = kern_sigtimedwait(td, set, &ksi, NULL);
|
2004-06-07 13:35:02 +00:00
|
|
|
if (error) {
|
|
|
|
if (error == ERESTART)
|
|
|
|
return (error);
|
|
|
|
td->td_retval[0] = error;
|
|
|
|
return (0);
|
|
|
|
}
|
2003-03-31 23:30:41 +00:00
|
|
|
|
2005-10-14 12:43:47 +00:00
|
|
|
error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
|
2004-06-07 13:35:02 +00:00
|
|
|
td->td_retval[0] = error;
|
|
|
|
return (0);
|
2003-03-31 23:30:41 +00:00
|
|
|
}
|
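sigwait() above returns its error through td_retval with a zero return, which is why sigwait(3) reports failure via its return value rather than errno. A userland sketch (illustration only) of waiting synchronously for a blocked, pending signal:

#include <err.h>
#include <signal.h>
#include <stdio.h>

int
main(void)
{
	sigset_t set;
	int error, sig;

	sigemptyset(&set);
	sigaddset(&set, SIGTERM);
	/* A signal must be blocked before it can be collected with sigwait(). */
	if (sigprocmask(SIG_BLOCK, &set, NULL) == -1)
		err(1, "sigprocmask");
	raise(SIGTERM);			/* now pending, not delivered */
	error = sigwait(&set, &sig);	/* returns the error number, not -1/errno */
	if (error != 0)
		errx(1, "sigwait: %d", error);
	printf("dequeued signal %d\n", sig);
	return (0);
}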
2007-03-04 22:36:48 +00:00
|
|
|
|
2003-03-31 23:30:41 +00:00
|
|
|
int
|
|
|
|
sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
|
|
|
|
{
|
|
|
|
struct timespec ts;
|
|
|
|
struct timespec *timeout;
|
|
|
|
sigset_t set;
|
2005-10-14 12:43:47 +00:00
|
|
|
ksiginfo_t ksi;
|
2003-03-31 23:30:41 +00:00
|
|
|
int error;
|
|
|
|
|
|
|
|
if (uap->timeout) {
|
|
|
|
error = copyin(uap->timeout, &ts, sizeof(ts));
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
timeout = &ts;
|
|
|
|
} else
|
|
|
|
timeout = NULL;
|
|
|
|
|
|
|
|
error = copyin(uap->set, &set, sizeof(set));
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
2005-10-14 12:43:47 +00:00
|
|
|
error = kern_sigtimedwait(td, set, &ksi, timeout);
|
2003-03-31 23:30:41 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
o Change kse_thr_interrupt to allow sending a signal to a specified thread,
or unblocking a thread in the kernel, and allow the UTS to specify whether
the syscall should be restarted.
o Add the ability for the UTS to monitor signals coming into and being
removed from the process; the flag PS_SIGEVENT is used to indicate the events.
o Add a KMF_WAITSIGEVENT KSE mailbox flag; the UTS calls kse_release with
this flag set to wait for the above signal events.
o For SA-based threads, the kernel masks all signals in the thread's signal
mask and lets the UTS use kse_thr_interrupt to interrupt a thread and
install a signal frame in userland for the thread.
o Add tm_syncsig to the thread mailbox; when a hardware trap occurs, it is
used to deliver the synchronous signal to userland, and an upcall is
scheduled so the UTS can process the synchronous signal for the thread.
Reviewed by: julian (mentor)
2003-06-28 08:29:05 +00:00
|
|
|
|
2003-06-28 08:03:28 +00:00
|
|
|
if (uap->info)
|
2005-10-14 12:43:47 +00:00
|
|
|
error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
|
|
|
|
|
|
|
|
if (error == 0)
|
|
|
|
td->td_retval[0] = ksi.ksi_signo;
|
2003-03-31 23:30:41 +00:00
|
|
|
return (error);
|
|
|
|
}
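A minimal userland sketch (assumed usage, not part of this file) of the
syscall handled above: SIGUSR1 is blocked and then waited for with a
two-second timeout, so the EAGAIN-on-expiry path the kernel code computes
can be observed from user code.

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

int
wait_for_usr1(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { 2, 0 };		/* two seconds */

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* the awaited signal must be blocked */

	if (sigtimedwait(&set, &info, &ts) == -1) {
		if (errno == EAGAIN)		/* timeout expired before a signal arrived */
			printf("timed out\n");
		return (-1);
	}
	printf("signal %d from pid %d\n", info.si_signo, (int)info.si_pid);
	return (0);
}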
|
|
|
|
|
|
|
|
int
|
|
|
|
sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
|
|
|
|
{
|
2005-10-14 12:43:47 +00:00
|
|
|
ksiginfo_t ksi;
|
2003-03-31 23:30:41 +00:00
|
|
|
sigset_t set;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = copyin(uap->set, &set, sizeof(set));
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
2005-10-14 12:43:47 +00:00
|
|
|
error = kern_sigtimedwait(td, set, &ksi, NULL);
|
2003-03-31 23:30:41 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
2003-06-28 08:03:28 +00:00
|
|
|
if (uap->info)
|
2005-10-14 12:43:47 +00:00
|
|
|
error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
|
|
|
|
|
|
|
|
if (error == 0)
|
|
|
|
td->td_retval[0] = ksi.ksi_signo;
|
2003-03-31 23:30:41 +00:00
|
|
|
return (error);
|
|
|
|
}
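A hedged userland sketch (assumed usage, not part of this file, and assuming
the platform provides SIGRTMIN) of the sigqueue()/sigwaitinfo() pair described
in the log message above: a realtime signal carrying an integer value is
queued to the calling process and then collected, and the EAGAIN case from
exhausted queueing resources is checked.

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
queue_value_to_self(int value)
{
	sigset_t set;
	siginfo_t info;
	union sigval sv;

	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	sv.sival_int = value;
	if (sigqueue(getpid(), SIGRTMIN, sv) == -1) {
		if (errno == EAGAIN)		/* queueing resources exhausted */
			perror("sigqueue");
		return (-1);
	}
	if (sigwaitinfo(&set, &info) == -1)
		return (-1);
	printf("signal %d carried %d\n", info.si_signo, info.si_value.sival_int);
	return (0);
}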
|
|
|
|
|
2006-10-05 01:56:11 +00:00
|
|
|
int
|
2005-10-14 12:43:47 +00:00
|
|
|
kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
|
|
|
|
struct timespec *timeout)
|
2003-03-31 23:30:41 +00:00
|
|
|
{
|
2003-07-17 22:52:55 +00:00
|
|
|
struct sigacts *ps;
|
2005-03-02 13:43:51 +00:00
|
|
|
sigset_t savedmask;
|
2003-07-17 22:52:55 +00:00
|
|
|
struct proc *p;
|
2005-02-19 06:05:49 +00:00
|
|
|
int error, sig, hz, i, timevalid = 0;
|
|
|
|
struct timespec rts, ets, ts;
|
|
|
|
struct timeval tv;
|
2003-03-31 23:30:41 +00:00
|
|
|
|
|
|
|
p = td->td_proc;
|
|
|
|
error = 0;
|
|
|
|
sig = 0;
|
2007-06-10 01:43:11 +00:00
|
|
|
ets.tv_sec = 0;
|
|
|
|
ets.tv_nsec = 0;
|
2003-07-17 22:52:55 +00:00
|
|
|
SIG_CANTMASK(waitset);
|
2003-03-31 23:30:41 +00:00
|
|
|
|
|
|
|
PROC_LOCK(p);
|
|
|
|
ps = p->p_sigacts;
|
2003-07-17 22:52:55 +00:00
|
|
|
savedmask = td->td_sigmask;
|
2005-02-19 06:05:49 +00:00
|
|
|
if (timeout) {
|
|
|
|
if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
|
|
|
|
timevalid = 1;
|
|
|
|
getnanouptime(&rts);
|
|
|
|
ets = rts;
|
|
|
|
timespecadd(&ets, timeout);
|
|
|
|
}
|
|
|
|
}
|
2003-07-17 22:52:55 +00:00
|
|
|
|
2006-02-23 09:24:19 +00:00
|
|
|
restart:
|
2003-07-17 22:52:55 +00:00
|
|
|
for (i = 1; i <= _SIG_MAXSIG; ++i) {
|
|
|
|
if (!SIGISMEMBER(waitset, i))
|
|
|
|
continue;
|
2006-02-23 09:24:19 +00:00
|
|
|
if (!SIGISMEMBER(td->td_sigqueue.sq_signals, i)) {
|
|
|
|
if (SIGISMEMBER(p->p_sigqueue.sq_signals, i)) {
|
|
|
|
sigqueue_move(&p->p_sigqueue,
|
|
|
|
&td->td_sigqueue, i);
|
|
|
|
} else
|
|
|
|
continue;
|
2003-07-17 22:52:55 +00:00
|
|
|
}
|
2006-02-23 09:24:19 +00:00
|
|
|
|
|
|
|
SIGFILLSET(td->td_sigmask);
|
|
|
|
SIG_CANTMASK(td->td_sigmask);
|
|
|
|
SIGDELSET(td->td_sigmask, i);
|
|
|
|
mtx_lock(&ps->ps_mtx);
|
2009-07-14 22:52:46 +00:00
|
|
|
sig = cursig(td, SIG_STOP_ALLOWED);
|
2006-02-23 09:24:19 +00:00
|
|
|
mtx_unlock(&ps->ps_mtx);
|
2005-03-02 13:43:51 +00:00
|
|
|
if (sig)
|
2003-07-17 22:52:55 +00:00
|
|
|
goto out;
|
2006-02-23 09:24:19 +00:00
|
|
|
else {
|
|
|
|
/*
|
|
|
|
* Because cursig() may have stopped the current thread,
|
|
|
|
* things may have changed by the time it is resumed,
|
|
|
|
* so rescan any pending signals.
|
|
|
|
*/
|
|
|
|
goto restart;
|
|
|
|
}
|
2003-07-17 22:52:55 +00:00
|
|
|
}
|
2006-02-23 09:24:19 +00:00
|
|
|
|
2003-07-17 22:52:55 +00:00
|
|
|
if (error)
|
2003-03-31 23:30:41 +00:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* POSIX says this must be checked after looking for pending
|
|
|
|
* signals.
|
|
|
|
*/
|
|
|
|
if (timeout) {
|
2005-02-19 06:05:49 +00:00
|
|
|
if (!timevalid) {
|
2003-03-31 23:30:41 +00:00
|
|
|
error = EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
2005-02-19 06:05:49 +00:00
|
|
|
getnanouptime(&rts);
|
|
|
|
if (timespeccmp(&rts, &ets, >=)) {
|
2003-07-17 22:52:55 +00:00
|
|
|
error = EAGAIN;
|
|
|
|
goto out;
|
|
|
|
}
|
2005-02-19 06:05:49 +00:00
|
|
|
ts = ets;
|
|
|
|
timespecsub(&ts, &rts);
|
|
|
|
TIMESPEC_TO_TIMEVAL(&tv, &ts);
|
2003-03-31 23:30:41 +00:00
|
|
|
hz = tvtohz(&tv);
|
|
|
|
} else
|
|
|
|
hz = 0;
|
|
|
|
|
2005-03-02 13:43:51 +00:00
|
|
|
td->td_sigmask = savedmask;
|
|
|
|
SIGSETNAND(td->td_sigmask, waitset);
|
|
|
|
signotify(td);
|
2004-03-03 23:03:18 +00:00
|
|
|
error = msleep(&ps, &p->p_mtx, PPAUSE|PCATCH, "sigwait", hz);
|
2005-02-19 06:05:49 +00:00
|
|
|
if (timeout) {
|
|
|
|
if (error == ERESTART) {
|
|
|
|
/* The timeout cannot be restarted. */
|
|
|
|
error = EINTR;
|
|
|
|
} else if (error == EAGAIN) {
|
|
|
|
/* We will recompute the timeout ourselves. */
|
|
|
|
error = 0;
|
|
|
|
}
|
|
|
|
}
|
2006-02-23 09:24:19 +00:00
|
|
|
goto restart;
|
2003-06-28 08:29:05 +00:00
|
|
|
|
2003-03-31 23:30:41 +00:00
|
|
|
out:
|
2006-02-23 09:24:19 +00:00
|
|
|
td->td_sigmask = savedmask;
|
|
|
|
signotify(td);
|
2003-03-31 23:30:41 +00:00
|
|
|
if (sig) {
|
2005-10-23 04:12:26 +00:00
|
|
|
ksiginfo_init(ksi);
|
2005-10-14 12:43:47 +00:00
|
|
|
sigqueue_get(&td->td_sigqueue, sig, ksi);
|
|
|
|
ksi->ksi_signo = sig;
|
2008-05-24 06:22:16 +00:00
|
|
|
|
|
|
|
SDT_PROBE(proc, kernel, , signal_clear, sig, ksi, 0, 0, 0);
|
|
|
|
|
2005-10-30 02:56:08 +00:00
|
|
|
if (ksi->ksi_code == SI_TIMER)
|
|
|
|
itimer_accept(p, ksi->ksi_timerid, ksi);
|
2003-07-17 22:52:55 +00:00
|
|
|
error = 0;
|
2006-02-23 09:24:19 +00:00
|
|
|
|
2003-03-31 23:30:41 +00:00
|
|
|
#ifdef KTRACE
|
2006-02-23 09:24:19 +00:00
|
|
|
if (KTRPOINT(td, KTR_PSIG)) {
|
|
|
|
sig_t action;
|
|
|
|
|
|
|
|
mtx_lock(&ps->ps_mtx);
|
|
|
|
action = ps->ps_sigact[_SIG_IDX(sig)];
|
|
|
|
mtx_unlock(&ps->ps_mtx);
|
2003-07-17 22:52:55 +00:00
|
|
|
ktrpsig(sig, action, &td->td_sigmask, 0);
|
2006-02-23 09:24:19 +00:00
|
|
|
}
|
2003-03-31 23:30:41 +00:00
|
|
|
#endif
|
2006-03-09 08:31:51 +00:00
|
|
|
if (sig == SIGKILL)
|
2006-02-23 09:24:19 +00:00
|
|
|
sigexit(td, sig);
|
2003-07-17 22:52:55 +00:00
|
|
|
}
|
2003-03-31 23:30:41 +00:00
|
|
|
PROC_UNLOCK(p);
|
|
|
|
return (error);
|
|
|
|
}
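A small userland sketch (assumed helper names, not the kernel code itself) of
the deadline arithmetic kern_sigtimedwait() performs above: the relative
timeout is converted once into an absolute deadline, and the remaining time
is recomputed on every pass so that repeated sleeps never extend the overall
wait.

#include <time.h>

/* Return the time left until an absolute deadline, or zero if it has passed. */
static struct timespec
remaining(struct timespec deadline)
{
	struct timespec now, left = { 0, 0 };

	clock_gettime(CLOCK_MONOTONIC, &now);	/* userland stand-in for getnanouptime() */
	if (now.tv_sec > deadline.tv_sec ||
	    (now.tv_sec == deadline.tv_sec && now.tv_nsec >= deadline.tv_nsec))
		return (left);			/* deadline passed: the caller reports EAGAIN */
	left.tv_sec = deadline.tv_sec - now.tv_sec;
	left.tv_nsec = deadline.tv_nsec - now.tv_nsec;
	if (left.tv_nsec < 0) {			/* borrow, as timespecsub() does */
		left.tv_sec--;
		left.tv_nsec += 1000000000L;
	}
	return (left);
}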
|
|
|
|
|
2005-10-14 12:43:47 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct sigpending_args {
|
|
|
|
sigset_t *set;
|
|
|
|
};
|
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
sigpending(td, uap)
|
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct sigpending_args *uap;
|
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
2005-10-14 12:43:47 +00:00
|
|
|
sigset_t pending;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2001-03-07 02:59:54 +00:00
|
|
|
PROC_LOCK(p);
|
2005-10-14 12:43:47 +00:00
|
|
|
pending = p->p_sigqueue.sq_signals;
|
|
|
|
SIGSETOR(pending, td->td_sigqueue.sq_signals);
|
2001-03-07 02:59:54 +00:00
|
|
|
PROC_UNLOCK(p);
|
2005-10-14 12:43:47 +00:00
|
|
|
return (copyout(&pending, uap->set, sizeof(sigset_t)));
|
1999-09-29 15:03:48 +00:00
|
|
|
}
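A brief userland sketch (assumed usage, not part of this file) showing what
the merged pending set computed above looks like from user code: a blocked
signal raised against the process stays visible in sigpending() until it is
unblocked.

#include <signal.h>
#include <unistd.h>

/* Returns 1 if SIGUSR1 is pending for the caller, 0 if not, -1 on error. */
int
usr1_pending(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);
	kill(getpid(), SIGUSR1);		/* stays queued while blocked */

	if (sigpending(&pending) == -1)
		return (-1);
	return (sigismember(&pending, SIGUSR1));
}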
|
|
|
|
|
2000-08-26 02:27:01 +00:00
|
|
|
#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
|
1999-09-29 15:03:48 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct osigpending_args {
|
|
|
|
int dummy;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
osigpending(td, uap)
|
|
|
|
struct thread *td;
|
1999-09-29 15:03:48 +00:00
|
|
|
struct osigpending_args *uap;
|
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
2005-10-14 12:43:47 +00:00
|
|
|
sigset_t pending;
|
2001-09-12 08:38:13 +00:00
|
|
|
|
2001-03-07 02:59:54 +00:00
|
|
|
PROC_LOCK(p);
|
2005-10-14 12:43:47 +00:00
|
|
|
pending = p->p_sigqueue.sq_signals;
|
|
|
|
SIGSETOR(pending, td->td_sigqueue.sq_signals);
|
2001-03-07 02:59:54 +00:00
|
|
|
PROC_UNLOCK(p);
|
2005-10-14 12:43:47 +00:00
|
|
|
SIG2OSIG(pending, td->td_retval[0]);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
2000-08-26 02:27:01 +00:00
|
|
|
#endif /* COMPAT_43 */
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2004-06-11 11:16:26 +00:00
|
|
|
#if defined(COMPAT_43)
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Generalized interface signal handler, 4.3-compatible.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct osigvec_args {
|
|
|
|
int signum;
|
|
|
|
struct sigvec *nsv;
|
|
|
|
struct sigvec *osv;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
osigvec(td, uap)
|
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
register struct osigvec_args *uap;
|
|
|
|
{
|
|
|
|
struct sigvec vec;
|
1999-09-29 15:03:48 +00:00
|
|
|
struct sigaction nsa, osa;
|
|
|
|
register struct sigaction *nsap, *osap;
|
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1999-10-12 13:14:18 +00:00
|
|
|
if (uap->signum <= 0 || uap->signum >= ONSIG)
|
|
|
|
return (EINVAL);
|
|
|
|
nsap = (uap->nsv != NULL) ? &nsa : NULL;
|
|
|
|
osap = (uap->osv != NULL) ? &osa : NULL;
|
1999-09-29 15:03:48 +00:00
|
|
|
if (nsap) {
|
1999-10-12 13:14:18 +00:00
|
|
|
error = copyin(uap->nsv, &vec, sizeof(vec));
|
1999-09-29 15:03:48 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-09-29 15:03:48 +00:00
|
|
|
nsap->sa_handler = vec.sv_handler;
|
|
|
|
OSIG2SIG(vec.sv_mask, nsap->sa_mask);
|
|
|
|
nsap->sa_flags = vec.sv_flags;
|
|
|
|
nsap->sa_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2003-04-17 22:06:43 +00:00
|
|
|
error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
|
1999-09-29 15:03:48 +00:00
|
|
|
if (osap && !error) {
|
|
|
|
vec.sv_handler = osap->sa_handler;
|
|
|
|
SIG2OSIG(osap->sa_mask, vec.sv_mask);
|
|
|
|
vec.sv_flags = osap->sa_flags;
|
|
|
|
vec.sv_flags &= ~SA_NOCLDWAIT;
|
|
|
|
vec.sv_flags ^= SA_RESTART;
|
1999-10-12 13:14:18 +00:00
|
|
|
error = copyout(&vec, uap->osv, sizeof(vec));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
1999-09-29 15:03:48 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct osigblock_args {
|
|
|
|
int mask;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
osigblock(td, uap)
|
|
|
|
register struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct osigblock_args *uap;
|
|
|
|
{
|
2009-10-27 10:47:58 +00:00
|
|
|
sigset_t set, oset;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1999-09-29 15:03:48 +00:00
|
|
|
OSIG2SIG(uap->mask, set);
|
2009-10-27 10:47:58 +00:00
|
|
|
kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
|
|
|
|
SIG2OSIG(oset, td->td_retval[0]);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct osigsetmask_args {
|
|
|
|
int mask;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
osigsetmask(td, uap)
|
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct osigsetmask_args *uap;
|
|
|
|
{
|
2009-10-27 10:47:58 +00:00
|
|
|
sigset_t set, oset;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1999-09-29 15:03:48 +00:00
|
|
|
OSIG2SIG(uap->mask, set);
|
2009-10-27 10:47:58 +00:00
|
|
|
kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0);
|
|
|
|
SIG2OSIG(oset, td->td_retval[0]);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
2004-06-11 11:16:26 +00:00
|
|
|
#endif /* COMPAT_43 */
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
2007-03-05 13:10:58 +00:00
|
|
|
* Suspend calling thread until signal, providing mask to be set in the
|
|
|
|
* meantime.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct sigsuspend_args {
|
1999-09-29 15:03:48 +00:00
|
|
|
const sigset_t *sigmask;
|
1994-05-24 10:09:53 +00:00
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
sigsuspend(td, uap)
|
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct sigsuspend_args *uap;
|
|
|
|
{
|
1999-09-29 15:03:48 +00:00
|
|
|
sigset_t mask;
|
|
|
|
int error;
|
|
|
|
|
1999-10-12 13:14:18 +00:00
|
|
|
error = copyin(uap->sigmask, &mask, sizeof(mask));
|
1999-09-29 15:03:48 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2002-09-01 20:37:28 +00:00
|
|
|
return (kern_sigsuspend(td, mask));
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
kern_sigsuspend(struct thread *td, sigset_t mask)
|
|
|
|
{
|
|
|
|
struct proc *p = td->td_proc;
|
2009-10-27 10:42:24 +00:00
|
|
|
int has_sig, sig;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
1999-10-11 20:33:17 +00:00
|
|
|
* When returning from sigsuspend, we want
|
1994-05-24 10:09:53 +00:00
|
|
|
* the old mask to be restored after the
|
|
|
|
* signal handler has finished. Thus, we
|
|
|
|
* save it here and mark the sigacts structure
|
|
|
|
* to indicate this.
|
|
|
|
*/
|
2001-03-07 02:59:54 +00:00
|
|
|
PROC_LOCK(p);
|
2009-10-27 10:42:24 +00:00
|
|
|
kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
|
|
|
|
SIGPROCMASK_PROC_LOCKED);
|
2003-06-09 17:38:32 +00:00
|
|
|
td->td_pflags |= TDP_OLDMASK;
|
2009-10-27 10:42:24 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Process signals now. Otherwise, we can get a spurious wakeup
|
|
|
|
* when a signal enters the process queue but is delivered to
|
|
|
|
* another thread; sigsuspend should return only on signal
|
|
|
|
* delivery.
|
|
|
|
*/
|
|
|
|
for (has_sig = 0; !has_sig;) {
|
|
|
|
while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause",
|
|
|
|
0) == 0)
|
|
|
|
/* void */;
|
|
|
|
thread_suspend_check(0);
|
|
|
|
mtx_lock(&p->p_sigacts->ps_mtx);
|
|
|
|
while ((sig = cursig(td, SIG_STOP_ALLOWED)) != 0) {
|
|
|
|
postsig(sig);
|
|
|
|
has_sig = 1;
|
|
|
|
}
|
|
|
|
mtx_unlock(&p->p_sigacts->ps_mtx);
|
|
|
|
}
|
2001-03-07 02:59:54 +00:00
|
|
|
PROC_UNLOCK(p);
|
1999-09-29 15:03:48 +00:00
|
|
|
/* always return EINTR rather than ERESTART... */
|
|
|
|
return (EINTR);
|
|
|
|
}
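A minimal userland sketch (assumed usage, not part of this file) of the
pattern kern_sigsuspend() above supports: block the signal, test the flag,
then atomically swap in the old mask and sleep, relying on the EINTR return
once a handler has run.

#include <signal.h>
#include <stddef.h>

static volatile sig_atomic_t got_sigusr1;

static void
handler(int sig)
{
	got_sigusr1 = 1;
}

void
wait_for_flag(void)
{
	sigset_t block, oldmask;
	struct sigaction sa;

	sa.sa_handler = handler;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &oldmask);

	while (!got_sigusr1)
		sigsuspend(&oldmask);	/* returns -1/EINTR after the handler runs */

	sigprocmask(SIG_SETMASK, &oldmask, NULL);
}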
|
|
|
|
|
2000-08-26 02:27:01 +00:00
|
|
|
#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
|
2004-01-28 06:06:04 +00:00
|
|
|
/*
|
|
|
|
* Compatibility sigsuspend call for old binaries. Note nonstandard calling
|
|
|
|
* convention: libc stub passes mask, not pointer, to save a copyin.
|
|
|
|
*/
|
1999-09-29 15:03:48 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct osigsuspend_args {
|
|
|
|
osigset_t mask;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
osigsuspend(td, uap)
|
|
|
|
struct thread *td;
|
1999-09-29 15:03:48 +00:00
|
|
|
struct osigsuspend_args *uap;
|
|
|
|
{
|
1999-10-11 20:33:17 +00:00
|
|
|
sigset_t mask;
|
1999-09-29 15:03:48 +00:00
|
|
|
|
1999-10-11 20:33:17 +00:00
|
|
|
OSIG2SIG(uap->mask, mask);
|
2009-10-27 10:42:24 +00:00
|
|
|
return (kern_sigsuspend(td, mask));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2000-08-26 02:27:01 +00:00
|
|
|
#endif /* COMPAT_43 */
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2004-06-11 11:16:26 +00:00
|
|
|
#if defined(COMPAT_43)
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct osigstack_args {
|
|
|
|
struct sigstack *nss;
|
|
|
|
struct sigstack *oss;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
osigstack(td, uap)
|
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
register struct osigstack_args *uap;
|
|
|
|
{
|
2003-04-23 18:50:25 +00:00
|
|
|
struct sigstack nss, oss;
|
2001-09-01 18:19:21 +00:00
|
|
|
int error = 0;
|
|
|
|
|
2003-04-23 18:50:25 +00:00
|
|
|
if (uap->nss != NULL) {
|
|
|
|
error = copyin(uap->nss, &nss, sizeof(nss));
|
2000-11-30 05:23:49 +00:00
|
|
|
if (error)
|
2003-04-23 18:50:25 +00:00
|
|
|
return (error);
|
2000-11-30 05:23:49 +00:00
|
|
|
}
|
2004-01-03 02:02:26 +00:00
|
|
|
oss.ss_sp = td->td_sigstk.ss_sp;
|
2003-04-23 18:50:25 +00:00
|
|
|
oss.ss_onstack = sigonstack(cpu_getstack(td));
|
2000-11-30 05:23:49 +00:00
|
|
|
if (uap->nss != NULL) {
|
2004-01-03 02:02:26 +00:00
|
|
|
td->td_sigstk.ss_sp = nss.ss_sp;
|
|
|
|
td->td_sigstk.ss_size = 0;
|
|
|
|
td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
|
|
|
|
td->td_pflags |= TDP_ALTSTACK;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2003-04-23 18:50:25 +00:00
|
|
|
if (uap->oss != NULL)
|
|
|
|
error = copyout(&oss, uap->oss, sizeof(oss));
|
|
|
|
|
2001-09-01 18:19:21 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2004-06-11 11:16:26 +00:00
|
|
|
#endif /* COMPAT_43 */
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct sigaltstack_args {
|
1999-09-29 15:03:48 +00:00
|
|
|
stack_t *ss;
|
|
|
|
stack_t *oss;
|
1994-05-24 10:09:53 +00:00
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
sigaltstack(td, uap)
|
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
register struct sigaltstack_args *uap;
|
2002-09-01 20:37:28 +00:00
|
|
|
{
|
|
|
|
stack_t ss, oss;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
if (uap->ss != NULL) {
|
|
|
|
error = copyin(uap->ss, &ss, sizeof(ss));
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
|
|
|
|
(uap->oss != NULL) ? &oss : NULL);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
if (uap->oss != NULL)
|
|
|
|
error = copyout(&oss, uap->oss, sizeof(stack_t));
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
2001-09-01 18:19:21 +00:00
|
|
|
int oonstack;
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
oonstack = sigonstack(cpu_getstack(td));
|
2000-11-30 05:23:49 +00:00
|
|
|
|
2002-09-01 20:37:28 +00:00
|
|
|
if (oss != NULL) {
|
2004-01-03 02:02:26 +00:00
|
|
|
*oss = td->td_sigstk;
|
|
|
|
oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
|
2000-11-30 05:23:49 +00:00
|
|
|
? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
|
|
|
|
}
|
|
|
|
|
2002-09-01 20:37:28 +00:00
|
|
|
if (ss != NULL) {
|
2004-01-03 02:02:26 +00:00
|
|
|
if (oonstack)
|
2003-04-23 19:49:18 +00:00
|
|
|
return (EPERM);
|
2004-01-03 02:02:26 +00:00
|
|
|
if ((ss->ss_flags & ~SS_DISABLE) != 0)
|
2003-04-23 19:49:18 +00:00
|
|
|
return (EINVAL);
|
2002-09-01 20:37:28 +00:00
|
|
|
if (!(ss->ss_flags & SS_DISABLE)) {
|
2005-10-14 12:43:47 +00:00
|
|
|
if (ss->ss_size < p->p_sysent->sv_minsigstksz)
|
2003-04-23 19:49:18 +00:00
|
|
|
return (ENOMEM);
|
2005-10-14 12:43:47 +00:00
|
|
|
|
2004-01-03 02:02:26 +00:00
|
|
|
td->td_sigstk = *ss;
|
|
|
|
td->td_pflags |= TDP_ALTSTACK;
|
2001-03-07 02:59:54 +00:00
|
|
|
} else {
|
2004-01-03 02:02:26 +00:00
|
|
|
td->td_pflags &= ~TDP_ALTSTACK;
|
2001-03-07 02:59:54 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2003-04-23 19:49:18 +00:00
|
|
|
return (0);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
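A hedged userland sketch (assumed usage, not part of this file) of the
interface implemented above: install an alternate stack and ask for SIGSEGV
to be handled on it. A request smaller than the per-ABI minimum is rejected
with ENOMEM, matching the sv_minsigstksz check in kern_sigaltstack().

#include <signal.h>
#include <stdlib.h>

static void
segv_handler(int sig)
{
	_Exit(1);			/* runs on the alternate stack */
}

int
use_alt_stack(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	if (ss.ss_sp == NULL)
		return (-1);
	ss.ss_size = SIGSTKSZ;		/* anything below the minimum yields ENOMEM */
	ss.ss_flags = 0;
	if (sigaltstack(&ss, NULL) == -1)
		return (-1);

	sa.sa_handler = segv_handler;
	sa.sa_flags = SA_ONSTACK;	/* deliver SIGSEGV on the alternate stack */
	sigemptyset(&sa.sa_mask);
	return (sigaction(SIGSEGV, &sa, NULL));
}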
|
|
|
|
|
1994-10-10 01:00:49 +00:00
|
|
|
/*
|
|
|
|
* Common code for kill process group/broadcast kill.
|
|
|
|
* td is the calling thread.
|
|
|
|
*/
|
2002-09-28 17:15:38 +00:00
|
|
|
static int
|
2005-02-13 17:37:20 +00:00
|
|
|
killpg1(td, sig, pgid, all)
|
2002-04-13 23:33:36 +00:00
|
|
|
register struct thread *td;
|
2005-02-13 17:37:20 +00:00
|
|
|
int sig, pgid, all;
|
1994-10-10 01:00:49 +00:00
|
|
|
{
|
|
|
|
register struct proc *p;
|
|
|
|
struct pgrp *pgrp;
|
|
|
|
int nfound = 0;
|
1995-05-30 08:16:23 +00:00
|
|
|
|
2000-11-22 07:42:04 +00:00
|
|
|
if (all) {
|
1995-05-30 08:16:23 +00:00
|
|
|
/*
|
|
|
|
* broadcast
|
1994-10-10 01:00:49 +00:00
|
|
|
*/
|
2001-03-28 11:52:56 +00:00
|
|
|
sx_slock(&allproc_lock);
|
2007-01-17 14:58:53 +00:00
|
|
|
FOREACH_PROC_IN_SYSTEM(p) {
|
2001-03-07 02:59:54 +00:00
|
|
|
PROC_LOCK(p);
|
2002-04-13 23:33:36 +00:00
|
|
|
if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
|
2006-04-21 19:26:21 +00:00
|
|
|
p == td->td_proc || p->p_state == PRS_NEW) {
|
2001-03-07 02:59:54 +00:00
|
|
|
PROC_UNLOCK(p);
|
|
|
|
continue;
|
|
|
|
}
|
2005-02-13 17:37:20 +00:00
|
|
|
if (p_cansignal(td, p, sig) == 0) {
|
2001-04-24 00:51:53 +00:00
|
|
|
nfound++;
|
|
|
|
if (sig)
|
|
|
|
psignal(p, sig);
|
2001-03-07 02:59:54 +00:00
|
|
|
}
|
2001-04-24 00:51:53 +00:00
|
|
|
PROC_UNLOCK(p);
|
1994-10-10 01:00:49 +00:00
|
|
|
}
|
2001-03-28 11:52:56 +00:00
|
|
|
sx_sunlock(&allproc_lock);
|
2000-11-22 07:42:04 +00:00
|
|
|
} else {
|
2002-04-16 17:11:34 +00:00
|
|
|
sx_slock(&proctree_lock);
|
2002-02-23 11:12:57 +00:00
|
|
|
if (pgid == 0) {
|
1995-05-30 08:16:23 +00:00
|
|
|
/*
|
1994-10-10 01:00:49 +00:00
|
|
|
* zero pgid means send to my process group.
|
|
|
|
*/
|
2002-04-13 23:33:36 +00:00
|
|
|
pgrp = td->td_proc->p_pgrp;
|
2002-02-23 11:12:57 +00:00
|
|
|
PGRP_LOCK(pgrp);
|
|
|
|
} else {
|
1994-10-10 01:00:49 +00:00
|
|
|
pgrp = pgfind(pgid);
|
2002-02-23 11:12:57 +00:00
|
|
|
if (pgrp == NULL) {
|
2002-04-16 17:11:34 +00:00
|
|
|
sx_sunlock(&proctree_lock);
|
1994-10-10 01:00:49 +00:00
|
|
|
return (ESRCH);
|
2002-02-23 11:12:57 +00:00
|
|
|
}
|
1994-10-10 01:00:49 +00:00
|
|
|
}
|
2002-04-16 17:11:34 +00:00
|
|
|
sx_sunlock(&proctree_lock);
|
1999-11-16 10:56:05 +00:00
|
|
|
LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
|
2005-02-13 17:37:20 +00:00
|
|
|
PROC_LOCK(p);
|
2006-04-21 19:26:21 +00:00
|
|
|
if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
|
|
|
|
p->p_state == PRS_NEW) {
|
2001-03-07 02:59:54 +00:00
|
|
|
PROC_UNLOCK(p);
|
|
|
|
continue;
|
|
|
|
}
|
2005-02-13 17:37:20 +00:00
|
|
|
if (p_cansignal(td, p, sig) == 0) {
|
2001-04-24 00:51:53 +00:00
|
|
|
nfound++;
|
|
|
|
if (sig)
|
|
|
|
psignal(p, sig);
|
2001-03-07 02:59:54 +00:00
|
|
|
}
|
2001-04-24 00:51:53 +00:00
|
|
|
PROC_UNLOCK(p);
|
1994-10-10 01:00:49 +00:00
|
|
|
}
|
2002-02-23 11:12:57 +00:00
|
|
|
PGRP_UNLOCK(pgrp);
|
1994-10-10 01:00:49 +00:00
|
|
|
}
|
|
|
|
return (nfound ? 0 : ESRCH);
|
|
|
|
}
|
|
|
|
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct kill_args {
|
|
|
|
int pid;
|
|
|
|
int signum;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
kill(td, uap)
|
|
|
|
register struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
register struct kill_args *uap;
|
|
|
|
{
|
|
|
|
register struct proc *p;
|
- Merge struct procsig with struct sigacts.
- Move struct sigacts out of the u-area and malloc() it using the
M_SUBPROC malloc bucket.
- Add a small sigacts_*() API for managing sigacts structures: sigacts_alloc(),
sigacts_free(), sigacts_copy(), sigacts_share(), and sigacts_shared().
- Remove the p_sigignore, p_sigacts, and p_sigcatch macros.
- Add a mutex to struct sigacts that protects all the members of the struct.
- Add sigacts locking.
- Remove Giant from nosys(), kill(), killpg(), and kern_sigaction() now
that sigacts is locked.
- Several in-kernel functions such as psignal(), tdsignal(), trapsignal(),
and thread_stopped() are now MP safe.
Reviewed by: arch@
Approved by: re (rwatson)
2003-05-13 20:36:02 +00:00
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2009-06-27 13:58:44 +00:00
|
|
|
AUDIT_ARG_SIGNUM(uap->signum);
|
|
|
|
AUDIT_ARG_PID(uap->pid);
|
2001-11-03 13:26:15 +00:00
|
|
|
if ((u_int)uap->signum > _SIG_MAXSIG)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (EINVAL);
|
2001-09-01 18:19:21 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
if (uap->pid > 0) {
|
|
|
|
/* kill single process */
|
2004-07-29 20:38:19 +00:00
|
|
|
if ((p = pfind(uap->pid)) == NULL) {
|
2004-08-03 15:39:23 +00:00
|
|
|
if ((p = zpfind(uap->pid)) == NULL)
|
|
|
|
return (ESRCH);
|
2004-07-29 20:38:19 +00:00
|
|
|
}
|
2009-06-27 13:58:44 +00:00
|
|
|
AUDIT_ARG_PROCESS(p);
|
2005-02-13 17:37:20 +00:00
|
|
|
error = p_cansignal(td, p, uap->signum);
|
2003-05-13 20:36:02 +00:00
|
|
|
if (error == 0 && uap->signum)
|
|
|
|
psignal(p, uap->signum);
|
|
|
|
PROC_UNLOCK(p);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
switch (uap->pid) {
|
|
|
|
case -1: /* broadcast signal */
|
2005-02-13 17:37:20 +00:00
|
|
|
return (killpg1(td, uap->signum, 0, 1));
|
2003-05-13 20:36:02 +00:00
|
|
|
case 0: /* signal own process group */
|
2005-02-13 17:37:20 +00:00
|
|
|
return (killpg1(td, uap->signum, 0, 0));
|
2003-05-13 20:36:02 +00:00
|
|
|
default: /* negative explicit process group */
|
2005-02-13 17:37:20 +00:00
|
|
|
return (killpg1(td, uap->signum, -uap->pid, 0));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2003-05-13 20:36:02 +00:00
|
|
|
/* NOTREACHED */
|
1994-05-24 10:09:53 +00:00
|
|
|
}
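A short userland note (assumed usage, not part of this file) on the pid
conventions dispatched in the switch above; the helper below signals an
entire process group by negating its id.

#include <signal.h>
#include <sys/types.h>

/*
 * The pid argument selects the target set, mirroring the kernel switch:
 *   pid > 0   that single process
 *   pid == 0  every process in the caller's process group
 *   pid == -1 broadcast to every process the caller may signal
 *   pid < -1  every process in process group -pid
 */
int
terminate_group(pid_t pgid)
{
	return (kill(-pgid, SIGTERM));
}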
|
|
|
|
|
2004-06-11 11:16:26 +00:00
|
|
|
#if defined(COMPAT_43)
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct okillpg_args {
|
|
|
|
int pgid;
|
|
|
|
int signum;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
okillpg(td, uap)
|
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
register struct okillpg_args *uap;
|
|
|
|
{
|
|
|
|
|
2009-06-27 13:58:44 +00:00
|
|
|
AUDIT_ARG_SIGNUM(uap->signum);
|
|
|
|
AUDIT_ARG_PID(uap->pgid);
|
2001-11-03 13:26:15 +00:00
|
|
|
if ((u_int)uap->signum > _SIG_MAXSIG)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (EINVAL);
|
2005-10-14 12:43:47 +00:00
|
|
|
|
2005-02-13 17:37:20 +00:00
|
|
|
return (killpg1(td, uap->signum, uap->pgid, 0));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2004-06-11 11:16:26 +00:00
|
|
|
#endif /* COMPAT_43 */
|
1994-05-24 10:09:53 +00:00
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
struct sigqueue_args {
	pid_t pid;
	int signum;
	/* union sigval */ void *value;
};
#endif
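/*
 * sigqueue(2): queue signal uap->signum, together with the user-supplied
 * value, to the single process uap->pid.  A signal number of 0 performs
 * only the permission check.
 */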
int
sigqueue(struct thread *td, struct sigqueue_args *uap)
{
	ksiginfo_t ksi;
	struct proc *p;
	int error;

	if ((u_int)uap->signum > _SIG_MAXSIG)
		return (EINVAL);

	/*
	 * Specification says sigqueue can only send signal to
	 * single process.
	 */
	if (uap->pid <= 0)
		return (EINVAL);

	if ((p = pfind(uap->pid)) == NULL) {
		if ((p = zpfind(uap->pid)) == NULL)
			return (ESRCH);
	}
	error = p_cansignal(td, p, uap->signum);
	if (error == 0 && uap->signum != 0) {
		ksiginfo_init(&ksi);
		ksi.ksi_signo = uap->signum;
		ksi.ksi_code = SI_QUEUE;
		ksi.ksi_pid = td->td_proc->p_pid;
		ksi.ksi_uid = td->td_ucred->cr_ruid;
		ksi.ksi_value.sival_ptr = uap->value;
		error = tdsignal(p, NULL, ksi.ksi_signo, &ksi);
	}
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Send a signal to a process group.
 */
void
gsignal(pgid, sig)
	int pgid, sig;
{
	struct pgrp *pgrp;

	if (pgid != 0) {
		sx_slock(&proctree_lock);
		pgrp = pgfind(pgid);
		sx_sunlock(&proctree_lock);
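		/* pgfind() returns the group locked; drop the lock after posting. */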
|
2002-02-23 11:12:57 +00:00
|
|
|
if (pgrp != NULL) {
|
|
|
|
pgsignal(pgrp, sig, 0);
|
|
|
|
PGRP_UNLOCK(pgrp);
|
|
|
|
}
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
}

/*
 * Send a signal to a process group.  If checkctty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(pgrp, sig, checkctty)
	struct pgrp *pgrp;
	int sig, checkctty;
{
	register struct proc *p;

	if (pgrp) {
		PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (checkctty == 0 || p->p_flag & P_CONTROLT)
				psignal(p, sig);
			PROC_UNLOCK(p);
		}
	}
}

/*
 * Send a signal caused by a trap to the current thread.  If it will be
 * caught immediately, deliver it with correct code.  Otherwise, post it
 * normally.
 */
void
trapsignal(struct thread *td, ksiginfo_t *ksi)
{
	struct sigacts *ps;
	sigset_t mask;
	struct proc *p;
	int sig;
	int code;

	p = td->td_proc;
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	KASSERT(_SIG_VALID(sig), ("invalid signal"));

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
	    !SIGISMEMBER(td->td_sigmask, sig)) {
		td->td_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_PSIG))
			ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
			    &td->td_sigmask, code);
#endif
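		/* Deliver the caught signal at once via the ABI's sendsig routine. */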
		(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
		    ksi, &td->td_sigmask);
		mask = ps->ps_catchmask[_SIG_IDX(sig)];
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(mask, sig);
		kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
		    SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * See kern_sigaction() for origin of this code.
			 */
			SIGDELSET(ps->ps_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(ps->ps_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
		mtx_unlock(&ps->ps_mtx);
	} else {
		/*
		 * Avoid a possible infinite loop if the thread is masking
		 * the signal or the process is ignoring the signal.
		 */
		if (kern_forcesigexit &&
		    (SIGISMEMBER(td->td_sigmask, sig) ||
		     ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) {
			SIGDELSET(td->td_sigmask, sig);
			SIGDELSET(ps->ps_sigcatch, sig);
			SIGDELSET(ps->ps_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
		mtx_unlock(&ps->ps_mtx);
		p->p_code = code;	/* XXX for core dump/debugger */
		p->p_sig = sig;		/* XXX to verify code */
		tdsignal(p, td, sig, ksi);
	}
	PROC_UNLOCK(p);
}
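/*
 * Pick a thread in process p to which signal sig should be directed:
 * prefer the current thread if it does not mask the signal, otherwise
 * the first thread found that does not mask it, falling back to the
 * first thread in the process.
 */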
static struct thread *
sigtd(struct proc *p, int sig, int prop)
{
	struct thread *td, *signal_td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * Check if current thread can handle the signal without
	 * switching context to another thread.
	 */
	if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig))
		return (curthread);
	signal_td = NULL;
	FOREACH_THREAD_IN_PROC(p, td) {
		if (!SIGISMEMBER(td->td_sigmask, sig)) {
			signal_td = td;
			break;
		}
	}
	if (signal_td == NULL)
		signal_td = FIRST_THREAD_IN_PROC(p);
	return (signal_td);
}

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (e.g., blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 *
 * NB: This function may be entered from the debugger via the "kill" DDB
 * command.  There is little that can be done to mitigate the possibly messy
 * side effects of this unwise possibility.
 */
void
psignal(struct proc *p, int sig)
{
	(void) tdsignal(p, NULL, sig, NULL);
}
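/*
 * Send the signal described by a sigevent, carrying the supplied siginfo.
 * For SIGEV_THREAD_ID notification the signal is directed at the given
 * thread within the process.
 */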
int
psignal_event(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi)
{
	struct thread *td = NULL;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	KASSERT(!KSI_ONQ(ksi), ("psignal_event: ksi on queue"));

	/*
	 * ksi_code and other fields should be set before
	 * calling this function.
	 */
	ksi->ksi_signo = sigev->sigev_signo;
	ksi->ksi_value = sigev->sigev_value;
	if (sigev->sigev_notify == SIGEV_THREAD_ID) {
		td = thread_find(p, sigev->sigev_notify_thread_id);
		if (td == NULL)
			return (ESRCH);
	}
	return (tdsignal(p, td, ksi->ksi_signo, ksi));
}
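/*
 * Common signal delivery code: queue sig (with optional siginfo ksi) for
 * process p, or for thread td when the signal is thread-directed, and make
 * whatever stop/continue/wakeup adjustments the signal requires.  Called
 * with the process locked.
 */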
int
tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
{
	sig_t action;
	sigqueue_t *sigqueue;
	int prop;
	struct sigacts *ps;
	int intrval;
	int ret = 0;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (!_SIG_VALID(sig))
		panic("tdsignal(): invalid signal %d", sig);

	KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("tdsignal: ksi on queue"));

	/*
	 * IEEE Std 1003.1-2001: return success when killing a zombie.
	 */
	if (p->p_state == PRS_ZOMBIE) {
		if (ksi && (ksi->ksi_flags & KSI_INS))
			ksiginfo_tryfree(ksi);
		return (ret);
	}

	ps = p->p_sigacts;
	KNOTE_LOCKED(&p->p_klist, NOTE_SIGNAL | sig);
	prop = sigprop(sig);

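	/*
	 * Process-directed signals (td == NULL) are placed on the process
	 * sigqueue; a candidate thread is chosen only so it can be notified.
	 * Thread-directed signals go on that thread's own sigqueue.
	 */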
	if (td == NULL) {
		td = sigtd(p, sig, prop);
		sigqueue = &p->p_sigqueue;
	} else {
		KASSERT(td->td_proc == p, ("invalid thread"));
		sigqueue = &td->td_sigqueue;
	}

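	/* Fire the statically-defined (DTrace SDT) signal-send probe. */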
	SDT_PROBE(proc, kernel, , signal_send, td, p, sig, 0, 0);

	/*
	 * If the signal is being ignored,
	 * then we forget about it immediately.
	 * (Note: we don't set SIGCONT in ps_sigignore,
	 * and if it is set to SIG_IGN,
	 * action will be SIG_DFL here.)
	 */
	mtx_lock(&ps->ps_mtx);
	if (SIGISMEMBER(ps->ps_sigignore, sig)) {
		SDT_PROBE(proc, kernel, , signal_discard, ps, td, sig, 0, 0);

		mtx_unlock(&ps->ps_mtx);
		if (ksi && (ksi->ksi_flags & KSI_INS))
			ksiginfo_tryfree(ksi);
		return (ret);
	}
	if (SIGISMEMBER(td->td_sigmask, sig))
		action = SIG_HOLD;
	else if (SIGISMEMBER(ps->ps_sigcatch, sig))
		action = SIG_CATCH;
	else
		action = SIG_DFL;
	if (SIGISMEMBER(ps->ps_sigintr, sig))
		intrval = EINTR;
	else
		intrval = ERESTART;
	mtx_unlock(&ps->ps_mtx);

	if (prop & SA_CONT)
		sigqueue_delete_stopmask_proc(p);
	else if (prop & SA_STOP) {
		/*
		 * If sending a tty stop signal to a member of an orphaned
		 * process group, discard the signal here if the action
		 * is default; don't stop the process below if sleeping,
		 * and don't clear any pending SIGCONT.
		 */
		if ((prop & SA_TTYSTOP) &&
		    (p->p_pgrp->pg_jobc == 0) &&
		    (action == SIG_DFL)) {
			if (ksi && (ksi->ksi_flags & KSI_INS))
				ksiginfo_tryfree(ksi);
			return (ret);
		}
		sigqueue_delete_proc(p, SIGCONT);
		if (p->p_flag & P_CONTINUED) {
			p->p_flag &= ~P_CONTINUED;
			PROC_LOCK(p->p_pptr);
			sigqueue_take(p->p_ksi);
			PROC_UNLOCK(p->p_pptr);
		}
	}

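	/*
	 * Queue the signal.  For a queued siginfo this may fail with EAGAIN
	 * when no ksiginfo can be allocated; the error is returned to the
	 * caller.
	 */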
	ret = sigqueue_add(sigqueue, sig, ksi);
	if (ret != 0)
		return (ret);
	signotify(td);
	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD &&
	    !((prop & SA_CONT) && (p->p_flag & P_STOPPED_SIG)))
		return (ret);
	/*
	 * SIGKILL: Remove procfs STOPEVENTs.
	 */
	if (sig == SIGKILL) {
		/* from procfs_ioctl.c: PIOCBIC */
		p->p_stops = 0;
		/* from procfs_ioctl.c: PIOCCONT */
		p->p_step = 0;
		wakeup(&p->p_step);
	}
/*
|
Part 1 of KSE-III
The ability to schedule multiple threads per process
(one one cpu) by making ALL system calls optionally asynchronous.
to come: ia64 and power-pc patches, patches for gdb, test program (in tools)
Reviewed by: Almost everyone who counts
(at various times, peter, jhb, matt, alfred, mini, bernd,
and a cast of thousands)
NOTE: this is still Beta code, and contains lots of debugging stuff.
expect slight instability in signals..
2002-06-29 17:26:22 +00:00
|
|
|
* Some signals have a process-wide effect and a per-thread
|
|
|
|
* component. Most processing occurs when the process next
|
|
|
|
* tries to cross the user boundary; however, there are some
|
|
|
|
* times when processing needs to be done immediately, such as
|
|
|
|
* waking up threads so that they can cross the user boundary.
|
|
|
|
* We try to do the per-process part here.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2002-06-29 17:26:22 +00:00
|
|
|
if (P_SHOULDSTOP(p)) {
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
2002-06-29 17:26:22 +00:00
|
|
|
* The process is in stopped mode. All the threads should be
|
|
|
|
* either winding down or already on the suspended queue.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2002-06-29 17:26:22 +00:00
|
|
|
if (p->p_flag & P_TRACED) {
|
|
|
|
/*
|
|
|
|
* The traced process is already stopped,
|
|
|
|
* so no further action is necessary.
|
|
|
|
* No signal can restart us.
|
|
|
|
*/
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sig == SIGKILL) {
|
|
|
|
/*
|
|
|
|
* SIGKILL sets process running.
|
|
|
|
* It will die elsewhere.
|
|
|
|
* All threads must be restarted.
|
|
|
|
*/
|
2004-10-03 13:23:49 +00:00
|
|
|
p->p_flag &= ~P_STOPPED_SIG;
|
2002-06-29 17:26:22 +00:00
|
|
|
goto runfast;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (prop & SA_CONT) {
|
|
|
|
/*
|
|
|
|
* If SIGCONT is default (or ignored), we continue the
|
2005-10-14 12:43:47 +00:00
|
|
|
* process but don't leave the signal in sigqueue as
|
2002-10-01 17:15:53 +00:00
|
|
|
* it has no further action. If SIGCONT is held, we
|
2002-06-29 17:26:22 +00:00
|
|
|
* continue the process and leave the signal in
|
2005-10-14 12:43:47 +00:00
|
|
|
* sigqueue. If the process catches SIGCONT, let it
|
2002-06-29 17:26:22 +00:00
|
|
|
* handle the signal itself. If it isn't waiting on
|
|
|
|
* an event, it goes back to run state.
|
|
|
|
* Otherwise, process goes back to sleep state.
|
|
|
|
*/
|
2002-09-05 07:30:18 +00:00
|
|
|
p->p_flag &= ~P_STOPPED_SIG;
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SLOCK(p);
|
2005-11-08 09:09:26 +00:00
|
|
|
if (p->p_numthreads == p->p_suspcount) {
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SUNLOCK(p);
|
2005-11-08 09:09:26 +00:00
|
|
|
p->p_flag |= P_CONTINUED;
|
2006-02-04 14:10:57 +00:00
|
|
|
p->p_xstat = SIGCONT;
|
|
|
|
PROC_LOCK(p->p_pptr);
|
2005-11-08 09:09:26 +00:00
|
|
|
childproc_continued(p);
|
2006-02-04 14:10:57 +00:00
|
|
|
PROC_UNLOCK(p->p_pptr);
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SLOCK(p);
|
2005-11-08 09:09:26 +00:00
|
|
|
}
|
2002-06-29 17:26:22 +00:00
|
|
|
if (action == SIG_DFL) {
|
Commit 4/14 of sched_lock decomposition.
- Use thread_lock() rather than sched_lock for per-thread scheduling
synchronization.
- Use the per-process spinlock rather than the sched_lock for per-process
scheduling synchronization.
- Move some common code into thread_suspend_switch() to handle the
mechanics of suspending a thread. The locking here is incredibly
convoluted and should be simplified.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 23:52:24 +00:00
|
|
|
thread_unsuspend(p);
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SUNLOCK(p);
|
2005-10-14 12:43:47 +00:00
|
|
|
sigqueue_delete(sigqueue, sig);
|
2007-06-04 23:52:24 +00:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
if (action == SIG_CATCH) {
|
2006-10-26 21:42:22 +00:00
|
|
|
/*
|
|
|
|
* The process wants to catch it so it needs
|
|
|
|
* to run at least one thread, but which one?
|
|
|
|
*/
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SUNLOCK(p);
|
2002-06-29 17:26:22 +00:00
|
|
|
goto runfast;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* The signal is not ignored or caught.
|
|
|
|
*/
|
2002-08-01 18:45:10 +00:00
|
|
|
thread_unsuspend(p);
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SUNLOCK(p);
|
1994-05-24 10:09:53 +00:00
|
|
|
goto out;
|
2002-06-29 17:26:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (prop & SA_STOP) {
|
|
|
|
/*
|
|
|
|
* Already stopped, don't need to stop again
|
|
|
|
* (If we did, the shell could get confused).
|
2002-08-01 18:45:10 +00:00
|
|
|
* Just make sure the signal STOP bit is set.
|
2002-06-29 17:26:22 +00:00
|
|
|
*/
|
2002-09-05 07:30:18 +00:00
|
|
|
p->p_flag |= P_STOPPED_SIG;
|
2005-10-14 12:43:47 +00:00
|
|
|
sigqueue_delete(sigqueue, sig);
|
2002-06-29 17:26:22 +00:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
2002-06-29 17:26:22 +00:00
|
|
|
* All other kinds of signals:
|
|
|
|
* If a thread is sleeping interruptibly, simulate a
|
|
|
|
* wakeup so that when it is continued it will be made
|
|
|
|
* runnable and can look at the signal. However, don't make
|
2002-08-01 18:45:10 +00:00
|
|
|
* the PROCESS runnable; leave it stopped.
|
2002-06-29 17:26:22 +00:00
|
|
|
* It may run a bit until it hits a thread_suspend_check().
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
If a thread that is swapped out is made runnable, then the setrunnable()
routine wakes up proc0 so that proc0 can swap the thread back in.
Historically, this has been done by waking up proc0 directly from
setrunnable() itself via a wakeup(). When waking up a sleeping thread
that was swapped out (the usual case when waking proc0 since only sleeping
threads are eligible to be swapped out), this resulted in a bit of
recursion (e.g. wakeup() -> setrunnable() -> wakeup()).
With sleep queues having separate locks in 6.x and later, this caused a
spin lock LOR (sleepq lock -> sched_lock/thread lock -> sleepq lock).
An attempt was made to fix this in 7.0 by making the proc0 wakeup use
the ithread mechanism for doing the wakeup. However, this required
grabbing proc0's thread lock to perform the wakeup. If proc0 was asleep
elsewhere in the kernel (e.g. waiting for disk I/O), then this degenerated
into the same LOR since the thread lock would be some other sleepq lock.
Fix this by deferring the wakeup of the swapper until after the sleepq
lock held by the upper layer has been locked. The setrunnable() routine
now returns a boolean value to indicate whether or not proc0 needs to be
woken up. The end result is that consumers of the sleepq API such as
*sleep/wakeup, condition variables, sx locks, and lockmgr, have to wakeup
proc0 if they get a non-zero return value from sleepq_abort(),
sleepq_broadcast(), or sleepq_signal().
Discussed with: jeff
Glanced at by: sam
Tested by: Jurgen Weber jurgen - ish com au
MFC after: 2 weeks
2008-08-05 20:02:31 +00:00
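The consumer-side pattern this change describes looks roughly like the
following wakeup()-style sketch; it only illustrates the deferred swapper
wakeup and is not a quote of the actual kern_synch.c code.

void
wakeup(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper)
		kick_proc0();	/* ask proc0 to swap threads back in */
}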
|
|
|
wakeup_swapper = 0;
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SLOCK(p);
|
2007-06-04 23:52:24 +00:00
|
|
|
thread_lock(td);
|
Switch the sleep/wakeup and condition variable implementations to use the
sleep queue interface:
- Sleep queues attempt to merge some of the benefits of both sleep queues
and condition variables. Having sleep queues in a hash table avoids
having to allocate a queue head for each wait channel. Thus, struct cv
has shrunk down to just a single char * pointer now. However, the
hash table does not hold threads directly, but queue heads. This means
that once you have located a queue in the hash bucket, you no longer have
to walk the rest of the hash chain looking for threads. Instead, you have
a list of all the threads sleeping on that wait channel.
- Outside of the sleepq code and the sleep/cv code the kernel no longer
differentiates between cv's and sleep/wakeup. For example, calls to
abortsleep() and cv_abort() are replaced with a call to sleepq_abort().
Thus, the TDF_CVWAITQ flag is removed. Also, calls to unsleep() and
cv_waitq_remove() have been replaced with calls to sleepq_remove().
- The sched_sleep() function no longer accepts a priority argument as
sleeps no longer inherently bump the priority. Instead, this is solely
a property of msleep(), which explicitly calls sched_prio() before
blocking.
- The TDF_ONSLEEPQ flag has been dropped as it was never used. The
associated TDF_SET_ONSLEEPQ and TDF_CLR_ON_SLEEPQ macros have also been
dropped and replaced with a single explicit clearing of td_wchan.
TD_SET_ONSLEEPQ() would really have only made sense if it had taken
the wait channel and message as arguments anyway. Now that that only
happens in one place, a macro would be overkill.
2004-02-27 18:52:44 +00:00
|
|
|
if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
|
2008-08-05 20:02:31 +00:00
|
|
|
wakeup_swapper = sleepq_abort(td, intrval);
|
2007-06-04 23:52:24 +00:00
|
|
|
thread_unlock(td);
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SUNLOCK(p);
|
2008-08-05 20:02:31 +00:00
|
|
|
if (wakeup_swapper)
|
|
|
|
kick_proc0();
|
2002-06-29 17:26:22 +00:00
|
|
|
goto out;
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
2004-03-29 22:46:54 +00:00
|
|
|
* Mutexes are short lived. Threads waiting on them will
|
|
|
|
* hit thread_suspend_check() soon.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2005-06-06 05:13:10 +00:00
|
|
|
} else if (p->p_state == PRS_NORMAL) {
|
|
|
|
if (p->p_flag & P_TRACED || action == SIG_CATCH) {
|
Fix a long-standing race between the sleep queue and thread
suspension code. When a thread A is going to sleep, it calls
sleepq_catch_signals() to detect any pending signals or thread
suspension request; if nothing happens, it returns without
holding the process lock or scheduler lock. This opens a race
window that allows a thread B to come in and do process
suspension work. However, since A is still in the running state,
thread B can do nothing to A; thread A continues and puts
itself into the actually-sleeping state, but B has never seen it,
and it sits there forever until B is woken up by other threads
some time later (which can be a very long delay, or may never
happen). Fix this bug by forcing sleepq_catch_signals to
return with the scheduler lock held.
Fix sleepq_abort() by passing it an interruption code; previously
it worked like wakeup_one(), and the interruption could not be
identified correctly by the sleep queue code when the sleeping
thread was resumed.
Let thread_suspend_check() return EINTR or ERESTART, so the sleep
queue no longer has to use SIGSTOP as a hack to build a return
value.
Reviewed by: jhb
MFC after: 1 week
2006-02-15 23:52:01 +00:00
|
|
|
tdsigwakeup(td, sig, action, intrval);
|
1994-05-24 10:09:53 +00:00
|
|
|
goto out;
|
2002-08-21 20:03:55 +00:00
|
|
|
}
|
2005-06-06 05:13:10 +00:00
|
|
|
|
|
|
|
MPASS(action == SIG_DFL);
|
|
|
|
|
In the kernel code, we have the tsleep() call with the PCATCH argument.
PCATCH means "if we get a signal, interrupt me!" and tsleep returns
either EINTR or ERESTART depending on the circumstances. ERESTART is
"special" because it causes the system call to fail, but right as it
returns back to userland it tells the trap handler to move %eip back a
bit so that userland will immediately re-run the syscall.
This is a syscall restart. It only works for things like read() etc where
nothing has changed yet. Note that *userland* is tricked into restarting
the syscall by the kernel. The kernel doesn't actually do the restart. It
is deadly for things like select, poll, nanosleep etc where it might cause
the elapsed time to be reset and start again from scratch. So those
syscalls do this to prevent userland rerunning the syscall:
if (error == ERESTART) error = EINTR;
Fake "signals" like SIGTSTP from ^Z etc do not normally invoke userland
signal handlers. But, in -current, the PCATCH *is* being triggered and
tsleep is returning ERESTART, and the syscall is aborted even though no
userland signal handler was run.
That is the fault here. We're triggering the PCATCH in cases that we
shouldn't. ie: it is being triggered on *any* signal processing, rather
than the case where the signal is posted to userland.
--- Peter
The work of psignal() is a patchwork of special cases required by the process
debugging and job-control facilities...
--- Kirk McKusick
"The design and impelementation of the 4.4BSD Operating system"
Page 105
In the STABLE source, when psignal posts a STOP signal to a sleeping
process and the signal action of the process is SIG_DFL, the system
directly changes the process state from SSLEEP to SSTOP, and when
SIGCONT is later posted to the stopped process, if the process is
still on a sleep queue, its state is restored to SSLEEP without
waking the process up.
This commit mimics the behaviour of the STABLE source tree.
Reviewed by: Jon Mini, Tim Robbins, Peter Wemm
Approved by: julian@freebsd.org (mentor)
2002-09-03 12:56:01 +00:00
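As a concrete illustration of the restart-suppression idiom quoted above,
a sketch of an interruptible kernel sleep that refuses to let userland
transparently restart the syscall; example_wait() and its wait channel are
placeholders, not code from this file.

static int
example_wait(void *chan, int timo)
{
	int error;

	/* PCATCH lets a pending signal abort the sleep. */
	error = tsleep(chan, PWAIT | PCATCH, "exwait", timo);
	if (error == ERESTART)
		error = EINTR;	/* fail with EINTR instead of restarting */
	return (error);
}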
|
|
|
if (prop & SA_STOP) {
|
2008-03-19 06:19:01 +00:00
|
|
|
if (p->p_flag & P_PPWAIT)
|
2002-09-03 12:56:01 +00:00
|
|
|
goto out;
|
2003-03-11 00:07:53 +00:00
|
|
|
p->p_flag |= P_STOPPED_SIG;
|
|
|
|
p->p_xstat = sig;
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SLOCK(p);
|
2006-02-13 03:16:55 +00:00
|
|
|
sig_suspend_threads(td, p, 1);
|
2003-03-31 22:49:17 +00:00
|
|
|
if (p->p_numthreads == p->p_suspcount) {
|
2005-11-08 09:09:26 +00:00
|
|
|
/*
|
|
|
|
* Only a thread sending a signal to another
|
|
|
|
* process can reach here; a thread sending a
|
|
|
|
* signal to its own process does not suspend
|
|
|
|
* itself here, so p_numthreads should never
|
|
|
|
* equal p_suspcount.
|
|
|
|
*/
|
|
|
|
thread_stopped(p);
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SUNLOCK(p);
|
2005-10-14 12:43:47 +00:00
|
|
|
sigqueue_delete_proc(p, p->p_xstat);
|
2008-11-05 03:01:23 +00:00
|
|
|
} else
|
|
|
|
PROC_SUNLOCK(p);
|
2002-09-03 12:56:01 +00:00
|
|
|
goto out;
|
2008-03-19 06:19:01 +00:00
|
|
|
}
|
2002-06-29 17:26:22 +00:00
|
|
|
} else {
|
|
|
|
/* Not in "NORMAL" state. Discard the signal. */
|
2005-10-14 12:43:47 +00:00
|
|
|
sigqueue_delete(sigqueue, sig);
|
2002-06-29 17:26:22 +00:00
|
|
|
goto out;
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2002-06-29 17:26:22 +00:00
|
|
|
/*
|
|
|
|
* The process is not stopped so we need to apply the signal to all the
|
|
|
|
* running threads.
|
|
|
|
*/
|
|
|
|
runfast:
|
2006-02-15 23:52:01 +00:00
|
|
|
tdsigwakeup(td, sig, action, intrval);
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SLOCK(p);
|
2002-06-29 17:26:22 +00:00
|
|
|
thread_unsuspend(p);
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SUNLOCK(p);
|
2002-06-29 17:26:22 +00:00
|
|
|
out:
|
2008-11-05 03:01:23 +00:00
|
|
|
/* If we jump here, proc slock should not be owned. */
|
|
|
|
PROC_SLOCK_ASSERT(p, MA_NOTOWNED);
|
2005-10-14 12:43:47 +00:00
|
|
|
return (ret);
|
2002-06-29 17:26:22 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2002-06-29 17:26:22 +00:00
|
|
|
/*
|
|
|
|
* The force of a signal has been directed against a single
|
2004-05-21 10:02:24 +00:00
|
|
|
* thread. We need to see what we can do about knocking it
|
2002-06-29 17:26:22 +00:00
|
|
|
* out of any sleep it may be in, etc.
|
|
|
|
*/
|
|
|
|
static void
|
2006-02-15 23:52:01 +00:00
|
|
|
tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
|
2002-06-29 17:26:22 +00:00
|
|
|
{
|
|
|
|
struct proc *p = td->td_proc;
|
|
|
|
register int prop;
|
2008-08-05 20:02:31 +00:00
|
|
|
int wakeup_swapper;
|
2002-06-29 17:26:22 +00:00
|
|
|
|
2008-08-05 20:02:31 +00:00
|
|
|
wakeup_swapper = 0;
|
2003-04-18 20:59:05 +00:00
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
2002-06-29 17:26:22 +00:00
|
|
|
prop = sigprop(sig);
|
2004-05-21 10:02:24 +00:00
|
|
|
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SLOCK(p);
|
2008-03-19 06:19:01 +00:00
|
|
|
thread_lock(td);
|
2002-06-29 17:26:22 +00:00
|
|
|
/*
|
2002-07-03 09:15:20 +00:00
|
|
|
* Bring the priority of a thread up if we want it to get
|
2002-06-29 17:26:22 +00:00
|
|
|
* killed in this lifetime.
|
|
|
|
*/
|
2007-07-19 08:49:16 +00:00
|
|
|
if (action == SIG_DFL && (prop & SA_KILL) && td->td_priority > PUSER)
|
|
|
|
sched_prio(td, PUSER);
|
2004-05-20 20:17:28 +00:00
|
|
|
if (TD_ON_SLEEPQ(td)) {
|
2002-06-29 17:26:22 +00:00
|
|
|
/*
|
|
|
|
* If the thread is sleeping uninterruptibly,
|
|
|
|
* we can't interrupt the sleep... the signal will
|
|
|
|
* be noticed when the process returns through
|
|
|
|
* trap() or syscall().
|
|
|
|
*/
|
|
|
|
if ((td->td_flags & TDF_SINTR) == 0)
|
2008-03-19 06:19:01 +00:00
|
|
|
goto out;
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
2005-06-06 05:13:10 +00:00
|
|
|
* If SIGCONT is default (or ignored) and process is
|
|
|
|
* asleep, we are finished; the process should not
|
|
|
|
* be awakened.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2005-06-06 05:13:10 +00:00
|
|
|
if ((prop & SA_CONT) && action == SIG_DFL) {
|
Commit 4/14 of sched_lock decomposition.
- Use thread_lock() rather than sched_lock for per-thread scheduling
  synchronization.
- Use the per-process spinlock rather than the sched_lock for per-process
scheduling synchronization.
- Move some common code into thread_suspend_switch() to handle the
mechanics of suspending a thread. The locking here is incredibly
convoluted and should be simplified.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
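A minimal sketch, assuming the per-thread fields used elsewhere in this file, of the locking idiom this decomposition introduces: per-thread scheduling state is touched under thread_lock()/thread_unlock() instead of the global sched_lock. The helper is illustrative only.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

/* Illustrative only: mark a thread for an AST under its thread lock. */
static void
example_mark_ast(struct thread *td)
{
	thread_lock(td);
	td->td_flags |= TDF_ASTPENDING;
	thread_unlock(td);
}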
2007-06-04 23:52:24 +00:00
|
|
|
thread_unlock(td);
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SUNLOCK(p);
|
1. Change the prototypes of trapsignal and sendsig to use ksiginfo_t *; most
changes in MD code are trivial. Before this change, trapsignal and
sendsig used discrete parameters; now they use member fields of the
ksiginfo_t structure. For sendsig, this change allows us to pass the
POSIX realtime signal value to user code.
2. Remove cpu_thread_siginfo, it is no longer needed because we now always
generate ksiginfo_t data and feed it to libpthread.
3. Add p_sigqueue to proc structure to hold shared signals which were
blocked by all threads in the proc.
4. Add td_sigqueue to thread structure to hold all signals delivered to
thread.
5. i386 and amd64 now return POSIX standard si_code, other arches will
be fixed.
6. In this sigqueue implementation, pending signal set is kept as before,
an extra siginfo list holds additional siginfo_t data for signals.
kernel code using psignal() still behaves as before; it won't fail
even under memory pressure. The only exception is when deleting a signal:
we should call sigqueue_delete to remove the signal from the sigqueue rather
than SIGDELSET. Currently there is no kernel code that will deliver a signal
with additional data, so the kernel should be as stable as before;
a ksiginfo can carry more information, for example allowing a signal to
be delivered but its siginfo data to be dropped if memory is short.
SIGKILL and SIGSTOP have a fast path in sigqueue_add, because they can
not be caught or masked.
The sigqueue() syscall allows user code to queue a signal to a target
process; if resources are unavailable, EAGAIN is returned, as the
specification requires.
Just before a thread exits, its signal queue memory is freed by
sigqueue_flush.
Currently, all signals are allowed to be queued, not only realtime signals.
Earlier patch reviewed by: jhb, deischen
Tested on: i386, amd64
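A minimal userland sketch of the sigqueue() syscall mentioned above: queue a realtime signal with an attached value and handle the EAGAIN case the message describes. The target pid and the choice of SIGRTMIN are hypothetical.

#include <errno.h>
#include <signal.h>
#include <stdio.h>

/* Queue SIGRTMIN with a value; returns 0 on success, -1 on error. */
static int
queue_rt_signal(pid_t pid, int value)
{
	union sigval sv;

	sv.sival_int = value;
	if (sigqueue(pid, SIGRTMIN, sv) == -1) {
		if (errno == EAGAIN)
			fprintf(stderr, "signal queue limit reached\n");
		return (-1);
	}
	return (0);
}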
2005-10-14 12:43:47 +00:00
|
|
|
sigqueue_delete(&p->p_sigqueue, sig);
|
2002-07-03 09:15:20 +00:00
|
|
|
/*
|
2005-06-06 05:13:10 +00:00
|
|
|
* It may be on either list in this state.
|
|
|
|
* Remove from both for now.
|
2002-07-03 09:15:20 +00:00
|
|
|
*/
|
2005-10-14 12:43:47 +00:00
|
|
|
sigqueue_delete(&td->td_sigqueue, sig);
|
2005-06-06 05:13:10 +00:00
|
|
|
return;
|
2002-07-03 09:15:20 +00:00
|
|
|
}
|
2005-06-06 05:13:10 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Give low priority threads a better chance to run.
|
|
|
|
*/
|
|
|
|
if (td->td_priority > PUSER)
|
|
|
|
sched_prio(td, PUSER);
|
|
|
|
|
If a thread that is swapped out is made runnable, then the setrunnable()
routine wakes up proc0 so that proc0 can swap the thread back in.
Historically, this has been done by waking up proc0 directly from
setrunnable() itself via a wakeup(). When waking up a sleeping thread
that was swapped out (the usual case when waking proc0 since only sleeping
threads are eligible to be swapped out), this resulted in a bit of
recursion (e.g. wakeup() -> setrunnable() -> wakeup()).
With sleep queues having separate locks in 6.x and later, this caused a
spin lock LOR (sleepq lock -> sched_lock/thread lock -> sleepq lock).
An attempt was made to fix this in 7.0 by making the proc0 wakeup use
the ithread mechanism for doing the wakeup. However, this required
grabbing proc0's thread lock to perform the wakeup. If proc0 was asleep
elsewhere in the kernel (e.g. waiting for disk I/O), then this degenerated
into the same LOR since the thread lock would be some other sleepq lock.
Fix this by deferring the wakeup of the swapper until after the sleepq
lock held by the upper layer has been dropped. The setrunnable() routine
now returns a boolean value to indicate whether or not proc0 needs to be
woken up. The end result is that consumers of the sleepq API such as
*sleep/wakeup, condition variables, sx locks, and lockmgr have to wake up
proc0 if they get a non-zero return value from sleepq_abort(),
sleepq_broadcast(), or sleepq_signal().
Discussed with: jeff
Glanced at by: sam
Tested by: Jurgen Weber jurgen - ish com au
MFC after: 2 weeks
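A minimal sketch of the consumer contract described above, mirroring the tdsigwakeup() code below: call into the sleepq layer, drop the thread lock, and only then wake the swapper if the return value asks for it. The helper is hypothetical and the locking context is simplified.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>

/* Hypothetical helper: abort td's interruptible sleep with EINTR. */
static void
example_abort_sleep(struct thread *td)
{
	int wakeup_swapper;

	thread_lock(td);
	wakeup_swapper = sleepq_abort(td, EINTR);
	thread_unlock(td);
	/* Wake proc0 only after the sleepq/thread locks are dropped. */
	if (wakeup_swapper)
		kick_proc0();
}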
2008-08-05 20:02:31 +00:00
|
|
|
wakeup_swapper = sleepq_abort(td, intrval);
|
2004-05-21 10:02:24 +00:00
|
|
|
} else {
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
2004-05-21 10:02:24 +00:00
|
|
|
* Other states do nothing with the signal immediately,
|
1994-05-24 10:09:53 +00:00
|
|
|
* other than kicking ourselves if we are running.
|
|
|
|
* It will either never be noticed, or noticed very soon.
|
|
|
|
*/
|
2004-05-21 10:02:24 +00:00
|
|
|
#ifdef SMP
|
2004-02-27 18:52:44 +00:00
|
|
|
if (TD_IS_RUNNING(td) && td != curthread)
|
2002-07-03 09:15:20 +00:00
|
|
|
forward_signal(td);
|
|
|
|
#endif
|
2004-05-21 10:02:24 +00:00
|
|
|
}
|
2008-03-19 06:19:01 +00:00
|
|
|
out:
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SUNLOCK(p);
|
2008-03-19 06:19:01 +00:00
|
|
|
thread_unlock(td);
|
2008-08-05 20:02:31 +00:00
|
|
|
if (wakeup_swapper)
|
|
|
|
kick_proc0();
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
2006-02-13 03:16:55 +00:00
|
|
|
static void
|
|
|
|
sig_suspend_threads(struct thread *td, struct proc *p, int sending)
|
|
|
|
{
|
|
|
|
struct thread *td2;
|
2009-07-14 22:52:46 +00:00
|
|
|
int wakeup_swapper;
|
2006-02-13 03:16:55 +00:00
|
|
|
|
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SLOCK_ASSERT(p, MA_OWNED);
|
2006-02-13 03:16:55 +00:00
|
|
|
|
2009-07-14 22:52:46 +00:00
|
|
|
wakeup_swapper = 0;
|
2006-02-13 03:16:55 +00:00
|
|
|
FOREACH_THREAD_IN_PROC(p, td2) {
|
2007-06-04 23:52:24 +00:00
|
|
|
thread_lock(td2);
|
2008-03-21 08:23:25 +00:00
|
|
|
td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
|
2006-02-13 03:16:55 +00:00
|
|
|
if ((TD_IS_SLEEPING(td2) || TD_IS_SWAPPED(td2)) &&
|
2009-07-14 22:52:46 +00:00
|
|
|
(td2->td_flags & TDF_SINTR)) {
|
|
|
|
if (td2->td_flags & TDF_SBDRY) {
|
|
|
|
if (TD_IS_SUSPENDED(td2))
|
|
|
|
wakeup_swapper |=
|
|
|
|
thread_unsuspend_one(td2);
|
|
|
|
if (TD_ON_SLEEPQ(td2))
|
|
|
|
wakeup_swapper |=
|
|
|
|
sleepq_abort(td2, ERESTART);
|
|
|
|
} else if (!TD_IS_SUSPENDED(td2)) {
|
|
|
|
thread_suspend_one(td2);
|
|
|
|
}
|
|
|
|
} else if (!TD_IS_SUSPENDED(td2)) {
|
2006-02-13 03:16:55 +00:00
|
|
|
if (sending || td != td2)
|
|
|
|
td2->td_flags |= TDF_ASTPENDING;
|
|
|
|
#ifdef SMP
|
|
|
|
if (TD_IS_RUNNING(td2) && td2 != td)
|
|
|
|
forward_signal(td2);
|
|
|
|
#endif
|
|
|
|
}
|
2007-06-04 23:52:24 +00:00
|
|
|
thread_unlock(td2);
|
2006-02-13 03:16:55 +00:00
|
|
|
}
|
2009-07-14 22:52:46 +00:00
|
|
|
if (wakeup_swapper)
|
|
|
|
kick_proc0();
|
2006-02-13 03:16:55 +00:00
|
|
|
}
|
|
|
|
|
Add code to support debugging threaded process.
1. Add tm_lwpid into kse_thr_mailbox to indicate which kernel
thread the current user thread is running on. Add tm_dflags into
kse_thr_mailbox; the flags are written by the debugger and tell
the UTS and kernel what should be done when the process is being
debugged. Currently, there are two flags, TMDF_SSTEP and TMDF_DONOTRUNUSER.
TMDF_SSTEP is used to tell kernel to turn on single stepping,
or turn off if it is not set.
TMDF_DONOTRUNUSER is used to tell kernel to schedule upcall
whenever possible, to UTS, it means do not run the user thread
until debugger clears it, this behaviour is necessary because
gdb wants to resume only one thread when the thread's pc is
at a breakpoint and the thread needs to go forward; in order to
avoid other threads sneaking past the breakpoints, it needs to remove
the breakpoint and let only one thread go. Also, add km_lwp to
kse_mailbox, the lwp id is copied to kse_thr_mailbox at context
switch time when process is not being debugged, so when process
is attached, debugger can map kernel thread to user thread.
2. Add p_xthread to the proc structure and td_xsig to the thread structure.
p_xthread is used by a thread when it wants to report an event
to the debugger; every thread can set the pointer, notably in
ptracestop, where the last thread reporting an event
wins the race. Every thread has a td_xsig to exchange a signal
with the debugger; a thread uses the TDF_XSIG flag to indicate it is
reporting a signal to the debugger, and if the flag is not cleared, the
thread will keep retrying until it is cleared by the debugger. p_xthread may be
used by debugger to indicate CURRENT thread. The p_xstat is still
in the proc structure to keep wait() working; in the future, we may
just use td_xsig.
3. Add TDF_DBSUSPEND flag, the flag is used by debugger to suspend
a thread. When process stops, debugger can set the flag for
the thread; the thread will check the flag in thread_suspend_check and
enter a loop until it is cleared by the debugger, the process is
detached, or the process is exiting. The flag is also checked in
ptracestop, so the debugger can temporarily suspend a thread even
if the thread wants to exchange signal.
4. Currently, in ptrace, we always resume all threads, but if a thread
already has the TDF_DBSUSPEND flag set by the debugger, it won't run.
Encouraged by: marcel, julian, deischen
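A minimal debugger-side sketch of the flow these facilities support: attach, wait for the stop to be reported through wait(), then continue and decide whether the stop signal is forwarded. The function and its error handling are hypothetical.

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

/* Attach to pid, wait for it to stop, then resume it without a signal. */
static int
attach_and_resume(pid_t pid)
{
	int status;

	if (ptrace(PT_ATTACH, pid, NULL, 0) == -1)
		return (-1);
	if (waitpid(pid, &status, 0) == -1)
		return (-1);
	/* Pass WSTOPSIG(status) instead of 0 to forward the stop signal. */
	return (ptrace(PT_CONTINUE, pid, (caddr_t)1, 0));
}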
2004-07-13 07:20:10 +00:00
|
|
|
int
|
2003-09-26 15:09:46 +00:00
|
|
|
ptracestop(struct thread *td, int sig)
|
|
|
|
{
|
|
|
|
struct proc *p = td->td_proc;
|
|
|
|
|
2004-01-29 00:58:21 +00:00
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
2003-09-26 15:09:46 +00:00
|
|
|
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
|
2007-03-21 21:20:51 +00:00
|
|
|
&p->p_mtx.lock_object, "Stopping for traced signal");
|
2003-09-26 15:09:46 +00:00
|
|
|
|
2008-10-15 06:31:37 +00:00
|
|
|
td->td_dbgflags |= TDB_XSIG;
|
2004-07-13 07:20:10 +00:00
|
|
|
td->td_xsig = sig;
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SLOCK(p);
|
2008-10-15 06:31:37 +00:00
|
|
|
while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
|
2004-07-13 07:20:10 +00:00
|
|
|
if (p->p_flag & P_SINGLE_EXIT) {
|
2008-10-15 06:31:37 +00:00
|
|
|
td->td_dbgflags &= ~TDB_XSIG;
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SUNLOCK(p);
|
2004-07-13 07:20:10 +00:00
|
|
|
return (sig);
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Just make wait() work; the last stopped thread
|
|
|
|
* will win.
|
|
|
|
*/
|
|
|
|
p->p_xstat = sig;
|
|
|
|
p->p_xthread = td;
|
|
|
|
p->p_flag |= (P_STOPPED_SIG|P_STOPPED_TRACE);
|
2006-02-13 03:16:55 +00:00
|
|
|
sig_suspend_threads(td, p, 0);
|
2004-07-13 07:20:10 +00:00
|
|
|
stopme:
|
2007-06-04 23:52:24 +00:00
|
|
|
thread_suspend_switch(td);
|
|
|
|
if (!(p->p_flag & P_TRACED)) {
|
2004-07-13 07:20:10 +00:00
|
|
|
break;
|
2007-06-04 23:52:24 +00:00
|
|
|
}
|
2008-10-15 06:31:37 +00:00
|
|
|
if (td->td_dbgflags & TDB_SUSPEND) {
|
2004-07-13 07:20:10 +00:00
|
|
|
if (p->p_flag & P_SINGLE_EXIT)
|
|
|
|
break;
|
|
|
|
goto stopme;
|
|
|
|
}
|
|
|
|
}
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SUNLOCK(p);
|
2004-07-13 07:20:10 +00:00
|
|
|
return (td->td_xsig);
|
2003-09-26 15:09:46 +00:00
|
|
|
}
|
|
|
|
|
Currently, when a signal is delivered to the process and there is a thread
not blocking the signal, the signal is placed on that thread's sigqueue. If
the selected thread is in the kernel executing the thr_exit() or sigprocmask()
syscalls, then the signal might not be delivered to usermode for an arbitrary
amount of time, and for an exiting thread it is lost.
Put process-directed signals on the process queue unconditionally,
selecting the thread to deliver the signal only when a thread returns
to usermode, since only then can the thread handle delivery of the signal
reliably. For an exiting thread, or a thread that has blocked some signals,
check whether the newly blocked signal is queued for the process, and
try to find a thread to wake up for delivery, in reschedule_signals(). For
an exiting thread, assume that all signals are blocked.
Change cursig() and postsig() to look into both the thread and process
signal queues. When there is a signal that a thread returning to usermode
could consume, the TDF_NEEDSIGCHK flag is not necessarily set now. Do an
unlocked read of p_siglist and p_pendingcnt to check for queued signals.
Note that a thread that has a signal unblocked might get a spurious wakeup
and EINTR from an interruptible system call now, due to the possibility
of being selected by reschedule_signals() while another thread returned
to usermode earlier and removed the signal from the process queue. This
should not cause compliance issues, since the thread has not blocked a
signal and thus should be ready to receive it anyway.
Reported by: Justin Teller <justin.teller gmail com>
Reviewed by: davidxu, jilles
MFC after: 1 month
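A minimal userland sketch of the caveat above: a thread that unblocks a signal may see a spurious EINTR from an interruptible system call, so portable code should already retry, as in this hypothetical read loop.

#include <errno.h>
#include <unistd.h>

/* Retry a read that may be interrupted by a spurious signal wakeup. */
static ssize_t
read_retry(int fd, void *buf, size_t len)
{
	ssize_t n;

	do {
		n = read(fd, buf, len);
	} while (n == -1 && errno == EINTR);
	return (n);
}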
2009-10-11 16:49:30 +00:00
|
|
|
static void
|
2009-10-30 10:10:39 +00:00
|
|
|
reschedule_signals(struct proc *p, sigset_t block, int flags)
|
2009-10-11 16:49:30 +00:00
|
|
|
{
|
|
|
|
struct sigacts *ps;
|
|
|
|
struct thread *td;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
|
|
|
|
|
|
|
ps = p->p_sigacts;
|
|
|
|
for (i = 1; !SIGISEMPTY(block); i++) {
|
|
|
|
if (!SIGISMEMBER(block, i))
|
|
|
|
continue;
|
|
|
|
SIGDELSET(block, i);
|
|
|
|
if (!SIGISMEMBER(p->p_siglist, i))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
td = sigtd(p, i, 0);
|
|
|
|
signotify(td);
|
2009-10-30 10:10:39 +00:00
|
|
|
if (!(flags & SIGPROCMASK_PS_LOCKED))
|
|
|
|
mtx_lock(&ps->ps_mtx);
|
2009-10-11 16:49:30 +00:00
|
|
|
if (p->p_flag & P_TRACED || SIGISMEMBER(ps->ps_sigcatch, i))
|
|
|
|
tdsigwakeup(td, i, SIG_CATCH,
|
|
|
|
(SIGISMEMBER(ps->ps_sigintr, i) ? EINTR :
|
|
|
|
ERESTART));
|
2009-10-30 10:10:39 +00:00
|
|
|
if (!(flags & SIGPROCMASK_PS_LOCKED))
|
|
|
|
mtx_unlock(&ps->ps_mtx);
|
2009-10-11 16:49:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
tdsigcleanup(struct thread *td)
|
|
|
|
{
|
|
|
|
struct proc *p;
|
|
|
|
sigset_t unblocked;
|
|
|
|
|
|
|
|
p = td->td_proc;
|
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
|
|
|
|
|
|
|
sigqueue_flush(&td->td_sigqueue);
|
|
|
|
if (p->p_numthreads == 1)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Since we cannot handle signals, notify signal post code
|
|
|
|
* about this by filling the sigmask.
|
|
|
|
*
|
|
|
|
* Also, if needed, wake up thread(s) that do not block the
|
|
|
|
* same signals as the exiting thread, since the thread might
|
|
|
|
* have been selected for delivery and woken up.
|
|
|
|
*/
|
|
|
|
SIGFILLSET(unblocked);
|
|
|
|
SIGSETNAND(unblocked, td->td_sigmask);
|
|
|
|
SIGFILLSET(td->td_sigmask);
|
2009-10-30 10:10:39 +00:00
|
|
|
reschedule_signals(p, unblocked, 0);
|
2009-10-11 16:49:30 +00:00
|
|
|
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* If the current process has received a signal (should be caught or cause
|
|
|
|
* termination, should interrupt current syscall), return the signal number.
|
|
|
|
* Stop signals with default action are processed immediately, then cleared;
|
|
|
|
* they aren't returned. This is checked after each entry to the system for
|
|
|
|
* a syscall or trap (though this can usually be done without calling issignal
|
2002-05-29 23:44:32 +00:00
|
|
|
* by checking the pending signal masks in cursig.) The normal call
|
1994-05-24 10:09:53 +00:00
|
|
|
* sequence is
|
|
|
|
*
|
2002-06-29 17:26:22 +00:00
|
|
|
* while (sig = cursig(curthread))
|
1999-09-29 15:03:48 +00:00
|
|
|
* postsig(sig);
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2003-05-05 21:16:28 +00:00
|
|
|
static int
|
2009-07-14 22:52:46 +00:00
|
|
|
issignal(struct thread *td, int stop_allowed)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2002-06-29 17:26:22 +00:00
|
|
|
struct proc *p;
|
- Merge struct procsig with struct sigacts.
- Move struct sigacts out of the u-area and malloc() it using the
M_SUBPROC malloc bucket.
- Add a small sigacts_*() API for managing sigacts structures: sigacts_alloc(),
sigacts_free(), sigacts_copy(), sigacts_share(), and sigacts_shared().
- Remove the p_sigignore, p_sigacts, and p_sigcatch macros.
- Add a mutex to struct sigacts that protects all the members of the struct.
- Add sigacts locking.
- Remove Giant from nosys(), kill(), killpg(), and kern_sigaction() now
that sigacts is locked.
- Several in-kernel functions such as psignal(), tdsignal(), trapsignal(),
and thread_stopped() are now MP safe.
Reviewed by: arch@
Approved by: re (rwatson)
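A minimal sketch of the sigacts locking rule described above: members of struct sigacts are read and written under ps_mtx. The helper is hypothetical; the fields it touches appear in the issignal() code below.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/signalvar.h>

/* Hypothetical helper: check whether a signal is currently ignored. */
static int
example_sig_is_ignored(struct proc *p, int sig)
{
	struct sigacts *ps = p->p_sigacts;
	int ignored;

	mtx_lock(&ps->ps_mtx);
	ignored = SIGISMEMBER(ps->ps_sigignore, sig);
	mtx_unlock(&ps->ps_mtx);
	return (ignored);
}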
2003-05-13 20:36:02 +00:00
|
|
|
struct sigacts *ps;
|
2009-10-11 16:49:30 +00:00
|
|
|
struct sigqueue *queue;
|
2003-03-31 22:49:17 +00:00
|
|
|
sigset_t sigpending;
|
2009-10-11 16:49:30 +00:00
|
|
|
int sig, prop, newsig, signo;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2002-06-29 17:26:22 +00:00
|
|
|
p = td->td_proc;
|
2003-05-13 20:36:02 +00:00
|
|
|
ps = p->p_sigacts;
|
|
|
|
mtx_assert(&ps->ps_mtx, MA_OWNED);
|
2001-03-07 02:59:54 +00:00
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
1994-05-24 10:09:53 +00:00
|
|
|
for (;;) {
|
1997-12-06 04:11:14 +00:00
|
|
|
int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);
|
|
|
|
|
2005-10-14 12:43:47 +00:00
|
|
|
sigpending = td->td_sigqueue.sq_signals;
|
2009-10-11 16:49:30 +00:00
|
|
|
SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
|
2003-03-31 22:49:17 +00:00
|
|
|
SIGSETNAND(sigpending, td->td_sigmask);
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
if (p->p_flag & P_PPWAIT)
|
2003-03-31 22:49:17 +00:00
|
|
|
SIG_STOPSIGMASK(sigpending);
|
|
|
|
if (SIGISEMPTY(sigpending)) /* no signal to send */
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
2003-03-31 22:49:17 +00:00
|
|
|
sig = sig_ffs(&sigpending);
|
1997-12-06 04:11:14 +00:00
|
|
|
|
2004-01-08 22:44:54 +00:00
|
|
|
if (p->p_stops & S_SIG) {
|
|
|
|
mtx_unlock(&ps->ps_mtx);
|
|
|
|
stopevent(p, S_SIG, sig);
|
|
|
|
mtx_lock(&ps->ps_mtx);
|
|
|
|
}
|
1997-12-06 04:11:14 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* We should see pending but ignored signals
|
|
|
|
* only if P_TRACED was on when they were posted.
|
|
|
|
*/
|
2003-05-13 20:36:02 +00:00
|
|
|
if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
|
1. Change the prototypes of trapsignal and sendsig to take a ksiginfo_t *;
most changes in MD code are trivial. Before this change, trapsignal and
sendsig used discrete parameters; now they use member fields of the
ksiginfo_t structure. For sendsig, this change allows us to pass a
POSIX realtime signal value to user code.
2. Remove cpu_thread_siginfo; it is no longer needed because we now always
generate ksiginfo_t data and feed it to libpthread.
3. Add p_sigqueue to the proc structure to hold shared signals that are
blocked by all threads in the proc.
4. Add td_sigqueue to the thread structure to hold all signals delivered
to a thread.
5. i386 and amd64 now return the POSIX standard si_code; other arches will
be fixed.
6. In this sigqueue implementation, the pending signal set is kept as
before; an extra siginfo list holds additional siginfo_t data for signals.
Kernel code using psignal() still behaves as before and cannot fail even
under memory pressure; the only exception is that when deleting a signal
we should call sigqueue_delete to remove the signal from the sigqueue
rather than using SIGDELSET. Currently no kernel code delivers a signal
with additional data, so the kernel should be as stable as before. A
ksiginfo can carry more information, and allows, for example, a signal to
be delivered but its siginfo data thrown away if memory is insufficient.
SIGKILL and SIGSTOP take a fast path in sigqueue_add(), because they
cannot be caught or masked.
The sigqueue() syscall allows user code to queue a signal to a target
process; if resources are unavailable, EAGAIN is returned as the
specification requires.
Just before a thread exits, its signal queue memory is freed by
sigqueue_flush().
Currently all signals may be queued, not only realtime signals.
Earlier patch reviewed by: jhb, deischen
Tested on: i386, amd64
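A minimal userland sketch of the sigqueue() interface mentioned above (illustrative only, not part of this file; the payload value is made up and the target pid comes from the command line):

#include <sys/types.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

int
main(int argc, char **argv)
{
	union sigval sv;
	pid_t pid;

	if (argc < 2) {
		fprintf(stderr, "usage: %s pid\n", argv[0]);
		return (1);
	}
	pid = (pid_t)strtol(argv[1], NULL, 10);
	sv.sival_int = 42;	/* arbitrary payload carried in the siginfo */
	if (sigqueue(pid, SIGRTMIN, sv) == -1) {
		/* EAGAIN indicates queuing resources were unavailable, as noted above. */
		perror("sigqueue");
		return (1);
	}
	return (0);
}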
2005-10-14 12:43:47 +00:00
|
|
|
sigqueue_delete(&td->td_sigqueue, sig);
|
2009-10-11 16:49:30 +00:00
|
|
|
sigqueue_delete(&p->p_sigqueue, sig);
|
1994-05-24 10:09:53 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
|
|
|
|
/*
|
2002-05-09 04:13:41 +00:00
|
|
|
* If traced, always stop.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2003-05-13 20:36:02 +00:00
|
|
|
mtx_unlock(&ps->ps_mtx);
|
Add code to support debugging a threaded process.
1. Add tm_lwpid to kse_thr_mailbox to indicate which kernel thread the
current user thread is running on. Add tm_dflags to kse_thr_mailbox;
these flags are written by the debugger and tell the UTS and kernel
what should be done while the process is being debugged. Currently
there are two flags, TMDF_SSTEP and TMDF_DONOTRUNUSER.
TMDF_SSTEP tells the kernel to turn single stepping on, or off when
the flag is not set.
TMDF_DONOTRUNUSER tells the kernel to schedule an upcall whenever
possible; to the UTS it means do not run the user thread until the
debugger clears the flag. This behaviour is necessary because gdb
wants to resume only one thread when that thread's pc is at a
breakpoint and the thread needs to step forward; to keep the other
threads from sneaking past the breakpoint, gdb removes the breakpoint
and lets only the one thread run. Also add km_lwp to kse_mailbox; the
lwp id is copied to kse_thr_mailbox at context switch time when the
process is not being debugged, so when the process is attached the
debugger can map a kernel thread to a user thread.
2. Add p_xthread to the proc structure and td_xsig to the thread
structure. p_xthread is set by a thread when it wants to report an
event to the debugger; every thread can set the pointer, and in
ptracestop the last thread to report an event wins the race. Every
thread has a td_xsig to exchange a signal with the debugger; a thread
uses the TDF_XSIG flag to indicate that it is reporting a signal to
the debugger and keeps retrying until the flag is cleared by the
debugger. p_xthread may be used by the debugger to indicate the
CURRENT thread. p_xstat is still kept in the proc structure so that
wait() keeps working; in the future we may just use td_xsig.
3. Add the TDF_DBSUSPEND flag, used by the debugger to suspend a thread.
When the process stops, the debugger can set the flag for a thread;
the thread checks the flag in thread_suspend_check and loops until
the flag is cleared by the debugger, the process is detached, or the
process is exiting. The flag is also checked in ptracestop, so the
debugger can temporarily suspend a thread even if the thread wants to
exchange a signal.
4. Currently ptrace always resumes all threads, but a thread that
already has TDF_DBSUSPEND set by the debugger will not run.
Encouraged by: marcel, julian, deischen
2004-07-13 07:20:10 +00:00
|
|
|
newsig = ptracestop(td, sig);
|
2003-05-13 20:36:02 +00:00
|
|
|
mtx_lock(&ps->ps_mtx);
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2005-10-14 12:43:47 +00:00
|
|
|
if (sig != newsig) {
|
2005-11-12 04:22:16 +00:00
|
|
|
ksiginfo_t ksi;
|
2009-10-11 16:49:30 +00:00
|
|
|
|
|
|
|
queue = &td->td_sigqueue;
|
2005-10-14 12:43:47 +00:00
|
|
|
/*
|
|
|
|
* clear old signal.
|
|
|
|
* XXX shrug off debugger, it causes siginfo to
|
|
|
|
* be thrown away.
|
|
|
|
*/
|
2009-10-11 16:49:30 +00:00
|
|
|
if (sigqueue_get(queue, sig, &ksi) == 0) {
|
|
|
|
queue = &p->p_sigqueue;
|
|
|
|
signo = sigqueue_get(queue, sig, &ksi);
|
|
|
|
KASSERT(signo == sig, ("signo != sig"));
|
|
|
|
}
|
2005-10-14 12:43:47 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If parent wants us to take the signal,
|
|
|
|
* then it will leave it in p->p_xstat;
|
|
|
|
* otherwise we just look for signals again.
|
|
|
|
*/
|
|
|
|
if (newsig == 0)
|
|
|
|
continue;
|
|
|
|
sig = newsig;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Put the new signal into td_sigqueue. If the
|
|
|
|
* signal is being masked, look for other signals.
|
|
|
|
*/
|
2009-10-11 16:49:30 +00:00
|
|
|
SIGADDSET(queue->sq_signals, sig);
|
2005-10-14 12:43:47 +00:00
|
|
|
if (SIGISMEMBER(td->td_sigmask, sig))
|
|
|
|
continue;
|
|
|
|
signotify(td);
|
|
|
|
}
|
|
|
|
|
2003-05-16 01:34:23 +00:00
|
|
|
/*
|
|
|
|
* If the traced bit got turned off, go back up
|
|
|
|
* to the top to rescan signals. This ensures
|
|
|
|
* that p_sig* and p_sigact are consistent.
|
|
|
|
*/
|
|
|
|
if ((p->p_flag & P_TRACED) == 0)
|
|
|
|
continue;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
2003-05-16 01:34:23 +00:00
|
|
|
prop = sigprop(sig);
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Decide whether the signal should be returned.
|
|
|
|
* Return the signal's number, or fall through
|
|
|
|
* to clear it from the pending mask.
|
|
|
|
*/
|
2002-12-17 19:13:03 +00:00
|
|
|
switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2002-12-17 19:13:03 +00:00
|
|
|
case (intptr_t)SIG_DFL:
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Don't take default actions on system processes.
|
|
|
|
*/
|
|
|
|
if (p->p_pid <= 1) {
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
/*
|
|
|
|
* Are you sure you want to ignore SIGSEGV
|
|
|
|
* in init? XXX
|
|
|
|
*/
|
1994-10-10 01:00:49 +00:00
|
|
|
printf("Process (pid %lu) got signal %d\n",
|
1999-09-29 15:03:48 +00:00
|
|
|
(u_long)p->p_pid, sig);
|
1994-05-24 10:09:53 +00:00
|
|
|
#endif
|
|
|
|
break; /* == ignore */
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* If there is a pending stop signal to process
|
|
|
|
* with default action, stop here,
|
|
|
|
* then clear the signal. However,
|
|
|
|
* if process is member of an orphaned
|
|
|
|
* process group, ignore tty stop signals.
|
|
|
|
*/
|
|
|
|
if (prop & SA_STOP) {
|
|
|
|
if (p->p_flag & P_TRACED ||
|
|
|
|
(p->p_pgrp->pg_jobc == 0 &&
|
2002-02-23 11:12:57 +00:00
|
|
|
prop & SA_TTYSTOP))
|
1994-05-24 10:09:53 +00:00
|
|
|
break; /* == ignore */
|
2009-07-14 22:52:46 +00:00
|
|
|
|
|
|
|
/* Ignore, but do not drop the stop signal. */
|
|
|
|
if (stop_allowed != SIG_STOP_ALLOWED)
|
|
|
|
return (sig);
|
2003-05-13 20:36:02 +00:00
|
|
|
mtx_unlock(&ps->ps_mtx);
|
|
|
|
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
|
2007-03-21 21:20:51 +00:00
|
|
|
&p->p_mtx.lock_object, "Catching SIGSTOP");
|
2003-03-11 00:07:53 +00:00
|
|
|
p->p_flag |= P_STOPPED_SIG;
|
1999-09-29 15:03:48 +00:00
|
|
|
p->p_xstat = sig;
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SLOCK(p);
|
2006-02-13 03:16:55 +00:00
|
|
|
sig_suspend_threads(td, p, 0);
|
Commit 4/14 of sched_lock decomposition.
- Use thread_lock() rather than sched_lock for per-thread scheduling
synchronization.
- Use the per-process spinlock rather than the sched_lock for per-process
scheduling synchronization.
- Move some common code into thread_suspend_switch() to handle the
mechanics of suspending a thread. The locking here is incredibly
convoluted and should be simplified.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 23:52:24 +00:00
|
|
|
thread_suspend_switch(td);
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SUNLOCK(p);
|
2003-05-13 20:36:02 +00:00
|
|
|
mtx_lock(&ps->ps_mtx);
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
2002-09-29 04:47:41 +00:00
|
|
|
} else if (prop & SA_IGNORE) {
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Except for SIGCONT, shouldn't get here.
|
|
|
|
* Default action is to ignore; drop it.
|
|
|
|
*/
|
|
|
|
break; /* == ignore */
|
|
|
|
} else
|
1999-09-29 15:03:48 +00:00
|
|
|
return (sig);
|
1994-05-24 10:09:53 +00:00
|
|
|
/*NOTREACHED*/
|
|
|
|
|
2002-12-17 19:13:03 +00:00
|
|
|
case (intptr_t)SIG_IGN:
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Masking above should prevent us ever trying
|
|
|
|
* to take action on an ignored signal other
|
|
|
|
* than SIGCONT, unless process is traced.
|
|
|
|
*/
|
|
|
|
if ((prop & SA_CONT) == 0 &&
|
|
|
|
(p->p_flag & P_TRACED) == 0)
|
|
|
|
printf("issignal\n");
|
|
|
|
break; /* == ignore */
|
|
|
|
|
|
|
|
default:
|
|
|
|
/*
|
|
|
|
* This signal has an action, let
|
|
|
|
* postsig() process it.
|
|
|
|
*/
|
1999-09-29 15:03:48 +00:00
|
|
|
return (sig);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2005-10-14 12:43:47 +00:00
|
|
|
sigqueue_delete(&td->td_sigqueue, sig); /* take the signal! */
|
2009-10-11 16:49:30 +00:00
|
|
|
sigqueue_delete(&p->p_sigqueue, sig);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
/* NOTREACHED */
|
|
|
|
}
|
|
|
|
|
2003-03-11 00:07:53 +00:00
|
|
|
void
|
|
|
|
thread_stopped(struct proc *p)
|
|
|
|
{
|
|
|
|
int n;
|
|
|
|
|
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SLOCK_ASSERT(p, MA_OWNED);
|
2003-03-11 00:07:53 +00:00
|
|
|
n = p->p_suspcount;
|
2006-02-23 05:50:55 +00:00
|
|
|
if (p == curproc)
|
2003-03-11 00:07:53 +00:00
|
|
|
n++;
|
|
|
|
if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SUNLOCK(p);
|
2005-04-19 08:11:28 +00:00
|
|
|
p->p_flag &= ~P_WAITED;
|
2003-03-11 00:07:53 +00:00
|
|
|
PROC_LOCK(p->p_pptr);
|
2006-02-04 14:10:57 +00:00
|
|
|
childproc_stopped(p, (p->p_flag & P_TRACED) ?
|
|
|
|
CLD_TRAPPED : CLD_STOPPED);
|
2003-03-11 00:07:53 +00:00
|
|
|
PROC_UNLOCK(p->p_pptr);
|
2008-11-05 03:01:23 +00:00
|
|
|
PROC_SLOCK(p);
|
2003-03-11 00:07:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Take the action for the specified signal
|
|
|
|
* from the current set of pending signals.
|
|
|
|
*/
|
|
|
|
void
|
1999-09-29 15:03:48 +00:00
|
|
|
postsig(sig)
|
|
|
|
register int sig;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td = curthread;
|
|
|
|
register struct proc *p = td->td_proc;
|
2001-03-07 02:59:54 +00:00
|
|
|
struct sigacts *ps;
|
1999-09-29 15:03:48 +00:00
|
|
|
sig_t action;
|
2005-10-14 12:43:47 +00:00
|
|
|
ksiginfo_t ksi;
|
2009-10-27 10:47:58 +00:00
|
|
|
sigset_t returnmask, mask;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1999-09-29 15:03:48 +00:00
|
|
|
KASSERT(sig != 0, ("postsig"));
|
1999-01-08 17:31:30 +00:00
|
|
|
|
2001-06-22 23:02:37 +00:00
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
2001-03-07 02:59:54 +00:00
|
|
|
ps = p->p_sigacts;
|
2003-05-13 20:36:02 +00:00
|
|
|
mtx_assert(&ps->ps_mtx, MA_OWNED);
|
2005-10-23 04:12:26 +00:00
|
|
|
ksiginfo_init(&ksi);
|
2009-10-11 16:49:30 +00:00
|
|
|
if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
|
|
|
|
sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
|
|
|
|
return;
|
2005-10-14 12:43:47 +00:00
|
|
|
ksi.ksi_signo = sig;
|
2005-10-30 02:56:08 +00:00
|
|
|
if (ksi.ksi_code == SI_TIMER)
|
|
|
|
itimer_accept(p, ksi.ksi_timerid, &ksi);
|
1999-09-29 15:03:48 +00:00
|
|
|
action = ps->ps_sigact[_SIG_IDX(sig)];
|
1994-05-24 10:09:53 +00:00
|
|
|
#ifdef KTRACE
|
2002-06-07 05:43:02 +00:00
|
|
|
if (KTRPOINT(td, KTR_PSIG))
|
2003-06-09 17:38:32 +00:00
|
|
|
ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
|
2003-03-31 22:49:17 +00:00
|
|
|
&td->td_oldsigmask : &td->td_sigmask, 0);
|
1994-05-24 10:09:53 +00:00
|
|
|
#endif
|
2004-01-08 22:44:54 +00:00
|
|
|
if (p->p_stops & S_SIG) {
|
|
|
|
mtx_unlock(&ps->ps_mtx);
|
|
|
|
stopevent(p, S_SIG, sig);
|
|
|
|
mtx_lock(&ps->ps_mtx);
|
|
|
|
}
|
1997-12-06 04:11:14 +00:00
|
|
|
|
2006-10-26 21:42:22 +00:00
|
|
|
if (action == SIG_DFL) {
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Default action, where the default is to kill
|
|
|
|
* the process. (Other cases were ignored above.)
|
|
|
|
*/
|
2003-05-13 20:36:02 +00:00
|
|
|
mtx_unlock(&ps->ps_mtx);
|
2001-09-12 08:38:13 +00:00
|
|
|
sigexit(td, sig);
|
1994-05-24 10:09:53 +00:00
|
|
|
/* NOTREACHED */
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* If we get here, the signal must be caught.
|
|
|
|
*/
|
2003-03-31 22:49:17 +00:00
|
|
|
KASSERT(action != SIG_IGN && !SIGISMEMBER(td->td_sigmask, sig),
|
1999-01-10 01:58:29 +00:00
|
|
|
("postsig action"));
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Set the new mask value and also defer further
|
1999-10-11 20:33:17 +00:00
|
|
|
* occurrences of this signal.
|
1994-05-24 10:09:53 +00:00
|
|
|
*
|
1999-10-11 20:33:17 +00:00
|
|
|
* Special case: user has done a sigsuspend. Here the
|
1994-05-24 10:09:53 +00:00
|
|
|
* current mask is not of interest, but rather the
|
1999-10-11 20:33:17 +00:00
|
|
|
* mask from before the sigsuspend is what we want
|
1994-05-24 10:09:53 +00:00
|
|
|
* restored after the signal processing is completed.
|
|
|
|
*/
|
2003-06-09 17:38:32 +00:00
|
|
|
if (td->td_pflags & TDP_OLDMASK) {
|
2003-03-31 22:49:17 +00:00
|
|
|
returnmask = td->td_oldsigmask;
|
2003-06-09 17:38:32 +00:00
|
|
|
td->td_pflags &= ~TDP_OLDMASK;
|
1994-05-24 10:09:53 +00:00
|
|
|
} else
|
2003-03-31 22:49:17 +00:00
|
|
|
returnmask = td->td_sigmask;
|
1999-09-29 15:03:48 +00:00
|
|
|
|
2009-10-30 10:10:39 +00:00
|
|
|
mask = ps->ps_catchmask[_SIG_IDX(sig)];
|
|
|
|
if (!SIGISMEMBER(ps->ps_signodefer, sig))
|
2009-10-27 10:47:58 +00:00
|
|
|
SIGADDSET(mask, sig);
|
2009-10-30 10:10:39 +00:00
|
|
|
kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
|
|
|
|
SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
|
1999-09-29 15:03:48 +00:00
|
|
|
|
|
|
|
if (SIGISMEMBER(ps->ps_sigreset, sig)) {
|
1996-03-30 15:15:30 +00:00
|
|
|
/*
|
2002-09-01 20:37:28 +00:00
|
|
|
* See kern_sigaction() for origin of this code.
|
1996-03-30 15:15:30 +00:00
|
|
|
*/
|
2003-05-13 20:36:02 +00:00
|
|
|
SIGDELSET(ps->ps_sigcatch, sig);
|
1999-09-29 15:03:48 +00:00
|
|
|
if (sig != SIGCONT &&
|
|
|
|
sigprop(sig) & SA_IGNORE)
|
2003-05-13 20:36:02 +00:00
|
|
|
SIGADDSET(ps->ps_sigignore, sig);
|
1999-09-29 15:03:48 +00:00
|
|
|
ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
|
1996-03-15 08:01:33 +00:00
|
|
|
}
|
2007-06-01 01:12:45 +00:00
|
|
|
td->td_ru.ru_nsignals++;
|
2009-02-26 15:51:54 +00:00
|
|
|
if (p->p_sig == sig) {
|
1998-12-19 02:55:34 +00:00
|
|
|
p->p_code = 0;
|
|
|
|
p->p_sig = 0;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2006-10-26 21:42:22 +00:00
|
|
|
(*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
}
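The mask handling in postsig() above mirrors the userland sigaction() contract: the handler's sa_mask is blocked while the handler runs, and the caught signal itself is also blocked unless SA_NODEFER was requested. A small sketch of that userland side (not part of this file; the handler and signal choices are arbitrary):

#include <signal.h>
#include <string.h>

static void
usr1_handler(int sig)
{
	/*
	 * While this handler runs, SIGUSR2 is blocked because it is in
	 * sa_mask (ps_catchmask in the kernel); SIGUSR1 itself is not
	 * re-blocked because SA_NODEFER was set (ps_signodefer).
	 */
	(void)sig;
}

int
install_usr1_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = usr1_handler;
	sa.sa_flags = SA_NODEFER;
	sigemptyset(&sa.sa_mask);
	sigaddset(&sa.sa_mask, SIGUSR2);
	return (sigaction(SIGUSR1, &sa, NULL));
}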
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Kill the current process for stated reason.
|
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
void
|
1994-05-24 10:09:53 +00:00
|
|
|
killproc(p, why)
|
|
|
|
struct proc *p;
|
|
|
|
char *why;
|
|
|
|
{
|
2001-05-15 23:13:58 +00:00
|
|
|
|
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
2000-09-07 01:33:02 +00:00
|
|
|
CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)",
|
|
|
|
p, p->p_pid, p->p_comm);
|
1996-01-31 12:44:33 +00:00
|
|
|
log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid, p->p_comm,
|
o Merge contents of struct pcred into struct ucred. Specifically, add the
real uid, saved uid, real gid, and saved gid to ucred, as well as the
pcred->pc_uidinfo, which was associated with the real uid, only rename
it to cr_ruidinfo so as not to conflict with cr_uidinfo, which
corresponds to the effective uid.
o Remove p_cred from struct proc; add p_ucred to struct proc, replacing
the original macro that pointed p->p_ucred to p->p_cred->pc_ucred.
o Universally update code so that it makes use of ucred instead of pcred,
p->p_ucred instead of p->p_pcred, cr_ruidinfo instead of p_uidinfo,
cr_{r,sv}{u,g}id instead of p_*, etc.
o Remove pcred0 and its initialization from init_main.c; initialize
cr_ruidinfo there.
o Restructure many credential modification chunks to always crdup while
we figure out locking and optimizations; generally speaking, this
means moving to a structure like this:
newcred = crdup(oldcred);
...
p->p_ucred = newcred;
crfree(oldcred);
It's not race-free, but better than nothing. There are also races
in sys_process.c, all inter-process authorization, fork, exec, and
exit.
o Remove sigio->sio_ruid since sigio->sio_ucred now contains the ruid;
remove comments indicating that the old arrangement was a problem.
o Restructure exec1() a little to use newcred/oldcred arrangement, and
use improved uid management primitives.
o Clean up exit1() so as to do less work in credential cleanup due to
pcred removal.
o Clean up fork1() so as to do less work in credential cleanup and
allocation.
o Clean up ktrcanset() to take into account changes, and move to using
suser_xxx() instead of performing a direct uid==0 comparison.
o Improve commenting in various kern_prot.c credential modification
calls to better document current behavior. In a couple of places,
current behavior is a little questionable and we need to check
POSIX.1 to make sure it's "right". More commenting work still
remains to be done.
o Update credential management calls, such as crfree(), to take into
account new ruidinfo reference.
o Modify or add the following uid and gid helper routines:
change_euid()
change_egid()
change_ruid()
change_rgid()
change_svuid()
change_svgid()
In each case, the call now acts on a credential not a process, and as
such no longer requires more complicated process locking/etc. They
now assume the caller will do any necessary allocation of an
exclusive credential reference. Each is commented to document its
reference requirements.
o CANSIGIO() is simplified to require only credentials, not processes
and pcreds.
o Remove lots of (p_pcred==NULL) checks.
o Add an XXX to authorization code in nfs_lock.c, since it's
questionable, and needs to be considered carefully.
o Simplify posix4 authorization code to require only credentials, not
processes and pcreds. Note that this authorization, as well as
CANSIGIO(), needs to be updated to use the p_cansignal() and
p_cansched() centralized authorization routines, as they currently
do not take into account some desirable restrictions that are handled
by the centralized routines, as well as being inconsistent with other
similar authorization instances.
o Update libkvm to take these changes into account.
Obtained from: TrustedBSD Project
Reviewed by: green, bde, jhb, freebsd-arch, freebsd-audit
2001-05-25 16:59:11 +00:00
|
|
|
p->p_ucred ? p->p_ucred->cr_uid : -1, why);
|
1994-05-24 10:09:53 +00:00
|
|
|
psignal(p, SIGKILL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Force the current process to exit with the specified signal, dumping core
|
|
|
|
* if appropriate. We bypass the normal tests for masked and caught signals,
|
|
|
|
* allowing unrecoverable failures to terminate the process without changing
|
|
|
|
* signal state. Mark the accounting record with the signal termination.
|
|
|
|
* If dumping core, save the signal number for the debugger. Calls exit and
|
|
|
|
* does not return.
|
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
void
|
2001-09-12 08:38:13 +00:00
|
|
|
sigexit(td, sig)
|
|
|
|
struct thread *td;
|
1999-09-29 15:03:48 +00:00
|
|
|
int sig;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2001-03-07 02:59:54 +00:00
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
1994-05-24 10:09:53 +00:00
|
|
|
p->p_acflag |= AXSIG;
|
2005-04-10 02:31:24 +00:00
|
|
|
/*
|
|
|
|
* We must be single-threading to generate a core dump. This
|
|
|
|
* ensures that the registers in the core file are up-to-date.
|
|
|
|
* Also, the ELF dump handler assumes that the thread list doesn't
|
|
|
|
* change out from under it.
|
|
|
|
*
|
|
|
|
* XXX If another thread attempts to single-thread before us
|
|
|
|
* (e.g. via fork()), we won't get a dump at all.
|
|
|
|
*/
|
|
|
|
if ((sigprop(sig) & SA_CORE) && (thread_single(SINGLE_NO_EXIT) == 0)) {
|
1999-09-29 15:03:48 +00:00
|
|
|
p->p_sig = sig;
|
1994-09-30 00:38:34 +00:00
|
|
|
/*
|
|
|
|
* Log signals which would cause core dumps
|
1995-05-30 08:16:23 +00:00
|
|
|
* (Log as LOG_INFO to appease those who don't want
|
1994-09-30 00:38:34 +00:00
|
|
|
* these messages.)
|
|
|
|
* XXX : Todo, as well as euid, write out ruid too
|
2004-03-05 22:39:53 +00:00
|
|
|
* Note that coredump() drops proc lock.
|
1994-09-30 00:38:34 +00:00
|
|
|
*/
|
2001-09-12 08:38:13 +00:00
|
|
|
if (coredump(td) == 0)
|
1999-09-29 15:03:48 +00:00
|
|
|
sig |= WCOREFLAG;
|
1998-07-28 22:34:12 +00:00
|
|
|
if (kern_logsigexit)
|
|
|
|
log(LOG_INFO,
|
|
|
|
"pid %d (%s), uid %d: exited on signal %d%s\n",
|
|
|
|
p->p_pid, p->p_comm,
|
2002-04-13 23:33:36 +00:00
|
|
|
td->td_ucred ? td->td_ucred->cr_uid : -1,
|
1999-09-29 15:03:48 +00:00
|
|
|
sig &~ WCOREFLAG,
|
|
|
|
sig & WCOREFLAG ? " (core dumped)" : "");
|
2004-03-05 22:39:53 +00:00
|
|
|
} else
|
2001-03-07 02:59:54 +00:00
|
|
|
PROC_UNLOCK(p);
|
2001-09-12 08:38:13 +00:00
|
|
|
exit1(td, W_EXITCODE(0, sig));
|
1994-05-24 10:09:53 +00:00
|
|
|
/* NOTREACHED */
|
|
|
|
}
|
|
|
|
|
2005-11-08 09:09:26 +00:00
|
|
|
/*
|
2006-02-04 14:10:57 +00:00
|
|
|
* Send queued SIGCHLD to parent when child process's state
|
|
|
|
* is changed.
|
2005-11-08 09:09:26 +00:00
|
|
|
*/
|
2006-02-04 14:10:57 +00:00
|
|
|
static void
|
|
|
|
sigparent(struct proc *p, int reason, int status)
|
2005-11-08 09:09:26 +00:00
|
|
|
{
|
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
|
|
|
PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
|
|
|
|
|
|
|
|
if (p->p_ksi != NULL) {
|
|
|
|
p->p_ksi->ksi_signo = SIGCHLD;
|
|
|
|
p->p_ksi->ksi_code = reason;
|
2006-02-04 14:10:57 +00:00
|
|
|
p->p_ksi->ksi_status = status;
|
2005-11-08 09:09:26 +00:00
|
|
|
p->p_ksi->ksi_pid = p->p_pid;
|
|
|
|
p->p_ksi->ksi_uid = p->p_ucred->cr_ruid;
|
|
|
|
if (KSI_ONQ(p->p_ksi))
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
tdsignal(p->p_pptr, NULL, SIGCHLD, p->p_ksi);
|
|
|
|
}
|
|
|
|
|
2006-02-04 14:10:57 +00:00
|
|
|
static void
|
|
|
|
childproc_jobstate(struct proc *p, int reason, int status)
|
2005-11-08 09:09:26 +00:00
|
|
|
{
|
2006-02-04 14:10:57 +00:00
|
|
|
struct sigacts *ps;
|
|
|
|
|
2005-11-08 09:09:26 +00:00
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
2006-02-04 14:10:57 +00:00
|
|
|
PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
|
2005-11-08 09:09:26 +00:00
|
|
|
|
2006-02-04 14:10:57 +00:00
|
|
|
/*
|
|
|
|
* Wake up parent sleeping in kern_wait(), also send
|
|
|
|
* SIGCHLD to parent, but SIGCHLD does not guarantee
|
|
|
|
* that the parent will wake up, because the parent may have masked
|
|
|
|
* the signal.
|
|
|
|
*/
|
|
|
|
p->p_pptr->p_flag |= P_STATCHILD;
|
|
|
|
wakeup(p->p_pptr);
|
|
|
|
|
|
|
|
ps = p->p_pptr->p_sigacts;
|
|
|
|
mtx_lock(&ps->ps_mtx);
|
|
|
|
if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
|
|
|
|
mtx_unlock(&ps->ps_mtx);
|
|
|
|
sigparent(p, reason, status);
|
|
|
|
} else
|
|
|
|
mtx_unlock(&ps->ps_mtx);
|
|
|
|
}
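A hedged userland counterpart to the PS_NOCLDSTOP check above (a sketch, not part of this file; the handler is a stand-in): a parent that installs its SIGCHLD handler with SA_NOCLDSTOP asks not to be signalled when children merely stop or continue, which is exactly the case childproc_jobstate() filters out:

#include <signal.h>
#include <string.h>

static void
sigchld_handler(int sig)
{
	(void)sig;	/* a real program would reap children with waitpid() here */
}

int
install_sigchld_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = sigchld_handler;
	sa.sa_flags = SA_NOCLDSTOP;	/* maps to PS_NOCLDSTOP in the parent's sigacts */
	sigemptyset(&sa.sa_mask);
	return (sigaction(SIGCHLD, &sa, NULL));
}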
|
|
|
|
|
|
|
|
void
|
|
|
|
childproc_stopped(struct proc *p, int reason)
|
|
|
|
{
|
|
|
|
childproc_jobstate(p, reason, p->p_xstat);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
childproc_continued(struct proc *p)
|
|
|
|
{
|
|
|
|
childproc_jobstate(p, CLD_CONTINUED, SIGCONT);
|
2005-11-08 09:09:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
childproc_exited(struct proc *p)
|
|
|
|
{
|
|
|
|
int reason;
|
2005-11-09 07:58:16 +00:00
|
|
|
int status = p->p_xstat; /* convert to int */
|
2005-11-08 09:09:26 +00:00
|
|
|
|
|
|
|
reason = CLD_EXITED;
|
2005-11-09 07:58:16 +00:00
|
|
|
if (WCOREDUMP(status))
|
2005-11-08 09:09:26 +00:00
|
|
|
reason = CLD_DUMPED;
|
2005-11-09 07:58:16 +00:00
|
|
|
else if (WIFSIGNALED(status))
|
2006-02-04 14:10:57 +00:00
|
|
|
reason = CLD_KILLED;
|
|
|
|
/*
|
|
|
|
* XXX avoid calling wakeup(p->p_pptr), the work is
|
|
|
|
* done in exit1().
|
|
|
|
*/
|
|
|
|
sigparent(p, reason, status);
|
2005-11-08 09:09:26 +00:00
|
|
|
}
|
|
|
|
|
2004-11-15 20:51:32 +00:00
|
|
|
static char corefilename[MAXPATHLEN] = {"%N.core"};
|
1998-07-08 06:38:39 +00:00
|
|
|
SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
|
|
|
|
sizeof(corefilename), "process corefile name format string");
|
|
|
|
|
|
|
|
/*
|
|
|
|
* expand_name(name, uid, pid)
|
|
|
|
* Expand the name described in corefilename, using name, uid, and pid.
|
|
|
|
* corefilename is a printf-like string, with three format specifiers:
|
|
|
|
* %N name of process ("name")
|
|
|
|
* %P process id (pid)
|
|
|
|
* %U user id (uid)
|
|
|
|
* For example, "%N.core" is the default; they can be disabled completely
|
|
|
|
* by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
|
|
|
|
* This is controlled by the sysctl variable kern.corefile (see above).
|
|
|
|
*/
|
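As a hedged illustration of the kern.corefile knob described in the comment above (a userland sketch, not part of kern_sig.c; the format string is just an example adapted from the comment), a privileged program could set the corefile name format with sysctlbyname():

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	/* Example format: per-uid directory, process name and pid in the file name. */
	const char fmt[] = "/cores/%U/%N-%P.core";

	if (sysctlbyname("kern.corefile", NULL, NULL, fmt, sizeof(fmt)) == -1) {
		perror("sysctlbyname");	/* setting this sysctl requires privilege */
		return (1);
	}
	return (0);
}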
1999-09-01 00:29:56 +00:00
|
|
|
static char *
|
1998-07-08 06:38:39 +00:00
|
|
|
expand_name(name, uid, pid)
|
2002-05-08 09:06:47 +00:00
|
|
|
const char *name;
|
|
|
|
uid_t uid;
|
|
|
|
pid_t pid;
|
|
|
|
{
|
2008-03-08 16:31:29 +00:00
|
|
|
struct sbuf sb;
|
|
|
|
const char *format;
|
1998-07-08 06:38:39 +00:00
|
|
|
char *temp;
|
2008-03-08 16:31:29 +00:00
|
|
|
size_t i;
|
1998-07-08 06:38:39 +00:00
|
|
|
|
2002-05-08 09:06:47 +00:00
|
|
|
format = corefilename;
|
|
|
|
temp = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO);
|
1998-12-02 01:53:48 +00:00
|
|
|
if (temp == NULL)
|
2002-05-08 09:06:47 +00:00
|
|
|
return (NULL);
|
2008-03-08 16:31:29 +00:00
|
|
|
(void)sbuf_new(&sb, temp, MAXPATHLEN, SBUF_FIXEDLEN);
|
|
|
|
for (i = 0; format[i]; i++) {
|
1998-07-08 06:38:39 +00:00
|
|
|
switch (format[i]) {
|
|
|
|
case '%': /* Format character */
|
|
|
|
i++;
|
|
|
|
switch (format[i]) {
|
|
|
|
case '%':
|
2008-03-08 16:31:29 +00:00
|
|
|
sbuf_putc(&sb, '%');
|
1998-07-08 06:38:39 +00:00
|
|
|
break;
|
|
|
|
case 'N': /* process name */
|
2008-03-08 16:31:29 +00:00
|
|
|
sbuf_printf(&sb, "%s", name);
|
1998-07-08 06:38:39 +00:00
|
|
|
break;
|
|
|
|
case 'P': /* process id */
|
2008-03-08 16:31:29 +00:00
|
|
|
sbuf_printf(&sb, "%u", pid);
|
1998-07-08 06:38:39 +00:00
|
|
|
break;
|
|
|
|
case 'U': /* user id */
|
2008-03-08 16:31:29 +00:00
|
|
|
sbuf_printf(&sb, "%u", uid);
|
1998-07-08 06:38:39 +00:00
|
|
|
break;
|
|
|
|
default:
|
2002-05-08 09:06:47 +00:00
|
|
|
log(LOG_ERR,
|
2008-03-08 16:31:29 +00:00
|
|
|
"Unknown format character %c in "
|
|
|
|
"corename `%s'\n", format[i], format);
|
1998-07-08 06:38:39 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
2008-03-08 16:31:29 +00:00
|
|
|
sbuf_putc(&sb, format[i]);
|
1998-07-08 06:38:39 +00:00
|
|
|
}
|
|
|
|
}
|
2008-03-08 16:31:29 +00:00
|
|
|
if (sbuf_overflowed(&sb)) {
|
|
|
|
sbuf_delete(&sb);
|
|
|
|
log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too "
|
|
|
|
"long\n", (long)pid, name, (u_long)uid);
|
|
|
|
free(temp, M_TEMP);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
sbuf_finish(&sb);
|
|
|
|
sbuf_delete(&sb);
|
2002-05-08 09:06:47 +00:00
|
|
|
return (temp);
|
1998-07-08 06:38:39 +00:00
|
|
|
}
|
|
|
|
|
1999-09-01 00:29:56 +00:00
|
|
|
/*
|
|
|
|
* Dump a process' core. The main routine does some
|
|
|
|
* policy checking, and creates the name of the coredump;
|
|
|
|
* then it passes on a vnode and a size limit to the process-specific
|
|
|
|
* coredump routine if there is one; if there _is not_ one, it returns
|
|
|
|
* ENOSYS; otherwise it returns the error from the process-specific routine.
|
|
|
|
*/
static int
coredump(struct thread *td)
{
	struct proc *p = td->td_proc;
	register struct vnode *vp;
	register struct ucred *cred = td->td_ucred;
	struct flock lf;
	struct nameidata nd;
	struct vattr vattr;
	int error, error1, flags, locked;
	struct mount *mp;
	char *name;			/* name of corefile */
	off_t limit;
	int vfslocked;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td);
	_STOPEVENT(p, S_CORE, 0);

	name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid);
	if (name == NULL) {
		PROC_UNLOCK(p);
#ifdef AUDIT
		audit_proc_coredump(td, NULL, EINVAL);
#endif
		return (EINVAL);
	}
	if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0) {
		PROC_UNLOCK(p);
#ifdef AUDIT
		audit_proc_coredump(td, name, EFAULT);
#endif
		free(name, M_TEMP);
		return (EFAULT);
	}

	/*
	 * Note that the bulk of limit checking is done after
	 * the corefile is created.  The exception is if the limit
	 * for corefiles is 0, in which case we don't bother
	 * creating the corefile at all.  This layout means that
	 * a corefile is truncated instead of not being created,
	 * if it is larger than the limit.
	 */
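	/*
	 * Worked example (illustrative numbers, not from the original
	 * comment): with RLIMIT_CORE at 1 MB, a process whose dump would
	 * need 4 MB still gets its core file created and truncated here;
	 * the per-ABI writer then enforces the 1 MB limit, so the result
	 * is a truncated (possibly empty) core rather than no file at
	 * all.  Only a limit of exactly 0 suppresses creation entirely.
	 */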
	limit = (off_t)lim_cur(p, RLIMIT_CORE);
	PROC_UNLOCK(p);
	if (limit == 0) {
#ifdef AUDIT
		audit_proc_coredump(td, name, EFBIG);
#endif
		free(name, M_TEMP);
		return (EFBIG);
	}

restart:
	NDINIT(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_SYSSPACE, name, td);
	flags = O_CREAT | FWRITE | O_NOFOLLOW;
	error = vn_open_cred(&nd, &flags, S_IRUSR | S_IWUSR, VN_OPEN_NOAUDIT,
	    cred, NULL);
	if (error) {
#ifdef AUDIT
		audit_proc_coredump(td, name, error);
#endif
		free(name, M_TEMP);
		return (error);
	}
	vfslocked = NDHASGIANT(&nd);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred) || vattr.va_nlink != 1) {
		VOP_UNLOCK(vp, 0);
		error = EFAULT;
		goto close;
	}

	VOP_UNLOCK(vp, 0);
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	lf.l_type = F_WRLCK;
	locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);

	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
		lf.l_type = F_UNLCK;
		if (locked)
			VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
		if ((error = vn_close(vp, FWRITE, cred, td)) != 0)
			goto out;
		if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
			goto out;
		VFS_UNLOCK_GIANT(vfslocked);
		goto restart;
	}

	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	if (set_core_nodump_flag)
		vattr.va_flags = UF_NODUMP;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_SETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
	PROC_LOCK(p);
	p->p_acflag |= ACORE;
	PROC_UNLOCK(p);

	error = p->p_sysent->sv_coredump ?
	    p->p_sysent->sv_coredump(td, vp, limit) :
	    ENOSYS;

	if (locked) {
		lf.l_type = F_UNLCK;
		VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
	}
close:
	error1 = vn_close(vp, FWRITE, cred, td);
	if (error == 0)
		error = error1;
out:
#ifdef AUDIT
	audit_proc_coredump(td, name, error);
#endif
	free(name, M_TEMP);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

/*
 * Nonexistent system call-- signal process (may want to handle it).  Flag
 * error in case process won't see signal immediately (blocked or ignored).
 */
#ifndef _SYS_SYSPROTO_H_
struct nosys_args {
	int	dummy;
};
#endif
/* ARGSUSED */
int
nosys(td, args)
	struct thread *td;
	struct nosys_args *args;
{
	struct proc *p = td->td_proc;

	PROC_LOCK(p);
	psignal(p, SIGSYS);
	PROC_UNLOCK(p);
	return (ENOSYS);
}

/*
 * Send a SIGIO or SIGURG signal to a process or process group using stored
 * credentials rather than those of the current process.
 */
void
pgsigio(sigiop, sig, checkctty)
	struct sigio **sigiop;
	int sig, checkctty;
{
	struct sigio *sigio;

	SIGIO_LOCK();
	sigio = *sigiop;
	if (sigio == NULL) {
		SIGIO_UNLOCK();
		return;
	}
	if (sigio->sio_pgid > 0) {
		PROC_LOCK(sigio->sio_proc);
		if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
			psignal(sigio->sio_proc, sig);
		PROC_UNLOCK(sigio->sio_proc);
	} else if (sigio->sio_pgid < 0) {
		struct proc *p;

		PGRP_LOCK(sigio->sio_pgrp);
		LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
			    (checkctty == 0 || (p->p_flag & P_CONTROLT)))
				psignal(p, sig);
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(sigio->sio_pgrp);
	}
	SIGIO_UNLOCK();
}
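
/*
 * Usage sketch (an illustration; the identifiers below are hypothetical):
 * a driver or tty that supports asynchronous I/O notification keeps a
 * struct sigio * managed through the F_SETOWN machinery and, when data
 * becomes ready, signals the owner with something like
 *
 *	pgsigio(&sc->sc_sigio, SIGIO, 0);
 *
 * A non-zero checkctty confines delivery, in the process-group case, to
 * members that have a controlling terminal (P_CONTROLT).
 */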

/* Attach an EVFILT_SIGNAL knote to the current process' knote list. */
static int
filt_sigattach(struct knote *kn)
{
	struct proc *p = curproc;

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	knlist_add(&p->p_klist, kn, 0);

	return (0);
}

/* Detach a signal knote from its process. */
static void
filt_sigdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	knlist_remove(&p->p_klist, kn, 0);
}

/*
 * signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
static int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}
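
/*
 * For reference (a sketch, not a verbatim quote of the delivery path):
 * senders are expected to post hints of the form (NOTE_SIGNAL | signum),
 * for example
 *
 *	KNOTE_LOCKED(&p->p_klist, NOTE_SIGNAL | sig);
 *
 * which filt_signal() above unmasks and compares against kn_id.
 */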
|
- Merge struct procsig with struct sigacts.
- Move struct sigacts out of the u-area and malloc() it using the
M_SUBPROC malloc bucket.
- Add a small sigacts_*() API for managing sigacts structures: sigacts_alloc(),
sigacts_free(), sigacts_copy(), sigacts_share(), and sigacts_shared().
- Remove the p_sigignore, p_sigacts, and p_sigcatch macros.
- Add a mutex to struct sigacts that protects all the members of the struct.
- Add sigacts locking.
- Remove Giant from nosys(), kill(), killpg(), and kern_sigaction() now
that sigacts is locked.
- Several in-kernel functions such as psignal(), tdsignal(), trapsignal(),
and thread_stopped() are now MP safe.
Reviewed by: arch@
Approved by: re (rwatson)
2003-05-13 20:36:02 +00:00
|
|
|
|
|
|
|
struct sigacts *
|
|
|
|
sigacts_alloc(void)
|
|
|
|
{
|
|
|
|
struct sigacts *ps;
|
|
|
|
|
|
|
|
ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
|
|
|
|
ps->ps_refcnt = 1;
|
|
|
|
mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
|
|
|
|
return (ps);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
sigacts_free(struct sigacts *ps)
|
|
|
|
{
|
|
|
|
|
|
|
|
mtx_lock(&ps->ps_mtx);
|
|
|
|
ps->ps_refcnt--;
|
|
|
|
if (ps->ps_refcnt == 0) {
|
|
|
|
mtx_destroy(&ps->ps_mtx);
|
|
|
|
free(ps, M_SUBPROC);
|
|
|
|
} else
|
|
|
|
mtx_unlock(&ps->ps_mtx);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct sigacts *
|
|
|
|
sigacts_hold(struct sigacts *ps)
|
|
|
|
{
|
|
|
|
mtx_lock(&ps->ps_mtx);
|
|
|
|
ps->ps_refcnt++;
|
|
|
|
mtx_unlock(&ps->ps_mtx);
|
|
|
|
return (ps);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
sigacts_copy(struct sigacts *dest, struct sigacts *src)
|
|
|
|
{
|
|
|
|
|
|
|
|
KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
|
|
|
|
mtx_lock(&src->ps_mtx);
|
|
|
|
bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
|
|
|
|
mtx_unlock(&src->ps_mtx);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
sigacts_shared(struct sigacts *ps)
|
|
|
|
{
|
|
|
|
int shared;
|
|
|
|
|
|
|
|
mtx_lock(&ps->ps_mtx);
|
|
|
|
shared = ps->ps_refcnt > 1;
|
|
|
|
mtx_unlock(&ps->ps_mtx);
|
|
|
|
return (shared);
|
|
|
|
}
|
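
/*
 * Usage sketch for the sigacts API above (an illustration, not code lifted
 * from the fork path; p1, p2, and flags are hypothetical): a fork-like
 * consumer either shares or copies the parent's signal actions, roughly:
 *
 *	if (flags & RFSIGSHARE)
 *		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
 *	else {
 *		p2->p_sigacts = sigacts_alloc();
 *		sigacts_copy(p2->p_sigacts, p1->p_sigacts);
 *	}
 *
 * with sigacts_free() as the matching release during process teardown.
 */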