MFC: Close some races between procfs/ptrace and exit1() by changing
exit1() to block until any current PHOLDs are released. This includes simplifying the cleanup code in kern_ptrace() and removing the now-unnecessary vmspace reference-counting magic from proc_rwmem(). The locking for ptrace_single_step(), ptrace_set_pc(), and ptrace_clear_single_step() has also been fixed to be consistent across the tree.

Approved by:	re (scottl)
parent 6800859061
commit 3d7b05e4b2
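The pattern this change standardizes: a debugger or procfs consumer takes the process lock, applies _PHOLD() so the target cannot finish exiting, drops the lock while it touches the target, then reacquires the lock and _PRELE(); exit1() now sleeps on p_lock ("exithold") until the last hold is released. A minimal illustrative sketch of that caller-side sequence follows; debugger_touch_target() is a hypothetical stand-in for the real work (register access, proc_rwmem(), single-step fixup) and is not part of this commit.

/*
 * Illustrative sketch only (not part of the diff below): the shape of
 * the hold/release dance applied to the ptrace, procfs, and KSE call
 * sites in this commit.
 */
static int
debugger_op(struct thread *td, struct proc *p)
{
        int error;

        PROC_LOCK(p);
        if (p->p_flag & P_WEXIT) {
                /* Exiting processes can't be debugged. */
                PROC_UNLOCK(p);
                return (ESRCH);
        }
        _PHOLD(p);              /* Pin the process; exit1() will wait for us. */
        PROC_UNLOCK(p);

        error = debugger_touch_target(td, p);   /* hypothetical; may sleep */

        PROC_LOCK(p);
        _PRELE(p);              /* May wake an exiting process in "exithold". */
        PROC_UNLOCK(p);
        return (error);
}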
@@ -1768,6 +1768,8 @@ ptrace_read_int(struct thread *td, vm_offset_t addr, u_int32_t *v)
 {
     struct iovec iov;
     struct uio uio;
+
+    PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
     iov.iov_base = (caddr_t) v;
     iov.iov_len = sizeof(u_int32_t);
     uio.uio_iov = &iov;
@@ -1785,6 +1787,8 @@ ptrace_write_int(struct thread *td, vm_offset_t addr, u_int32_t v)
 {
     struct iovec iov;
     struct uio uio;
+
+    PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
     iov.iov_base = (caddr_t) &v;
     iov.iov_len = sizeof(u_int32_t);
     uio.uio_iov = &iov;
@@ -1848,6 +1852,8 @@ ptrace_read_register(struct thread *td, int regno)
 static int
 ptrace_clear_bpt(struct thread *td, struct mdbpt *bpt)
 {
+
+    PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
     return ptrace_write_int(td, bpt->addr, bpt->contents);
 }
 
@@ -1856,6 +1862,8 @@ ptrace_set_bpt(struct thread *td, struct mdbpt *bpt)
 {
     int error;
     u_int32_t bpins = 0x00000080;
+
+    PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
     error = ptrace_read_int(td, bpt->addr, &bpt->contents);
     if (error)
         return error;
@@ -1865,12 +1873,20 @@ ptrace_set_bpt(struct thread *td, struct mdbpt *bpt)
 int
 ptrace_clear_single_step(struct thread *td)
 {
+    struct proc *p;
+
+    p = td->td_proc;
+    PROC_LOCK_ASSERT(p, MA_OWNED);
     if (td->td_md.md_flags & MDTD_STEP2) {
+        PROC_UNLOCK(p);
         ptrace_clear_bpt(td, &td->td_md.md_sstep[1]);
         ptrace_clear_bpt(td, &td->td_md.md_sstep[0]);
+        PROC_LOCK(p);
         td->td_md.md_flags &= ~MDTD_STEP2;
     } else if (td->td_md.md_flags & MDTD_STEP1) {
+        PROC_UNLOCK(p);
         ptrace_clear_bpt(td, &td->td_md.md_sstep[0]);
+        PROC_LOCK(p);
         td->td_md.md_flags &= ~MDTD_STEP1;
     }
     return 0;
@@ -1879,6 +1895,7 @@ ptrace_clear_single_step(struct thread *td)
 int
 ptrace_single_step(struct thread *td)
 {
+    struct proc *p;
     int error;
     vm_offset_t pc = td->td_frame->tf_regs[FRAME_PC];
     alpha_instruction ins;
@@ -1888,9 +1905,11 @@ ptrace_single_step(struct thread *td)
     if (td->td_md.md_flags & (MDTD_STEP1|MDTD_STEP2))
         panic("ptrace_single_step: step breakpoints not removed");
 
+    p = td->td_proc;
+    PROC_UNLOCK(p);
     error = ptrace_read_int(td, pc, &ins.bits);
     if (error)
-        return (error);
+        goto out;
 
     switch (ins.branch_format.opcode) {
 
@@ -1930,18 +1949,20 @@ ptrace_single_step(struct thread *td)
     td->td_md.md_sstep[0].addr = addr[0];
     error = ptrace_set_bpt(td, &td->td_md.md_sstep[0]);
     if (error)
-        return (error);
+        goto out;
     if (count == 2) {
         td->td_md.md_sstep[1].addr = addr[1];
         error = ptrace_set_bpt(td, &td->td_md.md_sstep[1]);
         if (error) {
             ptrace_clear_bpt(td, &td->td_md.md_sstep[0]);
-            return (error);
+            goto out;
         }
         td->td_md.md_flags |= MDTD_STEP2;
     } else
         td->td_md.md_flags |= MDTD_STEP1;
 
+out:
+    PROC_LOCK(p);
     return (error);
 }
 
@@ -404,8 +404,12 @@ trap(a0, a1, a2, entry, framep)
     case ALPHA_IF_CODE_BUGCHK:
         if (td->td_md.md_flags & (MDTD_STEP1|MDTD_STEP2)) {
             mtx_lock(&Giant);
+            PROC_LOCK(p);
+            _PHOLD(p);
             ptrace_clear_single_step(td);
             td->td_frame->tf_regs[FRAME_PC] -= 4;
+            _PRELE(p);
+            PROC_UNLOCK(p);
             mtx_unlock(&Giant);
         }
         ucode = a0;     /* trap type */
@@ -319,6 +319,8 @@ ptrace_read_int(struct thread *td, vm_offset_t addr, u_int32_t *v)
 {
     struct iovec iov;
     struct uio uio;
+
+    PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
     iov.iov_base = (caddr_t) v;
     iov.iov_len = sizeof(u_int32_t);
     uio.uio_iov = &iov;
@@ -336,6 +338,8 @@ ptrace_write_int(struct thread *td, vm_offset_t addr, u_int32_t v)
 {
     struct iovec iov;
     struct uio uio;
+
+    PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
     iov.iov_base = (caddr_t) &v;
     iov.iov_len = sizeof(u_int32_t);
     uio.uio_iov = &iov;
@@ -351,28 +355,38 @@ ptrace_write_int(struct thread *td, vm_offset_t addr, u_int32_t v)
 int
 ptrace_single_step(struct thread *td)
 {
+    struct proc *p;
     int error;
 
     KASSERT(td->td_md.md_ptrace_instr == 0,
         ("Didn't clear single step"));
+    p = td->td_proc;
+    PROC_UNLOCK(p);
     error = ptrace_read_int(td, td->td_frame->tf_pc + 4,
         &td->td_md.md_ptrace_instr);
     if (error)
-        return (error);
+        goto out;
     error = ptrace_write_int(td, td->td_frame->tf_pc + 4,
         PTRACE_BREAKPOINT);
     if (error)
         td->td_md.md_ptrace_instr = 0;
     td->td_md.md_ptrace_addr = td->td_frame->tf_pc + 4;
+out:
+    PROC_LOCK(p);
     return (error);
 }
 
 int
 ptrace_clear_single_step(struct thread *td)
 {
+    struct proc *p;
+
     if (td->td_md.md_ptrace_instr) {
+        p = td->td_proc;
+        PROC_UNLOCK(p);
         ptrace_write_int(td, td->td_md.md_ptrace_addr,
             td->td_md.md_ptrace_instr);
+        PROC_LOCK(p);
         td->td_md.md_ptrace_instr = 0;
     }
     return (0);
@@ -250,7 +250,11 @@ undefinedinstruction(trapframe_t *frame)
             break;
 
     if (fault_code & FAULT_USER && fault_instruction == PTRACE_BREAKPOINT) {
+        PROC_LOCK(td->td_proc);
+        _PHOLD(td->td_proc);
         ptrace_clear_single_step(td);
+        _PRELE(td->td_proc);
+        PROC_UNLOCK(td->td_proc);
         return;
     }
 
@@ -99,6 +99,10 @@ pfs_visible(struct thread *td, struct pfs_node *pn, pid_t pid, struct proc **p)
     if (pid != NO_PID) {
         if ((proc = pfind(pid)) == NULL)
             PFS_RETURN (0);
+        if (proc->p_flag & P_WEXIT) {
+            PROC_UNLOCK(proc);
+            PFS_RETURN (0);
+        }
         if (p_cansee(td, proc) != 0 ||
             (pn->pn_vis != NULL && !(pn->pn_vis)(td, proc, pn))) {
             PROC_UNLOCK(proc);
@@ -706,6 +710,10 @@ pfs_readlink(struct vop_readlink_args *va)
     if (pvd->pvd_pid != NO_PID) {
         if ((proc = pfind(pvd->pvd_pid)) == NULL)
             PFS_RETURN (EIO);
+        if (proc->p_flag & P_WEXIT) {
+            PROC_UNLOCK(proc);
+            PFS_RETURN (EIO);
+        }
         _PHOLD(proc);
         PROC_UNLOCK(proc);
     }
@@ -356,6 +356,12 @@ linux_ptrace(struct thread *td, struct linux_ptrace_args *uap)
         break;
     }
 
+    /* Exiting processes can't be debugged. */
+    if ((p->p_flag & P_WEXIT) != 0) {
+        error = ESRCH;
+        goto fail;
+    }
+
     if ((error = p_candebug(td, p)) != 0)
         goto fail;
 
@@ -1100,6 +1100,7 @@ ia64_flush_dirty(struct thread *td, struct _special *r)
         r->rnat = (bspst > kstk && (bspst & 0x1ffL) < (kstk & 0x1ffL))
             ? *(uint64_t*)(kstk | 0x1f8L) : rnat;
     } else {
+        PHOLD(td->td_proc);
         iov.iov_base = (void*)(uintptr_t)kstk;
         iov.iov_len = r->ndirty;
         uio.uio_iov = &iov;
@@ -1117,6 +1118,7 @@ ia64_flush_dirty(struct thread *td, struct _special *r)
         */
         if (uio.uio_resid != 0 && error == 0)
             error = ENOSPC;
+        PRELE(td->td_proc);
     }
 
     r->bspstore += r->ndirty;
@@ -171,7 +171,29 @@ exit1(struct thread *td, int rv)
      */
     }
 
+    /*
+     * Wakeup anyone in procfs' PIOCWAIT.  They should have a hold
+     * on our vmspace, so we should block below until they have
+     * released their reference to us.  Note that if they have
+     * requested S_EXIT stops we will block here until they ack
+     * via PIOCCONT.
+     */
+    _STOPEVENT(p, S_EXIT, rv);
+
+    /*
+     * Note that we are exiting and do another wakeup of anyone in
+     * PIOCWAIT in case they aren't listening for S_EXIT stops or
+     * decided to wait again after we told them we are exiting.
+     */
     p->p_flag |= P_WEXIT;
+    wakeup(&p->p_stype);
+
+    /*
+     * Wait for any processes that have a hold on our vmspace to
+     * release their reference.
+     */
+    while (p->p_lock > 0)
+        msleep(&p->p_lock, &p->p_mtx, PWAIT, "exithold", 0);
     PROC_UNLOCK(p);
 
     /* Are we a task leader? */
@@ -189,11 +211,6 @@ exit1(struct thread *td, int rv)
         mtx_unlock(&ppeers_lock);
     }
 
-    PROC_LOCK(p);
-    _STOPEVENT(p, S_EXIT, rv);
-    wakeup(&p->p_stype);    /* Wakeup anyone in procfs' PIOCWAIT */
-    PROC_UNLOCK(p);
-
     /*
      * Check if any loadable modules need anything done at process exit.
      * E.g. SYSV IPC stuff
@@ -148,7 +148,9 @@ kse_switchin(struct thread *td, struct kse_switchin_args *uap)
             td->td_mailbox = uap->tmbx;
             td->td_pflags |= TDP_CAN_UNBIND;
         }
+        PROC_LOCK(td->td_proc);
         if (td->td_proc->p_flag & P_TRACED) {
+            _PHOLD(td->td_proc);
             if (tmbx.tm_dflags & TMDF_SSTEP)
                 ptrace_single_step(td);
             else
@@ -160,7 +162,9 @@ kse_switchin(struct thread *td, struct kse_switchin_args *uap)
                 ku->ku_flags |= KUF_DOUPCALL;
                 mtx_unlock_spin(&sched_lock);
             }
+            _PRELE(td->td_proc);
         }
+        PROC_UNLOCK(td->td_proc);
     }
     return ((error == 0) ? EJUSTRETURN : error);
 }
@@ -782,8 +786,13 @@ kse_create(struct thread *td, struct kse_create_args *uap)
              */
             cpu_set_upcall_kse(newtd, newku->ku_func,
                 newku->ku_mailbox, &newku->ku_stack);
-            if (p->p_flag & P_TRACED)
+            PROC_LOCK(p);
+            if (p->p_flag & P_TRACED) {
+                _PHOLD(p);
                 ptrace_clear_single_step(newtd);
+                _PRELE(p);
+            }
+            PROC_UNLOCK(p);
         }
     }
 
@@ -1376,8 +1385,13 @@ thread_userret(struct thread *td, struct trapframe *frame)
         if (!(ku->ku_mflags & KMF_NOUPCALL)) {
             cpu_set_upcall_kse(td, ku->ku_func, ku->ku_mailbox,
                 &ku->ku_stack);
-            if (p->p_flag & P_TRACED)
+            PROC_LOCK(p);
+            if (p->p_flag & P_TRACED) {
+                _PHOLD(p);
                 ptrace_clear_single_step(td);
+                _PRELE(p);
+            }
+            PROC_UNLOCK(p);
             error = suword32(&ku->ku_mailbox->km_lwp,
                 td->td_tid);
             if (error)
@@ -210,30 +210,24 @@ proc_sstep(struct thread *td)
 int
 proc_rwmem(struct proc *p, struct uio *uio)
 {
-    struct vmspace *vm;
     vm_map_t map;
     vm_object_t backing_object, object = NULL;
     vm_offset_t pageno = 0;     /* page number */
     vm_prot_t reqprot;
-    int error, refcnt, writing;
+    int error, writing;
 
     /*
-     * if the vmspace is in the midst of being deallocated or the
-     * process is exiting, don't try to grab anything.  The page table
-     * usage in that process can be messed up.
+     * Assert that someone has locked this vmspace.  (Should be
+     * curthread but we can't assert that.)  This keeps the process
+     * from exiting out from under us until this operation completes.
      */
-    vm = p->p_vmspace;
-    if ((p->p_flag & P_WEXIT))
-        return (EFAULT);
-    do {
-        if ((refcnt = vm->vm_refcnt) < 1)
-            return (EFAULT);
-    } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
+    KASSERT(p->p_lock >= 1, ("%s: process %p (pid %d) not held", __func__,
+        p, p->p_pid));
 
     /*
      * The map we want...
      */
-    map = &vm->vm_map;
+    map = &p->p_vmspace->vm_map;
 
     writing = uio->uio_rw == UIO_WRITE;
     reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
@@ -336,7 +330,6 @@ proc_rwmem(struct proc *p, struct uio *uio)
 
     } while (error == 0 && uio->uio_resid > 0);
 
-    vmspace_free(vm);
     return (error);
 }
 
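With the vmspace reference-count dance removed, proc_rwmem() now relies entirely on the caller's hold: the new KASSERT demands p_lock >= 1, and that hold is what keeps the target's vmspace alive. A hedged sketch of the calling convention this implies is below; read_target_mem() and its parameters are illustrative, not taken from the tree, while the uio setup mirrors the ptrace_read_int() hunks above.

/*
 * Illustrative sketch: hold the process before calling proc_rwmem(),
 * as the new KASSERT requires.  The names here are hypothetical.
 */
static int
read_target_mem(struct thread *td, struct proc *p, vm_offset_t off,
    void *buf, size_t len)
{
        struct iovec iov;
        struct uio uio;
        int error;

        iov.iov_base = buf;
        iov.iov_len = len;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = off;
        uio.uio_resid = len;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_rw = UIO_READ;
        uio.uio_td = td;

        PROC_LOCK(p);
        _PHOLD(p);              /* Satisfies proc_rwmem()'s p_lock >= 1 check. */
        PROC_UNLOCK(p);
        error = proc_rwmem(p, &uio);
        PRELE(p);               /* Drop the hold; may wake an exiting process. */
        return (error);
}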
@@ -551,6 +544,11 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
             pid = p->p_pid;
         }
     }
 
+    if ((p->p_flag & P_WEXIT) != 0) {
+        error = ESRCH;
+        goto fail;
+    }
+
     if ((error = p_cansee(td, p)) != 0)
         goto fail;
@@ -658,11 +656,14 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
         break;
     }
 
+    /* Keep this process around until we finish this request. */
+    _PHOLD(p);
+
 #ifdef FIX_SSTEP
     /*
      * Single step fixup ala procfs
      */
-    FIX_SSTEP(td2);         /* XXXKSE */
+    FIX_SSTEP(td2);
 #endif
 
     /*
@@ -676,9 +677,7 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
         /* set my trace flag and "owner" so it can read/write me */
         p->p_flag |= P_TRACED;
         p->p_oppid = p->p_pptr->p_pid;
-        PROC_UNLOCK(p);
-        sx_xunlock(&proctree_lock);
-        return (0);
+        break;
 
     case PT_ATTACH:
         /* security check done above */
@@ -690,40 +689,24 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
         goto sendsig;   /* in PT_CONTINUE below */
 
     case PT_CLEARSTEP:
-        _PHOLD(p);
         error = ptrace_clear_single_step(td2);
-        _PRELE(p);
-        if (error)
-            goto fail;
-        PROC_UNLOCK(p);
-        return (0);
+        break;
 
     case PT_SETSTEP:
-        _PHOLD(p);
         error = ptrace_single_step(td2);
-        _PRELE(p);
-        if (error)
-            goto fail;
-        PROC_UNLOCK(p);
-        return (0);
+        break;
 
     case PT_SUSPEND:
-        _PHOLD(p);
         mtx_lock_spin(&sched_lock);
         td2->td_flags |= TDF_DBSUSPEND;
         mtx_unlock_spin(&sched_lock);
-        _PRELE(p);
-        PROC_UNLOCK(p);
-        return (0);
+        break;
 
     case PT_RESUME:
-        _PHOLD(p);
         mtx_lock_spin(&sched_lock);
         td2->td_flags &= ~TDF_DBSUSPEND;
         mtx_unlock_spin(&sched_lock);
-        _PRELE(p);
-        PROC_UNLOCK(p);
-        return (0);
+        break;
 
     case PT_STEP:
     case PT_CONTINUE:
@@ -734,20 +717,14 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
         /* Zero means do not send any signal */
         if (data < 0 || data > _SIG_MAXSIG) {
             error = EINVAL;
-            goto fail;
+            break;
         }
 
-        _PHOLD(p);
-
         switch (req) {
         case PT_STEP:
-            PROC_UNLOCK(p);
             error = ptrace_single_step(td2);
-            if (error) {
-                PRELE(p);
-                goto fail_noproc;
-            }
-            PROC_LOCK(p);
+            if (error)
+                goto out;
             break;
         case PT_TO_SCE:
             p->p_stops |= S_PT_SCE;
@@ -761,15 +738,10 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
         }
 
         if (addr != (void *)1) {
-            PROC_UNLOCK(p);
             error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
-            if (error) {
-                PRELE(p);
-                goto fail_noproc;
-            }
-            PROC_LOCK(p);
+            if (error)
+                break;
         }
-        _PRELE(p);
 
         if (req == PT_DETACH) {
             /* reset process parent */
@@ -794,8 +766,10 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
         }
 
     sendsig:
-        if (proctree_locked)
+        if (proctree_locked) {
             sx_xunlock(&proctree_lock);
+            proctree_locked = 0;
+        }
         /* deliver or queue signal */
         mtx_lock_spin(&sched_lock);
         td2->td_flags &= ~TDF_XSIG;
@@ -826,8 +800,7 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
         if (data)
             psignal(p, data);
 
-        PROC_UNLOCK(p);
-        return (0);
+        break;
 
     case PT_WRITE_I:
     case PT_WRITE_D:
@@ -863,10 +836,10 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
         }
         if (!write)
             td->td_retval[0] = tmp;
-        return (error);
+        PROC_LOCK(p);
+        break;
 
     case PT_IO:
-        PROC_UNLOCK(p);
 #ifdef COMPAT_IA32
         if (wrap32) {
             piod32 = addr;
@@ -902,8 +875,10 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
             uio.uio_rw = UIO_WRITE;
             break;
         default:
-            return (EINVAL);
+            error = EINVAL;
+            goto out;
         }
+        PROC_UNLOCK(p);
         error = proc_rwmem(p, &uio);
 #ifdef COMPAT_IA32
         if (wrap32)
@@ -911,59 +886,43 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
         else
 #endif
             piod->piod_len -= uio.uio_resid;
-        return (error);
+        PROC_LOCK(p);
+        break;
 
     case PT_KILL:
         data = SIGKILL;
         goto sendsig;   /* in PT_CONTINUE above */
 
     case PT_SETREGS:
-        _PHOLD(p);
         error = PROC_WRITE(regs, td2, addr);
-        _PRELE(p);
-        PROC_UNLOCK(p);
-        return (error);
+        break;
 
     case PT_GETREGS:
-        _PHOLD(p);
         error = PROC_READ(regs, td2, addr);
-        _PRELE(p);
-        PROC_UNLOCK(p);
-        return (error);
+        break;
 
     case PT_SETFPREGS:
-        _PHOLD(p);
         error = PROC_WRITE(fpregs, td2, addr);
-        _PRELE(p);
-        PROC_UNLOCK(p);
-        return (error);
+        break;
 
     case PT_GETFPREGS:
-        _PHOLD(p);
         error = PROC_READ(fpregs, td2, addr);
-        _PRELE(p);
-        PROC_UNLOCK(p);
-        return (error);
+        break;
 
     case PT_SETDBREGS:
-        _PHOLD(p);
         error = PROC_WRITE(dbregs, td2, addr);
-        _PRELE(p);
-        PROC_UNLOCK(p);
-        return (error);
+        break;
 
     case PT_GETDBREGS:
-        _PHOLD(p);
         error = PROC_READ(dbregs, td2, addr);
-        _PRELE(p);
-        PROC_UNLOCK(p);
-        return (error);
+        break;
 
     case PT_LWPINFO:
-        if (data == 0 || data > sizeof(*pl))
-            return (EINVAL);
+        if (data == 0 || data > sizeof(*pl)) {
+            error = EINVAL;
+            break;
+        }
         pl = addr;
-        _PHOLD(p);
         pl->pl_lwpid = td2->td_tid;
         if (td2->td_flags & TDF_XSIG)
             pl->pl_event = PL_EVENT_SIGNAL;
@@ -976,19 +935,16 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
         } else {
             pl->pl_flags = 0;
         }
-        _PRELE(p);
-        PROC_UNLOCK(p);
-        return (0);
+        break;
 
     case PT_GETNUMLWPS:
         td->td_retval[0] = p->p_numthreads;
-        PROC_UNLOCK(p);
-        return (0);
+        break;
 
     case PT_GETLWPLIST:
         if (data <= 0) {
-            PROC_UNLOCK(p);
-            return (EINVAL);
+            error = EINVAL;
+            break;
         }
         num = imin(p->p_numthreads, data);
         PROC_UNLOCK(p);
@@ -1007,27 +963,27 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
         free(buf, M_TEMP);
         if (!error)
             td->td_retval[0] = num;
-        return (error);
+        PROC_LOCK(p);
+        break;
 
     default:
#ifdef __HAVE_PTRACE_MACHDEP
         if (req >= PT_FIRSTMACH) {
-            _PHOLD(p);
             PROC_UNLOCK(p);
             error = cpu_ptrace(td2, req, addr, data);
-            PRELE(p);
-            return (error);
-        }
+            PROC_LOCK(p);
+        } else
#endif
+            /* Unknown request. */
+            error = EINVAL;
         break;
     }
 
-    /* Unknown request. */
-    error = EINVAL;
-
+out:
+    /* Drop our hold on this process now that the request has completed. */
+    _PRELE(p);
 fail:
     PROC_UNLOCK(p);
-fail_noproc:
     if (proctree_locked)
         sx_xunlock(&proctree_lock);
     return (error);
@@ -776,6 +776,8 @@ MALLOC_DECLARE(M_ZOMBIE);
 } while (0)
 #define _PHOLD(p) do { \
     PROC_LOCK_ASSERT((p), MA_OWNED); \
+    KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc, \
+        ("PHOLD of exiting process")); \
     (p)->p_lock++; \
     if (((p)->p_sflag & PS_INMEM) == 0) \
         faultin((p)); \
@@ -789,6 +791,8 @@ MALLOC_DECLARE(M_ZOMBIE);
 #define _PRELE(p) do { \
     PROC_LOCK_ASSERT((p), MA_OWNED); \
     (--(p)->p_lock); \
+    if (((p)->p_flag & P_WEXIT) && (p)->p_lock == 0) \
+        wakeup(&(p)->p_lock); \
 } while (0)
 
 /* Check whether a thread is safe to be swapped out. */