diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
index ddfd38834b24..28579ac8e96a 100644
--- a/sys/amd64/amd64/exception.S
+++ b/sys/amd64/amd64/exception.S
@@ -76,18 +76,6 @@
  * must restore them prior to calling 'iret'.  The cpu adjusts the %cs and
  * %ss segment registers, but does not mess with %ds, %es, or %fs.  Thus we
  * must load them with appropriate values for supervisor mode operation.
- *
- * On entry to a trap or interrupt WE DO NOT OWN THE MP LOCK.  This means
- * that we must be careful in regards to accessing global variables.  We
- * save (push) the current cpl (our software interrupt disable mask), call
- * the trap function, then call doreti to restore the cpl and deal with
- * ASTs (software interrupts).  doreti will determine if the restoration
- * of the cpl unmasked any pending interrupts and will issue those interrupts
- * synchronously prior to doing the iret.
- *
- * At the moment we must own the MP lock to do any cpl manipulation, which
- * means we must own it prior to calling doreti.  The syscall case attempts
- * to avoid this by handling a reduced set of cases itself and iret'ing.
  */
 #define IDTVEC(name)	ALIGN_TEXT; .globl __CONCAT(X,name); \
 			.type __CONCAT(X,name),@function; __CONCAT(X,name):
@@ -197,9 +185,6 @@ calltrap:
  * final spot.  It has to be done this way because esp can't be just
  * temporarily altered for the pushfl - an interrupt might come in
  * and clobber the saved cs/eip.
- *
- * We do not obtain the MP lock, but the call to syscall might.  If it
- * does it will release the lock prior to returning.
  */
 	SUPERALIGN_TEXT
 IDTVEC(lcall_syscall)
@@ -214,9 +199,6 @@ IDTVEC(lcall_syscall)
  * Even though the name says 'int0x80', this is actually a TGT (trap gate)
  * rather then an IGT (interrupt gate).  Thus interrupts are enabled on
  * entry just as they are for a normal syscall.
- *
- * We do not obtain the MP lock, but the call to syscall might.  If it
- * does it will release the lock prior to returning.
  */
 	SUPERALIGN_TEXT
 IDTVEC(int0x80_syscall)
diff --git a/sys/amd64/amd64/exception.s b/sys/amd64/amd64/exception.s
index ddfd38834b24..28579ac8e96a 100644
--- a/sys/amd64/amd64/exception.s
+++ b/sys/amd64/amd64/exception.s
@@ -76,18 +76,6 @@
  * must restore them prior to calling 'iret'.  The cpu adjusts the %cs and
  * %ss segment registers, but does not mess with %ds, %es, or %fs.  Thus we
  * must load them with appropriate values for supervisor mode operation.
- *
- * On entry to a trap or interrupt WE DO NOT OWN THE MP LOCK.  This means
- * that we must be careful in regards to accessing global variables.  We
- * save (push) the current cpl (our software interrupt disable mask), call
- * the trap function, then call doreti to restore the cpl and deal with
- * ASTs (software interrupts).  doreti will determine if the restoration
- * of the cpl unmasked any pending interrupts and will issue those interrupts
- * synchronously prior to doing the iret.
- *
- * At the moment we must own the MP lock to do any cpl manipulation, which
- * means we must own it prior to calling doreti.  The syscall case attempts
- * to avoid this by handling a reduced set of cases itself and iret'ing.
  */
 #define IDTVEC(name)	ALIGN_TEXT; .globl __CONCAT(X,name); \
 			.type __CONCAT(X,name),@function; __CONCAT(X,name):
@@ -197,9 +185,6 @@ calltrap:
  * final spot.  It has to be done this way because esp can't be just
  * temporarily altered for the pushfl - an interrupt might come in
  * and clobber the saved cs/eip.
- *
- * We do not obtain the MP lock, but the call to syscall might.  If it
- * does it will release the lock prior to returning.
  */
 	SUPERALIGN_TEXT
 IDTVEC(lcall_syscall)
@@ -214,9 +199,6 @@ IDTVEC(lcall_syscall)
  * Even though the name says 'int0x80', this is actually a TGT (trap gate)
  * rather then an IGT (interrupt gate).  Thus interrupts are enabled on
  * entry just as they are for a normal syscall.
- *
- * We do not obtain the MP lock, but the call to syscall might.  If it
- * does it will release the lock prior to returning.
  */
 	SUPERALIGN_TEXT
 IDTVEC(int0x80_syscall)
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index a84a4f24302d..11effc39be57 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -1012,15 +1012,9 @@ int trapwrite(addr)
 }
 
 /*
- * syscall - MP aware system call request C handler
+ * syscall - system call request C handler
  *
- * A system call is essentially treated as a trap except that the
- * MP lock is not held on entry or return.  We are responsible for
- * obtaining the MP lock if necessary and for handling ASTs
- * (e.g. a task switch) prior to return.
- *
- * In general, only simple access and manipulation of curproc and
- * the current stack is allowed without having to hold MP lock.
+ * A system call is essentially treated as a trap.
  */
 void
 syscall(frame)
@@ -1103,7 +1097,7 @@ syscall(frame)
 	}
 
 	/*
-	 * Try to run the syscall without the MP lock if the syscall
+	 * Try to run the syscall without Giant if the syscall
 	 * is MP safe.
 	 */
 	if ((callp->sy_narg & SYF_MPSAFE) == 0) {
@@ -1112,7 +1106,7 @@ syscall(frame)
 
 #ifdef KTRACE
 	/*
-	 * We have to obtain the MP lock no matter what if
+	 * We have to obtain Giant no matter what if
 	 * we are ktracing
 	 */
 	if (KTRPOINT(p, KTR_SYSCALL)) {
@@ -1124,13 +1118,10 @@ syscall(frame)
 	p->p_retval[0] = 0;
 	p->p_retval[1] = frame.tf_edx;
 
-	STOPEVENT(p, S_SCE, narg);	/* MP aware */
+	STOPEVENT(p, S_SCE, narg);
 
 	error = (*callp->sy_call)(p, args);
 
-	/*
-	 * MP SAFE (we may or may not have the MP lock at this point)
-	 */
 	switch (error) {
 	case 0:
 		frame.tf_eax = p->p_retval[0];
@@ -1163,7 +1154,7 @@ bad:
 	}
 
 	/*
-	 * Traced syscall.  trapsignal() is not MP aware.
+	 * Traced syscall.
 	 */
 	if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) {
 		if (!mtx_owned(&Giant))
diff --git a/sys/i386/i386/exception.s b/sys/i386/i386/exception.s
index ddfd38834b24..28579ac8e96a 100644
--- a/sys/i386/i386/exception.s
+++ b/sys/i386/i386/exception.s
@@ -76,18 +76,6 @@
  * must restore them prior to calling 'iret'.  The cpu adjusts the %cs and
  * %ss segment registers, but does not mess with %ds, %es, or %fs.  Thus we
  * must load them with appropriate values for supervisor mode operation.
- *
- * On entry to a trap or interrupt WE DO NOT OWN THE MP LOCK.  This means
- * that we must be careful in regards to accessing global variables.  We
- * save (push) the current cpl (our software interrupt disable mask), call
- * the trap function, then call doreti to restore the cpl and deal with
- * ASTs (software interrupts).  doreti will determine if the restoration
- * of the cpl unmasked any pending interrupts and will issue those interrupts
- * synchronously prior to doing the iret.
- *
- * At the moment we must own the MP lock to do any cpl manipulation, which
- * means we must own it prior to calling doreti.  The syscall case attempts
- * to avoid this by handling a reduced set of cases itself and iret'ing.
  */
 #define IDTVEC(name)	ALIGN_TEXT; .globl __CONCAT(X,name); \
 			.type __CONCAT(X,name),@function; __CONCAT(X,name):
@@ -197,9 +185,6 @@ calltrap:
  * final spot.  It has to be done this way because esp can't be just
  * temporarily altered for the pushfl - an interrupt might come in
  * and clobber the saved cs/eip.
- *
- * We do not obtain the MP lock, but the call to syscall might.  If it
- * does it will release the lock prior to returning.
  */
 	SUPERALIGN_TEXT
 IDTVEC(lcall_syscall)
@@ -214,9 +199,6 @@ IDTVEC(lcall_syscall)
  * Even though the name says 'int0x80', this is actually a TGT (trap gate)
  * rather then an IGT (interrupt gate).  Thus interrupts are enabled on
  * entry just as they are for a normal syscall.
- *
- * We do not obtain the MP lock, but the call to syscall might.  If it
- * does it will release the lock prior to returning.
  */
 	SUPERALIGN_TEXT
 IDTVEC(int0x80_syscall)
diff --git a/sys/i386/i386/trap.c b/sys/i386/i386/trap.c
index a84a4f24302d..11effc39be57 100644
--- a/sys/i386/i386/trap.c
+++ b/sys/i386/i386/trap.c
@@ -1012,15 +1012,9 @@ int trapwrite(addr)
 }
 
 /*
- * syscall - MP aware system call request C handler
+ * syscall - system call request C handler
  *
- * A system call is essentially treated as a trap except that the
- * MP lock is not held on entry or return.  We are responsible for
- * obtaining the MP lock if necessary and for handling ASTs
- * (e.g. a task switch) prior to return.
- *
- * In general, only simple access and manipulation of curproc and
- * the current stack is allowed without having to hold MP lock.
+ * A system call is essentially treated as a trap.
  */
 void
 syscall(frame)
@@ -1103,7 +1097,7 @@ syscall(frame)
 	}
 
 	/*
-	 * Try to run the syscall without the MP lock if the syscall
+	 * Try to run the syscall without Giant if the syscall
 	 * is MP safe.
 	 */
 	if ((callp->sy_narg & SYF_MPSAFE) == 0) {
@@ -1112,7 +1106,7 @@ syscall(frame)
 
 #ifdef KTRACE
 	/*
-	 * We have to obtain the MP lock no matter what if
+	 * We have to obtain Giant no matter what if
	 * we are ktracing
 	 */
 	if (KTRPOINT(p, KTR_SYSCALL)) {
@@ -1124,13 +1118,10 @@ syscall(frame)
 	p->p_retval[0] = 0;
 	p->p_retval[1] = frame.tf_edx;
 
-	STOPEVENT(p, S_SCE, narg);	/* MP aware */
+	STOPEVENT(p, S_SCE, narg);
 
 	error = (*callp->sy_call)(p, args);
 
-	/*
-	 * MP SAFE (we may or may not have the MP lock at this point)
-	 */
 	switch (error) {
 	case 0:
 		frame.tf_eax = p->p_retval[0];
@@ -1163,7 +1154,7 @@ bad:
 	}
 
 	/*
-	 * Traced syscall.  trapsignal() is not MP aware.
+	 * Traced syscall.
 	 */
 	if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) {
 		if (!mtx_owned(&Giant))
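The comments this patch rewrites describe one simple rule: the syscall path runs without Giant only when the sysent entry carries the SYF_MPSAFE flag, and ktracing forces Giant to be taken regardless. The following is a minimal user-space sketch of that decision, not the kernel's trap.c code: the pthread mutex stands in for Giant, and the SYF_MPSAFE value, struct sysent layout, and run_syscall() helper are invented for illustration only.

#include <pthread.h>
#include <stdio.h>

#define SYF_MPSAFE	0x1000	/* hypothetical flag value, for this sketch only */

struct sysent {				/* simplified stand-in for a syscall table entry */
	int	sy_narg;		/* argument count, with SYF_MPSAFE possibly or'd in */
	int	(*sy_call)(void *p, void *args);
};

static pthread_mutex_t Giant = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for Giant */

static int
example_call(void *p, void *args)
{
	(void)p;
	(void)args;
	return (0);
}

/*
 * Model of the decision the updated comments describe: take Giant unless
 * the syscall is flagged MP safe, and always take it when ktracing.
 */
static int
run_syscall(struct sysent *callp, void *p, void *args, int ktracing)
{
	int have_giant = 0;
	int error;

	if ((callp->sy_narg & SYF_MPSAFE) == 0 || ktracing) {
		pthread_mutex_lock(&Giant);
		have_giant = 1;
	}

	error = (*callp->sy_call)(p, args);

	if (have_giant)
		pthread_mutex_unlock(&Giant);
	return (error);
}

int
main(void)
{
	struct sysent mpsafe = { 1 | SYF_MPSAFE, example_call };
	struct sysent legacy = { 1, example_call };

	printf("mpsafe syscall: %d\n", run_syscall(&mpsafe, NULL, NULL, 0));
	printf("legacy syscall: %d\n", run_syscall(&legacy, NULL, NULL, 0));
	return (0);
}

The sketch folds the ktrace case into the same conditional; in the real handler the Giant acquisition for ktrace happens separately under #ifdef KTRACE, as the diff's context lines show.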