- Use memory barriers with the atomic operations in ntoskrnl_lock_dpc() and
  ntoskrnl_unlock_dpc() (see the short sketch after this message).
- hal_raise_irql(), hal_lower_irql() and hal_irql() didn't work right on SMP
  (priority inheritance makes things... interesting). For now, use only two
  states: DISPATCH_LEVEL (PI_REALTIME) and PASSIVE_LEVEL (everything else).
  Tested on a dual PIII box.
- Use ndis_thsuspend() in ndis_sleep() instead of tsleep(). (I added
  ndis_thsuspend() and ndis_thresume() to replace kthread_suspend() and
  kthread_resume(); the former will preserve a thread's priority when it
  wakes up, the latter will not.)
- Change the use of tsleep() in ndis_stop_thread() to prevent a priority
  change on wakeup.
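A minimal sketch of the acquire/release pairing from the first item above. The
lock-word layout (0 = free, 1 = held) mirrors what the diff below does in
ntoskrnl_lock_dpc()/ntoskrnl_unlock_dpc(); the function names here are
illustrative only, not part of the driver.

#include <sys/types.h>
#include <machine/atomic.h>

/*
 * atomic_cmpset_acq_int() adds an acquire barrier, so loads and stores
 * inside the critical section cannot be reordered above the lock
 * acquisition; atomic_cmpset_rel_int() adds a release barrier, so they
 * cannot be reordered below the unlock.
 */
static void
example_spin_lock(volatile u_int *lock)
{
        /* Spin until the lock word goes from 0 (free) to 1 (held). */
        while (atomic_cmpset_acq_int(lock, 0, 1) == 0)
                /* do nothing */;
}

static void
example_spin_unlock(volatile u_int *lock)
{
        /* Drop the lock word from 1 (held) back to 0 (free). */
        atomic_cmpset_rel_int(lock, 1, 0);
}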
This commit is contained in:
parent 2b19543d6c
commit 1ea56deba6
@@ -348,7 +348,7 @@ ndis_stop_thread(t)
 
         /* wait for thread exit */
 
-        tsleep(r, PPAUSE|PCATCH, "ndisthrexit", hz * 60);
+        tsleep(r, curthread->td_priority|PCATCH, "ndisthexit", hz * 60);
 
         /* Now empty the job list. */
 
@@ -204,10 +204,10 @@ typedef struct nt_dispatch_header nt_dispatch_header;
         ((td)->td_proc->p_flag & P_KTHREAD == FALSE)
 
 #define AT_DISPATCH_LEVEL(td)           \
-        ((td)->td_priority == PI_SOFT)
+        ((td)->td_priority == PI_REALTIME)
 
 #define AT_DIRQL_LEVEL(td)              \
-        ((td)->td_priority < PRI_MIN_KERN)
+        ((td)->td_priority <= PI_NET)
 
 #define AT_HIGH_LEVEL(td)               \
         ((td)->td_critnest != 0)
@@ -266,6 +266,7 @@ hal_lock(/*lock*/void)
 
+        oldirql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL);
         FASTCALL1(ntoskrnl_lock_dpc, lock);
 
         return(oldirql);
 }
 
@@ -288,10 +289,6 @@ hal_irql(void)
 {
         if (AT_DISPATCH_LEVEL(curthread))
                 return(DISPATCH_LEVEL);
-        if (AT_DIRQL_LEVEL(curthread))
-                return(DEVICE_LEVEL);
-        if (AT_HIGH_LEVEL(curthread))
-                return(HIGH_LEVEL);
         return(PASSIVE_LEVEL);
 }
 
@@ -313,27 +310,13 @@ hal_raise_irql(/*irql*/ void)
 
         __asm__ __volatile__ ("" : "=c" (irql));
 
-        switch(irql) {
-        case HIGH_LEVEL:
-                oldirql = hal_irql();
-                critical_enter();
-                break;
-        case DEVICE_LEVEL:
-                mtx_lock_spin(&sched_lock);
-                oldirql = curthread->td_priority;
-                sched_prio(curthread, PI_REALTIME);
-                mtx_unlock_spin(&sched_lock);
-                break;
-        case DISPATCH_LEVEL:
-                mtx_lock_spin(&sched_lock);
-                oldirql = curthread->td_priority;
-                sched_prio(curthread, PI_SOFT);
-                mtx_unlock_spin(&sched_lock);
-                break;
-        default:
-                panic("can't raise IRQL to unknown level %d", irql);
-                break;
-        }
+        if (irql < hal_irql())
+                panic("IRQL_NOT_LESS_THAN");
+
+        mtx_lock_spin(&sched_lock);
+        oldirql = curthread->td_priority;
+        sched_prio(curthread, PI_REALTIME);
+        mtx_unlock_spin(&sched_lock);
 
         return(oldirql);
 }
@@ -342,26 +325,15 @@ __stdcall void
 hal_lower_irql(/*oldirql*/ void)
 {
         uint8_t                 oldirql;
-        uint8_t                 irql;
 
         __asm__ __volatile__ ("" : "=c" (oldirql));
 
-        irql = hal_irql();
+        if (hal_irql() != DISPATCH_LEVEL)
+                panic("IRQL_NOT_GREATER_THAN");
 
-        switch (irql) {
-        case HIGH_LEVEL:
-                critical_exit();
-                break;
-        case DEVICE_LEVEL:
-        case DISPATCH_LEVEL:
-                mtx_lock_spin(&sched_lock);
-                sched_prio(curthread, oldirql);
-                mtx_unlock_spin(&sched_lock);
-                break;
-        default:
-                panic("can't lower IRQL to unknown level %d", irql);
-                break;
-        }
+        mtx_lock_spin(&sched_lock);
+        sched_prio(curthread, oldirql);
+        mtx_unlock_spin(&sched_lock);
 
         return;
 }
@@ -2162,12 +2162,11 @@ ndis_sleep(usecs)
         uint32_t                usecs;
 {
         struct timeval          tv;
-        uint32_t                dummy;
 
         tv.tv_sec = 0;
         tv.tv_usec = usecs;
 
-        tsleep(&dummy, PPAUSE|PCATCH, "ndis", tvtohz(&tv));
+        ndis_thsuspend(curthread->td_proc, tvtohz(&tv));
 
         return;
 }
@@ -406,12 +406,6 @@ ntoskrnl_time(tval)
  * synchronization event wakes up just one. Also, a synchronization event
  * is auto-clearing, which means we automatically set the event back to
  * the non-signalled state once the wakeup is done.
- *
- * The problem with KeWaitForSingleObject() is that it can be called
- * either from the main kernel 'process' or from a kthread. When sleeping
- * inside a kernel thread, we need to use kthread_resume(), but that
- * won't work in the kernel context proper. So if kthread_resume() returns
- * EINVAL, we need to use tsleep() instead.
  */
 
 __stdcall uint32_t
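(The comment kept in the hunk above describes two wakeup policies. As a purely
illustrative sketch, not code from this commit, the auto-clearing behaviour of
a synchronization event amounts to the following; the struct and function
names are invented for the example, only wakeup()/wakeup_one() are real
FreeBSD kernel routines.)

#include <sys/param.h>
#include <sys/systm.h>

struct example_event {
        int     ee_signalled;   /* non-zero once the event is set */
        int     ee_synch;       /* non-zero: synchronization (auto-clear) */
};

static void
example_event_set(struct example_event *ev)
{
        ev->ee_signalled = 1;
        if (ev->ee_synch) {
                wakeup_one(ev);         /* wake a single sleeper... */
                ev->ee_signalled = 0;   /* ...and auto-clear the event */
        } else
                wakeup(ev);             /* wake all sleepers; stays signalled */
}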
@@ -1034,8 +1028,8 @@ ntoskrnl_lock_dpc(/*lock*/ void)
 
         __asm__ __volatile__ ("" : "=c" (lock));
 
-        while (atomic_cmpset_int((volatile u_int *)lock, 0, 1) == 0)
-                /* do noting */;
+        while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
+                /* do nothing */;
 
         return;
 }
@@ -1047,7 +1041,7 @@ ntoskrnl_unlock_dpc(/*lock*/ void)
 
         __asm__ __volatile__ ("" : "=c" (lock));
 
-        atomic_cmpset_int((volatile u_int *)lock, 1, 0);
+        atomic_cmpset_rel_int((volatile u_int *)lock, 1, 0);
 
         return;
 }