Fix some problems introduced with the last descriptor tables locking patch:
- Do the correct test for ldt allocation in i386_ldt_grow() (the check
  should be == NULL, not != NULL).
- Drop dt_lock just before calling kmem_free() (since kmem_free() acquires
  blocking locks internally).
- Solve a deadlock with smp_rendezvous() where the other CPUs would wait
  indefinitely for dt_lock acquisition (see the sketch after this list).
- Add dt_lock to the WITNESS list of spinlocks.
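For clarity, a minimal sketch of the pattern referenced in the smp_rendezvous()
item, condensed from the i386_ldt_grow() hunk below (not the committed code
verbatim): the initiating CPU drops the dt_lock spin mutex around the
rendezvous so the other CPUs can take it from their set_user_ldt_rv()
handlers, then retakes it to preserve the function's locking contract.

	/* Drop the spin lock across the rendezvous, then retake it. */
	mtx_unlock_spin(&dt_lock);
	smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv,
	    NULL, td);
	mtx_lock_spin(&dt_lock);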

While applying these changes, adjust the contract of user_ldt_free(): it is
still called with dt_lock held, but it now returns without dt_lock held
(see the caller sketch below).
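To make the new contract concrete, a minimal caller-side sketch, condensed
from the exec_setregs() and cpu_exit() hunks below (not a complete function;
td is the calling thread): the caller acquires dt_lock, and exactly one of
user_ldt_free() or the caller's else branch releases it.

	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);	/* returns with dt_lock released */
	else
		mtx_unlock_spin(&dt_lock);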

Tested by: marcus, tegge
Reviewed by: tegge
Approved by: jeff (mentor)
Attilio Rao 2007-05-29 18:55:41 +00:00
parent aeefab2b98
commit 02b0a160dc
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=170110
4 changed files with 22 additions and 8 deletions


@@ -1174,7 +1174,8 @@ exec_setregs(td, entry, stack, ps_strings)
 	mtx_lock_spin(&dt_lock);
 	if (td->td_proc->p_md.md_ldt)
 		user_ldt_free(td);
-	mtx_unlock_spin(&dt_lock);
+	else
+		mtx_unlock_spin(&dt_lock);
 
 	bzero((char *)regs, sizeof(struct trapframe));
 	regs->tf_eip = entry;


@@ -428,7 +428,7 @@ user_ldt_alloc(struct mdproc *mdp, int len)
 }
 
 /*
- * Must be called with dt_lock held.
+ * Must be called with dt_lock held. Returns with dt_lock unheld.
  */
 void
 user_ldt_free(struct thread *td)
@@ -446,6 +446,7 @@ user_ldt_free(struct thread *td)
 	}
 
 	mdp->md_ldt = NULL;
+	mtx_unlock_spin(&dt_lock);
 	if (refcount_release(&pldt->ldt_refcnt)) {
 		kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
 		    pldt->ldt_len * sizeof(union descriptor));
@@ -701,7 +702,7 @@ i386_ldt_grow(struct thread *td, int len)
 		len = NLDT + 1;
 
 	/* Allocate a user ldt. */
-	if ((pldt = mdp->md_ldt) != NULL || len > pldt->ldt_len) {
+	if ((pldt = mdp->md_ldt) == NULL || len > pldt->ldt_len) {
 		struct proc_ldt *new_ldt;
 
 		new_ldt = user_ldt_alloc(mdp, len);
@@ -716,26 +717,37 @@ i386_ldt_grow(struct thread *td, int len)
 				pldt->ldt_sd = new_ldt->ldt_sd;
 				pldt->ldt_base = new_ldt->ldt_base;
 				pldt->ldt_len = new_ldt->ldt_len;
+				mtx_unlock_spin(&dt_lock);
 				kmem_free(kernel_map, (vm_offset_t)old_ldt_base,
 				    old_ldt_len * sizeof(union descriptor));
 				FREE(new_ldt, M_SUBPROC);
+				mtx_lock_spin(&dt_lock);
 			} else {
 				/*
 				 * If other threads already did the work,
 				 * do nothing.
 				 */
+				mtx_unlock_spin(&dt_lock);
 				kmem_free(kernel_map,
 				    (vm_offset_t)new_ldt->ldt_base,
 				    new_ldt->ldt_len * sizeof(union descriptor));
 				FREE(new_ldt, M_SUBPROC);
+				mtx_lock_spin(&dt_lock);
 				return (0);
 			}
 		} else
 			mdp->md_ldt = pldt = new_ldt;
 #ifdef SMP
-		/* signal other cpus to reload ldt */
+		/*
+		 * Signal other cpus to reload ldt. We need to unlock dt_lock
+		 * here because other CPU will contest on it since their
+		 * curthreads won't hold the lock and will block when trying
+		 * to acquire it.
+		 */
+		mtx_unlock_spin(&dt_lock);
 		smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv,
 		    NULL, td);
+		mtx_lock_spin(&dt_lock);
 #else
 		set_user_ldt(mdp);
 #endif


@@ -168,8 +168,8 @@ cpu_fork(td1, p2, td2, flags)
 				mdp1->md_ldt = pldt;
 				set_user_ldt(mdp1);
 				user_ldt_free(td1);
-			}
-			mtx_unlock_spin(&dt_lock);
+			} else
+				mtx_unlock_spin(&dt_lock);
 		}
 		return;
 	}
@@ -312,8 +312,8 @@ cpu_exit(struct thread *td)
 		td->td_pcb->pcb_gs = _udatasel;
 		load_gs(_udatasel);
 		user_ldt_free(td);
-	}
-	mtx_unlock_spin(&dt_lock);
+	} else
+		mtx_unlock_spin(&dt_lock);
 }
 
 void


@@ -394,6 +394,7 @@ static struct witness_order_list_entry order_lists[] = {
 	{ "sio", &lock_class_mtx_spin },
 #ifdef __i386__
 	{ "cy", &lock_class_mtx_spin },
+	{ "descriptor tables", &lock_class_mtx_spin },
 #endif
 	{ "scc_hwmtx", &lock_class_mtx_spin },
 	{ "uart_hwmtx", &lock_class_mtx_spin },