Don't forget to unlock the pmap lock on failure.
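pmap_change_attr() acquires the kernel pmap lock before these checks, so the two early EINVAL returns previously left the lock held and any later PMAP_LOCK(kernel_pmap) would block forever. A rough userspace sketch of the same bug and fix, with a pthread mutex standing in for the pmap lock (the function and parameter names here are illustrative, not from the tree):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Illustrative stand-in for a pmap_change_attr()-style routine:
 * every return path taken after the lock is acquired must drop it.
 */
static int
change_attr(unsigned long base, unsigned long min_addr)
{

	pthread_mutex_lock(&map_lock);
	if (base < min_addr) {
		/* Forgetting this unlock leaves the lock held forever. */
		pthread_mutex_unlock(&map_lock);
		return (EINVAL);
	}
	/* ... modify mapping attributes while the lock is held ... */
	pthread_mutex_unlock(&map_lock);
	return (0);
}

In the kernel the same rule applies to PMAP_LOCK()/PMAP_UNLOCK(), which is what the hunks below add on the two EINVAL paths.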

Olivier Houchard 2012-11-16 00:14:02 +00:00
parent a0c8989b3a
commit bf014f0bab


@@ -2361,8 +2361,10 @@ pmap_change_attr(vm_offset_t sva, vm_size_t len, int mode)
 	 * Only supported on kernel virtual addresses, including the direct
 	 * map but excluding the recursive map.
 	 */
-	if (base < DMAP_MIN_ADDRESS)
+	if (base < DMAP_MIN_ADDRESS) {
+		PMAP_UNLOCK(kernel_pmap);
 		return (EINVAL);
+	}
 #endif
 	for (tmpva = base; tmpva < base + size; ) {
 		next_bucket = L2_NEXT_BUCKET(tmpva);
@@ -2377,8 +2379,10 @@ pmap_change_attr(vm_offset_t sva, vm_size_t len, int mode)
 		ptep = &l2b->l2b_kva[l2pte_index(tmpva)];
-		if (*ptep == 0)
+		if (*ptep == 0) {
+			PMAP_UNLOCK(kernel_pmap);
 			return(EINVAL);
+		}
 		pte = *ptep &~ L2_S_CACHE_MASK;
 		cpu_idcache_wbinv_range(tmpva, PAGE_SIZE);