Properly handle the case where the linux_cdev_handle_insert() function

in the LinuxKPI returns NULL. This happens when the VM area's private
data handle already exists, and prior to this fix it could cause a
NULL pointer dereference.

Found by:	greg@unrelenting.technology
MFC after:	1 week
Sponsored by:	Mellanox Technologies
This commit is contained in:
hselasky 2017-11-13 18:16:26 +00:00
parent 83a341a5aa
commit 160a26a6fa

View File

@ -607,24 +607,6 @@ linux_cdev_handle_free(struct vm_area_struct *vmap)
kfree(vmap);
}
static struct vm_area_struct *
linux_cdev_handle_insert(void *handle, struct vm_area_struct *vmap)
{
struct vm_area_struct *ptr;
rw_wlock(&linux_vma_lock);
TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) {
if (ptr->vm_private_data == handle) {
rw_wunlock(&linux_vma_lock);
linux_cdev_handle_free(vmap);
return (NULL);
}
}
TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry);
rw_wunlock(&linux_vma_lock);
return (vmap);
}
static void
linux_cdev_handle_remove(struct vm_area_struct *vmap)
{
@ -1318,20 +1300,55 @@ linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
attr = pgprot2cachemode(vmap->vm_page_prot);
if (vmap->vm_ops != NULL) {
struct vm_area_struct *ptr;
void *vm_private_data;
bool vm_no_fault;
if (vmap->vm_ops->open == NULL ||
vmap->vm_ops->close == NULL ||
vmap->vm_private_data == NULL) {
/* free allocated VM area struct */
linux_cdev_handle_free(vmap);
return (EINVAL);
}
vm_private_data = vmap->vm_private_data;
vmap = linux_cdev_handle_insert(vm_private_data, vmap);
rw_wlock(&linux_vma_lock);
TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) {
if (ptr->vm_private_data == vm_private_data)
break;
}
/* check if there is an existing VM area struct */
if (ptr != NULL) {
/* check if the VM area structure is invalid */
if (ptr->vm_ops == NULL ||
ptr->vm_ops->open == NULL ||
ptr->vm_ops->close == NULL) {
error = ESTALE;
vm_no_fault = 1;
} else {
error = EEXIST;
vm_no_fault = (ptr->vm_ops->fault == NULL);
}
} else {
/* insert VM area structure into list */
TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry);
error = 0;
vm_no_fault = (vmap->vm_ops->fault == NULL);
}
rw_wunlock(&linux_vma_lock);
if (vmap->vm_ops->fault == NULL) {
if (error != 0) {
/* free allocated VM area struct */
linux_cdev_handle_free(vmap);
/* check for stale VM area struct */
if (error != EEXIST)
return (error);
}
/* check if there is no fault handler */
if (vm_no_fault) {
*object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE,
&linux_cdev_pager_ops[1], size, nprot, *offset,
curthread->td_ucred);
@ -1341,9 +1358,14 @@ linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
curthread->td_ucred);
}
/* check if allocating the VM object failed */
if (*object == NULL) {
linux_cdev_handle_remove(vmap);
linux_cdev_handle_free(vmap);
if (error == 0) {
/* remove VM area struct from list */
linux_cdev_handle_remove(vmap);
/* free allocated VM area struct */
linux_cdev_handle_free(vmap);
}
return (EINVAL);
}
} else {