Fix misuse of the kernel map in miscellaneous image activators.

Vnode-backed mappings cannot be put into the kernel map, since it is a
system map.

Use exec_map for transient mappings, and remove the mappings with
kmem_free_wakeup() so that threads waiting for free space in the map are
woken up.
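
For illustration, a minimal sketch of that pattern (the helper name
peek_header() and its inspect callback are invented here; vm_mmap(),
exec_map and kmem_free_wakeup() are the 9-era interfaces this change uses):

#include <sys/param.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

/*
 * Map one page of a vnode into exec_map for transient kernel access,
 * then release it with kmem_free_wakeup() so threads sleeping on
 * exec_map space are woken up.  kernel_map is not suitable here: it is
 * a system map and cannot carry vnode-backed (pageable) entries.
 */
static int
peek_header(struct vnode *vp, void (*inspect)(const void *))
{
        vm_offset_t kva;
        int error;

        error = vm_mmap(exec_map, &kva, PAGE_SIZE, VM_PROT_READ,
            VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
        if (error != 0)
                return (error);
        inspect((const void *)kva);

        /* Unmap and wake up waiters for exec_map space. */
        kmem_free_wakeup(exec_map, kva, PAGE_SIZE);
        return (0);
}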

Do not map the whole executable into KVA just to copy it out to
usermode.  Instead, use vn_rdwr() directly for the case of a binary that
is not page aligned.
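
A sketch of that direct-read path (the helper below is hypothetical; the
vn_rdwr() argument order follows the vn_rdwr(9) prototype: base, length,
then file offset):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/ucred.h>
#include <sys/uio.h>
#include <sys/vnode.h>

/*
 * Read len bytes of the image at file_offset straight into the already
 * established user mapping at uaddr, instead of mapping the file into
 * KVA and copyout()ing it.  A short read (aresid != 0) means a
 * truncated binary.
 */
static int
read_image_into_user(struct thread *td, struct vnode *vp, vm_offset_t uaddr,
    off_t file_offset, int len)
{
        ssize_t aresid;
        int error;

        error = vn_rdwr(UIO_READ, vp, (void *)uaddr, len, file_offset,
            UIO_USERSPACE, 0, td->td_ucred, NOCRED, &aresid, td);
        if (error != 0)
                return (error);
        return (aresid != 0 ? ENOEXEC : 0);
}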

There is one place left where a potentially unbounded amount of data is
mapped into exec_map, namely, the COFF image activator's enumeration of
the needed shared libraries.

Reviewed by:   alc
MFC after:     2 weeks
Author: Konstantin Belousov
Date:   2012-02-17 23:47:16 +00:00
Parent: 45cbfcdabc
Commit: 3494f31ad2
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=231885
5 changed files with 44 additions and 88 deletions


@@ -229,9 +229,9 @@ linux_uselib(struct thread *td, struct linux_uselib_args *args)
         struct vattr attr;
         vm_offset_t vmaddr;
         unsigned long file_offset;
-        vm_offset_t buffer;
         unsigned long bss_size;
         char *library;
+        ssize_t aresid;
         int error;
         int locked, vfslocked;
@@ -308,8 +308,8 @@ linux_uselib(struct thread *td, struct linux_uselib_args *args)
         if (error)
                 goto cleanup;
-        /* Pull in executable header into kernel_map */
-        error = vm_mmap(kernel_map, (vm_offset_t *)&a_out, PAGE_SIZE,
+        /* Pull in executable header into exec_map */
+        error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE,
             VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
         if (error)
                 goto cleanup;
@@ -402,24 +402,15 @@ linux_uselib(struct thread *td, struct linux_uselib_args *args)
                 if (error)
                         goto cleanup;
-                /* map file into kernel_map */
-                error = vm_mmap(kernel_map, &buffer,
-                    round_page(a_out->a_text + a_out->a_data + file_offset),
-                    VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp,
-                    trunc_page(file_offset));
-                if (error)
+                error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
+                    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
+                    td->td_ucred, NOCRED, &aresid, td);
+                if (error != 0)
                         goto cleanup;
-                /* copy from kernel VM space to user space */
-                error = copyout(PTRIN(buffer + file_offset),
-                    (void *)vmaddr, a_out->a_text + a_out->a_data);
-                /* release temporary kernel space */
-                vm_map_remove(kernel_map, buffer, buffer +
-                    round_page(a_out->a_text + a_out->a_data + file_offset));
-                if (error)
+                if (aresid != 0) {
+                        error = ENOEXEC;
                         goto cleanup;
+                }
         } else {
 #ifdef DEBUG
                 printf("uselib: Page aligned binary %lu\n", file_offset);
@@ -463,10 +454,9 @@ linux_uselib(struct thread *td, struct linux_uselib_args *args)
                 VFS_UNLOCK_GIANT(vfslocked);
         }
-        /* Release the kernel mapping. */
+        /* Release the temporary mapping. */
         if (a_out)
-                vm_map_remove(kernel_map, (vm_offset_t)a_out,
-                    (vm_offset_t)a_out + PAGE_SIZE);
+                kmem_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);
         return (error);
 }


@@ -66,8 +66,8 @@ exec_svr4_imgact(imgp)
         struct vmspace *vmspace;
         vm_offset_t vmaddr;
         unsigned long virtual_offset, file_offset;
-        vm_offset_t buffer;
         unsigned long bss_size;
+        ssize_t aresid;
         int error;
         if (((a_out->a_magic >> 16) & 0xff) != 0x64)
@@ -145,21 +145,15 @@ exec_svr4_imgact(imgp)
         if (error)
                 goto fail;
-        error = vm_mmap(kernel_map, &buffer,
-            round_page(a_out->a_text + a_out->a_data + file_offset),
-            VM_PROT_READ, VM_PROT_READ, 0,
-            OBJT_VNODE, imgp->vp, trunc_page(file_offset));
-        if (error)
-                goto fail;
-        error = copyout((caddr_t)(buffer + file_offset), (caddr_t)vmaddr,
-            a_out->a_text + a_out->a_data);
-        vm_map_remove(kernel_map, buffer,
-            buffer + round_page(a_out->a_text + a_out->a_data + file_offset));
-        if (error)
-                goto fail;
+        error = vn_rdwr(UIO_READ, imgp->vp, (void *)vmaddr, file_offset,
+            a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
+            curthread->td_ucred, NOCRED, &aresid, curthread);
+        if (error != 0)
+                goto fail;
+        if (aresid != 0) {
+                error = ENOEXEC;
+                goto fail;
+        }
         /*
          * remove write enable on the 'text' part


@@ -146,10 +146,7 @@ load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
         error = copyout(data_buf, (caddr_t) map_addr, copy_len);
-        if (vm_map_remove(exec_map,
-            (vm_offset_t) data_buf,
-            (vm_offset_t) data_buf + PAGE_SIZE))
-                panic("load_coff_section vm_map_remove failed");
+        kmem_free_wakeup(exec_map, (vm_offset_t)data_buf, PAGE_SIZE);
         return error;
 }
@@ -280,11 +277,7 @@ coff_load_file(struct thread *td, char *name)
         error = 0;
 dealloc_and_fail:
-        if (vm_map_remove(exec_map,
-            (vm_offset_t) ptr,
-            (vm_offset_t) ptr + PAGE_SIZE))
-                panic("%s vm_map_remove failed", __func__);
+        kmem_free_wakeup(exec_map, (vm_offset_t)ptr, PAGE_SIZE);
 fail:
         VOP_UNLOCK(vp, 0);
 unlocked_fail:
@@ -421,10 +414,7 @@ exec_coff_imgact(imgp)
                 }
                 free(libbuf, M_TEMP);
         }
-        if (vm_map_remove(exec_map,
-            (vm_offset_t) buf,
-            (vm_offset_t) buf + len))
-                panic("exec_coff_imgact vm_map_remove failed");
+        kmem_free_wakeup(exec_map, (vm_offset_t)buf, len);
         if (error)
                 goto fail;
 }


@@ -64,8 +64,8 @@ exec_linux_imgact(struct image_params *imgp)
         struct vmspace *vmspace;
         vm_offset_t vmaddr;
         unsigned long virtual_offset, file_offset;
-        vm_offset_t buffer;
         unsigned long bss_size;
+        ssize_t aresid;
         int error;
         if (((a_out->a_magic >> 16) & 0xff) != 0x64)
@@ -144,21 +144,15 @@ exec_linux_imgact(struct image_params *imgp)
         if (error)
                 goto fail;
-        error = vm_mmap(kernel_map, &buffer,
-            round_page(a_out->a_text + a_out->a_data + file_offset),
-            VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE,
-            imgp->vp, trunc_page(file_offset));
-        if (error)
-                goto fail;
-        error = copyout((void *)(uintptr_t)(buffer + file_offset),
-            (void *)vmaddr, a_out->a_text + a_out->a_data);
-        vm_map_remove(kernel_map, buffer,
-            buffer + round_page(a_out->a_text + a_out->a_data + file_offset));
-        if (error)
-                goto fail;
+        error = vn_rdwr(UIO_READ, imgp->vp, (void *)vmaddr, file_offset,
+            a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
+            curthread->td_ucred, NOCRED, &aresid, curthread);
+        if (error != 0)
+                goto fail;
+        if (aresid != 0) {
+                error = ENOEXEC;
+                goto fail;
+        }
         /*
          * remove write enable on the 'text' part


@@ -70,7 +70,7 @@ static int
 exec_gzip_imgact(imgp)
         struct image_params *imgp;
 {
-        int error, error2 = 0;
+        int error;
         const u_char *p = (const u_char *) imgp->image_header;
         struct imgact_gzip igz;
         struct inflate infl;
@@ -136,22 +136,17 @@ exec_gzip_imgact(imgp)
                             VM_PROT_READ|VM_PROT_EXECUTE,0);
         }
-        if (igz.inbuf) {
-                error2 =
-                    vm_map_remove(kernel_map, (vm_offset_t) igz.inbuf,
-                        (vm_offset_t) igz.inbuf + PAGE_SIZE);
-        }
-        if (igz.error || error || error2) {
+        if (igz.inbuf)
+                kmem_free_wakeup(exec_map, (vm_offset_t)igz.inbuf, PAGE_SIZE);
+        if (igz.error || error) {
                 printf("Output=%lu ", igz.output);
-                printf("Inflate_error=%d igz.error=%d error2=%d where=%d\n",
-                    error, igz.error, error2, igz.where);
+                printf("Inflate_error=%d igz.error=%d where=%d\n",
+                    error, igz.error, igz.where);
         }
         if (igz.error)
                 return igz.error;
         if (error)
                 return ENOEXEC;
-        if (error2)
-                return error2;
         return 0;
 }
@@ -314,18 +309,11 @@ NextByte(void *vp)
         if (igz->inbuf && igz->idx < (igz->offset + PAGE_SIZE)) {
                 return igz->inbuf[(igz->idx++) - igz->offset];
         }
-        if (igz->inbuf) {
-                error = vm_map_remove(kernel_map, (vm_offset_t) igz->inbuf,
-                    (vm_offset_t) igz->inbuf + PAGE_SIZE);
-                if (error) {
-                        igz->where = __LINE__;
-                        igz->error = error;
-                        return GZ_EOF;
-                }
-        }
+        if (igz->inbuf)
+                kmem_free_wakeup(exec_map, (vm_offset_t)igz->inbuf, PAGE_SIZE);
         igz->offset = igz->idx & ~PAGE_MASK;
-        error = vm_mmap(kernel_map,     /* map */
+        error = vm_mmap(exec_map,       /* map */
                         (vm_offset_t *) & igz->inbuf,   /* address */
                         PAGE_SIZE,      /* size */
                         VM_PROT_READ,   /* protection */