Clean up /dev/mem now that pmap handles illegal aliases properly. Don't
allow access to device memory through /dev/mem, or try to make modifying
kernel text through /dev/mem safe (it is not).
parent 5262ab2453
commit 56a6b03a6d
@@ -64,11 +64,15 @@
 #include <vm/vm.h>
 #include <vm/vm_param.h>
+#include <vm/vm_page.h>
+#include <vm/vm_kern.h>
 #include <vm/pmap.h>
 #include <vm/vm_extern.h>
 
 #include <machine/cache.h>
-#include <machine/upa.h>
+#include <machine/md_var.h>
+#include <machine/pmap.h>
+#include <machine/tlb.h>
 
 static dev_t memdev, kmemdev;
@@ -115,19 +119,26 @@ mmopen(dev_t dev, int flags, int fmt, struct thread *td)
 	return (0);
 }
 
-#define	IOSTART	UPA_MEMSTART
-
 /*ARGSUSED*/
 static int
 mmrw(dev_t dev, struct uio *uio, int flags)
 {
 	struct iovec *iov;
-	int error = 0;
-	vm_offset_t addr, eaddr, o, v = 0;
+	vm_offset_t eva;
+	vm_offset_t off;
+	vm_offset_t ova;
+	vm_offset_t pa;
+	vm_offset_t va;
 	vm_prot_t prot;
-	vm_size_t c = 0;
-	u_long asi;
-	char *buf = NULL;
+	vm_size_t cnt;
+	vm_page_t m;
+	int color;
+	int error;
+	int i;
+
+	cnt = 0;
+	error = 0;
+	ova = 0;
 
 	GIANT_REQUIRED;
 
@@ -143,69 +154,74 @@ mmrw(dev_t dev, struct uio *uio, int flags)
 		switch (minor(dev)) {
 		case 0:
 			/* mem (physical memory) */
-			if (buf == NULL) {
-				buf = malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK);
-				if (buf == NULL) {
-					error = ENOMEM;
-					break;
-				}
-			}
-			v = uio->uio_offset;
-			asi = ASI_PHYS_USE_EC;
-			/* Access device memory noncacheable. */
-			if (v >= IOSTART)
-				asi = ASI_PHYS_BYPASS_EC_WITH_EBIT;
-			o = v & PAGE_MASK;
-			c = ulmin(iov->iov_len, PAGE_SIZE - o);
-			/*
-			 * This double copy could be avoided, at the cost of
-			 * inlining a version of uiomove. Since this is not
-			 * performance-critical, it is probably not worth it.
-			 */
-			if (uio->uio_rw == UIO_READ)
-				ascopyfrom(asi, v, buf, c);
-			error = uiomove(buf, c, uio);
-			if (error == 0 && uio->uio_rw == UIO_WRITE)
-				ascopyto(buf, asi, v, c);
-			/*
-			 * If a write was evil enough to change kernel code,
-			 * I$ must be flushed. Also, D$ must be flushed if there
-			 * is a chance that there is a cacheable mapping to
-			 * avoid working with stale data.
-			 */
-			if (v < IOSTART && uio->uio_rw == UIO_WRITE) {
-				icache_inval_phys(v, v + c);
-				dcache_inval_phys(v, v + c);
+			pa = uio->uio_offset & ~PAGE_MASK;
+			if (!is_physical_memory(pa)) {
+				error = EFAULT;
+				break;
+			}
+
+			off = uio->uio_offset & PAGE_MASK;
+			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
+			    PAGE_MASK);
+			cnt = min(cnt, PAGE_SIZE - off);
+			cnt = min(cnt, iov->iov_len);
+
+			m = NULL;
+			for (i = 0; phys_avail[i] != 0; i += 2) {
+				if (pa >= phys_avail[i] &&
+				    pa < phys_avail[i + 1]) {
+					m = PHYS_TO_VM_PAGE(pa);
+					break;
+				}
+			}
+
+			if (m != NULL) {
+				if (ova == 0) {
+					ova = kmem_alloc_wait(kernel_map,
+					    PAGE_SIZE * DCACHE_COLORS);
+				}
+				if ((color = m->md.color) == -1)
+					va = ova;
+				else
+					va = ova + color * PAGE_SIZE;
+				pmap_qenter(va, &m, 1);
+				error = uiomove((void *)(va + off), cnt,
+				    uio);
+				pmap_qremove(va, 1);
+			} else {
+				va = TLB_PHYS_TO_DIRECT(pa);
+				error = uiomove((void *)(va + off), cnt,
+				    uio);
 			}
 			break;
 		case 1:
 			/* kmem (kernel memory) */
-			c = iov->iov_len;
+			va = trunc_page(uio->uio_offset);
+			eva = round_page(uio->uio_offset + iov->iov_len);
 
 			/*
-			 * Make sure that all of the pages are currently resident so
-			 * that we don't create any zero-fill pages.
+			 * Make sure that all of the pages are currently
+			 * resident so we don't create any zero fill pages.
 			 */
-			addr = trunc_page(uio->uio_offset);
-			eaddr = round_page(uio->uio_offset + c);
-
-			for (; addr < eaddr; addr += PAGE_SIZE)
-				if (pmap_extract(kernel_pmap, addr) == 0)
-					return EFAULT;
+			for (; va < eva; va += PAGE_SIZE)
+				if (pmap_kextract(va) == 0)
+					return (EFAULT);
 
 			prot = (uio->uio_rw == UIO_READ) ? VM_PROT_READ :
 			    VM_PROT_WRITE;
-			v = uio->uio_offset;
-			if (v < VM_MIN_DIRECT_ADDRESS &&
-			    kernacc((caddr_t)v, c, prot) == FALSE)
+			va = uio->uio_offset;
+			if (va < VM_MIN_DIRECT_ADDRESS &&
+			    kernacc((void *)va, iov->iov_len, prot) == FALSE)
 				return (EFAULT);
-			error = uiomove((caddr_t)v, c, uio);
-			if (uio->uio_rw == UIO_WRITE)
-				icache_flush(v, v + c);
+
+			error = uiomove((void *)va, iov->iov_len, uio);
 			break;
 		default:
 			return (ENODEV);
 		}
 	}
-	if (buf != NULL)
-		free(buf, M_DEVBUF);
+	if (ova != 0)
+		kmem_free_wakeup(kernel_map, ova, PAGE_SIZE * DCACHE_COLORS);
 	return (error);
 }
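
The per-pass transfer size in the new mem case is clamped three ways: it may
not cross the end of the source physical page, the end of the page containing
the user buffer pointer, or the remaining iovec length. A standalone sketch of
that min() cascade, with illustrative constants and sample values (not taken
from the commit):

#include <stdint.h>
#include <stdio.h>

#define	PAGE_SIZE	8192UL		/* sparc64 base page size */
#define	PAGE_MASK	(PAGE_SIZE - 1)

/*
 * Mirror of the cnt computation in the mem case: start with the room
 * left in the page holding the user buffer, then clamp to the room
 * left in the physical page and to the iovec length.
 */
static size_t
clamp_cnt(size_t off, uintptr_t user_base, size_t iov_len)
{
	size_t cnt;

	cnt = PAGE_SIZE - (user_base & PAGE_MASK);
	if (cnt > PAGE_SIZE - off)
		cnt = PAGE_SIZE - off;
	if (cnt > iov_len)
		cnt = iov_len;
	return (cnt);
}

int
main(void)
{
	/* 0x1f00 bytes into the physical page, oddly aligned user buffer. */
	printf("%zu\n", clamp_cnt(0x1f00, 0x40001000UL + 42, 65536));
	return (0);
}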
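When the target page is managed RAM, the new code maps it at a kernel VA whose
data-cache color matches the page's, which is what keeps the temporary mapping
from creating an illegal D$ alias now that pmap enforces them. A minimal
sketch of the color-to-slot arithmetic, with hypothetical constants (the real
PAGE_SIZE and DCACHE_COLORS come from the sparc64 headers):

#include <stdio.h>

#define	PAGE_SIZE	8192UL	/* sparc64 base page size */
#define	DCACHE_COLORS	2	/* hypothetical: 16K virtually-indexed D$ */

/*
 * On a virtually-indexed D$ spanning DCACHE_COLORS pages, two mappings
 * of the same physical page alias safely only when their VAs are
 * congruent modulo DCACHE_COLORS * PAGE_SIZE. Reserving a window of
 * DCACHE_COLORS pages (ova) guarantees one slot of every color; a
 * color of -1 means the page has no established color yet, so the
 * first slot is as good as any.
 */
static unsigned long
color_slot(unsigned long ova, int color)
{
	return (color == -1 ? ova : ova + (unsigned long)color * PAGE_SIZE);
}

int
main(void)
{
	printf("color 1 maps at %#lx\n", color_slot(0xf0000000UL, 1));
	return (0);
}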
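The user-visible effect of dropping the bypass-ASI path: an offset that
is_physical_memory() rejects now fails with EFAULT instead of being read
through ASI_PHYS_BYPASS_EC_WITH_EBIT. A hypothetical probe from userland (the
offset is made up, not a real device address, and the program needs enough
privilege to open /dev/mem):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	char buf[16];
	int fd;

	fd = open("/dev/mem", O_RDONLY);
	if (fd == -1) {
		perror("open");
		return (1);
	}
	/* Hypothetical non-RAM physical offset; expect EFAULT now. */
	if (pread(fd, buf, sizeof(buf), 0x1ff00000000LL) == -1)
		printf("read rejected: %s\n", strerror(errno));
	close(fd);
	return (0);
}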