Use a globally visible region of zeros for both /dev/zero and the md
device. There are likely other kernel uses of "blob of zeros" that can be converted.

Reviewed by: alc
MFC after: 1 week
This commit is contained in:
parent
14f9771b27
commit
89cb2a19ec
@ -205,9 +205,6 @@ struct md_s {
|
||||
vm_object_t object;
|
||||
};
|
||||
|
||||
/* Used for BIO_DELETE on MD_VNODE */
|
||||
static u_char zero[PAGE_SIZE];
|
||||
|
||||
static struct indir *
|
||||
new_indir(u_int shift)
|
||||
{
|
||||
@ -560,7 +557,8 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
|
||||
* that the two cases end up having very little in common.
|
||||
*/
|
||||
if (bp->bio_cmd == BIO_DELETE) {
|
||||
zerosize = sizeof(zero) - (sizeof(zero) % sc->sectorsize);
|
||||
zerosize = ZERO_REGION_SIZE -
|
||||
(ZERO_REGION_SIZE % sc->sectorsize);
|
||||
auio.uio_iov = &aiov;
|
||||
auio.uio_iovcnt = 1;
|
||||
auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
|
||||
@ -573,7 +571,7 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
|
||||
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
|
||||
error = 0;
|
||||
while (auio.uio_offset < end) {
|
||||
aiov.iov_base = zero;
|
||||
aiov.iov_base = __DECONST(void *, zero_region);
|
||||
aiov.iov_len = end - auio.uio_offset;
|
||||
if (aiov.iov_len > zerosize)
|
||||
aiov.iov_len = zerosize;
|
||||
|
@ -65,8 +65,6 @@ static struct cdevsw zero_cdevsw = {
|
||||
.d_flags = D_MMAP_ANON,
|
||||
};
|
||||
|
||||
static void *zbuf;
|
||||
|
||||
/* ARGSUSED */
|
||||
static int
|
||||
null_write(struct cdev *dev __unused, struct uio *uio, int flags __unused)
|
||||
@ -95,10 +93,19 @@ null_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t data __unused,
|
||||
/*
 * zero_read -- read handler for /dev/zero.
 *
 * Satisfies the request by copying out of the globally shared,
 * read-only zero_region (published from vm_kern.c, declared in
 * <sys/systm.h>) rather than from a per-device zeroed buffer.
 *
 * Returns 0 on success or the error from uiomove().
 */
static int
zero_read(struct cdev *dev __unused, struct uio *uio, int flags __unused)
{
	void *zbuf;
	ssize_t len;
	int error = 0;

	/* This entry point only services reads; writes go to null_write(). */
	KASSERT(uio->uio_rw == UIO_READ,
	    ("Can't be in %s for write", __func__));
	/*
	 * uiomove() takes a non-const pointer; the region is mapped
	 * read-only and is never written through this alias.
	 */
	zbuf = __DECONST(void *, zero_region);
	while (uio->uio_resid > 0 && error == 0) {
		/* Copy at most one zero_region's worth per iteration. */
		len = uio->uio_resid;
		if (len > ZERO_REGION_SIZE)
			len = ZERO_REGION_SIZE;
		error = uiomove(zbuf, len, uio);
	}

	return (error);
}
|
||||
@ -111,7 +118,6 @@ null_modevent(module_t mod __unused, int type, void *data __unused)
|
||||
case MOD_LOAD:
|
||||
if (bootverbose)
|
||||
printf("null: <null device, zero device>\n");
|
||||
zbuf = (void *)malloc(PAGE_SIZE, M_TEMP, M_WAITOK | M_ZERO);
|
||||
null_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &null_cdevsw, 0,
|
||||
NULL, UID_ROOT, GID_WHEEL, 0666, "null");
|
||||
zero_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &zero_cdevsw, 0,
|
||||
@ -121,7 +127,6 @@ null_modevent(module_t mod __unused, int type, void *data __unused)
|
||||
case MOD_UNLOAD:
|
||||
destroy_dev(null_dev);
|
||||
destroy_dev(zero_dev);
|
||||
free(zbuf, M_TEMP);
|
||||
break;
|
||||
|
||||
case MOD_SHUTDOWN:
|
||||
|
@ -125,6 +125,9 @@ extern char static_hints[]; /* by config for now */
|
||||
|
||||
extern char **kenvp;
|
||||
|
||||
extern const void *zero_region; /* address space maps to a zeroed page */
|
||||
#define ZERO_REGION_SIZE (2048 * 1024)
|
||||
|
||||
/*
|
||||
* General function declarations.
|
||||
*/
|
||||
|
@ -91,6 +91,9 @@ vm_map_t exec_map=0;
|
||||
vm_map_t pipe_map;
|
||||
vm_map_t buffer_map=0;
|
||||
|
||||
const void *zero_region;
|
||||
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);
|
||||
|
||||
/*
|
||||
* kmem_alloc_nofault:
|
||||
*
|
||||
@ -527,6 +530,35 @@ kmem_free_wakeup(map, addr, size)
|
||||
vm_map_unlock(map);
|
||||
}
|
||||
|
||||
/*
 * kmem_init_zero_region -- create the kernel's shared region of zeros.
 *
 * Reserves ZERO_REGION_SIZE bytes of kernel virtual address space and
 * maps every virtual page of it, read-only, onto a single wired page
 * of zeros.  Consumers that need a "blob of zeros" (e.g. /dev/zero,
 * md(4) BIO_DELETE) can then read from zero_region instead of each
 * keeping a private zeroed buffer.
 */
static void
kmem_init_zero_region(void)
{
	vm_offset_t addr;
	vm_page_t m;
	unsigned int i;
	int error;

	/* Allocate virtual address space. */
	addr = kmem_alloc_nofault(kernel_map, ZERO_REGION_SIZE);

	/*
	 * Allocate a single physical page and zero it.  VM_ALLOC_ZERO is
	 * only a hint, so zero explicitly when PG_ZERO is not set.
	 * NOTE(review): vm_page_alloc() can return NULL under memory
	 * pressure; this boot-time path appears to assume it cannot fail
	 * here — confirm.
	 */
	m = vm_page_alloc(NULL, OFF_TO_IDX(addr - VM_MIN_KERNEL_ADDRESS),
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);

	/* Map every virtual page of the region onto that one zeroed page. */
	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
		pmap_qenter(addr + i, &m, 1);

	/* Protect it r/o so the shared page can never be dirtied. */
	error = vm_map_protect(kernel_map, addr, addr + ZERO_REGION_SIZE,
	    VM_PROT_READ, TRUE);
	KASSERT(error == 0, ("error=%d", error));

	/* Publish the region; declared extern in <sys/systm.h>. */
	zero_region = (const void *)addr;
}
|
||||
|
||||
/*
|
||||
* kmem_init:
|
||||
*
|
||||
@ -555,6 +587,8 @@ kmem_init(start, end)
|
||||
start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
|
||||
/* ... and ending with the completion of the above `insert' */
|
||||
vm_map_unlock(m);
|
||||
|
||||
kmem_init_zero_region();
|
||||
}
|
||||
|
||||
#ifdef DIAGNOSTIC
|
||||
|
Loading…
Reference in New Issue
Block a user