Remove references to vm_zone.h and switch over to the new UMA API.

Also, remove maxsockets.  If you look carefully, you'll notice that the old
zone allocator never honored this limit anyway.
Jeff Roberson 2002-03-20 04:09:59 +00:00
parent e38eb7fbe4
commit c897b81311
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=92751
20 changed files with 111 additions and 139 deletions
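For orientation, the conversion repeated in every hunk below replaces the vm_zone calls zinit()/zalloc()/zfree() with uma_zcreate()/uma_zalloc()/uma_zfree(). The fragment that follows is a minimal sketch of that mapping, not code taken from this commit: the "foo" zone, struct foo, and the exact header list are illustrative assumptions.

#include <sys/param.h>
#include <sys/malloc.h>                 /* M_WAITOK */
#include <vm/uma.h>                     /* replaces <vm/vm_zone.h> */

struct foo {                            /* hypothetical item type */
        int     f_dummy;
};

static uma_zone_t foo_zone;             /* was: static vm_zone_t foo_zone; */

static void
foo_zone_init(void)
{
        /* Old: foo_zone = zinit("FOO", sizeof(struct foo), 0, 0, 1); */
        foo_zone = uma_zcreate("FOO", sizeof(struct foo),
            NULL, NULL,                 /* no item constructor/destructor */
            NULL, NULL,                 /* no zone init/fini */
            UMA_ALIGN_PTR, 0);          /* pointer alignment, no flags */
}

static struct foo *
foo_alloc(void)
{
        /* Old: return (zalloc(foo_zone)); */
        return (uma_zalloc(foo_zone, M_WAITOK));
}

static void
foo_free(struct foo *fp)
{
        /* Old: zfree(foo_zone, fp); */
        uma_zfree(foo_zone, fp);
}

Unlike zalloc(), uma_zalloc() takes an explicit M_WAITOK or M_NOWAIT flag, which is why every converted allocation site in the diff gains a second argument; zones whose items must never be returned to the system pass UMA_ZONE_NOFREE as the final flags argument instead of 0.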

View File

@ -67,7 +67,7 @@
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <vm/uma.h>
static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");

View File

@ -49,7 +49,7 @@
#include <sys/sysproto.h>
#include <sys/uio.h>
#include <vm/vm_zone.h>
#include <vm/uma.h>
MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
@ -107,7 +107,7 @@ static struct filterops proc_filtops =
static struct filterops timer_filtops =
{ 0, filt_timerattach, filt_timerdetach, filt_timer };
static vm_zone_t knote_zone;
static uma_zone_t knote_zone;
static int kq_ncallouts = 0;
static int kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
@ -1063,18 +1063,20 @@ knote_dequeue(struct knote *kn)
static void
knote_init(void)
{
knote_zone = zinit("KNOTE", sizeof(struct knote), 0, 0, 1);
knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, 0);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)
static struct knote *
knote_alloc(void)
{
return ((struct knote *)zalloc(knote_zone));
return ((struct knote *)uma_zalloc(knote_zone, M_WAITOK));
}
static void
knote_free(struct knote *kn)
{
zfree(knote_zone, kn);
uma_zfree(knote_zone, kn);
}

View File

@ -70,7 +70,7 @@
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>
#include <vm/uma.h>
#include <sys/user.h>
/* Required to be non-static for SysVR4 emulator */
@ -654,7 +654,7 @@ wait1(td, uap, compat)
*/
vm_waitproc(p);
mtx_destroy(&p->p_mtx);
zfree(proc_zone, p);
uma_zfree(proc_zone, p);
nprocs--;
error = 0;
goto done2;

View File

@ -66,7 +66,7 @@
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <vm/uma.h>
#include <sys/vmmeter.h>
#include <sys/user.h>
@ -325,7 +325,7 @@ fork1(td, flags, procp)
}
/* Allocate new proc. */
newproc = zalloc(proc_zone);
newproc = uma_zalloc(proc_zone, M_WAITOK);
/*
* Setup linkage for kernel based threading

View File

@ -53,7 +53,7 @@
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>
#include <vm/uma.h>
MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
@ -80,8 +80,8 @@ struct proclist zombproc;
struct sx allproc_lock;
struct sx proctree_lock;
struct sx pgrpsess_lock;
vm_zone_t proc_zone;
vm_zone_t ithread_zone;
uma_zone_t proc_zone;
uma_zone_t ithread_zone;
/*
* Initialize global process hashing structures.
@ -98,7 +98,8 @@ procinit()
LIST_INIT(&zombproc);
pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
proc_zone = zinit("PROC", sizeof (struct proc), 0, 0, 5);
proc_zone = uma_zcreate("PROC", sizeof (struct proc), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
uihashinit();
/*
* This should really be a compile time warning, but I do

View File

@ -79,7 +79,7 @@
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>
#include <vm/uma.h>
/*
* Use this define if you want to disable *fancy* VM things. Expect an
@ -175,7 +175,7 @@ static void pipe_clone_write_buffer(struct pipe *wpipe);
#endif
static int pipespace(struct pipe *cpipe, int size);
static vm_zone_t pipe_zone;
static uma_zone_t pipe_zone;
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);
@ -336,7 +336,7 @@ pipe_create(cpipep)
struct pipe *cpipe;
int error;
*cpipep = zalloc(pipe_zone);
*cpipep = uma_zalloc(pipe_zone, M_WAITOK);
if (*cpipep == NULL)
return (ENOMEM);
@ -1337,7 +1337,7 @@ pipeclose(cpipe)
}
mtx_lock(&Giant);
pipe_free_kmem(cpipe);
zfree(pipe_zone, cpipe);
uma_zfree(pipe_zone, cpipe);
mtx_unlock(&Giant);
}

View File

@ -42,7 +42,7 @@
#include <sys/kernel.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <vm/vm_zone.h>
#include <vm/uma.h>
/*
* System initialization
@ -122,18 +122,11 @@ domaininit(void *dummy)
{
/*
* Before we do any setup, make sure to initialize the
* zone allocator we get struct sockets from. The obvious
* maximum number of sockets is `maxfiles', but it is possible
* to have a socket without an open file (e.g., a connection waiting
* to be accept(2)ed). Rather than think up and define a
* better value, we just use nmbclusters, since that's what people
* are told to increase first when the network runs out of memory.
* Perhaps we should have two pools, one of unlimited size
* for use during socreate(), and one ZONE_INTERRUPT pool for
* use in sonewconn().
* zone allocator we get struct sockets from.
*/
socket_zone = zinit("socket", sizeof(struct socket), maxsockets,
ZONE_INTERRUPT, 0);
socket_zone = uma_zcreate("socket", sizeof(struct socket), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
if (max_linkhdr < 16) /* XXX */
max_linkhdr = 16;

View File

@ -55,8 +55,6 @@
#include <sys/aio.h> /* for aio_swake proto */
#include <sys/event.h>
int maxsockets;
void (*aio_swake)(struct socket *, struct sockbuf *);
/*
@ -1004,17 +1002,5 @@ SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLFLAG_RW,
&sb_max, 0, "Maximum socket buffer size");
SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RD,
&maxsockets, 0, "Maximum number of sockets avaliable");
SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
&sb_efficiency, 0, "");
/*
* Initialise maxsockets
*/
static void init_maxsockets(void *ignored)
{
TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
}
SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);

View File

@ -59,7 +59,7 @@
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>
#include <vm/uma.h>
#include <machine/limits.h>
@ -80,7 +80,7 @@ static struct filterops soread_filtops =
static struct filterops sowrite_filtops =
{ 1, NULL, filt_sowdetach, filt_sowrite };
vm_zone_t socket_zone;
uma_zone_t socket_zone;
so_gen_t so_gencnt; /* generation count for sockets */
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
@ -119,7 +119,7 @@ soalloc(waitok)
{
struct socket *so;
so = zalloc(socket_zone);
so = uma_zalloc(socket_zone, waitok);
if (so) {
/* XXX race condition for reentrant kernel */
bzero(so, sizeof *so);
@ -225,7 +225,7 @@ sodealloc(struct socket *so)
#endif
crfree(so->so_cred);
/* sx_destroy(&so->so_sxlock); */
zfree(so->so_zone, so);
uma_zfree(so->so_zone, so);
--numopensockets;
}

View File

@ -55,8 +55,6 @@
#include <sys/aio.h> /* for aio_swake proto */
#include <sys/event.h>
int maxsockets;
void (*aio_swake)(struct socket *, struct sockbuf *);
/*
@ -1004,17 +1002,5 @@ SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLFLAG_RW,
&sb_max, 0, "Maximum socket buffer size");
SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RD,
&maxsockets, 0, "Maximum number of sockets avaliable");
SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
&sb_efficiency, 0, "");
/*
* Initialise maxsockets
*/
static void init_maxsockets(void *ignored)
{
TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
}
SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);

View File

@ -50,7 +50,7 @@
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>
#include <vm/uma.h>
#include <sys/aio.h>
#include <machine/limits.h>
@ -271,7 +271,7 @@ static int filt_aio(struct knote *kn, long hint);
* aiol list io job pointer - internal to aio_suspend XXX
* aiolio list io jobs
*/
static vm_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;
static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;
/* kqueue filters for aio */
static struct filterops aio_filtops =
@ -336,11 +336,16 @@ aio_onceonly(void)
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
kaio_zone = zinit("AIO", sizeof(struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof(struct aiothreadlist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof(struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t), 0, 0, 1);
aiolio_zone = zinit("AIOLIO", sizeof(struct aio_liojob), 0, 0, 1);
kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
aiop_zone = uma_zcreate("AIOP", sizeof(struct aiothreadlist), NULL,
NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
aiocb_zone = uma_zcreate("AIOCB", sizeof(struct aiocblist), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t) , NULL,
NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aio_liojob), NULL,
NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
@ -377,7 +382,7 @@ aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
ki = uma_zalloc(kaio_zone, M_WAITOK);
p->p_aioinfo = ki;
ki->kaio_flags = 0;
ki->kaio_maxactive_count = max_aio_per_proc;
@ -500,11 +505,11 @@ aio_free_entry(struct aiocblist *aiocbe)
}
if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
zfree(aiolio_zone, lj);
uma_zfree(aiolio_zone, lj);
}
aiocbe->jobstate = JOBST_NULL;
untimeout(process_signal, aiocbe, aiocbe->timeouthandle);
zfree(aiocb_zone, aiocbe);
uma_zfree(aiocb_zone, aiocbe);
return 0;
}
@ -624,7 +629,7 @@ aio_proc_rundown(struct proc *p)
if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count ==
0)) {
TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
zfree(aiolio_zone, lj);
uma_zfree(aiolio_zone, lj);
} else {
#ifdef DIAGNOSTIC
printf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, "
@ -636,7 +641,7 @@ aio_proc_rundown(struct proc *p)
}
}
zfree(kaio_zone, ki);
uma_zfree(kaio_zone, ki);
p->p_aioinfo = NULL;
}
@ -789,7 +794,7 @@ aio_daemon(void *uproc)
* Allocate and ready the aio control info. There is one aiop structure
* per daemon.
*/
aiop = zalloc(aiop_zone);
aiop = uma_zalloc(aiop_zone, M_WAITOK);
aiop->aiothread = td;
aiop->aiothreadflags |= AIOP_FREE;
@ -955,7 +960,7 @@ aio_daemon(void *uproc)
s = splnet();
if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE) {
aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE;
zfree(aiocb_zone, aiocbe);
uma_zfree(aiocb_zone, aiocbe);
} else {
TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe,
@ -1031,7 +1036,7 @@ aio_daemon(void *uproc)
(num_aio_procs > target_aio_procs)) {
TAILQ_REMOVE(&aio_freeproc, aiop, list);
splx(s);
zfree(aiop_zone, aiop);
uma_zfree(aiop_zone, aiop);
num_aio_procs--;
#ifdef DIAGNOSTIC
if (mycp->p_vmspace->vm_refcnt <= 1) {
@ -1332,7 +1337,7 @@ _aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int typ
struct kqueue *kq;
struct file *kq_fp;
aiocbe = zalloc(aiocb_zone);
aiocbe = uma_zalloc(aiocb_zone, M_WAITOK);
aiocbe->inputcharge = 0;
aiocbe->outputcharge = 0;
callout_handle_init(&aiocbe->timeouthandle);
@ -1345,12 +1350,12 @@ _aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int typ
error = copyin(job, &aiocbe->uaiocb, sizeof(aiocbe->uaiocb));
if (error) {
suword(&job->_aiocb_private.error, error);
zfree(aiocb_zone, aiocbe);
uma_zfree(aiocb_zone, aiocbe);
return error;
}
if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL &&
!_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
zfree(aiocb_zone, aiocbe);
uma_zfree(aiocb_zone, aiocbe);
return EINVAL;
}
@ -1370,7 +1375,7 @@ _aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int typ
*/
fd = aiocbe->uaiocb.aio_fildes;
if (fd >= fdp->fd_nfiles) {
zfree(aiocb_zone, aiocbe);
uma_zfree(aiocb_zone, aiocbe);
if (type == 0)
suword(&job->_aiocb_private.error, EBADF);
return EBADF;
@ -1379,14 +1384,14 @@ _aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int typ
fp = aiocbe->fd_file = fdp->fd_ofiles[fd];
if ((fp == NULL) || ((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) ==
0))) {
zfree(aiocb_zone, aiocbe);
uma_zfree(aiocb_zone, aiocbe);
if (type == 0)
suword(&job->_aiocb_private.error, EBADF);
return EBADF;
}
if (aiocbe->uaiocb.aio_offset == -1LL) {
zfree(aiocb_zone, aiocbe);
uma_zfree(aiocb_zone, aiocbe);
if (type == 0)
suword(&job->_aiocb_private.error, EINVAL);
return EINVAL;
@ -1394,7 +1399,7 @@ _aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int typ
error = suword(&job->_aiocb_private.kernelinfo, jobrefid);
if (error) {
zfree(aiocb_zone, aiocbe);
uma_zfree(aiocb_zone, aiocbe);
if (type == 0)
suword(&job->_aiocb_private.error, EINVAL);
return error;
@ -1407,7 +1412,7 @@ _aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int typ
jobrefid++;
if (opcode == LIO_NOP) {
zfree(aiocb_zone, aiocbe);
uma_zfree(aiocb_zone, aiocbe);
if (type == 0) {
suword(&job->_aiocb_private.error, 0);
suword(&job->_aiocb_private.status, 0);
@ -1417,7 +1422,7 @@ _aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int typ
}
if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) {
zfree(aiocb_zone, aiocbe);
uma_zfree(aiocb_zone, aiocbe);
if (type == 0) {
suword(&job->_aiocb_private.status, 0);
suword(&job->_aiocb_private.error, EINVAL);
@ -1461,7 +1466,7 @@ _aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int typ
error = kqueue_register(kq, &kev, td);
aqueue_fail:
if (error) {
zfree(aiocb_zone, aiocbe);
uma_zfree(aiocb_zone, aiocbe);
if (type == 0)
suword(&job->_aiocb_private.error, error);
goto done;
@ -1687,8 +1692,8 @@ aio_suspend(struct thread *td, struct aio_suspend_args *uap)
return EAGAIN;
njoblist = 0;
ijoblist = zalloc(aiol_zone);
ujoblist = zalloc(aiol_zone);
ijoblist = uma_zalloc(aiol_zone, M_WAITOK);
ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
cbptr = uap->aiocbp;
for (i = 0; i < uap->nent; i++) {
@ -1701,8 +1706,8 @@ aio_suspend(struct thread *td, struct aio_suspend_args *uap)
}
if (njoblist == 0) {
zfree(aiol_zone, ijoblist);
zfree(aiol_zone, ujoblist);
uma_zfree(aiol_zone, ijoblist);
uma_zfree(aiol_zone, ujoblist);
return 0;
}
@ -1715,8 +1720,8 @@ aio_suspend(struct thread *td, struct aio_suspend_args *uap)
ijoblist[i]) {
if (ujoblist[i] != cb->uuaiocb)
error = EINVAL;
zfree(aiol_zone, ijoblist);
zfree(aiol_zone, ujoblist);
uma_zfree(aiol_zone, ijoblist);
uma_zfree(aiol_zone, ujoblist);
return error;
}
}
@ -1732,8 +1737,8 @@ aio_suspend(struct thread *td, struct aio_suspend_args *uap)
splx(s);
if (ujoblist[i] != cb->uuaiocb)
error = EINVAL;
zfree(aiol_zone, ijoblist);
zfree(aiol_zone, ujoblist);
uma_zfree(aiol_zone, ijoblist);
uma_zfree(aiol_zone, ujoblist);
return error;
}
}
@ -1744,12 +1749,12 @@ aio_suspend(struct thread *td, struct aio_suspend_args *uap)
splx(s);
if (error == ERESTART || error == EINTR) {
zfree(aiol_zone, ijoblist);
zfree(aiol_zone, ujoblist);
uma_zfree(aiol_zone, ijoblist);
uma_zfree(aiol_zone, ujoblist);
return EINTR;
} else if (error == EWOULDBLOCK) {
zfree(aiol_zone, ijoblist);
zfree(aiol_zone, ujoblist);
uma_zfree(aiol_zone, ijoblist);
uma_zfree(aiol_zone, ujoblist);
return EAGAIN;
}
}
@ -2009,7 +2014,7 @@ lio_listio(struct thread *td, struct lio_listio_args *uap)
if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count)
return EAGAIN;
lj = zalloc(aiolio_zone);
lj = uma_zalloc(aiolio_zone, M_WAITOK);
if (!lj)
return EAGAIN;
@ -2027,11 +2032,11 @@ lio_listio(struct thread *td, struct lio_listio_args *uap)
error = copyin(uap->sig, &lj->lioj_signal,
sizeof(lj->lioj_signal));
if (error) {
zfree(aiolio_zone, lj);
uma_zfree(aiolio_zone, lj);
return error;
}
if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
zfree(aiolio_zone, lj);
uma_zfree(aiolio_zone, lj);
return EINVAL;
}
lj->lioj_flags |= LIOJ_SIGNAL;

View File

@ -63,7 +63,6 @@
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>
static int vop_nolookup(struct vop_lookup_args *);
static int vop_nostrategy(struct vop_strategy_args *);

View File

@ -72,8 +72,8 @@
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_zone.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
static int change_dir(struct nameidata *ndp, struct thread *td);
static void checkdirs(struct vnode *olddp, struct vnode *newdp);
@ -1538,7 +1538,7 @@ symlink(td, uap)
int error;
struct nameidata nd;
path = zalloc(namei_zone);
path = uma_zalloc(namei_zone, M_WAITOK);
if ((error = copyinstr(SCARG(uap, path), path, MAXPATHLEN, NULL)) != 0)
goto out;
restart:
@ -1574,7 +1574,7 @@ symlink(td, uap)
ASSERT_VOP_UNLOCKED(nd.ni_dvp, "symlink");
ASSERT_VOP_UNLOCKED(nd.ni_vp, "symlink");
out:
zfree(namei_zone, path);
uma_zfree(namei_zone, path);
return (error);
}

View File

@ -47,7 +47,6 @@
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <vm/vm_zone.h>
MALLOC_DEFINE(M_VNODE, "vnodes", "Dynamically allocated vnodes");

View File

@ -55,12 +55,12 @@
#include <sys/ktrace.h>
#endif
#include <vm/vm_zone.h>
#include <vm/uma.h>
/*
* Allocation zone for namei
*/
vm_zone_t namei_zone;
uma_zone_t namei_zone;
static void
nameiinit(void *dummy __unused)
@ -118,7 +118,7 @@ namei(ndp)
* name into the buffer.
*/
if ((cnp->cn_flags & HASBUF) == 0)
cnp->cn_pnbuf = zalloc(namei_zone);
cnp->cn_pnbuf = uma_zalloc(namei_zone, M_WAITOK);
if (ndp->ni_segflg == UIO_SYSSPACE)
error = copystr(ndp->ni_dirp, cnp->cn_pnbuf,
MAXPATHLEN, (size_t *)&ndp->ni_pathlen);
@ -133,7 +133,7 @@ namei(ndp)
error = ENOENT;
if (error) {
zfree(namei_zone, cnp->cn_pnbuf);
uma_zfree(namei_zone, cnp->cn_pnbuf);
ndp->ni_vp = NULL;
return (error);
}
@ -171,7 +171,7 @@ namei(ndp)
ndp->ni_startdir = dp;
error = lookup(ndp);
if (error) {
zfree(namei_zone, cnp->cn_pnbuf);
uma_zfree(namei_zone, cnp->cn_pnbuf);
return (error);
}
/*
@ -179,7 +179,7 @@ namei(ndp)
*/
if ((cnp->cn_flags & ISSYMLINK) == 0) {
if ((cnp->cn_flags & (SAVENAME | SAVESTART)) == 0)
zfree(namei_zone, cnp->cn_pnbuf);
uma_zfree(namei_zone, cnp->cn_pnbuf);
else
cnp->cn_flags |= HASBUF;
@ -199,7 +199,7 @@ namei(ndp)
break;
}
if (ndp->ni_pathlen > 1)
cp = zalloc(namei_zone);
cp = uma_zalloc(namei_zone, M_WAITOK);
else
cp = cnp->cn_pnbuf;
aiov.iov_base = cp;
@ -214,25 +214,25 @@ namei(ndp)
error = VOP_READLINK(ndp->ni_vp, &auio, cnp->cn_cred);
if (error) {
if (ndp->ni_pathlen > 1)
zfree(namei_zone, cp);
uma_zfree(namei_zone, cp);
break;
}
linklen = MAXPATHLEN - auio.uio_resid;
if (linklen == 0) {
if (ndp->ni_pathlen > 1)
zfree(namei_zone, cp);
uma_zfree(namei_zone, cp);
error = ENOENT;
break;
}
if (linklen + ndp->ni_pathlen >= MAXPATHLEN) {
if (ndp->ni_pathlen > 1)
zfree(namei_zone, cp);
uma_zfree(namei_zone, cp);
error = ENAMETOOLONG;
break;
}
if (ndp->ni_pathlen > 1) {
bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen);
zfree(namei_zone, cnp->cn_pnbuf);
uma_zfree(namei_zone, cnp->cn_pnbuf);
cnp->cn_pnbuf = cp;
} else
cnp->cn_pnbuf[linklen] = '\0';
@ -240,7 +240,7 @@ namei(ndp)
vput(ndp->ni_vp);
dp = ndp->ni_dvp;
}
zfree(namei_zone, cnp->cn_pnbuf);
uma_zfree(namei_zone, cnp->cn_pnbuf);
vrele(ndp->ni_dvp);
vput(ndp->ni_vp);
ndp->ni_vp = NULL;

View File

@ -69,7 +69,7 @@
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>
#include <vm/uma.h>
static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
@ -188,8 +188,8 @@ static struct mtx spechash_mtx;
struct nfs_public nfs_pub;
/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static vm_zone_t vnode_zone;
static vm_zone_t vnodepoll_zone;
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;
/* Set to 1 to print out reclaim of active vnodes */
int prtactive;
@ -257,7 +257,7 @@ SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, &vnlru_nowhere, 0,
void
v_addpollinfo(struct vnode *vp)
{
vp->v_pollinfo = zalloc(vnodepoll_zone);
vp->v_pollinfo = uma_zalloc(vnodepoll_zone, M_WAITOK);
mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", MTX_DEF);
}
@ -276,8 +276,10 @@ vntblinit(void *dummy __unused)
mtx_init(&spechash_mtx, "spechash", MTX_DEF);
TAILQ_INIT(&vnode_free_list);
mtx_init(&vnode_free_list_mtx, "vnode_free_list", MTX_DEF);
vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5);
vnodepoll_zone = zinit("VNODEPOLL", sizeof (struct vpollinfo), 0, 0, 5);
vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
/*
* Initialize the filesystem syncer.
*/
@ -796,7 +798,7 @@ getnewvnode(tag, mp, vops, vpp)
#endif
if (vp->v_pollinfo) {
mtx_destroy(&vp->v_pollinfo->vpi_lock);
zfree(vnodepoll_zone, vp->v_pollinfo);
uma_zfree(vnodepoll_zone, vp->v_pollinfo);
}
vp->v_pollinfo = NULL;
vp->v_flag = 0;
@ -807,7 +809,7 @@ getnewvnode(tag, mp, vops, vpp)
vp->v_socket = 0;
} else {
mtx_unlock(&vnode_free_list_mtx);
vp = (struct vnode *) zalloc(vnode_zone);
vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK);
bzero((char *) vp, sizeof *vp);
mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
vp->v_dd = vp;
@ -3037,7 +3039,7 @@ NDFREE(ndp, flags)
{
if (!(flags & NDF_NO_FREE_PNBUF) &&
(ndp->ni_cnd.cn_flags & HASBUF)) {
zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
ndp->ni_cnd.cn_flags &= ~HASBUF;
}
if (!(flags & NDF_NO_DVP_UNLOCK) &&

View File

@ -72,8 +72,8 @@
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_zone.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
static int change_dir(struct nameidata *ndp, struct thread *td);
static void checkdirs(struct vnode *olddp, struct vnode *newdp);
@ -1538,7 +1538,7 @@ symlink(td, uap)
int error;
struct nameidata nd;
path = zalloc(namei_zone);
path = uma_zalloc(namei_zone, M_WAITOK);
if ((error = copyinstr(SCARG(uap, path), path, MAXPATHLEN, NULL)) != 0)
goto out;
restart:
@ -1574,7 +1574,7 @@ symlink(td, uap)
ASSERT_VOP_UNLOCKED(nd.ni_dvp, "symlink");
ASSERT_VOP_UNLOCKED(nd.ni_vp, "symlink");
out:
zfree(namei_zone, path);
uma_zfree(namei_zone, path);
return (error);
}

View File

@ -57,7 +57,7 @@
#endif
#include <sys/ucred.h>
#include <machine/proc.h> /* Machine-dependent proc substruct. */
#include <vm/vm_zone.h>
#include <vm/uma.h>
/*
* One structure allocated per session.
@ -702,7 +702,7 @@ extern struct proclist zombproc; /* List of zombie processes. */
extern struct proc *initproc, *pageproc; /* Process slots for init, pager. */
extern struct proc *updateproc; /* Process slot for syncer (sic). */
extern vm_zone_t proc_zone;
extern uma_zone_t proc_zone;
extern int lastpid;

View File

@ -40,7 +40,7 @@
#include <sys/queue.h> /* for TAILQ macros */
#include <sys/sx.h> /* SX locks */
#include <sys/selinfo.h> /* for struct selinfo */
#include <vm/vm_zone.h>
#include <vm/uma.h>
/*
* Kernel structure per socket.
@ -53,7 +53,7 @@ typedef u_quad_t so_gen_t;
struct accept_filter;
struct socket {
vm_zone_t so_zone; /* zone we were allocated from */
uma_zone_t so_zone; /* zone we were allocated from */
int so_count; /* reference count */
short so_type; /* generic type, see socket.h */
short so_options; /* from socket call, see socket.h */
@ -318,9 +318,8 @@ MALLOC_DECLARE(M_SONAME);
MALLOC_DECLARE(M_ACCF);
#endif
extern int maxsockets;
extern u_long sb_max;
extern vm_zone_t socket_zone;
extern uma_zone_t socket_zone;
extern so_gen_t so_gencnt;
struct file;

View File

@ -49,7 +49,7 @@
#include <sys/selinfo.h>
#include <sys/uio.h>
#include <sys/acl.h>
#include <vm/vm_zone.h>
#include <vm/uma.h>
/*
* The vnode is the focus of all file activity in UNIX. There is a
@ -303,7 +303,7 @@ extern int vttoif_tab[];
*/
extern struct vnode *rootvnode; /* root (i.e. "/") vnode */
extern int desiredvnodes; /* number of vnodes desired */
extern vm_zone_t namei_zone;
extern uma_zone_t namei_zone;
extern int prtactive; /* nonzero to call vprint() */
extern struct vattr va_null; /* predefined null vattr structure */
extern int vfs_ioopt;