/*
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD$
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

/* Poison value written into the second word of a free item (INVARIANTS). */
#define ZENTRY_FREE	(void*)0x12342378

/* Item sizes are rounded up to a multiple of this many bytes. */
#define ZONE_ROUNDING	32
/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually perf degradation) occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit, zbootinit are the initialization routines.
 * zalloc, zfree, are the allocation/free routines.
 */
|
2001-01-22 07:01:50 +00:00
|
|
|
/*
|
|
|
|
* Subsystem lock. Never grab it while holding a zone lock.
|
|
|
|
*/
|
|
|
|
static struct mtx zone_mtx;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Singly-linked list of zones, for book-keeping purposes
|
|
|
|
*/
|
|
|
|
static SLIST_HEAD(vm_zone_list, vm_zone) zlist;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Statistics
|
|
|
|
*/
|
|
|
|
static int zone_kmem_pages; /* Number of interrupt-safe pages allocated */
|
|
|
|
static int zone_kern_pages; /* Number of KVA pages allocated */
|
|
|
|
static int zone_kmem_kvaspace; /* Number of non-intsafe pages allocated */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Subsystem initialization, called from vm_mem_init()
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
vm_zone_init(void)
|
|
|
|
{
|
|
|
|
mtx_init(&zone_mtx, "zone subsystem", MTX_DEF);
|
|
|
|
SLIST_INIT(&zlist);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Second-stage subsystem initialization; currently a placeholder.
 */
void
vm_zone_init2(void)
{
	/*
	 * LATER: traverse zlist looking for partially initialized
	 * LATER: zones and finish initializing them.
	 */
}
1997-08-06 04:58:05 +00:00
|
|
|
|
1997-08-05 22:24:31 +00:00
|
|
|
/*
|
|
|
|
* Create a zone, but don't allocate the zone structure. If the
|
|
|
|
* zone had been previously created by the zone boot code, initialize
|
|
|
|
* various parts of the zone code.
|
|
|
|
*
|
|
|
|
* If waits are not allowed during allocation (e.g. during interrupt
|
|
|
|
* code), a-priori allocate the kernel virtual space, and allocate
|
|
|
|
* only pages when needed.
|
|
|
|
*
|
|
|
|
* Arguments:
|
|
|
|
* z pointer to zone structure.
|
|
|
|
* obj pointer to VM object (opt).
|
|
|
|
* name name of zone.
|
|
|
|
* size size of zone entries.
|
|
|
|
* nentries number of zone entries allocated (only ZONE_INTERRUPT.)
|
1997-09-21 11:41:12 +00:00
|
|
|
* flags ZONE_INTERRUPT -- items can be allocated at interrupt time.
|
1997-08-05 22:24:31 +00:00
|
|
|
* zalloc number of pages allocated when memory is needed.
|
|
|
|
*
|
|
|
|
* Note that when using ZONE_INTERRUPT, the size of the zone is limited
|
|
|
|
* by the nentries argument. The size of the memory allocatable is
|
|
|
|
* unlimited if ZONE_INTERRUPT is not set.
|
|
|
|
*
|
|
|
|
*/
|
1997-08-05 00:07:31 +00:00
|
|
|
int
|
1997-08-05 22:24:31 +00:00
|
|
|
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
|
1997-09-21 11:41:12 +00:00
|
|
|
int nentries, int flags, int zalloc)
|
|
|
|
{
|
2000-12-13 10:01:00 +00:00
|
|
|
int totsize, oldzflags;
|
1997-08-05 00:07:31 +00:00
|
|
|
|
2001-07-04 16:20:28 +00:00
|
|
|
GIANT_REQUIRED;
|
|
|
|
|
2000-12-13 10:01:00 +00:00
|
|
|
oldzflags = z->zflags;
|
1997-08-05 00:07:31 +00:00
|
|
|
if ((z->zflags & ZONE_BOOT) == 0) {
|
1997-12-14 05:17:44 +00:00
|
|
|
z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
|
1997-08-05 00:07:31 +00:00
|
|
|
z->zfreecnt = 0;
|
1997-08-06 04:58:05 +00:00
|
|
|
z->ztotal = 0;
|
|
|
|
z->zmax = 0;
|
1997-08-05 00:07:31 +00:00
|
|
|
z->zname = name;
|
1997-08-07 03:52:55 +00:00
|
|
|
z->znalloc = 0;
|
1997-09-21 04:24:27 +00:00
|
|
|
z->zitems = NULL;
|
1997-08-05 00:07:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
z->zflags |= flags;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we cannot wait, allocate KVA space up front, and we will fill
|
|
|
|
* in pages as needed.
|
|
|
|
*/
|
1997-08-05 22:24:31 +00:00
|
|
|
if (z->zflags & ZONE_INTERRUPT) {
|
1997-08-05 00:07:31 +00:00
|
|
|
totsize = round_page(z->zsize * nentries);
|
2001-01-23 03:40:27 +00:00
|
|
|
atomic_add_int(&zone_kmem_kvaspace, totsize);
|
1997-08-05 00:07:31 +00:00
|
|
|
z->zkva = kmem_alloc_pageable(kernel_map, totsize);
|
2001-01-22 07:01:50 +00:00
|
|
|
if (z->zkva == 0)
|
1997-08-05 00:07:31 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
z->zpagemax = totsize / PAGE_SIZE;
|
|
|
|
if (obj == NULL) {
|
|
|
|
z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
|
|
|
|
} else {
|
|
|
|
z->zobj = obj;
|
|
|
|
_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
|
|
|
|
}
|
1997-08-05 22:24:31 +00:00
|
|
|
z->zallocflag = VM_ALLOC_INTERRUPT;
|
1997-08-06 04:58:05 +00:00
|
|
|
z->zmax += nentries;
|
1997-08-05 22:24:31 +00:00
|
|
|
} else {
|
|
|
|
z->zallocflag = VM_ALLOC_SYSTEM;
|
1997-08-06 04:58:05 +00:00
|
|
|
z->zmax = 0;
|
1997-08-05 00:07:31 +00:00
|
|
|
}
|
|
|
|
|
1997-08-06 04:58:05 +00:00
|
|
|
|
1997-09-21 11:41:12 +00:00
|
|
|
if (z->zsize > PAGE_SIZE)
|
1997-08-05 00:07:31 +00:00
|
|
|
z->zfreemin = 1;
|
|
|
|
else
|
|
|
|
z->zfreemin = PAGE_SIZE / z->zsize;
|
|
|
|
|
|
|
|
z->zpagecount = 0;
|
|
|
|
if (zalloc)
|
|
|
|
z->zalloc = zalloc;
|
|
|
|
else
|
|
|
|
z->zalloc = 1;
|
|
|
|
|
2001-01-22 07:01:50 +00:00
|
|
|
/* our zone is good and ready, add it to the list */
|
|
|
|
if ((z->zflags & ZONE_BOOT) == 0) {
|
|
|
|
mtx_init(&(z)->zmtx, "zone", MTX_DEF);
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&zone_mtx);
|
2001-01-22 07:01:50 +00:00
|
|
|
SLIST_INSERT_HEAD(&zlist, z, zent);
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&zone_mtx);
|
2001-01-22 07:01:50 +00:00
|
|
|
}
|
|
|
|
|
1997-08-05 00:07:31 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
1997-08-05 22:24:31 +00:00
|
|
|
/*
|
|
|
|
* Subroutine same as zinitna, except zone data structure is allocated
|
|
|
|
* automatically by malloc. This routine should normally be used, except
|
|
|
|
* in certain tricky startup conditions in the VM system -- then
|
|
|
|
* zbootinit and zinitna can be used. Zinit is the standard zone
|
|
|
|
* initialization call.
|
|
|
|
*/
|
1997-08-05 00:07:31 +00:00
|
|
|
vm_zone_t
|
1997-09-21 11:41:12 +00:00
|
|
|
zinit(char *name, int size, int nentries, int flags, int zalloc)
|
|
|
|
{
|
1997-08-05 00:07:31 +00:00
|
|
|
vm_zone_t z;
|
1997-09-21 11:41:12 +00:00
|
|
|
|
2000-12-13 10:01:00 +00:00
|
|
|
z = (vm_zone_t) malloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT | M_ZERO);
|
1997-08-05 00:07:31 +00:00
|
|
|
if (z == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
1997-08-05 22:24:31 +00:00
|
|
|
if (zinitna(z, NULL, name, size, nentries, flags, zalloc) == 0) {
|
1997-08-05 00:07:31 +00:00
|
|
|
free(z, M_ZONE);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return z;
|
|
|
|
}
|
|
|
|
|
1997-08-05 22:24:31 +00:00
|
|
|
/*
|
2001-01-22 07:01:50 +00:00
|
|
|
* Initialize a zone before the system is fully up.
|
|
|
|
*
|
|
|
|
* We can't rely on being able to allocate items dynamically, so we
|
|
|
|
* kickstart the zone with a number of static items provided by the
|
|
|
|
* caller.
|
|
|
|
*
|
|
|
|
* This routine should only be called before full VM startup.
|
1997-08-05 22:24:31 +00:00
|
|
|
*/
|
1997-08-05 00:07:31 +00:00
|
|
|
void
|
1997-09-21 11:41:12 +00:00
|
|
|
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
|
|
|
|
{
|
1997-08-05 00:07:31 +00:00
|
|
|
int i;
|
|
|
|
|
|
|
|
z->zname = name;
|
|
|
|
z->zsize = size;
|
|
|
|
z->zpagemax = 0;
|
|
|
|
z->zobj = NULL;
|
|
|
|
z->zflags = ZONE_BOOT;
|
|
|
|
z->zfreemin = 0;
|
|
|
|
z->zallocflag = 0;
|
|
|
|
z->zpagecount = 0;
|
|
|
|
z->zalloc = 0;
|
1997-08-07 03:52:55 +00:00
|
|
|
z->znalloc = 0;
|
2001-01-22 07:01:50 +00:00
|
|
|
mtx_init(&(z)->zmtx, "zone", MTX_DEF);
|
1997-08-05 00:07:31 +00:00
|
|
|
|
VM level code cleanups.
1) Start using TSM.
Struct procs continue to point to upages structure, after being freed.
Struct vmspace continues to point to pte object and kva space for kstack.
u_map is now superfluous.
2) vm_map's don't need to be reference counted. They always exist either
in the kernel or in a vmspace. The vmspaces are managed by reference
counts.
3) Remove the "wired" vm_map nonsense.
4) No need to keep a cache of kernel stack kva's.
5) Get rid of strange looking ++var, and change to var++.
6) Change more data structures to use our "zone" allocator. Added
struct proc, struct vmspace and struct vnode. This saves a significant
amount of kva space and physical memory. Additionally, this enables
TSM for the zone managed memory.
7) Keep ioopt disabled for now.
8) Remove the now bogus "single use" map concept.
9) Use generation counts or id's for data structures residing in TSM, where
it allows us to avoid unneeded restart overhead during traversals, where
blocking might occur.
10) Account better for memory deficits, so the pageout daemon will be able
to make enough memory available (experimental.)
11) Fix some vnode locking problems. (From Tor, I think.)
12) Add a check in ufs_lookup, to avoid lots of unneeded calls to bcmp.
(experimental.)
13) Significantly shrink, cleanup, and make slightly faster the vm_fault.c
code. Use generation counts, get rid of unneded collpase operations,
and clean up the cluster code.
14) Make vm_zone more suitable for TSM.
This commit is partially as a result of discussions and contributions from
other people, including DG, Tor Egge, PHK, and probably others that I
have forgotten to attribute (so let me know, if I forgot.)
This is not the infamous, final cleanup of the vnode stuff, but a necessary
step. Vnode mgmt should be correct, but things might still change, and
there is still some missing stuff (like ioopt, and physical backing of
non-merged cache files, debugging of layering concepts.)
1998-01-22 17:30:44 +00:00
|
|
|
bzero(item, nitems * z->zsize);
|
1997-09-21 04:24:27 +00:00
|
|
|
z->zitems = NULL;
|
1997-08-05 00:07:31 +00:00
|
|
|
for (i = 0; i < nitems; i++) {
|
1997-09-21 04:24:27 +00:00
|
|
|
((void **) item)[0] = z->zitems;
|
1999-01-10 01:58:29 +00:00
|
|
|
#ifdef INVARIANTS
|
2001-01-22 07:01:50 +00:00
|
|
|
((void **) item)[1] = ZENTRY_FREE;
|
1997-09-21 04:24:27 +00:00
|
|
|
#endif
|
1997-08-05 00:07:31 +00:00
|
|
|
z->zitems = item;
|
|
|
|
(char *) item += z->zsize;
|
|
|
|
}
|
1997-08-06 04:58:05 +00:00
|
|
|
z->zfreecnt = nitems;
|
|
|
|
z->zmax = nitems;
|
|
|
|
z->ztotal = nitems;
|
|
|
|
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&zone_mtx);
|
2001-01-22 07:01:50 +00:00
|
|
|
SLIST_INSERT_HEAD(&zlist, z, zent);
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&zone_mtx);
|
1997-08-05 00:07:31 +00:00
|
|
|
}
|
|
|
|
|
2001-08-04 20:17:05 +00:00
|
|
|
/*
|
|
|
|
* Destroy a zone, freeing the allocated memory.
|
|
|
|
* This does not do any locking for the zone; make sure it is not used
|
|
|
|
* any more before calling. All zalloc()'ated memory in the zone must have
|
|
|
|
* been zfree()'d.
|
|
|
|
* zdestroy() may not be used with zbootinit()'ed zones.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
zdestroy(vm_zone_t z)
|
|
|
|
{
|
|
|
|
int i, nitems, nbytes;
|
|
|
|
void *item, *min, **itp;
|
|
|
|
vm_map_t map;
|
|
|
|
vm_map_entry_t entry;
|
|
|
|
vm_object_t obj;
|
|
|
|
vm_pindex_t pindex;
|
|
|
|
vm_prot_t prot;
|
|
|
|
boolean_t wired;
|
|
|
|
|
|
|
|
GIANT_REQUIRED;
|
|
|
|
KASSERT(z != NULL, ("invalid zone"));
|
|
|
|
/*
|
|
|
|
* This is needed, or the algorithm used for non-interrupt zones will
|
|
|
|
* blow up badly.
|
|
|
|
*/
|
|
|
|
KASSERT(z->ztotal == z->zfreecnt,
|
|
|
|
("zdestroy() used with an active zone"));
|
|
|
|
KASSERT((z->zflags & ZONE_BOOT) == 0,
|
2001-08-05 03:55:02 +00:00
|
|
|
("zdestroy() used with a zbootinit()'ed zone"));
|
2001-08-04 20:17:05 +00:00
|
|
|
|
|
|
|
if (z->zflags & ZONE_INTERRUPT) {
|
|
|
|
kmem_free(kernel_map, z->zkva, z->zpagemax * PAGE_SIZE);
|
|
|
|
vm_object_deallocate(z->zobj);
|
|
|
|
atomic_subtract_int(&zone_kmem_kvaspace,
|
|
|
|
z->zpagemax * PAGE_SIZE);
|
|
|
|
atomic_subtract_int(&zone_kmem_pages,
|
|
|
|
z->zpagecount);
|
|
|
|
cnt.v_wire_count -= z->zpagecount;
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* This is evil h0h0 magic:
|
|
|
|
* The items may be in z->zitems in a random oder; we have to
|
|
|
|
* free the start of an allocated area, but do not want to save
|
|
|
|
* extra information. Additionally, we may not access items that
|
|
|
|
* were in a freed area.
|
|
|
|
* This is achieved in the following way: the smallest address
|
|
|
|
* is selected, and, after removing all items that are in a
|
|
|
|
* range of z->zalloc * PAGE_SIZE (one allocation unit) from
|
|
|
|
* it, kmem_free is called on it (since it is the smallest one,
|
|
|
|
* it must be the start of an area). This is repeated until all
|
|
|
|
* items are gone.
|
|
|
|
*/
|
|
|
|
nbytes = z->zalloc * PAGE_SIZE;
|
|
|
|
nitems = nbytes / z->zsize;
|
|
|
|
while (z->zitems != NULL) {
|
|
|
|
/* Find minimal element. */
|
|
|
|
item = min = z->zitems;
|
|
|
|
while (item != NULL) {
|
|
|
|
if (item < min)
|
|
|
|
min = item;
|
|
|
|
item = ((void **)item)[0];
|
|
|
|
}
|
|
|
|
/* Free. */
|
|
|
|
itp = &z->zitems;
|
|
|
|
i = 0;
|
|
|
|
while (*itp != NULL && i < nitems) {
|
|
|
|
if ((char *)*itp >= (char *)min &&
|
|
|
|
(char *)*itp < (char *)min + nbytes) {
|
|
|
|
*itp = ((void **)*itp)[0];
|
|
|
|
i++;
|
|
|
|
} else
|
|
|
|
itp = &((void **)*itp)[0];
|
|
|
|
}
|
|
|
|
KASSERT(i == nitems, ("zdestroy(): corrupt zone"));
|
|
|
|
/*
|
|
|
|
* We can allocate from kmem_map (kmem_malloc) or
|
|
|
|
* kernel_map (kmem_alloc).
|
|
|
|
* kmem_map is a submap of kernel_map, so we can use
|
|
|
|
* vm_map_lookup to retrieve the map we need to use.
|
|
|
|
*/
|
|
|
|
map = kernel_map;
|
|
|
|
if (vm_map_lookup(&map, (vm_offset_t)min, VM_PROT_NONE,
|
|
|
|
&entry, &obj, &pindex, &prot, &wired) !=
|
|
|
|
KERN_SUCCESS)
|
|
|
|
panic("zalloc mapping lost");
|
|
|
|
/* Need to unlock. */
|
|
|
|
vm_map_lookup_done(map, entry);
|
|
|
|
if (map == kmem_map) {
|
|
|
|
atomic_subtract_int(&zone_kmem_pages,
|
|
|
|
z->zalloc);
|
|
|
|
} else if (map == kernel_map) {
|
|
|
|
atomic_subtract_int(&zone_kern_pages,
|
|
|
|
z->zalloc);
|
|
|
|
} else
|
|
|
|
panic("zdestroy(): bad map");
|
|
|
|
kmem_free(map, (vm_offset_t)min, nbytes);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
mtx_lock(&zone_mtx);
|
|
|
|
SLIST_REMOVE(&zlist, z, vm_zone, zent);
|
|
|
|
mtx_unlock(&zone_mtx);
|
|
|
|
mtx_destroy(&z->zmtx);
|
|
|
|
free(z, M_ZONE);
|
|
|
|
}
|
|
|
|
|
1997-08-05 22:24:31 +00:00
|
|
|
/*
 * Grow the specified zone to accommodate more items.
 */
|
2001-01-21 22:23:11 +00:00
|
|
|
static void *
|
1997-09-21 11:41:12 +00:00
|
|
|
_zget(vm_zone_t z)
|
|
|
|
{
|
1997-08-05 00:07:31 +00:00
|
|
|
int i;
|
|
|
|
vm_page_t m;
|
1997-09-21 04:24:27 +00:00
|
|
|
int nitems, nbytes;
|
1997-08-05 22:24:31 +00:00
|
|
|
void *item;
|
1997-08-05 00:07:31 +00:00
|
|
|
|
2001-01-22 07:01:50 +00:00
|
|
|
KASSERT(z != NULL, ("invalid zone"));
|
1997-09-21 04:24:27 +00:00
|
|
|
|
1997-08-05 22:24:31 +00:00
|
|
|
if (z->zflags & ZONE_INTERRUPT) {
|
2001-11-17 00:40:48 +00:00
|
|
|
nbytes = z->zpagecount * PAGE_SIZE;
|
|
|
|
nbytes -= nbytes % z->zsize;
|
|
|
|
item = (char *) z->zkva + nbytes;
|
1997-09-21 11:41:12 +00:00
|
|
|
for (i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax));
|
|
|
|
i++) {
|
VM level code cleanups.
1) Start using TSM.
Struct procs continue to point to upages structure, after being freed.
Struct vmspace continues to point to pte object and kva space for kstack.
u_map is now superfluous.
2) vm_map's don't need to be reference counted. They always exist either
in the kernel or in a vmspace. The vmspaces are managed by reference
counts.
3) Remove the "wired" vm_map nonsense.
4) No need to keep a cache of kernel stack kva's.
5) Get rid of strange looking ++var, and change to var++.
6) Change more data structures to use our "zone" allocator. Added
struct proc, struct vmspace and struct vnode. This saves a significant
amount of kva space and physical memory. Additionally, this enables
TSM for the zone managed memory.
7) Keep ioopt disabled for now.
8) Remove the now bogus "single use" map concept.
9) Use generation counts or id's for data structures residing in TSM, where
it allows us to avoid unneeded restart overhead during traversals, where
blocking might occur.
10) Account better for memory deficits, so the pageout daemon will be able
to make enough memory available (experimental.)
11) Fix some vnode locking problems. (From Tor, I think.)
12) Add a check in ufs_lookup, to avoid lots of unneeded calls to bcmp.
(experimental.)
13) Significantly shrink, cleanup, and make slightly faster the vm_fault.c
code. Use generation counts, get rid of unneded collpase operations,
and clean up the cluster code.
14) Make vm_zone more suitable for TSM.
This commit is partially as a result of discussions and contributions from
other people, including DG, Tor Egge, PHK, and probably others that I
have forgotten to attribute (so let me know, if I forgot.)
This is not the infamous, final cleanup of the vnode stuff, but a necessary
step. Vnode mgmt should be correct, but things might still change, and
there is still some missing stuff (like ioopt, and physical backing of
non-merged cache files, debugging of layering concepts.)
1998-01-22 17:30:44 +00:00
|
|
|
vm_offset_t zkva;
|
1997-08-05 00:07:31 +00:00
|
|
|
|
1997-09-21 11:41:12 +00:00
|
|
|
m = vm_page_alloc(z->zobj, z->zpagecount,
|
|
|
|
z->zallocflag);
|
|
|
|
if (m == NULL)
|
1997-08-05 00:07:31 +00:00
|
|
|
break;
|
|
|
|
|
VM level code cleanups.
1) Start using TSM.
Struct procs continue to point to upages structure, after being freed.
Struct vmspace continues to point to pte object and kva space for kstack.
u_map is now superfluous.
2) vm_map's don't need to be reference counted. They always exist either
in the kernel or in a vmspace. The vmspaces are managed by reference
counts.
3) Remove the "wired" vm_map nonsense.
4) No need to keep a cache of kernel stack kva's.
5) Get rid of strange looking ++var, and change to var++.
6) Change more data structures to use our "zone" allocator. Added
struct proc, struct vmspace and struct vnode. This saves a significant
amount of kva space and physical memory. Additionally, this enables
TSM for the zone managed memory.
7) Keep ioopt disabled for now.
8) Remove the now bogus "single use" map concept.
9) Use generation counts or id's for data structures residing in TSM, where
it allows us to avoid unneeded restart overhead during traversals, where
blocking might occur.
10) Account better for memory deficits, so the pageout daemon will be able
to make enough memory available (experimental.)
11) Fix some vnode locking problems. (From Tor, I think.)
12) Add a check in ufs_lookup, to avoid lots of unneeded calls to bcmp.
(experimental.)
13) Significantly shrink, cleanup, and make slightly faster the vm_fault.c
code. Use generation counts, get rid of unneeded collapse operations,
and clean up the cluster code.
14) Make vm_zone more suitable for TSM.
This commit is partially as a result of discussions and contributions from
other people, including DG, Tor Egge, PHK, and probably others that I
have forgotten to attribute (so let me know, if I forgot.)
This is not the infamous, final cleanup of the vnode stuff, but a necessary
step. Vnode mgmt should be correct, but things might still change, and
there is still some missing stuff (like ioopt, and physical backing of
non-merged cache files, debugging of layering concepts.)
1998-01-22 17:30:44 +00:00
|
|
|
zkva = z->zkva + z->zpagecount * PAGE_SIZE;
|
|
|
|
pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
|
|
|
|
bzero((caddr_t) zkva, PAGE_SIZE);
|
1997-08-05 22:24:31 +00:00
|
|
|
z->zpagecount++;
|
2001-01-23 03:40:27 +00:00
|
|
|
atomic_add_int(&zone_kmem_pages, 1);
|
1999-09-30 07:35:50 +00:00
|
|
|
cnt.v_wire_count++;
|
1997-08-05 00:07:31 +00:00
|
|
|
}
|
2001-11-17 00:40:48 +00:00
|
|
|
nitems = ((z->zpagecount * PAGE_SIZE) - nbytes) / z->zsize;
|
1997-08-05 00:07:31 +00:00
|
|
|
} else {
|
2001-08-04 20:17:05 +00:00
|
|
|
/* Please check zdestroy() when changing this! */
|
1997-09-21 04:24:27 +00:00
|
|
|
nbytes = z->zalloc * PAGE_SIZE;
|
1997-12-15 05:16:09 +00:00
|
|
|
|
1997-08-05 00:07:31 +00:00
|
|
|
/*
|
1997-12-15 05:16:09 +00:00
|
|
|
* Check to see if the kernel map is already locked. We could allow
|
|
|
|
* for recursive locks, but that eliminates a valuable debugging
|
|
|
|
* mechanism, and opens up the kernel map for potential corruption
|
|
|
|
* by inconsistent data structure manipulation. We could also use
|
|
|
|
* the interrupt allocation mechanism, but that has size limitations.
|
|
|
|
* Luckily, we have kmem_map that is a submap of kernel map available
|
|
|
|
* for memory allocation, and manipulation of that map doesn't affect
|
|
|
|
* the kernel map structures themselves.
|
|
|
|
*
|
|
|
|
* We can wait, so just do normal map allocation in the appropriate
|
|
|
|
* map.
|
1997-08-05 00:07:31 +00:00
|
|
|
*/
|
2001-05-19 01:28:09 +00:00
|
|
|
mtx_unlock(&z->zmtx);
|
1999-12-11 16:13:02 +00:00
|
|
|
if (lockstatus(&kernel_map->lock, NULL)) {
|
1997-12-15 05:16:09 +00:00
|
|
|
item = (void *) kmem_malloc(kmem_map, nbytes, M_WAITOK);
|
2000-04-04 21:00:39 +00:00
|
|
|
if (item != NULL)
|
2001-01-23 03:40:27 +00:00
|
|
|
atomic_add_int(&zone_kmem_pages, z->zalloc);
|
1997-12-15 05:16:09 +00:00
|
|
|
} else {
|
|
|
|
item = (void *) kmem_alloc(kernel_map, nbytes);
|
2000-04-04 21:00:39 +00:00
|
|
|
if (item != NULL)
|
2001-01-23 03:40:27 +00:00
|
|
|
atomic_add_int(&zone_kern_pages, z->zalloc);
|
2000-04-04 21:00:39 +00:00
|
|
|
}
|
|
|
|
if (item != NULL) {
|
|
|
|
bzero(item, nbytes);
|
|
|
|
} else {
|
|
|
|
nbytes = 0;
|
1997-12-15 05:16:09 +00:00
|
|
|
}
|
1997-09-21 04:24:27 +00:00
|
|
|
nitems = nbytes / z->zsize;
|
2001-05-19 01:28:09 +00:00
|
|
|
mtx_lock(&z->zmtx);
|
1997-08-05 00:07:31 +00:00
|
|
|
}
|
1997-08-06 04:58:05 +00:00
|
|
|
z->ztotal += nitems;
|
1997-08-05 00:07:31 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Save one for immediate allocation
|
|
|
|
*/
|
1997-08-05 22:24:31 +00:00
|
|
|
if (nitems != 0) {
|
|
|
|
nitems -= 1;
|
|
|
|
for (i = 0; i < nitems; i++) {
|
1997-09-21 04:24:27 +00:00
|
|
|
((void **) item)[0] = z->zitems;
|
1999-01-10 01:58:29 +00:00
|
|
|
#ifdef INVARIANTS
|
2001-01-22 07:01:50 +00:00
|
|
|
((void **) item)[1] = ZENTRY_FREE;
|
1997-09-21 04:24:27 +00:00
|
|
|
#endif
|
1997-08-05 22:24:31 +00:00
|
|
|
z->zitems = item;
|
|
|
|
(char *) item += z->zsize;
|
|
|
|
}
|
|
|
|
z->zfreecnt += nitems;
|
2000-06-05 06:34:41 +00:00
|
|
|
z->znalloc++;
|
1997-08-05 22:24:31 +00:00
|
|
|
} else if (z->zfreecnt > 0) {
|
|
|
|
item = z->zitems;
|
1997-09-21 04:24:27 +00:00
|
|
|
z->zitems = ((void **) item)[0];
|
1999-01-10 01:58:29 +00:00
|
|
|
#ifdef INVARIANTS
|
2001-01-22 07:01:50 +00:00
|
|
|
KASSERT(((void **) item)[1] == ZENTRY_FREE,
|
|
|
|
("item is not free"));
|
1997-09-21 04:24:27 +00:00
|
|
|
((void **) item)[1] = 0;
|
|
|
|
#endif
|
1997-08-05 22:24:31 +00:00
|
|
|
z->zfreecnt--;
|
2000-06-05 06:34:41 +00:00
|
|
|
z->znalloc++;
|
1997-08-05 22:24:31 +00:00
|
|
|
} else {
|
|
|
|
item = NULL;
|
1997-08-05 00:07:31 +00:00
|
|
|
}
|
1997-08-05 22:24:31 +00:00
|
|
|
|
2001-01-22 07:01:50 +00:00
|
|
|
mtx_assert(&z->zmtx, MA_OWNED);
|
1997-08-05 00:07:31 +00:00
|
|
|
return item;
|
|
|
|
}
|
|
|
|
|
2001-01-22 07:01:50 +00:00
|
|
|
/*
|
|
|
|
* Allocates an item from the specified zone.
|
|
|
|
*/
|
|
|
|
void *
|
|
|
|
zalloc(vm_zone_t z)
|
1997-08-06 04:58:05 +00:00
|
|
|
{
|
2001-01-22 07:01:50 +00:00
|
|
|
void *item;
|
1997-09-21 11:41:12 +00:00
|
|
|
|
2001-01-22 07:01:50 +00:00
|
|
|
KASSERT(z != NULL, ("invalid zone"));
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&z->zmtx);
|
2001-01-22 07:01:50 +00:00
|
|
|
|
|
|
|
if (z->zfreecnt <= z->zfreemin) {
|
|
|
|
item = _zget(z);
|
2001-05-19 01:28:09 +00:00
|
|
|
goto out;
|
2001-01-22 07:01:50 +00:00
|
|
|
}
|
1997-09-21 11:41:12 +00:00
|
|
|
|
2001-01-22 07:01:50 +00:00
|
|
|
item = z->zitems;
|
|
|
|
z->zitems = ((void **) item)[0];
|
|
|
|
#ifdef INVARIANTS
|
|
|
|
KASSERT(((void **) item)[1] == ZENTRY_FREE,
|
|
|
|
("item is not free"));
|
|
|
|
((void **) item)[1] = 0;
|
|
|
|
#endif
|
1997-09-21 11:41:12 +00:00
|
|
|
|
2001-01-22 07:01:50 +00:00
|
|
|
z->zfreecnt--;
|
|
|
|
z->znalloc++;
|
2001-05-19 01:28:09 +00:00
|
|
|
|
|
|
|
out:
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&z->zmtx);
|
2001-01-22 07:01:50 +00:00
|
|
|
return item;
|
1997-08-06 04:58:05 +00:00
|
|
|
}
|
|
|
|
|
2001-01-22 07:01:50 +00:00
|
|
|
/*
|
|
|
|
* Frees an item back to the specified zone.
|
|
|
|
*/
|
1997-09-21 04:24:27 +00:00
|
|
|
void
|
2001-01-22 07:01:50 +00:00
|
|
|
zfree(vm_zone_t z, void *item)
|
|
|
|
{
|
|
|
|
KASSERT(z != NULL, ("invalid zone"));
|
|
|
|
KASSERT(item != NULL, ("invalid item"));
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&z->zmtx);
|
2001-01-22 07:01:50 +00:00
|
|
|
|
|
|
|
((void **) item)[0] = z->zitems;
|
|
|
|
#ifdef INVARIANTS
|
|
|
|
KASSERT(((void **) item)[1] != ZENTRY_FREE,
|
|
|
|
("item is already free"));
|
|
|
|
((void **) item)[1] = (void *) ZENTRY_FREE;
|
|
|
|
#endif
|
|
|
|
z->zitems = item;
|
|
|
|
z->zfreecnt++;
|
|
|
|
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&z->zmtx);
|
2001-01-22 07:01:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Sysctl handler for vm.zone
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
|
1997-09-21 11:41:12 +00:00
|
|
|
{
|
2001-04-27 22:24:45 +00:00
|
|
|
int error, len, cnt;
|
|
|
|
const int linesize = 128; /* conservative */
|
|
|
|
char *tmpbuf, *offset;
|
2001-01-22 07:01:50 +00:00
|
|
|
vm_zone_t z;
|
2001-02-22 14:44:39 +00:00
|
|
|
char *p;
|
2001-01-22 07:01:50 +00:00
|
|
|
|
2001-04-27 22:24:45 +00:00
|
|
|
cnt = 0;
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&zone_mtx);
|
2001-04-27 22:24:45 +00:00
|
|
|
SLIST_FOREACH(z, &zlist, zent)
|
|
|
|
cnt++;
|
|
|
|
mtx_unlock(&zone_mtx);
|
|
|
|
MALLOC(tmpbuf, char *, (cnt == 0 ? 1 : cnt) * linesize,
|
|
|
|
M_TEMP, M_WAITOK);
|
|
|
|
len = snprintf(tmpbuf, linesize,
|
2001-01-22 07:01:50 +00:00
|
|
|
"\nITEM SIZE LIMIT USED FREE REQUESTS\n\n");
|
2001-04-27 22:24:45 +00:00
|
|
|
if (cnt == 0)
|
|
|
|
tmpbuf[len - 1] = '\0';
|
|
|
|
error = SYSCTL_OUT(req, tmpbuf, cnt == 0 ? len-1 : len);
|
|
|
|
if (error || cnt == 0)
|
|
|
|
goto out;
|
|
|
|
offset = tmpbuf;
|
|
|
|
mtx_lock(&zone_mtx);
|
2001-01-22 07:01:50 +00:00
|
|
|
SLIST_FOREACH(z, &zlist, zent) {
|
2001-04-27 22:24:45 +00:00
|
|
|
if (cnt == 0) /* list may have changed size */
|
|
|
|
break;
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&z->zmtx);
|
2001-04-27 22:24:45 +00:00
|
|
|
len = snprintf(offset, linesize,
|
2001-02-22 14:44:39 +00:00
|
|
|
"%-12.12s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
|
2001-01-22 07:01:50 +00:00
|
|
|
z->zname, z->zsize, z->zmax, (z->ztotal - z->zfreecnt),
|
|
|
|
z->zfreecnt, z->znalloc);
|
2001-04-27 22:24:45 +00:00
|
|
|
mtx_unlock(&z->zmtx);
|
|
|
|
for (p = offset + 12; p > offset && *p == ' '; --p)
|
2001-02-22 14:44:39 +00:00
|
|
|
/* nothing */ ;
|
|
|
|
p[1] = ':';
|
2001-04-27 22:24:45 +00:00
|
|
|
cnt--;
|
|
|
|
offset += len;
|
1997-09-21 04:24:27 +00:00
|
|
|
}
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&zone_mtx);
|
2001-07-09 03:37:33 +00:00
|
|
|
*offset++ = '\0';
|
2001-04-27 22:24:45 +00:00
|
|
|
error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf);
|
|
|
|
out:
|
|
|
|
FREE(tmpbuf, M_TEMP);
|
2001-01-22 07:01:50 +00:00
|
|
|
return (error);
|
1997-09-21 04:24:27 +00:00
|
|
|
}
|
|
|
|
|
2001-01-22 07:01:50 +00:00
|
|
|
SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
|
|
|
|
NULL, 0, sysctl_vm_zone, "A", "Zone Info");
|
1998-02-23 07:42:43 +00:00
|
|
|
|
2001-01-22 07:01:50 +00:00
|
|
|
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages, CTLFLAG_RD, &zone_kmem_pages, 0,
|
|
|
|
"Number of interrupt safe pages allocated by zone");
|
|
|
|
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_kvaspace, CTLFLAG_RD, &zone_kmem_kvaspace, 0,
|
|
|
|
"KVA space allocated by zone");
|
|
|
|
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages, CTLFLAG_RD, &zone_kern_pages, 0,
|
|
|
|
"Number of non-interrupt safe pages allocated by zone");
|