- If swap metadata does not fit into the KVM, reduce the number of
  struct swblock entries by dividing the number of the entries by 2
  until the swap metadata fits (see the sketch after this list).

- Reject swapon(2) upon failure of swap_zone allocation.

This is just a temporary fix. Better solutions include:
(suggested by:	dillon)

o reserving swap in SWAP_META_PAGES chunks, and
o swapping the swblock structures themselves.
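
In outline, the fallback works as in the following condensed sketch.  This
is illustrative only: "desired_entries" is a placeholder for the value
derived from the page count, and the real code is in the
swap_pager_swap_init() and swapon() hunks below.

	n = desired_entries;
	while (n > 0 &&
	    (swap_zone = zinit("SWAPMETA", sizeof(struct swblock), n,
	    ZONE_INTERRUPT, 1)) == NULL)
		n >>= 1;	/* halve the entry count and retry */
	if (swap_zone == NULL)
		printf("WARNING: failed to init swap_zone!\n");

	/* Later, in swapon(2): refuse to add swap if the zone never came up. */
	if (swap_zone == NULL)
		return (ENOMEM);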

Reviewed by:	alfred, dillon
Seigo Tanimura 2000-12-13 10:01:00 +00:00
parent 6d43764a10
commit 21cd6e6232
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=69972
11 changed files with 75 additions and 26 deletions

View File

@@ -260,6 +260,7 @@ cpu_startup(dummy)
 	vm_size_t size = 0;
 	int firstaddr;
 	vm_offset_t minaddr;
+	int physmem_est;
 
 	if (boothowto & RB_VERBOSE)
 		bootverbose++;
@@ -328,6 +329,16 @@ cpu_startup(dummy)
 	valloc(callout, struct callout, ncallout);
 	valloc(callwheel, struct callout_tailq, callwheelsize);
 
+	/*
+	 * Discount the physical memory larger than the size of kernel_map
+	 * to avoid eating up all of KVA space.
+	 */
+	if (kernel_map->first_free == NULL) {
+		printf("Warning: no free entries in kernel_map.\n");
+		physmem_est = physmem;
+	} else
+		physmem_est = min(physmem, kernel_map->max_offset - kernel_map->min_offset);
+
 	/*
 	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
 	 * For the first 64MB of ram nominally allocate sufficient buffers to
@@ -340,10 +351,10 @@ cpu_startup(dummy)
 		int factor = 4 * BKVASIZE / PAGE_SIZE;
 
 		nbuf = 50;
-		if (physmem > 1024)
-			nbuf += min((physmem - 1024) / factor, 16384 / factor);
-		if (physmem > 16384)
-			nbuf += (physmem - 16384) * 2 / (factor * 5);
+		if (physmem_est > 1024)
+			nbuf += min((physmem_est - 1024) / factor, 16384 / factor);
+		if (physmem_est > 16384)
+			nbuf += (physmem_est - 16384) * 2 / (factor * 5);
 	}
 
 	/*

View File

@@ -260,6 +260,7 @@ cpu_startup(dummy)
 	vm_size_t size = 0;
 	int firstaddr;
 	vm_offset_t minaddr;
+	int physmem_est;
 
 	if (boothowto & RB_VERBOSE)
 		bootverbose++;
@@ -328,6 +329,16 @@ cpu_startup(dummy)
 	valloc(callout, struct callout, ncallout);
 	valloc(callwheel, struct callout_tailq, callwheelsize);
 
+	/*
+	 * Discount the physical memory larger than the size of kernel_map
+	 * to avoid eating up all of KVA space.
+	 */
+	if (kernel_map->first_free == NULL) {
+		printf("Warning: no free entries in kernel_map.\n");
+		physmem_est = physmem;
+	} else
+		physmem_est = min(physmem, kernel_map->max_offset - kernel_map->min_offset);
+
 	/*
	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
 	 * For the first 64MB of ram nominally allocate sufficient buffers to
@@ -340,10 +351,10 @@ cpu_startup(dummy)
 		int factor = 4 * BKVASIZE / PAGE_SIZE;
 
 		nbuf = 50;
-		if (physmem > 1024)
-			nbuf += min((physmem - 1024) / factor, 16384 / factor);
-		if (physmem > 16384)
-			nbuf += (physmem - 16384) * 2 / (factor * 5);
+		if (physmem_est > 1024)
+			nbuf += min((physmem_est - 1024) / factor, 16384 / factor);
+		if (physmem_est > 16384)
+			nbuf += (physmem_est - 16384) * 2 / (factor * 5);
 	}
 
 	/*

View File

@@ -46,6 +46,7 @@
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
 #include <vm/vm_pager.h>
+#include <vm/vm_zone.h>
 #include <vm/swap_pager.h>
 
 static vm_object_t default_pager_alloc __P((void *, vm_ooffset_t, vm_prot_t,

View File

@@ -90,13 +90,16 @@
 #include "opt_swap.h"
 #include <vm/vm.h>
 #include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
 #include <vm/vm_pager.h>
 #include <vm/vm_pageout.h>
+#include <vm/vm_zone.h>
 #include <vm/swap_pager.h>
 #include <vm/vm_extern.h>
-#include <vm/vm_zone.h>
 
 #define SWM_FREE	0x02	/* free, period */
 #define SWM_POP		0x04	/* pop out */
@@ -273,7 +276,7 @@ swap_pager_init()
 void
 swap_pager_swap_init()
 {
-	int n;
+	int n, n2;
 
 	/*
 	 * Number of in-transit swap bp operations.  Don't
@@ -311,15 +314,23 @@ swap_pager_swap_init()
 	 * can hold 16 pages, so this is probably overkill.
 	 */
-	n = cnt.v_page_count * 2;
+	n = min(cnt.v_page_count, (kernel_map->max_offset - kernel_map->min_offset) / PAGE_SIZE) * 2;
+	n2 = n;
 
-	swap_zone = zinit(
-		"SWAPMETA",
-		sizeof(struct swblock),
-		n,
-		ZONE_INTERRUPT,
-		1
-	);
+	while (n > 0
+		&& (swap_zone = zinit(
+			"SWAPMETA",
+			sizeof(struct swblock),
+			n,
+			ZONE_INTERRUPT,
+			1
+		)) == NULL)
+		n >>= 1;
+	if (swap_zone == NULL)
+		printf("WARNING: failed to init swap_zone!\n");
+	if (n2 != n)
+		printf("Swap zone entries reduced to %d.\n", n);
+	n2 = n;
 
 	/*
 	 * Initialize our meta-data hash table.  The swapper does not need to
@@ -330,7 +341,7 @@ swap_pager_swap_init()
 	 * swhash_mask:	hash table index mask
 	 */
-	for (n = 1; n < cnt.v_page_count / 4; n <<= 1)
+	for (n = 1; n < n2 ; n <<= 1)
 		;
 
 	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);

View File

@@ -84,6 +84,7 @@ struct swblock {
 extern struct pagerlst swap_pager_un_object_list;
 extern int swap_pager_full;
 extern struct blist *swapblist;
+extern vm_zone_t swap_zone;
 
 void swap_pager_putpages __P((vm_object_t, vm_page_t *, int, boolean_t, int *));
 boolean_t swap_pager_haspage __P((vm_object_t object, vm_pindex_t pindex, int *before, int *after));

View File

@@ -489,4 +489,3 @@ kmem_init(start, end)
 	/* ... and ending with the completion of the above `insert' */
 	vm_map_unlock(m);
 }
-

View File

@@ -86,8 +86,8 @@
 #include <vm/vm_pager.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_extern.h>
-#include <vm/swap_pager.h>
 #include <vm/vm_zone.h>
+#include <vm/swap_pager.h>
 
 /*
  * Virtual memory maps provide for the mapping, protection,

View File

@@ -84,10 +84,10 @@
 #include <vm/vm_page.h>
 #include <vm/vm_pageout.h>
 #include <vm/vm_pager.h>
+#include <vm/vm_zone.h>
 #include <vm/swap_pager.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_extern.h>
-#include <vm/vm_zone.h>
 
 static void vm_object_qcollapse __P((vm_object_t object));

View File

@@ -93,6 +93,7 @@
 #include <vm/vm_map.h>
 #include <vm/vm_pageout.h>
 #include <vm/vm_pager.h>
+#include <vm/vm_zone.h>
 #include <vm/swap_pager.h>
 #include <vm/vm_extern.h>

View File

@@ -53,6 +53,7 @@
 #include <sys/stat.h>
 #include <vm/vm.h>
 #include <vm/vm_extern.h>
+#include <vm/vm_zone.h>
 #include <vm/swap_pager.h>
 
 /*
@@ -194,6 +195,13 @@ swapon(p, uap)
 	if (error)
 		return (error);
 
+	/*
+	 * Swap metadata may not fit in the KVM if we have physical
+	 * memory of >1GB.
+	 */
+	if (swap_zone == NULL)
+		return (ENOMEM);
+
 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->name, p);
 	error = namei(&nd);
 	if (error)

View File

@@ -80,8 +80,11 @@ int
 zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
 	int nentries, int flags, int zalloc)
 {
-	int totsize;
+	int totsize, oldzflags;
+	vm_zone_t oldzlist;
 
+	oldzflags = z->zflags;
+	oldzlist = zlist;
 	if ((z->zflags & ZONE_BOOT) == 0) {
 		z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
 		simple_lock_init(&z->zlock);
@@ -112,8 +115,12 @@ zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
 		zone_kmem_kvaspace += totsize;
 
 		z->zkva = kmem_alloc_pageable(kernel_map, totsize);
-		if (z->zkva == 0)
+		if (z->zkva == 0) {
+			/* Clean up the zlist in case we messed it. */
+			if ((oldzflags & ZONE_BOOT) == 0)
+				zlist = oldzlist;
 			return 0;
+		}
 
 		z->zpagemax = totsize / PAGE_SIZE;
 		if (obj == NULL) {
@@ -156,11 +163,10 @@ zinit(char *name, int size, int nentries, int flags, int zalloc)
 {
 	vm_zone_t z;
 
-	z = (vm_zone_t) malloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
+	z = (vm_zone_t) malloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT | M_ZERO);
 	if (z == NULL)
 		return NULL;
 
-	z->zflags = 0;
 	if (zinitna(z, NULL, name, size, nentries, flags, zalloc) == 0) {
 		free(z, M_ZONE);
 		return NULL;