The swap bitmap allocator was not calculating the bitmap size properly
in the face of non-stripe-aligned swap areas.  The bug could cause a
panic during boot.

Refuse to configure a swap area that is too large (67 GB or so)

Properly document the power-of-2 requirement for SWB_NPAGES.

The patch is slightly different than the one Tor enclosed in the P.R.,
but accomplishes the same thing.
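
For illustration only (this is not part of the commit), a minimal userland
sketch of the new per-device size check follows; the BLIST_META_RADIX and
swap-device-count values are assumptions chosen for the example, not taken
from this diff:

#include <stdio.h>

/* Illustrative values; in the kernel BLIST_META_RADIX comes from the blist
 * headers and nswdev is the configured number of interleaved swap devices. */
#define BLIST_META_RADIX	16	/* assumed radix of the blist meta nodes */
#define NSWDEV			4	/* assumed number of swap devices */

int
main(void)
{
	/*
	 * The commit refuses any swap area whose block count would
	 * overflow the radix-tree bitmap once interleaved across
	 * NSWDEV devices.
	 */
	unsigned long maxblks = 0x40000000UL / BLIST_META_RADIX / NSWDEV;
	unsigned long nblks = 20000000UL;	/* hypothetical swap area size in blocks */

	if (nblks > maxblks)
		printf("exceeded maximum of %lu blocks per swap unit\n", maxblks);
	else
		printf("%lu blocks accepted (limit %lu)\n", nblks, maxblks);
	return (0);
}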

PR: kern/20273
Submitted by: Tor.Egge@fast.no
Matthew Dillon 2000-10-13 16:44:34 +00:00
parent 91ef813ce3
commit 64bcb9c815
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=67082
3 changed files with 21 additions and 10 deletions

View File

@@ -172,7 +172,7 @@ static void waitchainbuf(struct bio *bp, int count, int done);
 /*
  * dmmax is in page-sized chunks with the new swap system. It was
- * dev-bsized chunks in the old.
+ * dev-bsized chunks in the old. dmmax is always a power of 2.
  *
  * swap_*() routines are externally accessible. swp_*() routines are
  * internal.

View File

@@ -48,12 +48,9 @@
 #define _SWAP_PAGER_ 1
 
 /*
- * SWB_NPAGES can be set to any value from 1 to 16 pages per allocation,
- * however, due to the allocation spilling into non-swap pager backed memory,
- * suggest keeping SWB_NPAGES small (1-4). If high performance is mandatory
- * perhaps up to 8 pages might be in order????
- * Above problem has been fixed, now we support 16 pages per block. Unused
- * space is recovered by the swap pager now...
+ * SWB_NPAGES must be a power of 2. It may be set to 1, 2, 4, 8, or 16
+ * pages per allocation. We recommend you stick with the default of 8.
+ * The 16-page limit is due to the radix code (kern/subr_blist.c).
  */
 #if !defined(SWB_NPAGES)
 #define SWB_NPAGES 8
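
As a side note, the power-of-2 constraint documented above can also be
verified at build time; the following preprocessor guard is only a sketch,
not something this commit adds:

#include <stdio.h>

#if !defined(SWB_NPAGES)
#define SWB_NPAGES	8		/* same default as swap_pager.h */
#endif

/* A power of 2 has exactly one bit set, so x & (x - 1) must be zero. */
#if (SWB_NPAGES & (SWB_NPAGES - 1)) != 0 || SWB_NPAGES < 1 || SWB_NPAGES > 16
#error "SWB_NPAGES must be a power of 2 between 1 and 16"
#endif

int
main(void)
{
	printf("SWB_NPAGES = %d is a power of 2\n", SWB_NPAGES);
	return (0);
}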

View File

@@ -237,6 +237,7 @@ swaponvp(p, vp, dev, nblks)
 	register long blk;
 	swblk_t dvbase;
 	int error;
+	u_long aligned_nblks;
 
 	if (!swapdev_vp) {
 		error = getnewvnode(VT_NON, NULL, swapdev_vnodeop_p,
@@ -271,6 +272,17 @@ swaponvp(p, vp, dev, nblks)
 		(void) VOP_CLOSE(vp, FREAD | FWRITE, p->p_ucred, p);
 		return (ENXIO);
 	}
+
+	/*
+	 * If we go beyond this, we get overflows in the radix
+	 * tree bitmap code.
+	 */
+	if (nblks > 0x40000000 / BLIST_META_RADIX / nswdev) {
+		printf("exceeded maximum of %d blocks per swap unit\n",
+			0x40000000 / BLIST_META_RADIX / nswdev);
+		(void) VOP_CLOSE(vp, FREAD | FWRITE, p->p_ucred, p);
+		return (ENXIO);
+	}
 	/*
 	 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
 	 * First chop nblks off to page-align it, then convert.
@@ -288,11 +300,13 @@ swaponvp(p, vp, dev, nblks)
 	/*
 	 * nblks, nswap, and dmmax are PAGE_SIZE'd parameters now, not
-	 * DEV_BSIZE'd.
+	 * DEV_BSIZE'd. aligned_nblks is used to calculate the
+	 * size of the swap bitmap, taking into account the stripe size.
 	 */
+	aligned_nblks = (nblks + (dmmax - 1)) & ~(u_long)(dmmax - 1);
 
-	if (nblks * nswdev > nswap)
-		nswap = (nblks+1) * nswdev;
+	if (aligned_nblks * nswdev > nswap)
+		nswap = aligned_nblks * nswdev;
 
 	if (swapblist == NULL)
 		swapblist = blist_create(nswap);
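
The aligned_nblks expression is the usual round-up-by-mask trick and only
works because dmmax is a power of 2 (as the swap_pager.c comment above now
states). A small standalone sketch with hypothetical values shows how the
bitmap size changes for a non-stripe-aligned swap area:

#include <stdio.h>

int
main(void)
{
	/* Hypothetical values chosen for illustration. */
	unsigned long dmmax = 16;	/* stripe size in pages; must be a power of 2 */
	unsigned long nswdev = 4;	/* number of interleaved swap devices */
	unsigned long nblks = 1000;	/* swap area size in pages, not stripe-aligned */

	/* Round nblks up to the next multiple of the stripe size. */
	unsigned long aligned_nblks = (nblks + (dmmax - 1)) & ~(dmmax - 1);

	/* Old calculation: (nblks + 1) * nswdev = 4004, not a stripe multiple. */
	printf("aligned_nblks       = %lu\n", aligned_nblks);		/* 1008 */
	printf("nswap (new formula) = %lu\n", aligned_nblks * nswdev);	/* 4032 */
	printf("nswap (old formula) = %lu\n", (nblks + 1) * nswdev);	/* 4004 */
	return (0);
}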