Fix bug related to map entry allocations where a sleep might be
attempted when allocating memory for network buffers at interrupt
time. This is due to inadequate checking for the new mcl_map. Fixed
by merging mb_map and mcl_map into a single mb_map.

Reviewed by:	wollman
commit 17dd64e0ab
parent 38e3dae8e4
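A note on the failure mode, reconstructed from the message above:
expanding a kernel submap can itself require a fresh vm_map entry,
and the VM layer special-cases the maps it knows may be used at
interrupt time so that those entry allocations never sleep. The
message suggests the new mcl_map was missing from that checking, so
a cluster allocation at interrupt time could reach a sleeping path.
A minimal sketch of such a check, using hypothetical helper names
(entry_create, pool_get_nosleep, general_alloc_may_sleep) that are
not taken from this commit:

	/*
	 * Sketch only: an allocator that special-cases known
	 * interrupt-safe maps will sleep for any map it does not
	 * recognize, which is what merging mcl_map into mb_map avoids.
	 */
	static vm_map_entry_t
	entry_create(vm_map_t map)
	{
		if (map == kmem_map || map == mb_map)	/* mcl_map absent */
			return (pool_get_nosleep());	/* safe at interrupt time */
		return (general_alloc_may_sleep());	/* may sleep: the bug */
	}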
@@ -344,16 +344,15 @@ cpu_startup(dummy)
 	 * Finally, allocate mbuf pool. Since mclrefcnt is an off-size
 	 * we use the more space efficient malloc in place of kmem_alloc.
 	 */
-	mclrefcnt = (char *)malloc(nmbclusters+PAGE_SIZE/MCLBYTES,
-	    M_MBUF, M_NOWAIT);
-	bzero(mclrefcnt, nmbclusters+PAGE_SIZE/MCLBYTES);
-	mcl_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
-	    nmbclusters * MCLBYTES, FALSE);
 	{
-		vm_size_t mb_map_size;
-		mb_map_size = nmbufs * MSIZE;
-		mb_map = kmem_suballoc(kmem_map, &minaddr, &maxaddr,
-		    round_page(mb_map_size), FALSE);
+		vm_offset_t mb_map_size;
+		mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES;
+		mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE));
+		mclrefcnt = malloc(mb_map_size / MCLBYTES, M_MBUF, M_NOWAIT);
+		bzero(mclrefcnt, mb_map_size / MCLBYTES);
+		mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
+		    mb_map_size, FALSE);
 	}
 
 	/*
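The sizing above: the merged submap must hold both the mbuf pool
(nmbufs * MSIZE) and the cluster pool (nmbclusters * MCLBYTES), and
mclrefcnt now needs one slot per MCLBYTES-sized piece of the whole
map, hence mb_map_size / MCLBYTES. Rounding up to max(MCLBYTES,
PAGE_SIZE) keeps the map a whole number of both pages and clusters.
A worked example with illustrative values; the MSIZE = 128,
MCLBYTES = 2048 and PAGE_SIZE = 4096 figures are assumptions, not
taken from this diff:

	/* nmbufs = 2048, nmbclusters = 512 (illustrative only) */
	mb_map_size = 2048 * 128 + 512 * 2048;     /* 262144 + 1048576 = 1310720 */
	mb_map_size = roundup2(mb_map_size, 4096); /* already aligned: 1310720 */
	/* mclrefcnt then holds 1310720 / 2048 = 640 reference counts */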
@@ -100,8 +100,8 @@ int extravnodes = EXTRAVNODES;	/* spare vnodes to allocate */
 #endif
 int nmbclusters = NMBCLUSTERS;
 
-/* allocate same amount of virtual address space for mbufs XXX */
-int nmbufs = NMBCLUSTERS * (MCLBYTES / MSIZE);
+/* allocate 1/4th amount of virtual address space for mbufs XXX */
+int nmbufs = NMBCLUSTERS * 4;
 
 int fscale = FSCALE;	/* kernel uses `FSCALE', user uses `fscale' */
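The arithmetic behind the updated comment, assuming the historical
MSIZE = 128 and MCLBYTES = 2048: the old value reserved as much mbuf
address space as cluster space, while the new one reserves a quarter
of it, which is what "1/4th" refers to.

	/* old: nmbufs * MSIZE = NMBCLUSTERS * (2048 / 128) * 128
	 *                     = NMBCLUSTERS * 2048  (equal to cluster VA)
	 * new: nmbufs * MSIZE = NMBCLUSTERS * 4 * 128
	 *                     = NMBCLUSTERS * 512   (1/4th of cluster VA) */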
@@ -150,11 +150,11 @@ m_clalloc(ncl, nowait)
 	 * to get any more (nothing is ever freed back to the
 	 * map).
 	 */
-	if (mcl_map_full)
+	if (mb_map_full)
 		return (0);
 
 	npg = ncl;
-	p = (caddr_t)kmem_malloc(mcl_map, ctob(npg),
+	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
 				 nowait ? M_NOWAIT : M_WAITOK);
 	/*
 	 * Either the map is now full, or this is nowait and there
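With the maps merged, m_clalloc() consults a single full flag before
touching the VM system, and its nowait argument maps directly onto
kmem_malloc()'s wait flag. A hypothetical interrupt-time caller, with
the error handling assumed rather than taken from this commit:

	/* At interrupt time, pass nowait = 1 so the allocation fails
	 * fast instead of sleeping inside kmem_malloc(). */
	if (m_clalloc(1, 1) == 0)
		return (ENOBUFS);	/* e.g. drop the packet */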
@@ -98,8 +98,6 @@ vm_map_t u_map=0;
 vm_map_t buffer_map=0;
 vm_map_t mb_map=0;
 int mb_map_full=0;
-vm_map_t mcl_map=0;
-int mcl_map_full=0;
 vm_map_t io_map=0;
 vm_map_t phys_map=0;
@@ -289,8 +287,8 @@ kmem_malloc(map, size, waitflag)
 	vm_offset_t addr;
 	vm_page_t m;
 
-	if (map != kmem_map && map != mb_map && map != mcl_map)
-		panic("kmem_malloc: map != {kmem,mb,mcl}_map");
+	if (map != kmem_map && map != mb_map)
+		panic("kmem_malloc: map != {kmem,mb}_map");
 
 	size = round_page(size);
 	addr = vm_map_min(map);
@@ -305,13 +303,7 @@ kmem_malloc(map, size, waitflag)
 		vm_map_unlock(map);
 		if (map == mb_map) {
 			mb_map_full = TRUE;
-			log(LOG_ERR, "Out of mbufs - increase maxusers!\n");
-			return (0);
-		}
-		if (map == mcl_map) {
-			mcl_map_full = TRUE;
-			log(LOG_ERR,
-			    "Out of mbuf clusters - increase maxusers!\n");
+			log(LOG_ERR, "Out of mbuf clusters - increase maxusers!\n");
 			return (0);
 		}
 		if (waitflag == M_WAITOK)
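Once kmem_malloc() latches mb_map_full, the flag never clears: as the
comment in m_clalloc() above notes, nothing is ever freed back to the
map, so every later allocation attempt fails fast on the early check
instead of re-entering the VM layer. The merged map's single log
message now stands in for both the old mbuf and cluster warnings.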
@@ -73,8 +73,6 @@ extern vm_map_t kernel_map;
 extern vm_map_t kmem_map;
 extern vm_map_t mb_map;
 extern int mb_map_full;
-extern vm_map_t mcl_map;
-extern int mcl_map_full;
 extern vm_map_t io_map;
 extern vm_map_t clean_map;
 extern vm_map_t phys_map;