Support garbage collecting the pmap pv entries. The management doesn't
happen until the system would have nearly failed anyway, so no significant
overhead is added.  This helps large systems with lots of processes.
John Dyson 1997-10-25 02:41:56 +00:00
parent 0a80f406b3
commit 5985940e79
3 changed files with 117 additions and 5 deletions
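
In outline: the pmap code now counts live pv entries and, once the count crosses a high-water mark, wakes the pagedaemon, which calls the new pmap_collect() routine to reclaim entries by tearing down the mappings of idle pages. pmap_init2() sizes the limits as pv_entry_max = PMAP_SHPGPERPROC * maxproc + pv_npg, with the high- and low-water marks at 90% and 40% of the maximum. As a worked example (the input values are hypothetical, not from this commit): with PMAP_SHPGPERPROC = 200, maxproc = 512, and pv_npg = 8192, pv_entry_max = 200 * 512 + 8192 = 110592, so collection triggers once more than 9 * (110592 / 10) = 99531 entries (integer division) are live.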

pmap.c

@@ -39,7 +39,7 @@
  * SUCH DAMAGE.
  *
  * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.163 1997/10/11 18:31:18 phk Exp $
+ * $Id: pmap.c,v 1.164 1997/10/24 23:41:04 dyson Exp $
  */
 /*
@@ -169,6 +169,9 @@ extern vm_offset_t clean_sva, clean_eva;
 vm_zone_t pvzone;
 struct vm_zone pvzone_store;
 struct vm_object pvzone_obj;
+int pv_entry_count=0, pv_entry_max=0,
+	pv_entry_high_water=0, pv_entry_low_water=0;
+int pmap_pagedaemon_waken = 0;
 #define NPVINIT 8192
 struct pv_entry pvinit[NPVINIT];
@@ -221,6 +224,7 @@ static vm_page_t pmap_page_alloc __P((vm_object_t object, vm_pindex_t pindex));
 static vm_page_t pmap_page_lookup __P((vm_object_t object, vm_pindex_t pindex));
 static int pmap_unuse_pt __P((pmap_t, vm_offset_t, vm_page_t));
 vm_offset_t pmap_kmem_choose(vm_offset_t addr) ;
+void pmap_collect(void);
 #define PDSTACKMAX 6
 static vm_offset_t pdstack[PDSTACKMAX];
@@ -535,8 +539,15 @@ pmap_init(phys_start, phys_end)
 void
 pmap_init2() {
+<<<<<<< pmap.c
+	pv_entry_max = PMAP_SHPGPERPROC * maxproc + pv_npg;
+	pv_entry_high_water = 9 * (pv_entry_max / 10);
+	pv_entry_low_water = 4 * (pv_entry_max / 10);
+	zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
+=======
 	zinitna(pvzone, &pvzone_obj, NULL, 0,
 		PMAP_SHPGPERPROC * maxproc + pv_npg, ZONE_INTERRUPT, 1);
+>>>>>>> 1.164
 }
 /*
@@ -1532,6 +1543,7 @@ static inline void
 free_pv_entry(pv)
 	pv_entry_t pv;
 {
+	pv_entry_count--;
 	zfreei(pvzone, pv);
 }
@@ -1541,12 +1553,53 @@ free_pv_entry(pv)
  * the memory allocation is performed bypassing the malloc code
  * because of the possibility of allocations at interrupt time.
  */
-static inline pv_entry_t
+static pv_entry_t
 get_pv_entry(void)
 {
+	pv_entry_count++;
+	if ((pv_entry_count > pv_entry_high_water) &&
+	    (pmap_pagedaemon_waken == 0)) {
+		pmap_pagedaemon_waken = 1;
+		wakeup (&vm_pages_needed);
+	}
 	return zalloci(pvzone);
 }
+/*
+ * This routine is very drastic, but can save the system
+ * in a pinch.
+ */
+void
+pmap_collect() {
+	pv_table_t *ppv;
+	pv_entry_t pv;
+	int i;
+	vm_offset_t pa;
+	vm_page_t m;
+	static int warningdone=0;
+	if (pmap_pagedaemon_waken == 0)
+		return;
+	if (warningdone < 5) {
+		printf("pmap_collect: collecting pv entries -- increase PMAP_SHPGPERPROC");
+		warningdone++;
+	}
+	for(i = 0; i < pv_npg; i++) {
+		if ((ppv = &pv_table[i]) == 0)
+			continue;
+		m = ppv->pv_vm_page;
+		if ((pa = VM_PAGE_TO_PHYS(m)) == 0)
+			continue;
+		if (m->wire_count || m->hold_count || m->busy || (m->flags & PG_BUSY))
+			continue;
+		pmap_remove_all(pa);
+	}
+	pmap_pagedaemon_waken = 0;
+}
 /*
  * If it is the first entry on the list, it is actually
  * in the header and we must copy the following entry up

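To see the shape of the trigger that get_pv_entry() now implements, here is a stand-alone C sketch of the same one-shot high-water pattern. All names and values here are invented for illustration (the kernel uses pv_entry_count, pv_entry_high_water, pmap_pagedaemon_waken, zalloci(pvzone), and wakeup(&vm_pages_needed)); this is a sketch of the pattern, not the kernel code.

/* high-water trigger sketch -- all names here are illustrative only */
#include <stdio.h>
#include <stdlib.h>

static int entry_count;                /* cf. pv_entry_count */
static int entry_high_water = 90;      /* cf. pv_entry_high_water */
static int daemon_waken;               /* cf. pmap_pagedaemon_waken */

static void
wake_daemon_sketch(void)
{
	/* stands in for wakeup(&vm_pages_needed) */
	printf("pagedaemon wakeup requested at %d entries\n", entry_count);
}

static void *
alloc_entry_sketch(void)
{
	entry_count++;
	/*
	 * One-shot trigger: the first allocation past the high-water
	 * mark wakes the daemon; the flag suppresses further wakeups
	 * until the collector clears it.
	 */
	if ((entry_count > entry_high_water) && (daemon_waken == 0)) {
		daemon_waken = 1;
		wake_daemon_sketch();
	}
	return malloc(16);             /* stands in for zalloci(pvzone) */
}

int
main(void)
{
	int i;

	for (i = 0; i < 100; i++)      /* crosses the mark exactly once */
		free(alloc_entry_sketch());
	return 0;
}

The next changed file is a second copy of pmap.c that receives the identical diff, which is how the commit reaches its total of three files.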
pmap.c

@@ -39,7 +39,7 @@
  * SUCH DAMAGE.
  *
  * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.163 1997/10/11 18:31:18 phk Exp $
+ * $Id: pmap.c,v 1.164 1997/10/24 23:41:04 dyson Exp $
  */
 /*
@@ -169,6 +169,9 @@ extern vm_offset_t clean_sva, clean_eva;
 vm_zone_t pvzone;
 struct vm_zone pvzone_store;
 struct vm_object pvzone_obj;
+int pv_entry_count=0, pv_entry_max=0,
+	pv_entry_high_water=0, pv_entry_low_water=0;
+int pmap_pagedaemon_waken = 0;
 #define NPVINIT 8192
 struct pv_entry pvinit[NPVINIT];
@@ -221,6 +224,7 @@ static vm_page_t pmap_page_alloc __P((vm_object_t object, vm_pindex_t pindex));
 static vm_page_t pmap_page_lookup __P((vm_object_t object, vm_pindex_t pindex));
 static int pmap_unuse_pt __P((pmap_t, vm_offset_t, vm_page_t));
 vm_offset_t pmap_kmem_choose(vm_offset_t addr) ;
+void pmap_collect(void);
 #define PDSTACKMAX 6
 static vm_offset_t pdstack[PDSTACKMAX];
@@ -535,8 +539,15 @@ pmap_init(phys_start, phys_end)
 void
 pmap_init2() {
+<<<<<<< pmap.c
+	pv_entry_max = PMAP_SHPGPERPROC * maxproc + pv_npg;
+	pv_entry_high_water = 9 * (pv_entry_max / 10);
+	pv_entry_low_water = 4 * (pv_entry_max / 10);
+	zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
+=======
 	zinitna(pvzone, &pvzone_obj, NULL, 0,
 		PMAP_SHPGPERPROC * maxproc + pv_npg, ZONE_INTERRUPT, 1);
+>>>>>>> 1.164
 }
 /*
@@ -1532,6 +1543,7 @@ static inline void
 free_pv_entry(pv)
 	pv_entry_t pv;
 {
+	pv_entry_count--;
 	zfreei(pvzone, pv);
 }
@@ -1541,12 +1553,53 @@ free_pv_entry(pv)
  * the memory allocation is performed bypassing the malloc code
  * because of the possibility of allocations at interrupt time.
  */
-static inline pv_entry_t
+static pv_entry_t
 get_pv_entry(void)
 {
+	pv_entry_count++;
+	if ((pv_entry_count > pv_entry_high_water) &&
+	    (pmap_pagedaemon_waken == 0)) {
+		pmap_pagedaemon_waken = 1;
+		wakeup (&vm_pages_needed);
+	}
 	return zalloci(pvzone);
 }
+/*
+ * This routine is very drastic, but can save the system
+ * in a pinch.
+ */
+void
+pmap_collect() {
+	pv_table_t *ppv;
+	pv_entry_t pv;
+	int i;
+	vm_offset_t pa;
+	vm_page_t m;
+	static int warningdone=0;
+	if (pmap_pagedaemon_waken == 0)
+		return;
+	if (warningdone < 5) {
+		printf("pmap_collect: collecting pv entries -- increase PMAP_SHPGPERPROC");
+		warningdone++;
+	}
+	for(i = 0; i < pv_npg; i++) {
+		if ((ppv = &pv_table[i]) == 0)
+			continue;
+		m = ppv->pv_vm_page;
+		if ((pa = VM_PAGE_TO_PHYS(m)) == 0)
+			continue;
+		if (m->wire_count || m->hold_count || m->busy || (m->flags & PG_BUSY))
+			continue;
+		pmap_remove_all(pa);
+	}
+	pmap_pagedaemon_waken = 0;
+}
 /*
  * If it is the first entry on the list, it is actually
  * in the header and we must copy the following entry up

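The heart of the change is the scan in pmap_collect() above: every managed page is examined, and only pages that are not wired, held, or busy have their mappings (and with them their pv entries) torn down; such mappings can simply be faulted back in later, whereas removing a wired or busy page's mappings could break I/O in progress. A stand-alone sketch of that filtering loop, with invented types standing in for pv_table_t and vm_page_t:

/* collection-scan sketch -- types and names invented for illustration */
#include <stdio.h>

#define PG_BUSY 0x01
#define NPAGES  4

struct page_sketch {            /* stands in for vm_page_t */
	int wire_count;         /* wired: mappings must stay */
	int hold_count;         /* transiently held: skip */
	int busy;               /* in I/O: skip */
	int flags;              /* may carry PG_BUSY */
};

static struct page_sketch page_table_sketch[NPAGES] = {
	{ 1, 0, 0, 0 },         /* wired page: left alone */
	{ 0, 0, 0, 0 },         /* idle page: collected */
	{ 0, 0, 1, 0 },         /* busy page: left alone */
	{ 0, 0, 0, PG_BUSY },   /* flagged-busy page: left alone */
};

static void
remove_all_mappings_sketch(int i)
{
	/* stands in for pmap_remove_all(pa), which frees the pv entries */
	printf("reclaimed pv entries of page %d\n", i);
}

int
main(void)
{
	struct page_sketch *m;
	int i;

	for (i = 0; i < NPAGES; i++) {
		m = &page_table_sketch[i];
		if (m->wire_count || m->hold_count || m->busy ||
		    (m->flags & PG_BUSY))
			continue;
		remove_all_mappings_sketch(i);
	}
	return 0;
}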
vm_pageout.c

@@ -65,7 +65,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_pageout.c,v 1.98 1997/09/01 03:17:26 bde Exp $
+ * $Id: vm_pageout.c,v 1.99 1997/10/06 02:48:16 dyson Exp $
  */
 /*
@@ -184,6 +184,7 @@ static freeer_fcn_t vm_pageout_object_deactivate_pages;
 static void vm_req_vmdaemon __P((void));
 #endif
 static void vm_pageout_page_stats(void);
+void pmap_collect(void);
 /*
  * vm_pageout_clean:
@@ -590,6 +591,11 @@ vm_pageout_scan()
 	int vnodes_skipped = 0;
 	int s;
+	/*
+	 * Do whatever cleanup that the pmap code can.
+	 */
+	pmap_collect();
 	/*
 	 * Start scanning the inactive queue for pages we can free. We keep
 	 * scanning until we have enough free pages or we have scanned through
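
The vm_pageout.c hunk above hooks the collector into the pagedaemon's main loop. A minimal sketch of that control flow (all names illustrative): the call is a cheap no-op unless the allocator's trigger has fired, which is why the commit message can claim no significant overhead in normal operation.

/* pagedaemon-hook sketch -- all names invented for illustration */
#include <stdio.h>

static int daemon_waken_sketch = 1;    /* cf. pmap_pagedaemon_waken */

static void
collect_sketch(void)
{
	if (daemon_waken_sketch == 0)
		return;                /* common case: free of charge */
	printf("reclaiming pv entries\n");
	daemon_waken_sketch = 0;       /* one-shot until re-armed */
}

static void
pageout_scan_sketch(void)
{
	collect_sketch();              /* cf. the pmap_collect() call above */
	/* the normal inactive-queue scan would follow here */
}

int
main(void)
{
	pageout_scan_sketch();         /* collects: the trigger had fired */
	pageout_scan_sketch();         /* no-op: flag already cleared */
	return 0;
}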