Integrated VM system improvements/fixes from FreeBSD-1.1.5.

This commit is contained in:
David Greenman 1994-08-04 03:06:48 +00:00
parent d319b932ae
commit bbc0ec5284
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=1827
5 changed files with 363 additions and 349 deletions

View File

@@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id$
* $Id: vm_glue.c,v 1.3 1994/08/02 07:55:19 davidg Exp $
*/
#include <sys/param.h>
@@ -109,19 +109,16 @@ useracc(addr, len, rw)
vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
/*
* XXX - specially disallow access to user page tables - they are
* in the map.
*
* XXX - don't specially disallow access to the user area - treat
* it as incorrectly as elsewhere.
* XXX - check separately to disallow access to user area and user
* page tables - they are in the map.
*
* XXX - VM_MAXUSER_ADDRESS is an end address, not a max. It was
* only used (as an end address) in trap.c. Use it as an end
* address here too.
* once only used (as an end address) in trap.c. Use it as an end
* address here too. This bogusness has spread. I just fixed
* where it was used as a max in vm_mmap.c.
*/
if ((vm_offset_t) addr >= VM_MAXUSER_ADDRESS
|| (vm_offset_t) addr + len > VM_MAXUSER_ADDRESS
|| (vm_offset_t) addr + len <= (vm_offset_t) addr) {
if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
|| (vm_offset_t) addr + len < (vm_offset_t) addr) {
return (FALSE);
}
@@ -276,23 +273,24 @@ void
vm_init_limits(p)
register struct proc *p;
{
int tmp;
int rss_limit;
/*
* Set up the initial limits on process VM.
* Set the maximum resident set size to be all
* of (reasonably) available memory. This causes
* any single, large process to start random page
* replacement once it fills memory.
* Set the maximum resident set size to be half
* of (reasonably) available memory. Since this
* is a soft limit, it comes into effect only
* when the system is out of memory - half of
* main memory helps to favor smaller processes,
* and reduces thrashing of the object cache.
*/
p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
tmp = ((2 * cnt.v_free_count) / 3) - 32;
if (cnt.v_free_count < 512)
tmp = cnt.v_free_count;
p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(tmp);
/* limit the limit to no less than 128K */
rss_limit = max(cnt.v_free_count / 2, 32);
p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

View File

@@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
* $Id$
* $Id: vm_mmap.c,v 1.3 1994/08/02 07:55:28 davidg Exp $
*/
/*
@@ -216,13 +216,13 @@ mmap(p, uap, retval)
* Note that VM_*_ADDRESS are not constants due to casts (argh).
*/
if (flags & MAP_FIXED) {
if (VM_MAXUSER_ADDRESS > 0 && addr + size >= VM_MAXUSER_ADDRESS)
if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
return (EINVAL);
#ifndef i386
if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
return (EINVAL);
#endif
if (addr > addr + size)
if (addr + size < addr)
return (EINVAL);
}
/*
@@ -401,13 +401,13 @@ munmap(p, uap, retval)
* Check for illegal addresses. Watch out for address wrap...
* Note that VM_*_ADDRESS are not constants due to casts (argh).
*/
if (VM_MAXUSER_ADDRESS > 0 && addr + size >= VM_MAXUSER_ADDRESS)
if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
return (EINVAL);
#ifndef i386
if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
return (EINVAL);
#endif
if (addr > addr + size)
if (addr + size < addr)
return (EINVAL);
map = &p->p_vmspace->vm_map;
/*

View File

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id$
* $Id: vm_page.h,v 1.3 1994/08/02 07:55:32 davidg Exp $
*/
/*
@@ -132,6 +132,7 @@ struct vm_page {
#define PG_FAKE 0x0200 /* page is placeholder for pagein (O) */
#define PG_FILLED 0x0400 /* client flag to set when filled */
#define PG_DIRTY 0x0800 /* client flag to set when dirty */
#define PG_REFERENCED 0x1000 /* page has been referenced */
#define PG_PAGEROWNED 0x4000 /* DEBUG: async paging op in progress */
#define PG_PTPAGE 0x8000 /* DEBUG: is a user page table page */

View File

@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.c,v 1.4 1994/08/01 11:25:45 davidg Exp $
* $Id: vm_pageout.c,v 1.5 1994/08/02 07:55:33 davidg Exp $
*/
/*
@@ -108,6 +108,7 @@ extern int swap_pager_ready();
#define LOWATER ((2048*1024)/NBPG)
#define VM_PAGEOUT_PAGE_COUNT 8
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
static vm_offset_t vm_space_needed;
int vm_pageout_req_do_stats;
@@ -190,7 +191,7 @@ vm_pageout_clean(m, sync)
ms[0] = m;
if( pager = object->pager) {
for(i=1;i<VM_PAGEOUT_PAGE_COUNT;i++) {
for(i=1;i<vm_pageout_page_count;i++) {
if( ms[i] = vm_page_lookup( object, offset+i*NBPG)) {
if((((ms[i]->flags & (PG_CLEAN|PG_INACTIVE|PG_BUSY)) == PG_INACTIVE)
|| (( ms[i]->flags & PG_CLEAN) == 0 && sync == VM_PAGEOUT_FORCE))
@@ -619,7 +620,7 @@ vm_pageout_scan()
/*
* if the next page has been re-activated, start scanning again
*/
if (next && (next->flags & PG_INACTIVE) == 0)
if (!next || (next->flags & PG_INACTIVE) == 0)
goto rescan1;
} else if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
pmap_clear_reference(VM_PAGE_TO_PHYS(m));

File diff suppressed because it is too large Load Diff