Remove MAP_ENTRY_IS_A_MAP 'share' maps. These maps were once used to
attempt to optimize forks, but were essentially given up on due to
problems and replaced with an explicit dup of the vm_map_entry structure.
Prior to the removal, they were entirely unused.
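
The bulk of the diff below is one mechanical pattern: every eflags test that had to mask out both flags now tests only MAP_ENTRY_IS_SUB_MAP, since the 0x1 bit is retired as MAP_ENTRY_UNUSED_01. A minimal stand-alone sketch of that before/after test follows (the struct entry stub, skip_old/skip_new helpers, and main() are illustrative only; the flag values match the vm_map.h hunk in this commit):

    #include <stdio.h>

    /* Flag bits as defined in the vm_map.h hunk below; 0x1 was
     * MAP_ENTRY_IS_A_MAP, now reserved as MAP_ENTRY_UNUSED_01. */
    #define MAP_ENTRY_UNUSED_01     0x1
    #define MAP_ENTRY_IS_SUB_MAP    0x2

    /* Illustrative stand-in; the real struct vm_map_entry carries far
     * more state (links, offsets, protections, wiring counts). */
    struct entry {
        int eflags;
    };

    /* Old test: entries backed by a share map or a submap were skipped. */
    static int
    skip_old(const struct entry *e)
    {
        return ((e->eflags & (MAP_ENTRY_UNUSED_01|MAP_ENTRY_IS_SUB_MAP)) != 0);
    }

    /* New test: share maps are gone, so only submaps are skipped. */
    static int
    skip_new(const struct entry *e)
    {
        return ((e->eflags & MAP_ENTRY_IS_SUB_MAP) != 0);
    }

    int
    main(void)
    {
        struct entry submap = { MAP_ENTRY_IS_SUB_MAP };
        struct entry plain = { 0 };

        printf("submap: old=%d new=%d\n", skip_old(&submap), skip_new(&submap));
        printf("plain:  old=%d new=%d\n", skip_old(&plain), skip_new(&plain));
        return (0);
    }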
Matthew Dillon 1999-02-07 21:48:23 +00:00
parent a0e7b3e5ce
commit 9fdfe602fc
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=43748
10 changed files with 49 additions and 114 deletions

View File

@@ -36,7 +36,7 @@
  *
  * @(#)procfs_status.c 8.3 (Berkeley) 2/17/94
  *
- * $Id: procfs_map.c,v 1.19 1999/01/21 08:29:06 dillon Exp $
+ * $Id: procfs_map.c,v 1.20 1999/02/05 06:18:54 jdp Exp $
  */
 
 #include <sys/param.h>
@@ -98,7 +98,7 @@ procfs_domap(curp, p, pfs, uio)
         int resident, privateresident;
         char *type;
 
-        if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
+        if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
             continue;
 
         obj = entry->object.vm_object;

View File

@@ -26,7 +26,7 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
- * $Id: imgact_elf.c,v 1.50 1999/02/05 03:47:47 newton Exp $
+ * $Id: imgact_elf.c,v 1.51 1999/02/05 22:24:26 jdp Exp $
  */
 
 #include "opt_rlimit.h"
@@ -844,7 +844,7 @@ each_writable_segment(p, func, closure)
          entry = entry->next) {
         vm_object_t obj;
 
-        if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP) ||
+        if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
             (entry->protection & (VM_PROT_READ|VM_PROT_WRITE)) !=
             (VM_PROT_READ|VM_PROT_WRITE))
             continue;

View File

@@ -36,7 +36,7 @@
  *
  * @(#)procfs_status.c 8.3 (Berkeley) 2/17/94
  *
- * $Id: procfs_map.c,v 1.19 1999/01/21 08:29:06 dillon Exp $
+ * $Id: procfs_map.c,v 1.20 1999/02/05 06:18:54 jdp Exp $
  */
 
 #include <sys/param.h>
@@ -98,7 +98,7 @@ procfs_domap(curp, p, pfs, uio)
         int resident, privateresident;
         char *type;
 
-        if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
+        if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
             continue;
 
         obj = entry->object.vm_object;

View File

@@ -66,7 +66,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_fault.c,v 1.97 1999/01/24 00:55:04 dillon Exp $
+ * $Id: vm_fault.c,v 1.98 1999/01/24 06:04:52 dillon Exp $
  */
 
 /*
@@ -625,9 +625,8 @@ RetryFault:;
      * grab the lock if we need to
      */
     (fs.lookup_still_valid ||
-        (((fs.entry->eflags & MAP_ENTRY_IS_A_MAP) == 0) &&
-        lockmgr(&fs.map->lock,
-        LK_EXCLUSIVE|LK_NOWAIT, (void *)0, curproc) == 0))) {
+        lockmgr(&fs.map->lock, LK_EXCLUSIVE|LK_NOWAIT, (void *)0, curproc) == 0)
+    ) {
     fs.lookup_still_valid = 1;
 
     /*

View File

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_map.c,v 1.146 1999/02/01 08:49:30 dillon Exp $
+ * $Id: vm_map.c,v 1.147 1999/02/03 01:57:16 dillon Exp $
  */
 
 /*
@@ -491,7 +491,7 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
     if (
         (object == NULL) &&
         (prev_entry != &map->header) &&
-        ((prev_entry->eflags & (MAP_ENTRY_IS_A_MAP | MAP_ENTRY_IS_SUB_MAP)) == 0) &&
+        ((prev_entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) &&
         ((prev_entry->object.vm_object == NULL) ||
         (prev_entry->object.vm_object->type == OBJT_DEFAULT) ||
         (prev_entry->object.vm_object->type == OBJT_SWAP)) &&
@@ -901,7 +901,7 @@ vm_map_simplify_entry(map, entry)
     vm_map_entry_t next, prev;
     vm_size_t prevsize, esize;
 
-    if (entry->eflags & (MAP_ENTRY_IS_SUB_MAP|MAP_ENTRY_IS_A_MAP))
+    if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
         return;
 
     prev = entry->prev;
@@ -1013,7 +1013,7 @@ _vm_map_clip_start(map, entry, start)
     vm_map_entry_link(map, entry->prev, new_entry);
 
-    if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
+    if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
         if (new_entry->object.vm_object->ref_count == 1)
             vm_object_set_flag(new_entry->object.vm_object,
                 OBJ_ONEMAPPING);
@@ -1077,7 +1077,7 @@ _vm_map_clip_end(map, entry, end)
     vm_map_entry_link(map, entry, new_entry);
 
-    if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
+    if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
         if (new_entry->object.vm_object->ref_count == 1)
             vm_object_set_flag(new_entry->object.vm_object,
                 OBJ_ONEMAPPING);
@@ -1141,7 +1141,7 @@ vm_map_submap(map, start, end, submap)
     vm_map_clip_end(map, entry, end);
 
     if ((entry->start == start) && (entry->end == end) &&
-        ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_COW)) == 0) &&
+        ((entry->eflags & MAP_ENTRY_COW) == 0) &&
         (entry->object.vm_object == NULL)) {
         entry->object.sub_map = submap;
         entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
@@ -1223,40 +1223,9 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
                 VM_PROT_ALL)
-        if (current->eflags & MAP_ENTRY_IS_A_MAP) {
-            vm_map_entry_t share_entry;
-            vm_offset_t share_end;
-
-            vm_map_lock(current->object.share_map);
-            (void) vm_map_lookup_entry(
-                current->object.share_map,
-                current->offset,
-                &share_entry);
-            share_end = current->offset +
-                (current->end - current->start);
-            while ((share_entry !=
-                &current->object.share_map->header) &&
-                (share_entry->start < share_end)) {
-
-                pmap_protect(map->pmap,
-                    (qmax(share_entry->start,
-                        current->offset) -
-                        current->offset +
-                        current->start),
-                    min(share_entry->end,
-                        share_end) -
-                    current->offset +
-                    current->start,
-                    current->protection &
-                    MASK(share_entry));
-
-                share_entry = share_entry->next;
-            }
-            vm_map_unlock(current->object.share_map);
-        } else
-            pmap_protect(map->pmap, current->start,
-                current->end,
-                current->protection & MASK(entry));
+        pmap_protect(map->pmap, current->start,
+            current->end,
+            current->protection & MASK(entry));
 #undef MASK
     }
@@ -1300,7 +1269,7 @@ vm_map_madvise(map, pmap, start, end, advise)
          current = current->next) {
         vm_size_t size;
 
-        if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
+        if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
             continue;
         }
@@ -1491,7 +1460,7 @@ vm_map_user_pageable(map, start, end, new_pageable)
             /* Here on entry being newly wired */
 
-            if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
+            if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
                 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
                 if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
@@ -1689,7 +1658,7 @@ vm_map_pageable(map, start, end, new_pageable)
              * point to sharing maps, because we won't
              * hold the lock on the sharing map.
              */
-            if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
+            if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
                 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
                 if (copyflag &&
                     ((entry->protection & VM_PROT_WRITE) != 0)) {
@@ -1858,12 +1827,12 @@ vm_map_clean(map, start, end, syncio, invalidate)
     for (current = entry; current->start < end; current = current->next) {
         offset = current->offset + (start - current->start);
         size = (end <= current->end ? end : current->end) - start;
-        if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
+        if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
             vm_map_t smap;
             vm_map_entry_t tentry;
             vm_size_t tsize;
 
-            smap = current->object.share_map;
+            smap = current->object.sub_map;
             vm_map_lock_read(smap);
             (void) vm_map_lookup_entry(smap, offset, &tentry);
             tsize = tentry->end - offset;
@@ -1955,7 +1924,7 @@ vm_map_entry_delete(map, entry)
     vm_map_entry_unlink(map, entry);
     map->size -= entry->end - entry->start;
 
-    if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
+    if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
         vm_object_deallocate(entry->object.vm_object);
     }
@@ -2257,8 +2226,7 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
 {
     vm_object_t src_object;
 
-    if ((dst_entry->eflags|src_entry->eflags) &
-        (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
+    if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
         return;
 
     if (src_entry->wired_count == 0) {
@@ -2406,7 +2374,6 @@ vmspace_fork(vm1)
             *new_entry = *old_entry;
             new_entry->wired_count = 0;
             new_entry->object.vm_object = NULL;
-            new_entry->eflags &= ~MAP_ENTRY_IS_A_MAP;
             vm_map_entry_link(new_map, new_map->header.prev,
                 new_entry);
             vm_map_copy_entry(old_map, new_map, old_entry,
@@ -2507,7 +2474,9 @@ vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
     vm_map_entry_t entry;
     vm_map_t map = *var_map;
     vm_prot_t prot;
+#if 0
     boolean_t su;
+#endif
     vm_prot_t fault_type = fault_typea;
 
 RetryLookup:;
@@ -2596,33 +2565,8 @@ RetryLookup:;
      * If we don't already have a VM object, track it down.
      */
-    su = (entry->eflags & MAP_ENTRY_IS_A_MAP) == 0;
-    if (su) {
-        share_map = map;
-        share_offset = vaddr;
-    } else {
-        vm_map_entry_t share_entry;
-
-        /*
-         * Compute the sharing map, and offset into it.
-         */
-        share_map = entry->object.share_map;
-        share_offset = (vaddr - entry->start) + entry->offset;
-
-        /*
-         * Look for the backing store object and offset
-         */
-        vm_map_lock_read(share_map);
-        if (!vm_map_lookup_entry(share_map, share_offset,
-            &share_entry)) {
-            vm_map_unlock_read(share_map);
-            RETURN(KERN_INVALID_ADDRESS);
-        }
-        entry = share_entry;
-    }
+    share_map = map;
+    share_offset = vaddr;
 
     /*
      * If the entry was copy-on-write, we either ...
@@ -2721,13 +2665,6 @@ vm_map_lookup_done(map, entry)
     vm_map_t map;
     vm_map_entry_t entry;
 {
-    /*
-     * If this entry references a map, unlock it first.
-     */
-    if (entry->eflags & MAP_ENTRY_IS_A_MAP)
-        vm_map_unlock_read(entry->object.share_map);
-
     /*
      * Unlock the main-level map
      */
@@ -3100,19 +3037,18 @@ DB_SHOW_COMMAND(map, vm_map_print)
         if (entry->wired_count != 0)
             db_printf(", wired");
     }
-    if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
+    if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
         /* XXX no %qd in kernel. Truncate entry->offset. */
         db_printf(", share=%p, offset=0x%lx\n",
-            (void *)entry->object.share_map,
+            (void *)entry->object.sub_map,
             (long)entry->offset);
         nlines++;
         if ((entry->prev == &map->header) ||
-            ((entry->prev->eflags & MAP_ENTRY_IS_A_MAP) == 0) ||
-            (entry->prev->object.share_map !=
-            entry->object.share_map)) {
+            (entry->prev->object.sub_map !=
+            entry->object.sub_map)) {
             db_indent += 2;
             vm_map_print((db_expr_t)(intptr_t)
-                entry->object.share_map,
+                entry->object.sub_map,
                 full, 0, (char *)0);
             db_indent -= 2;
         }
@@ -3128,7 +3064,6 @@ DB_SHOW_COMMAND(map, vm_map_print)
         nlines++;
         if ((entry->prev == &map->header) ||
-            (entry->prev->eflags & MAP_ENTRY_IS_A_MAP) ||
             (entry->prev->object.vm_object !=
             entry->object.vm_object)) {
             db_indent += 2;

View File

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_map.h,v 1.33 1999/01/06 23:05:42 julian Exp $
+ * $Id: vm_map.h,v 1.34 1999/01/26 02:49:52 julian Exp $
  */
 
 /*
@@ -87,7 +87,6 @@
 union vm_map_object {
     struct vm_object *vm_object;    /* object object */
-    struct vm_map *share_map;       /* share map */
     struct vm_map *sub_map;         /* belongs to another map */
 };
@@ -113,7 +112,7 @@ struct vm_map_entry {
     int wired_count;                /* can be paged if = 0 */
 };
 
-#define MAP_ENTRY_IS_A_MAP      0x1
+#define MAP_ENTRY_UNUSED_01     0x1
 #define MAP_ENTRY_IS_SUB_MAP    0x2
 #define MAP_ENTRY_COW           0x4
 #define MAP_ENTRY_NEEDS_COPY    0x8
@@ -165,6 +164,7 @@ struct vmspace {
 };
 
+#if 0
 /*
  * Map versions are used to validate a previous lookup attempt.
  *
@@ -176,10 +176,11 @@ struct vmspace {
  */
 typedef struct {
     int main_timestamp;
-    vm_map_t share_map;
-    int share_timestamp;
 } vm_map_version_t;
+#endif
 
 /*
  * Macros: vm_map_lock, etc.
 * Function:

View File

@@ -31,7 +31,7 @@
  * SUCH DAMAGE.
  *
  * @(#)vm_meter.c 8.4 (Berkeley) 1/4/94
- * $Id: vm_meter.c,v 1.28 1999/01/21 08:29:11 dillon Exp $
+ * $Id: vm_meter.c,v 1.29 1999/01/21 09:41:52 dillon Exp $
  */
 
 #include <sys/param.h>
@@ -180,7 +180,7 @@ vmtotal SYSCTL_HANDLER_ARGS
         paging = 0;
         for (map = &p->p_vmspace->vm_map, entry = map->header.next;
             entry != &map->header; entry = entry->next) {
-            if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) ||
+            if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
                 entry->object.vm_object == NULL)
                 continue;
             vm_object_set_flag(entry->object.vm_object, OBJ_ACTIVE);

View File

@@ -38,7 +38,7 @@
  * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
  *
  * @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
- * $Id: vm_mmap.c,v 1.87 1999/01/21 08:29:11 dillon Exp $
+ * $Id: vm_mmap.c,v 1.88 1999/01/26 02:49:52 julian Exp $
  */
 
 /*
@@ -713,7 +713,7 @@ mincore(p, uap)
         /*
          * ignore submaps (for now) or null objects
          */
-        if ((current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) ||
+        if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
             current->object.vm_object == NULL)
             continue;

View File

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_object.c,v 1.144 1999/02/04 17:47:52 dillon Exp $
+ * $Id: vm_object.c,v 1.145 1999/02/07 08:44:53 dillon Exp $
  */
 
 /*
@@ -1569,8 +1569,8 @@ _vm_object_in_map(map, object, entry)
             }
             tmpe = tmpe->next;
         }
-    } else if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
-        tmpm = entry->object.share_map;
+    } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
+        tmpm = entry->object.sub_map;
         tmpe = tmpm->header.next;
         entcount = tmpm->nentries;
         while (entcount-- && tmpe != &tmpm->header) {

View File

@@ -65,7 +65,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_pageout.c,v 1.133 1999/01/24 01:33:22 dillon Exp $
+ * $Id: vm_pageout.c,v 1.134 1999/01/24 06:04:52 dillon Exp $
  */
 
 /*
@@ -578,7 +578,7 @@ vm_pageout_map_deactivate_pages(map, desired)
      */
     tmpe = map->header.next;
     while (tmpe != &map->header) {
-        if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
+        if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
             obj = tmpe->object.vm_object;
             if ((obj != NULL) && (obj->shadow_count <= 1) &&
                 ((bigobj == NULL) ||
@@ -600,7 +600,7 @@ vm_pageout_map_deactivate_pages(map, desired)
     while (tmpe != &map->header) {
         if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
             break;
-        if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
+        if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
             obj = tmpe->object.vm_object;
             if (obj)
                 vm_pageout_object_deactivate_pages(map, obj, desired, 0);