Remove unused vars & funcs, make things static, protoize a little bit.

phk 1995-11-20 12:20:02 +00:00
parent 6ec6816809
commit 4636ba8017
13 changed files with 39 additions and 144 deletions
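
For context, "protoize" means rewriting old-style K&R function definitions in ANSI prototype form, and marking file-local symbols static hides them from other object files. The following is only a rough before/after sketch of the three cleanup patterns, using made-up names (widget_count, widget_hash) rather than code from this commit:

/*
 * Hypothetical before/after sketch of the cleanups named in the log;
 * widget_count and widget_hash are illustrative names, not from this tree.
 */
#if 0
/* Before: external linkage, old-style (K&R) definition, an unused local. */
int widget_count;			/* visible to every other object file */

int
widget_hash(obj, offset)
	void *obj;
	unsigned offset;
{
	int i;				/* never used; only generates a warning */

	return (((unsigned long)obj + offset) & 0xff);
}
#else
/* After: static linkage, ANSI prototype-style definition, unused local gone. */
static int widget_count;		/* only this file needs it */

static int
widget_hash(void *obj, unsigned offset)
{
	return (((unsigned long)obj + offset) & 0xff);
}
#endif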

kern_lock.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-* $Id: kern_lock.c,v 1.7 1995/07/13 08:48:12 davidg Exp $
+* $Id: kern_lock.c,v 1.8 1995/11/06 08:44:15 davidg Exp $
*/
/*
@@ -199,8 +199,6 @@ boolean_t
lock_read_to_write(l)
register lock_t l;
{
-register int i;
l->read_count--;
if (l->proc == curproc) {

swap_pager.c

@@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
-* $Id: swap_pager.c,v 1.49 1995/11/14 20:53:20 phk Exp $
+* $Id: swap_pager.c,v 1.50 1995/11/16 09:51:19 bde Exp $
*/
/*
@@ -248,7 +248,6 @@ swap_pager_alloc(handle, size, prot, offset)
vm_offset_t offset;
{
vm_object_t object;
-int i;
/*
* If this is a "named" anonymous region, look it up and use the

vm_fault.c

@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-* $Id: vm_fault.c,v 1.35 1995/11/02 06:42:47 davidg Exp $
+* $Id: vm_fault.c,v 1.36 1995/11/05 20:45:58 dyson Exp $
*/
/*
@@ -132,11 +132,9 @@ vm_fault(map, vaddr, fault_type, change_wiring)
boolean_t wired;
boolean_t su;
boolean_t lookup_still_valid;
-boolean_t page_exists;
vm_page_t old_m;
vm_object_t next_object;
vm_page_t marray[VM_FAULT_READ];
-int spl;
int hardfault = 0;
struct vnode *vp = NULL;

vm_map.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-* $Id: vm_map.c,v 1.25 1995/10/23 05:35:44 dyson Exp $
+* $Id: vm_map.c,v 1.26 1995/11/12 08:58:58 davidg Exp $
*/
/*
@@ -1727,8 +1727,6 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
vm_map_t src_map, dst_map;
register vm_map_entry_t src_entry, dst_entry;
{
-vm_object_t temp_object;
if (src_entry->is_sub_map || dst_entry->is_sub_map)
return;

vm_object.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-* $Id: vm_object.c,v 1.54 1995/10/23 03:49:43 dyson Exp $
+* $Id: vm_object.c,v 1.55 1995/11/05 20:46:00 dyson Exp $
*/
/*
@@ -160,8 +160,6 @@ _vm_object_allocate(type, size, object)
void
vm_object_init(vm_offset_t nothing)
{
-register int i;
TAILQ_INIT(&vm_object_cached_list);
TAILQ_INIT(&vm_object_list);
vm_object_count = 0;
@@ -345,8 +343,7 @@ void
vm_object_terminate(object)
register vm_object_t object;
{
-register vm_page_t p, next;
-vm_object_t backing_object;
+register vm_page_t p;
int s;
/*
@@ -444,7 +441,6 @@ vm_object_page_clean(object, start, end, syncio, lockflag)
end = round_page(end);
}
startover:
tstart = start;
if (end == 0) {
tend = object->size;
@@ -599,7 +595,6 @@ vm_object_pmap_remove(object, start, end)
register vm_offset_t end;
{
register vm_page_t p;
-int s;
if (object == NULL)
return;
@@ -629,12 +624,6 @@ vm_object_copy(src_object, src_offset, size,
vm_offset_t *dst_offset;/* OUT */
boolean_t *src_needs_copy; /* OUT */
{
-register vm_object_t new_copy;
-register vm_object_t old_copy;
-vm_offset_t new_start, new_end;
-register vm_page_t p;
if (src_object == NULL) {
/*
* Nothing to copy
@@ -1338,8 +1327,10 @@ vm_object_check() {
object->size);
}
if (!vm_object_in_map(object)) {
-printf("vmochk: internal obj is not in a map: ref: %d, size: %d: 0x%x, backing_object: 0x%x\n",
-object->ref_count, object->size, object->backing_object);
+printf("vmochk: internal obj is not in a map: "
+"ref: %d, size: %d: 0x%x, backing_object: 0x%x\n",
+object->ref_count, object->size,
+object->size, object->backing_object);
}
}
}

vm_page.c

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
-* $Id: vm_page.c,v 1.36 1995/09/03 20:40:43 dyson Exp $
+* $Id: vm_page.c,v 1.37 1995/10/23 05:35:46 dyson Exp $
*/
/*
@@ -85,7 +85,7 @@
struct pglist *vm_page_buckets; /* Array of buckets */
int vm_page_bucket_count; /* How big is array? */
-int vm_page_hash_mask; /* Mask for hash function */
+static int vm_page_hash_mask; /* Mask for hash function */
struct pglist vm_page_queue_free;
struct pglist vm_page_queue_zero;
@@ -114,6 +114,7 @@ static u_short vm_page_dev_bsize_chunks[] = {
0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};
+static void vm_page_unqueue __P((vm_page_t ));
/*
* vm_set_page_size:
@@ -340,9 +341,7 @@ vm_page_startup(starta, enda, vaddr)
* NOTE: This macro depends on vm_page_bucket_count being a power of 2.
*/
inline const int
-vm_page_hash(object, offset)
-vm_object_t object;
-vm_offset_t offset;
+vm_page_hash(vm_object_t object, vm_offset_t offset)
{
return ((unsigned) object + (offset >> PAGE_SHIFT)) & vm_page_hash_mask;
}
@@ -500,7 +499,7 @@ vm_page_rename(mem, new_object, new_offset)
/*
* vm_page_unqueue must be called at splhigh();
*/
-inline void
+static inline void
vm_page_unqueue(vm_page_t mem)
{
int origflags;
@@ -798,7 +797,7 @@ vm_page_free(mem)
if (mem->bmapped || mem->busy || flags & (PG_BUSY|PG_FREE)) {
if (flags & PG_FREE)
panic("vm_page_free: freeing free page");
-printf("vm_page_free: offset(%d), bmapped(%d), busy(%d), PG_BUSY(%d)\n",
+printf("vm_page_free: offset(%ld), bmapped(%d), busy(%d), PG_BUSY(%d)\n",
mem->offset, mem->bmapped, mem->busy, (flags & PG_BUSY) ? 1 : 0);
panic("vm_page_free: freeing busy page");
}
@@ -1063,18 +1062,6 @@ vm_page_set_validclean(m, base, size)
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
}
-/*
-* set a page (partially) valid
-*/
-void
-vm_page_set_valid(m, base, size)
-vm_page_t m;
-int base;
-int size;
-{
-m->valid |= vm_page_bits(base, size);
-}
/*
* set a page (partially) invalid
*/
@@ -1109,27 +1096,6 @@ vm_page_is_valid(m, base, size)
}
-/*
-* set a page (partially) dirty
-*/
-void
-vm_page_set_dirty(m, base, size)
-vm_page_t m;
-int base;
-int size;
-{
-if ((base != 0) || (size != PAGE_SIZE)) {
-if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
-m->dirty = VM_PAGE_BITS_ALL;
-pmap_clear_modify(VM_PAGE_TO_PHYS(m));
-return;
-}
-m->dirty |= vm_page_bits(base, size);
-} else {
-m->dirty = VM_PAGE_BITS_ALL;
-pmap_clear_modify(VM_PAGE_TO_PHYS(m));
-}
-}
void
vm_page_test_dirty(m)
@@ -1141,42 +1107,9 @@ vm_page_test_dirty(m)
}
}
-/*
-* set a page (partially) clean
-*/
-void
-vm_page_set_clean(m, base, size)
-vm_page_t m;
-int base;
-int size;
-{
-m->dirty &= ~vm_page_bits(base, size);
-if( base == 0 && size == PAGE_SIZE)
-pmap_clear_modify(VM_PAGE_TO_PHYS(m));
-}
-/*
-* is (partial) page clean
-*/
-int
-vm_page_is_clean(m, base, size)
-vm_page_t m;
-int base;
-int size;
-{
-if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
-m->dirty = VM_PAGE_BITS_ALL;
-pmap_clear_modify(VM_PAGE_TO_PHYS(m));
-}
-if ((m->dirty & m->valid & vm_page_bits(base, size)) == 0)
-return 1;
-else
-return 0;
-}
#ifdef DDB
void
-print_page_info()
+print_page_info(void)
{
printf("cnt.v_free_count: %d\n", cnt.v_free_count);
printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);

vm_page.h

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-* $Id: vm_page.h,v 1.20 1995/09/03 20:11:26 dyson Exp $
+* $Id: vm_page.h,v 1.21 1995/10/23 04:29:39 dyson Exp $
*/
/*
@@ -243,15 +243,10 @@ vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
void vm_page_unwire __P((vm_page_t));
void vm_page_wire __P((vm_page_t));
boolean_t vm_page_zero_fill __P((vm_page_t));
-void vm_page_set_dirty __P((vm_page_t, int, int));
-void vm_page_set_clean __P((vm_page_t, int, int));
-int vm_page_is_clean __P((vm_page_t, int, int));
-void vm_page_set_valid __P((vm_page_t, int, int));
void vm_page_set_validclean __P((vm_page_t, int, int));
void vm_page_set_invalid __P((vm_page_t, int, int));
int vm_page_is_valid __P((vm_page_t, int, int));
void vm_page_test_dirty __P((vm_page_t));
-void vm_page_unqueue __P((vm_page_t ));
int vm_page_bits __P((int, int));

vm_pageout.c

@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-* $Id: vm_pageout.c,v 1.58 1995/10/23 05:35:48 dyson Exp $
+* $Id: vm_pageout.c,v 1.59 1995/11/05 20:46:02 dyson Exp $
*/
/*
@@ -95,6 +95,8 @@
/* the kernel process "vm_pageout"*/
static void vm_pageout __P((void));
+static int vm_pageout_clean __P((vm_page_t, int));
+static int vm_pageout_scan __P((void));
struct proc *pageproc;
static struct kproc_desc page_kp = {
@@ -155,13 +157,12 @@ static void vm_req_vmdaemon __P((void));
* inactive queue. (However, any other page on the inactive queue may
* move!)
*/
-int
+static int
vm_pageout_clean(m, sync)
vm_page_t m;
int sync;
{
register vm_object_t object;
-int pageout_status[VM_PAGEOUT_PAGE_COUNT];
vm_page_t mc[2*VM_PAGEOUT_PAGE_COUNT];
int pageout_count;
int i, forward_okay, backward_okay, page_base;
@@ -540,7 +541,7 @@ vm_req_vmdaemon()
/*
* vm_pageout_scan does the dirty work for the pageout daemon.
*/
-int
+static int
vm_pageout_scan()
{
vm_page_t m;
@@ -706,7 +707,6 @@ vm_pageout_scan()
}
if (m->object->ref_count && ((m->flags & (PG_REFERENCED|PG_WANTED)) ||
pmap_is_referenced(VM_PAGE_TO_PHYS(m)))) {
-int s;
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
m->flags &= ~PG_REFERENCED;

vm_pageout.h

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-* $Id: vm_pageout.h,v 1.14 1995/08/28 09:19:25 julian Exp $
+* $Id: vm_pageout.h,v 1.15 1995/11/05 20:46:03 dyson Exp $
*/
#ifndef _VM_VM_PAGEOUT_H_
@@ -123,10 +123,8 @@ vm_wait()
#ifdef KERNEL
-int vm_pageout_scan __P((void));
void vm_pageout_page __P((vm_page_t, vm_object_t));
void vm_pageout_cluster __P((vm_page_t, vm_object_t));
-int vm_pageout_clean __P((vm_page_t, int));
int vm_pageout_flush __P((vm_page_t *, int, int));
#endif

vm_pager.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-* $Id: vm_pager.c,v 1.16 1995/07/13 08:48:42 davidg Exp $
+* $Id: vm_pager.c,v 1.17 1995/07/29 11:44:29 bde Exp $
*/
/*
@@ -86,13 +86,13 @@ extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;
-struct pagerops *pagertab[] = {
+static struct pagerops *pagertab[] = {
&defaultpagerops, /* OBJT_DEFAULT */
&swappagerops, /* OBJT_SWAP */
&vnodepagerops, /* OBJT_VNODE */
&devicepagerops, /* OBJT_DEVICE */
};
-int npagers = sizeof(pagertab) / sizeof(pagertab[0]);
+static int npagers = sizeof(pagertab) / sizeof(pagertab[0]);
/*
* Kernel address space for mapping pages.
@@ -240,18 +240,6 @@ vm_pager_unmap_page(kva)
kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
}
-vm_page_t
-vm_pager_atop(kva)
-vm_offset_t kva;
-{
-vm_offset_t pa;
-pa = pmap_kextract(kva);
-if (pa == 0)
-panic("vm_pager_atop");
-return (PHYS_TO_VM_PAGE(pa));
-}
vm_object_t
vm_pager_object_lookup(pg_list, handle)
register struct pagerlst *pg_list;

vm_pager.h

@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vm_pager.h 8.4 (Berkeley) 1/12/94
-* $Id: vm_pager.h,v 1.8 1995/07/13 08:48:44 davidg Exp $
+* $Id: vm_pager.h,v 1.9 1995/07/29 11:44:30 bde Exp $
*/
/*
@@ -79,7 +79,6 @@ extern vm_map_t pager_map;
extern int pager_map_size;
vm_object_t vm_pager_allocate __P((objtype_t, void *, vm_size_t, vm_prot_t, vm_offset_t));
-vm_page_t vm_pager_atop __P((vm_offset_t));
void vm_pager_bufferinit __P((void));
void vm_pager_deallocate __P((vm_object_t));
int vm_pager_get_pages __P((vm_object_t, vm_page_t *, int, int));

vm_swap.c

@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_swap.c 8.5 (Berkeley) 2/17/94
-* $Id: vm_swap.c,v 1.23 1995/07/29 11:44:31 bde Exp $
+* $Id: vm_swap.c,v 1.24 1995/11/12 06:43:26 bde Exp $
*/
#include <sys/param.h>
@@ -58,10 +58,10 @@
#define NSWAPDEV 4
#endif
static struct swdevt should_be_malloced[NSWAPDEV];
-struct swdevt *swdevt = should_be_malloced;
+static struct swdevt *swdevt = should_be_malloced;
struct vnode *swapdev_vp;
int nswap; /* first block after the interleaved devs */
-int nswdev = NSWAPDEV;
+static int nswdev = NSWAPDEV;
int vm_swap_size;
void
@@ -138,11 +138,9 @@ swapon(p, uap, retval)
int *retval;
{
register struct vnode *vp;
-register struct swdevt *sp;
dev_t dev;
-int error,i;
struct nameidata nd;
-struct vattr attr;
+int error;
error = suser(p->p_ucred, &p->p_acflag);
if (error)
@@ -207,9 +205,7 @@ swaponvp(p, vp, dev, nblks)
register swblk_t vsbase;
register long blk;
swblk_t dvbase;
-struct swdevt *swp;
int error;
-int perdev;
for (sp = swdevt, index = 0 ; index < nswdev; index++, sp++) {
if (sp->sw_vp == vp)

vnode_pager.c

@@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
-* $Id: vnode_pager.c,v 1.51 1995/10/23 02:23:29 dyson Exp $
+* $Id: vnode_pager.c,v 1.52 1995/10/30 17:56:30 bde Exp $
*/
/*
@@ -197,7 +197,7 @@ vnode_pager_haspage(object, offset, before, after)
{
struct vnode *vp = object->handle;
daddr_t bn;
-int err, run;
+int err;
daddr_t reqblock;
int poff;
int bsize;
@@ -863,8 +863,9 @@ vnode_pager_leaf_putpages(object, m, count, sync, rtvals)
rtvals[i] = VM_PAGER_BAD;
}
if (ncount == 0) {
-printf("vnode_pager_putpages: write past end of file: %d, %d\n",
-m[0]->offset, object->un_pager.vnp.vnp_size);
+printf("vnode_pager_putpages: write past end of file: %ld, %ld\n",
+m[0]->offset,
+object->un_pager.vnp.vnp_size);
return rtvals[0];
}
}
@@ -892,7 +893,8 @@ vnode_pager_leaf_putpages(object, m, count, sync, rtvals)
printf("vnode_pager_putpages: I/O error %d\n", error);
}
if (auio.uio_resid) {
-printf("vnode_pager_putpages: residual I/O %d at %d\n", auio.uio_resid, m[0]->offset);
+printf("vnode_pager_putpages: residual I/O %d at %ld\n",
+auio.uio_resid, m[0]->offset);
}
for (i = 0; i < count; i++) {
m[i]->busy--;